problem_id
stringlengths
18
22
source
stringclasses
1 value
task_type
stringclasses
1 value
in_source_id
stringlengths
13
58
prompt
stringlengths
1.1k
25.4k
golden_diff
stringlengths
145
5.13k
verification_info
stringlengths
582
39.1k
num_tokens
int64
271
4.1k
num_tokens_diff
int64
47
1.02k
gh_patches_debug_23020
rasdani/github-patches
git_diff
Project-MONAI__MONAI-6922
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- robust username masking in print_config https://github.com/Project-MONAI/MONAI/blob/6f5005fddfaf935cc9b723be823f446a09136d11/monai/config/deviceconfig.py#L103-L110 if home directories are in other locations, eg. networked directories, the regex patterns won't match. I'd suggest a more thorough approach is to get the username with `getpass.getuser()` or `os.path.basename(os.path.expanduser("~"))` then replace all instances of that string in the output with "<username>". This will catch more places but will miss paths that might contain other identifying names, but these should be very rare. _Originally posted by @ericspod in https://github.com/Project-MONAI/MONAI/issues/6913#issuecomment-1699093111_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `monai/config/deviceconfig.py` Content: ``` 1 # Copyright (c) MONAI Consortium 2 # Licensed under the Apache License, Version 2.0 (the "License"); 3 # you may not use this file except in compliance with the License. 4 # You may obtain a copy of the License at 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # Unless required by applicable law or agreed to in writing, software 7 # distributed under the License is distributed on an "AS IS" BASIS, 8 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 # See the License for the specific language governing permissions and 10 # limitations under the License. 11 12 from __future__ import annotations 13 14 import os 15 import platform 16 import re 17 import sys 18 from collections import OrderedDict 19 from typing import TextIO 20 21 import numpy as np 22 import torch 23 24 import monai 25 from monai.utils.module import OptionalImportError, get_package_version, optional_import 26 27 try: 28 _, HAS_EXT = optional_import("monai._C") 29 USE_COMPILED = HAS_EXT and os.getenv("BUILD_MONAI", "0") == "1" 30 except (OptionalImportError, ImportError, AttributeError): 31 HAS_EXT = USE_COMPILED = False 32 33 USE_META_DICT = os.environ.get("USE_META_DICT", "0") == "1" # set to True for compatibility, use meta dict. 34 35 psutil, has_psutil = optional_import("psutil") 36 psutil_version = psutil.__version__ if has_psutil else "NOT INSTALLED or UNKNOWN VERSION." 37 38 __all__ = [ 39 "print_config", 40 "get_system_info", 41 "print_system_info", 42 "get_gpu_info", 43 "print_gpu_info", 44 "print_debug_info", 45 "USE_COMPILED", 46 "USE_META_DICT", 47 "IgniteInfo", 48 ] 49 50 51 def get_config_values(): 52 """ 53 Read the package versions into a dictionary. 54 """ 55 output = OrderedDict() 56 57 output["MONAI"] = monai.__version__ 58 output["Numpy"] = np.version.full_version 59 output["Pytorch"] = torch.__version__ 60 61 return output 62 63 64 def get_optional_config_values(): 65 """ 66 Read the optional package versions into a dictionary. 67 """ 68 output = OrderedDict() 69 70 output["Pytorch Ignite"] = get_package_version("ignite") 71 output["ITK"] = get_package_version("itk") 72 output["Nibabel"] = get_package_version("nibabel") 73 output["scikit-image"] = get_package_version("skimage") 74 output["scipy"] = get_package_version("scipy") 75 output["Pillow"] = get_package_version("PIL") 76 output["Tensorboard"] = get_package_version("tensorboard") 77 output["gdown"] = get_package_version("gdown") 78 output["TorchVision"] = get_package_version("torchvision") 79 output["tqdm"] = get_package_version("tqdm") 80 output["lmdb"] = get_package_version("lmdb") 81 output["psutil"] = psutil_version 82 output["pandas"] = get_package_version("pandas") 83 output["einops"] = get_package_version("einops") 84 output["transformers"] = get_package_version("transformers") 85 output["mlflow"] = get_package_version("mlflow") 86 output["pynrrd"] = get_package_version("nrrd") 87 output["clearml"] = get_package_version("clearml") 88 89 return output 90 91 92 def print_config(file=sys.stdout): 93 """ 94 Print the package versions to `file`. 95 96 Args: 97 file: `print()` text stream file. Defaults to `sys.stdout`. 98 """ 99 for k, v in get_config_values().items(): 100 print(f"{k} version: {v}", file=file, flush=True) 101 print(f"MONAI flags: HAS_EXT = {HAS_EXT}, USE_COMPILED = {USE_COMPILED}, USE_META_DICT = {USE_META_DICT}") 102 print(f"MONAI rev id: {monai.__revision_id__}") 103 masked_file_path = re.sub( 104 r"/home/\w+/", 105 "/home/<username>/", 106 re.sub( 107 r"/Users/\w+/", 108 "/Users/<username>/", 109 re.sub(r"C:\\Users\\\w+\\", r"C:\\Users\\<username>\\", monai.__file__), 110 ), 111 ) 112 print(f"MONAI __file__: {masked_file_path}", file=file, flush=True) 113 print("\nOptional dependencies:", file=file, flush=True) 114 for k, v in get_optional_config_values().items(): 115 print(f"{k} version: {v}", file=file, flush=True) 116 print("\nFor details about installing the optional dependencies, please visit:", file=file, flush=True) 117 print( 118 " https://docs.monai.io/en/latest/installation.html#installing-the-recommended-dependencies\n", 119 file=file, 120 flush=True, 121 ) 122 123 124 def _dict_append(in_dict, key, fn): 125 try: 126 in_dict[key] = fn() if callable(fn) else fn 127 except BaseException: 128 in_dict[key] = "UNKNOWN for given OS" 129 130 131 def get_system_info() -> OrderedDict: 132 """ 133 Get system info as an ordered dictionary. 134 """ 135 output: OrderedDict = OrderedDict() 136 137 _dict_append(output, "System", platform.system) 138 if output["System"] == "Windows": 139 _dict_append(output, "Win32 version", platform.win32_ver) 140 if hasattr(platform, "win32_edition"): 141 _dict_append(output, "Win32 edition", platform.win32_edition) 142 143 elif output["System"] == "Darwin": 144 _dict_append(output, "Mac version", lambda: platform.mac_ver()[0]) 145 else: 146 with open("/etc/os-release") as rel_f: 147 linux_ver = re.search(r'PRETTY_NAME="(.*)"', rel_f.read()) 148 if linux_ver: 149 _dict_append(output, "Linux version", lambda: linux_ver.group(1)) 150 151 _dict_append(output, "Platform", platform.platform) 152 _dict_append(output, "Processor", platform.processor) 153 _dict_append(output, "Machine", platform.machine) 154 _dict_append(output, "Python version", platform.python_version) 155 156 if not has_psutil: 157 _dict_append(output, "`psutil` missing", lambda: "run `pip install monai[psutil]`") 158 else: 159 p = psutil.Process() 160 with p.oneshot(): 161 _dict_append(output, "Process name", p.name) 162 _dict_append(output, "Command", p.cmdline) 163 _dict_append(output, "Open files", p.open_files) 164 _dict_append(output, "Num physical CPUs", lambda: psutil.cpu_count(logical=False)) 165 _dict_append(output, "Num logical CPUs", lambda: psutil.cpu_count(logical=True)) 166 _dict_append(output, "Num usable CPUs", lambda: len(psutil.Process().cpu_affinity())) 167 _dict_append(output, "CPU usage (%)", lambda: psutil.cpu_percent(percpu=True)) 168 _dict_append(output, "CPU freq. (MHz)", lambda: round(psutil.cpu_freq(percpu=False)[0])) 169 _dict_append( 170 output, 171 "Load avg. in last 1, 5, 15 mins (%)", 172 lambda: [round(x / psutil.cpu_count() * 100, 1) for x in psutil.getloadavg()], 173 ) 174 _dict_append(output, "Disk usage (%)", lambda: psutil.disk_usage(os.getcwd()).percent) 175 _dict_append( 176 output, 177 "Avg. sensor temp. (Celsius)", 178 lambda: np.round( 179 np.mean([item.current for sublist in psutil.sensors_temperatures().values() for item in sublist], 1) 180 ), 181 ) 182 mem = psutil.virtual_memory() 183 _dict_append(output, "Total physical memory (GB)", lambda: round(mem.total / 1024**3, 1)) 184 _dict_append(output, "Available memory (GB)", lambda: round(mem.available / 1024**3, 1)) 185 _dict_append(output, "Used memory (GB)", lambda: round(mem.used / 1024**3, 1)) 186 187 return output 188 189 190 def print_system_info(file: TextIO = sys.stdout) -> None: 191 """ 192 Print system info to `file`. Requires the optional library, `psutil`. 193 194 Args: 195 file: `print()` text stream file. Defaults to `sys.stdout`. 196 """ 197 if not has_psutil: 198 print("`psutil` required for `print_system_info`", file=file, flush=True) 199 else: 200 for k, v in get_system_info().items(): 201 print(f"{k}: {v}", file=file, flush=True) 202 203 204 def get_gpu_info() -> OrderedDict: 205 output: OrderedDict = OrderedDict() 206 207 num_gpus = torch.cuda.device_count() 208 _dict_append(output, "Num GPUs", lambda: num_gpus) 209 210 _dict_append(output, "Has CUDA", lambda: bool(torch.cuda.is_available())) 211 212 if output["Has CUDA"]: 213 _dict_append(output, "CUDA version", lambda: torch.version.cuda) 214 cudnn_ver = torch.backends.cudnn.version() 215 _dict_append(output, "cuDNN enabled", lambda: bool(cudnn_ver)) 216 _dict_append(output, "NVIDIA_TF32_OVERRIDE", os.environ.get("NVIDIA_TF32_OVERRIDE")) 217 _dict_append(output, "TORCH_ALLOW_TF32_CUBLAS_OVERRIDE", os.environ.get("TORCH_ALLOW_TF32_CUBLAS_OVERRIDE")) 218 219 if cudnn_ver: 220 _dict_append(output, "cuDNN version", lambda: cudnn_ver) 221 222 if num_gpus > 0: 223 _dict_append(output, "Current device", torch.cuda.current_device) 224 _dict_append(output, "Library compiled for CUDA architectures", torch.cuda.get_arch_list) 225 226 for gpu in range(num_gpus): 227 gpu_info = torch.cuda.get_device_properties(gpu) 228 _dict_append(output, f"GPU {gpu} Name", gpu_info.name) 229 _dict_append(output, f"GPU {gpu} Is integrated", bool(gpu_info.is_integrated)) 230 _dict_append(output, f"GPU {gpu} Is multi GPU board", bool(gpu_info.is_multi_gpu_board)) 231 _dict_append(output, f"GPU {gpu} Multi processor count", gpu_info.multi_processor_count) 232 _dict_append(output, f"GPU {gpu} Total memory (GB)", round(gpu_info.total_memory / 1024**3, 1)) 233 _dict_append(output, f"GPU {gpu} CUDA capability (maj.min)", f"{gpu_info.major}.{gpu_info.minor}") 234 235 return output 236 237 238 def print_gpu_info(file: TextIO = sys.stdout) -> None: 239 """ 240 Print GPU info to `file`. 241 242 Args: 243 file: `print()` text stream file. Defaults to `sys.stdout`. 244 """ 245 for k, v in get_gpu_info().items(): 246 print(f"{k}: {v}", file=file, flush=True) 247 248 249 def print_debug_info(file: TextIO = sys.stdout) -> None: 250 """ 251 Print config (installed dependencies, etc.) and system info for debugging. 252 253 Args: 254 file: `print()` text stream file. Defaults to `sys.stdout`. 255 """ 256 print("================================", file=file, flush=True) 257 print("Printing MONAI config...", file=file, flush=True) 258 print("================================", file=file, flush=True) 259 print_config(file) 260 print("\n================================", file=file, flush=True) 261 print("Printing system config...") 262 print("================================", file=file, flush=True) 263 print_system_info(file) 264 print("\n================================", file=file, flush=True) 265 print("Printing GPU config...") 266 print("================================", file=file, flush=True) 267 print_gpu_info(file) 268 269 270 class IgniteInfo: 271 """ 272 Config information of the PyTorch ignite package. 273 274 """ 275 276 OPT_IMPORT_VERSION = "0.4.4" 277 278 279 if __name__ == "__main__": 280 print_debug_info() 281 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/monai/config/deviceconfig.py b/monai/config/deviceconfig.py --- a/monai/config/deviceconfig.py +++ b/monai/config/deviceconfig.py @@ -11,6 +11,7 @@ from __future__ import annotations +import getpass import os import platform import re @@ -100,15 +101,8 @@ print(f"{k} version: {v}", file=file, flush=True) print(f"MONAI flags: HAS_EXT = {HAS_EXT}, USE_COMPILED = {USE_COMPILED}, USE_META_DICT = {USE_META_DICT}") print(f"MONAI rev id: {monai.__revision_id__}") - masked_file_path = re.sub( - r"/home/\w+/", - "/home/<username>/", - re.sub( - r"/Users/\w+/", - "/Users/<username>/", - re.sub(r"C:\\Users\\\w+\\", r"C:\\Users\\<username>\\", monai.__file__), - ), - ) + username = getpass.getuser() + masked_file_path = re.sub(username, "<username>", monai.__file__) print(f"MONAI __file__: {masked_file_path}", file=file, flush=True) print("\nOptional dependencies:", file=file, flush=True) for k, v in get_optional_config_values().items():
{"golden_diff": "diff --git a/monai/config/deviceconfig.py b/monai/config/deviceconfig.py\n--- a/monai/config/deviceconfig.py\n+++ b/monai/config/deviceconfig.py\n@@ -11,6 +11,7 @@\n \n from __future__ import annotations\n \n+import getpass\n import os\n import platform\n import re\n@@ -100,15 +101,8 @@\n print(f\"{k} version: {v}\", file=file, flush=True)\n print(f\"MONAI flags: HAS_EXT = {HAS_EXT}, USE_COMPILED = {USE_COMPILED}, USE_META_DICT = {USE_META_DICT}\")\n print(f\"MONAI rev id: {monai.__revision_id__}\")\n- masked_file_path = re.sub(\n- r\"/home/\\w+/\",\n- \"/home/<username>/\",\n- re.sub(\n- r\"/Users/\\w+/\",\n- \"/Users/<username>/\",\n- re.sub(r\"C:\\\\Users\\\\\\w+\\\\\", r\"C:\\\\Users\\\\<username>\\\\\", monai.__file__),\n- ),\n- )\n+ username = getpass.getuser()\n+ masked_file_path = re.sub(username, \"<username>\", monai.__file__)\n print(f\"MONAI __file__: {masked_file_path}\", file=file, flush=True)\n print(\"\\nOptional dependencies:\", file=file, flush=True)\n for k, v in get_optional_config_values().items():\n", "issue": "robust username masking in print_config\nhttps://github.com/Project-MONAI/MONAI/blob/6f5005fddfaf935cc9b723be823f446a09136d11/monai/config/deviceconfig.py#L103-L110\r\nif home directories are in other locations, eg. networked directories, the regex patterns won't match. I'd suggest a more thorough approach is to get the username with `getpass.getuser()` or `os.path.basename(os.path.expanduser(\"~\"))` then replace all instances of that string in the output with \"<username>\". This will catch more places but will miss paths that might contain other identifying names, but these should be very rare.\r\n\r\n_Originally posted by @ericspod in https://github.com/Project-MONAI/MONAI/issues/6913#issuecomment-1699093111_\r\n \n", "before_files": [{"content": "# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import annotations\n\nimport os\nimport platform\nimport re\nimport sys\nfrom collections import OrderedDict\nfrom typing import TextIO\n\nimport numpy as np\nimport torch\n\nimport monai\nfrom monai.utils.module import OptionalImportError, get_package_version, optional_import\n\ntry:\n _, HAS_EXT = optional_import(\"monai._C\")\n USE_COMPILED = HAS_EXT and os.getenv(\"BUILD_MONAI\", \"0\") == \"1\"\nexcept (OptionalImportError, ImportError, AttributeError):\n HAS_EXT = USE_COMPILED = False\n\nUSE_META_DICT = os.environ.get(\"USE_META_DICT\", \"0\") == \"1\" # set to True for compatibility, use meta dict.\n\npsutil, has_psutil = optional_import(\"psutil\")\npsutil_version = psutil.__version__ if has_psutil else \"NOT INSTALLED or UNKNOWN VERSION.\"\n\n__all__ = [\n \"print_config\",\n \"get_system_info\",\n \"print_system_info\",\n \"get_gpu_info\",\n \"print_gpu_info\",\n \"print_debug_info\",\n \"USE_COMPILED\",\n \"USE_META_DICT\",\n \"IgniteInfo\",\n]\n\n\ndef get_config_values():\n \"\"\"\n Read the package versions into a dictionary.\n \"\"\"\n output = OrderedDict()\n\n output[\"MONAI\"] = monai.__version__\n output[\"Numpy\"] = np.version.full_version\n output[\"Pytorch\"] = torch.__version__\n\n return output\n\n\ndef get_optional_config_values():\n \"\"\"\n Read the optional package versions into a dictionary.\n \"\"\"\n output = OrderedDict()\n\n output[\"Pytorch Ignite\"] = get_package_version(\"ignite\")\n output[\"ITK\"] = get_package_version(\"itk\")\n output[\"Nibabel\"] = get_package_version(\"nibabel\")\n output[\"scikit-image\"] = get_package_version(\"skimage\")\n output[\"scipy\"] = get_package_version(\"scipy\")\n output[\"Pillow\"] = get_package_version(\"PIL\")\n output[\"Tensorboard\"] = get_package_version(\"tensorboard\")\n output[\"gdown\"] = get_package_version(\"gdown\")\n output[\"TorchVision\"] = get_package_version(\"torchvision\")\n output[\"tqdm\"] = get_package_version(\"tqdm\")\n output[\"lmdb\"] = get_package_version(\"lmdb\")\n output[\"psutil\"] = psutil_version\n output[\"pandas\"] = get_package_version(\"pandas\")\n output[\"einops\"] = get_package_version(\"einops\")\n output[\"transformers\"] = get_package_version(\"transformers\")\n output[\"mlflow\"] = get_package_version(\"mlflow\")\n output[\"pynrrd\"] = get_package_version(\"nrrd\")\n output[\"clearml\"] = get_package_version(\"clearml\")\n\n return output\n\n\ndef print_config(file=sys.stdout):\n \"\"\"\n Print the package versions to `file`.\n\n Args:\n file: `print()` text stream file. Defaults to `sys.stdout`.\n \"\"\"\n for k, v in get_config_values().items():\n print(f\"{k} version: {v}\", file=file, flush=True)\n print(f\"MONAI flags: HAS_EXT = {HAS_EXT}, USE_COMPILED = {USE_COMPILED}, USE_META_DICT = {USE_META_DICT}\")\n print(f\"MONAI rev id: {monai.__revision_id__}\")\n masked_file_path = re.sub(\n r\"/home/\\w+/\",\n \"/home/<username>/\",\n re.sub(\n r\"/Users/\\w+/\",\n \"/Users/<username>/\",\n re.sub(r\"C:\\\\Users\\\\\\w+\\\\\", r\"C:\\\\Users\\\\<username>\\\\\", monai.__file__),\n ),\n )\n print(f\"MONAI __file__: {masked_file_path}\", file=file, flush=True)\n print(\"\\nOptional dependencies:\", file=file, flush=True)\n for k, v in get_optional_config_values().items():\n print(f\"{k} version: {v}\", file=file, flush=True)\n print(\"\\nFor details about installing the optional dependencies, please visit:\", file=file, flush=True)\n print(\n \" https://docs.monai.io/en/latest/installation.html#installing-the-recommended-dependencies\\n\",\n file=file,\n flush=True,\n )\n\n\ndef _dict_append(in_dict, key, fn):\n try:\n in_dict[key] = fn() if callable(fn) else fn\n except BaseException:\n in_dict[key] = \"UNKNOWN for given OS\"\n\n\ndef get_system_info() -> OrderedDict:\n \"\"\"\n Get system info as an ordered dictionary.\n \"\"\"\n output: OrderedDict = OrderedDict()\n\n _dict_append(output, \"System\", platform.system)\n if output[\"System\"] == \"Windows\":\n _dict_append(output, \"Win32 version\", platform.win32_ver)\n if hasattr(platform, \"win32_edition\"):\n _dict_append(output, \"Win32 edition\", platform.win32_edition)\n\n elif output[\"System\"] == \"Darwin\":\n _dict_append(output, \"Mac version\", lambda: platform.mac_ver()[0])\n else:\n with open(\"/etc/os-release\") as rel_f:\n linux_ver = re.search(r'PRETTY_NAME=\"(.*)\"', rel_f.read())\n if linux_ver:\n _dict_append(output, \"Linux version\", lambda: linux_ver.group(1))\n\n _dict_append(output, \"Platform\", platform.platform)\n _dict_append(output, \"Processor\", platform.processor)\n _dict_append(output, \"Machine\", platform.machine)\n _dict_append(output, \"Python version\", platform.python_version)\n\n if not has_psutil:\n _dict_append(output, \"`psutil` missing\", lambda: \"run `pip install monai[psutil]`\")\n else:\n p = psutil.Process()\n with p.oneshot():\n _dict_append(output, \"Process name\", p.name)\n _dict_append(output, \"Command\", p.cmdline)\n _dict_append(output, \"Open files\", p.open_files)\n _dict_append(output, \"Num physical CPUs\", lambda: psutil.cpu_count(logical=False))\n _dict_append(output, \"Num logical CPUs\", lambda: psutil.cpu_count(logical=True))\n _dict_append(output, \"Num usable CPUs\", lambda: len(psutil.Process().cpu_affinity()))\n _dict_append(output, \"CPU usage (%)\", lambda: psutil.cpu_percent(percpu=True))\n _dict_append(output, \"CPU freq. (MHz)\", lambda: round(psutil.cpu_freq(percpu=False)[0]))\n _dict_append(\n output,\n \"Load avg. in last 1, 5, 15 mins (%)\",\n lambda: [round(x / psutil.cpu_count() * 100, 1) for x in psutil.getloadavg()],\n )\n _dict_append(output, \"Disk usage (%)\", lambda: psutil.disk_usage(os.getcwd()).percent)\n _dict_append(\n output,\n \"Avg. sensor temp. (Celsius)\",\n lambda: np.round(\n np.mean([item.current for sublist in psutil.sensors_temperatures().values() for item in sublist], 1)\n ),\n )\n mem = psutil.virtual_memory()\n _dict_append(output, \"Total physical memory (GB)\", lambda: round(mem.total / 1024**3, 1))\n _dict_append(output, \"Available memory (GB)\", lambda: round(mem.available / 1024**3, 1))\n _dict_append(output, \"Used memory (GB)\", lambda: round(mem.used / 1024**3, 1))\n\n return output\n\n\ndef print_system_info(file: TextIO = sys.stdout) -> None:\n \"\"\"\n Print system info to `file`. Requires the optional library, `psutil`.\n\n Args:\n file: `print()` text stream file. Defaults to `sys.stdout`.\n \"\"\"\n if not has_psutil:\n print(\"`psutil` required for `print_system_info`\", file=file, flush=True)\n else:\n for k, v in get_system_info().items():\n print(f\"{k}: {v}\", file=file, flush=True)\n\n\ndef get_gpu_info() -> OrderedDict:\n output: OrderedDict = OrderedDict()\n\n num_gpus = torch.cuda.device_count()\n _dict_append(output, \"Num GPUs\", lambda: num_gpus)\n\n _dict_append(output, \"Has CUDA\", lambda: bool(torch.cuda.is_available()))\n\n if output[\"Has CUDA\"]:\n _dict_append(output, \"CUDA version\", lambda: torch.version.cuda)\n cudnn_ver = torch.backends.cudnn.version()\n _dict_append(output, \"cuDNN enabled\", lambda: bool(cudnn_ver))\n _dict_append(output, \"NVIDIA_TF32_OVERRIDE\", os.environ.get(\"NVIDIA_TF32_OVERRIDE\"))\n _dict_append(output, \"TORCH_ALLOW_TF32_CUBLAS_OVERRIDE\", os.environ.get(\"TORCH_ALLOW_TF32_CUBLAS_OVERRIDE\"))\n\n if cudnn_ver:\n _dict_append(output, \"cuDNN version\", lambda: cudnn_ver)\n\n if num_gpus > 0:\n _dict_append(output, \"Current device\", torch.cuda.current_device)\n _dict_append(output, \"Library compiled for CUDA architectures\", torch.cuda.get_arch_list)\n\n for gpu in range(num_gpus):\n gpu_info = torch.cuda.get_device_properties(gpu)\n _dict_append(output, f\"GPU {gpu} Name\", gpu_info.name)\n _dict_append(output, f\"GPU {gpu} Is integrated\", bool(gpu_info.is_integrated))\n _dict_append(output, f\"GPU {gpu} Is multi GPU board\", bool(gpu_info.is_multi_gpu_board))\n _dict_append(output, f\"GPU {gpu} Multi processor count\", gpu_info.multi_processor_count)\n _dict_append(output, f\"GPU {gpu} Total memory (GB)\", round(gpu_info.total_memory / 1024**3, 1))\n _dict_append(output, f\"GPU {gpu} CUDA capability (maj.min)\", f\"{gpu_info.major}.{gpu_info.minor}\")\n\n return output\n\n\ndef print_gpu_info(file: TextIO = sys.stdout) -> None:\n \"\"\"\n Print GPU info to `file`.\n\n Args:\n file: `print()` text stream file. Defaults to `sys.stdout`.\n \"\"\"\n for k, v in get_gpu_info().items():\n print(f\"{k}: {v}\", file=file, flush=True)\n\n\ndef print_debug_info(file: TextIO = sys.stdout) -> None:\n \"\"\"\n Print config (installed dependencies, etc.) and system info for debugging.\n\n Args:\n file: `print()` text stream file. Defaults to `sys.stdout`.\n \"\"\"\n print(\"================================\", file=file, flush=True)\n print(\"Printing MONAI config...\", file=file, flush=True)\n print(\"================================\", file=file, flush=True)\n print_config(file)\n print(\"\\n================================\", file=file, flush=True)\n print(\"Printing system config...\")\n print(\"================================\", file=file, flush=True)\n print_system_info(file)\n print(\"\\n================================\", file=file, flush=True)\n print(\"Printing GPU config...\")\n print(\"================================\", file=file, flush=True)\n print_gpu_info(file)\n\n\nclass IgniteInfo:\n \"\"\"\n Config information of the PyTorch ignite package.\n\n \"\"\"\n\n OPT_IMPORT_VERSION = \"0.4.4\"\n\n\nif __name__ == \"__main__\":\n print_debug_info()\n", "path": "monai/config/deviceconfig.py"}], "after_files": [{"content": "# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import annotations\n\nimport getpass\nimport os\nimport platform\nimport re\nimport sys\nfrom collections import OrderedDict\nfrom typing import TextIO\n\nimport numpy as np\nimport torch\n\nimport monai\nfrom monai.utils.module import OptionalImportError, get_package_version, optional_import\n\ntry:\n _, HAS_EXT = optional_import(\"monai._C\")\n USE_COMPILED = HAS_EXT and os.getenv(\"BUILD_MONAI\", \"0\") == \"1\"\nexcept (OptionalImportError, ImportError, AttributeError):\n HAS_EXT = USE_COMPILED = False\n\nUSE_META_DICT = os.environ.get(\"USE_META_DICT\", \"0\") == \"1\" # set to True for compatibility, use meta dict.\n\npsutil, has_psutil = optional_import(\"psutil\")\npsutil_version = psutil.__version__ if has_psutil else \"NOT INSTALLED or UNKNOWN VERSION.\"\n\n__all__ = [\n \"print_config\",\n \"get_system_info\",\n \"print_system_info\",\n \"get_gpu_info\",\n \"print_gpu_info\",\n \"print_debug_info\",\n \"USE_COMPILED\",\n \"USE_META_DICT\",\n \"IgniteInfo\",\n]\n\n\ndef get_config_values():\n \"\"\"\n Read the package versions into a dictionary.\n \"\"\"\n output = OrderedDict()\n\n output[\"MONAI\"] = monai.__version__\n output[\"Numpy\"] = np.version.full_version\n output[\"Pytorch\"] = torch.__version__\n\n return output\n\n\ndef get_optional_config_values():\n \"\"\"\n Read the optional package versions into a dictionary.\n \"\"\"\n output = OrderedDict()\n\n output[\"Pytorch Ignite\"] = get_package_version(\"ignite\")\n output[\"ITK\"] = get_package_version(\"itk\")\n output[\"Nibabel\"] = get_package_version(\"nibabel\")\n output[\"scikit-image\"] = get_package_version(\"skimage\")\n output[\"scipy\"] = get_package_version(\"scipy\")\n output[\"Pillow\"] = get_package_version(\"PIL\")\n output[\"Tensorboard\"] = get_package_version(\"tensorboard\")\n output[\"gdown\"] = get_package_version(\"gdown\")\n output[\"TorchVision\"] = get_package_version(\"torchvision\")\n output[\"tqdm\"] = get_package_version(\"tqdm\")\n output[\"lmdb\"] = get_package_version(\"lmdb\")\n output[\"psutil\"] = psutil_version\n output[\"pandas\"] = get_package_version(\"pandas\")\n output[\"einops\"] = get_package_version(\"einops\")\n output[\"transformers\"] = get_package_version(\"transformers\")\n output[\"mlflow\"] = get_package_version(\"mlflow\")\n output[\"pynrrd\"] = get_package_version(\"nrrd\")\n output[\"clearml\"] = get_package_version(\"clearml\")\n\n return output\n\n\ndef print_config(file=sys.stdout):\n \"\"\"\n Print the package versions to `file`.\n\n Args:\n file: `print()` text stream file. Defaults to `sys.stdout`.\n \"\"\"\n for k, v in get_config_values().items():\n print(f\"{k} version: {v}\", file=file, flush=True)\n print(f\"MONAI flags: HAS_EXT = {HAS_EXT}, USE_COMPILED = {USE_COMPILED}, USE_META_DICT = {USE_META_DICT}\")\n print(f\"MONAI rev id: {monai.__revision_id__}\")\n username = getpass.getuser()\n masked_file_path = re.sub(username, \"<username>\", monai.__file__)\n print(f\"MONAI __file__: {masked_file_path}\", file=file, flush=True)\n print(\"\\nOptional dependencies:\", file=file, flush=True)\n for k, v in get_optional_config_values().items():\n print(f\"{k} version: {v}\", file=file, flush=True)\n print(\"\\nFor details about installing the optional dependencies, please visit:\", file=file, flush=True)\n print(\n \" https://docs.monai.io/en/latest/installation.html#installing-the-recommended-dependencies\\n\",\n file=file,\n flush=True,\n )\n\n\ndef _dict_append(in_dict, key, fn):\n try:\n in_dict[key] = fn() if callable(fn) else fn\n except BaseException:\n in_dict[key] = \"UNKNOWN for given OS\"\n\n\ndef get_system_info() -> OrderedDict:\n \"\"\"\n Get system info as an ordered dictionary.\n \"\"\"\n output: OrderedDict = OrderedDict()\n\n _dict_append(output, \"System\", platform.system)\n if output[\"System\"] == \"Windows\":\n _dict_append(output, \"Win32 version\", platform.win32_ver)\n if hasattr(platform, \"win32_edition\"):\n _dict_append(output, \"Win32 edition\", platform.win32_edition)\n\n elif output[\"System\"] == \"Darwin\":\n _dict_append(output, \"Mac version\", lambda: platform.mac_ver()[0])\n else:\n with open(\"/etc/os-release\") as rel_f:\n linux_ver = re.search(r'PRETTY_NAME=\"(.*)\"', rel_f.read())\n if linux_ver:\n _dict_append(output, \"Linux version\", lambda: linux_ver.group(1))\n\n _dict_append(output, \"Platform\", platform.platform)\n _dict_append(output, \"Processor\", platform.processor)\n _dict_append(output, \"Machine\", platform.machine)\n _dict_append(output, \"Python version\", platform.python_version)\n\n if not has_psutil:\n _dict_append(output, \"`psutil` missing\", lambda: \"run `pip install monai[psutil]`\")\n else:\n p = psutil.Process()\n with p.oneshot():\n _dict_append(output, \"Process name\", p.name)\n _dict_append(output, \"Command\", p.cmdline)\n _dict_append(output, \"Open files\", p.open_files)\n _dict_append(output, \"Num physical CPUs\", lambda: psutil.cpu_count(logical=False))\n _dict_append(output, \"Num logical CPUs\", lambda: psutil.cpu_count(logical=True))\n _dict_append(output, \"Num usable CPUs\", lambda: len(psutil.Process().cpu_affinity()))\n _dict_append(output, \"CPU usage (%)\", lambda: psutil.cpu_percent(percpu=True))\n _dict_append(output, \"CPU freq. (MHz)\", lambda: round(psutil.cpu_freq(percpu=False)[0]))\n _dict_append(\n output,\n \"Load avg. in last 1, 5, 15 mins (%)\",\n lambda: [round(x / psutil.cpu_count() * 100, 1) for x in psutil.getloadavg()],\n )\n _dict_append(output, \"Disk usage (%)\", lambda: psutil.disk_usage(os.getcwd()).percent)\n _dict_append(\n output,\n \"Avg. sensor temp. (Celsius)\",\n lambda: np.round(\n np.mean([item.current for sublist in psutil.sensors_temperatures().values() for item in sublist], 1)\n ),\n )\n mem = psutil.virtual_memory()\n _dict_append(output, \"Total physical memory (GB)\", lambda: round(mem.total / 1024**3, 1))\n _dict_append(output, \"Available memory (GB)\", lambda: round(mem.available / 1024**3, 1))\n _dict_append(output, \"Used memory (GB)\", lambda: round(mem.used / 1024**3, 1))\n\n return output\n\n\ndef print_system_info(file: TextIO = sys.stdout) -> None:\n \"\"\"\n Print system info to `file`. Requires the optional library, `psutil`.\n\n Args:\n file: `print()` text stream file. Defaults to `sys.stdout`.\n \"\"\"\n if not has_psutil:\n print(\"`psutil` required for `print_system_info`\", file=file, flush=True)\n else:\n for k, v in get_system_info().items():\n print(f\"{k}: {v}\", file=file, flush=True)\n\n\ndef get_gpu_info() -> OrderedDict:\n output: OrderedDict = OrderedDict()\n\n num_gpus = torch.cuda.device_count()\n _dict_append(output, \"Num GPUs\", lambda: num_gpus)\n\n _dict_append(output, \"Has CUDA\", lambda: bool(torch.cuda.is_available()))\n\n if output[\"Has CUDA\"]:\n _dict_append(output, \"CUDA version\", lambda: torch.version.cuda)\n cudnn_ver = torch.backends.cudnn.version()\n _dict_append(output, \"cuDNN enabled\", lambda: bool(cudnn_ver))\n _dict_append(output, \"NVIDIA_TF32_OVERRIDE\", os.environ.get(\"NVIDIA_TF32_OVERRIDE\"))\n _dict_append(output, \"TORCH_ALLOW_TF32_CUBLAS_OVERRIDE\", os.environ.get(\"TORCH_ALLOW_TF32_CUBLAS_OVERRIDE\"))\n\n if cudnn_ver:\n _dict_append(output, \"cuDNN version\", lambda: cudnn_ver)\n\n if num_gpus > 0:\n _dict_append(output, \"Current device\", torch.cuda.current_device)\n _dict_append(output, \"Library compiled for CUDA architectures\", torch.cuda.get_arch_list)\n\n for gpu in range(num_gpus):\n gpu_info = torch.cuda.get_device_properties(gpu)\n _dict_append(output, f\"GPU {gpu} Name\", gpu_info.name)\n _dict_append(output, f\"GPU {gpu} Is integrated\", bool(gpu_info.is_integrated))\n _dict_append(output, f\"GPU {gpu} Is multi GPU board\", bool(gpu_info.is_multi_gpu_board))\n _dict_append(output, f\"GPU {gpu} Multi processor count\", gpu_info.multi_processor_count)\n _dict_append(output, f\"GPU {gpu} Total memory (GB)\", round(gpu_info.total_memory / 1024**3, 1))\n _dict_append(output, f\"GPU {gpu} CUDA capability (maj.min)\", f\"{gpu_info.major}.{gpu_info.minor}\")\n\n return output\n\n\ndef print_gpu_info(file: TextIO = sys.stdout) -> None:\n \"\"\"\n Print GPU info to `file`.\n\n Args:\n file: `print()` text stream file. Defaults to `sys.stdout`.\n \"\"\"\n for k, v in get_gpu_info().items():\n print(f\"{k}: {v}\", file=file, flush=True)\n\n\ndef print_debug_info(file: TextIO = sys.stdout) -> None:\n \"\"\"\n Print config (installed dependencies, etc.) and system info for debugging.\n\n Args:\n file: `print()` text stream file. Defaults to `sys.stdout`.\n \"\"\"\n print(\"================================\", file=file, flush=True)\n print(\"Printing MONAI config...\", file=file, flush=True)\n print(\"================================\", file=file, flush=True)\n print_config(file)\n print(\"\\n================================\", file=file, flush=True)\n print(\"Printing system config...\")\n print(\"================================\", file=file, flush=True)\n print_system_info(file)\n print(\"\\n================================\", file=file, flush=True)\n print(\"Printing GPU config...\")\n print(\"================================\", file=file, flush=True)\n print_gpu_info(file)\n\n\nclass IgniteInfo:\n \"\"\"\n Config information of the PyTorch ignite package.\n\n \"\"\"\n\n OPT_IMPORT_VERSION = \"0.4.4\"\n\n\nif __name__ == \"__main__\":\n print_debug_info()\n", "path": "monai/config/deviceconfig.py"}]}
3,822
314
gh_patches_debug_20868
rasdani/github-patches
git_diff
pytorch__vision-2654
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Docs of some functions written are missing ## 📚 Documentation A simple issue, Docs are missing on the torchvision website for following functions written in torchvision. I guess we should add these docs on the webpage, as end-users will benefit from using these functions. Most people will not look at source code to find these functions but refer to docs. Missing docs that I found - [x] Image reading functions [here](https://github.com/pytorch/vision/blob/master/torchvision/io/image.py) We have docs for video io functions, so maybe image should too be there. - [x] Torchvision ops from [boxes.py](https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py). Docs are added for NMS. but we are missing IoU, Box area and some classes. Partly fixed in #2642 Please do let me know if some other docs or missing as well. Also, I can raise a PR to fix these, please do let me know if it is needed! --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `torchvision/io/__init__.py` Content: ``` 1 from ._video_opt import ( 2 Timebase, 3 VideoMetaData, 4 _HAS_VIDEO_OPT, 5 _probe_video_from_file, 6 _probe_video_from_memory, 7 _read_video_from_file, 8 _read_video_from_memory, 9 _read_video_timestamps_from_file, 10 _read_video_timestamps_from_memory, 11 ) 12 from .video import ( 13 read_video, 14 read_video_timestamps, 15 write_video, 16 ) 17 18 19 __all__ = [ 20 "write_video", 21 "read_video", 22 "read_video_timestamps", 23 "_read_video_from_file", 24 "_read_video_timestamps_from_file", 25 "_probe_video_from_file", 26 "_read_video_from_memory", 27 "_read_video_timestamps_from_memory", 28 "_probe_video_from_memory", 29 "_HAS_VIDEO_OPT", 30 "_read_video_clip_from_memory", 31 "_read_video_meta_data", 32 "VideoMetaData", 33 "Timebase" 34 ] 35 ``` Path: `torchvision/ops/__init__.py` Content: ``` 1 from .boxes import nms, box_iou 2 from .new_empty_tensor import _new_empty_tensor 3 from .deform_conv import deform_conv2d, DeformConv2d 4 from .roi_align import roi_align, RoIAlign 5 from .roi_pool import roi_pool, RoIPool 6 from .ps_roi_align import ps_roi_align, PSRoIAlign 7 from .ps_roi_pool import ps_roi_pool, PSRoIPool 8 from .poolers import MultiScaleRoIAlign 9 from .feature_pyramid_network import FeaturePyramidNetwork 10 11 from ._register_onnx_ops import _register_custom_op 12 13 _register_custom_op() 14 15 16 __all__ = [ 17 'deform_conv2d', 'DeformConv2d', 'nms', 'roi_align', 'RoIAlign', 'roi_pool', 18 'RoIPool', '_new_empty_tensor', 'ps_roi_align', 'PSRoIAlign', 'ps_roi_pool', 19 'PSRoIPool', 'MultiScaleRoIAlign', 'FeaturePyramidNetwork' 20 ] 21 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/torchvision/io/__init__.py b/torchvision/io/__init__.py --- a/torchvision/io/__init__.py +++ b/torchvision/io/__init__.py @@ -15,7 +15,6 @@ write_video, ) - __all__ = [ "write_video", "read_video", diff --git a/torchvision/ops/__init__.py b/torchvision/ops/__init__.py --- a/torchvision/ops/__init__.py +++ b/torchvision/ops/__init__.py @@ -1,4 +1,4 @@ -from .boxes import nms, box_iou +from .boxes import nms, batched_nms, remove_small_boxes, clip_boxes_to_image, box_area, box_iou from .new_empty_tensor import _new_empty_tensor from .deform_conv import deform_conv2d, DeformConv2d from .roi_align import roi_align, RoIAlign @@ -14,7 +14,8 @@ __all__ = [ - 'deform_conv2d', 'DeformConv2d', 'nms', 'roi_align', 'RoIAlign', 'roi_pool', + 'deform_conv2d', 'DeformConv2d', 'nms', 'batched_nms', 'remove_small_boxes', + 'clip_boxes_to_image', 'box_area', 'box_iou', 'roi_align', 'RoIAlign', 'roi_pool', 'RoIPool', '_new_empty_tensor', 'ps_roi_align', 'PSRoIAlign', 'ps_roi_pool', 'PSRoIPool', 'MultiScaleRoIAlign', 'FeaturePyramidNetwork' ]
{"golden_diff": "diff --git a/torchvision/io/__init__.py b/torchvision/io/__init__.py\n--- a/torchvision/io/__init__.py\n+++ b/torchvision/io/__init__.py\n@@ -15,7 +15,6 @@\n write_video,\n )\n \n-\n __all__ = [\n \"write_video\",\n \"read_video\",\ndiff --git a/torchvision/ops/__init__.py b/torchvision/ops/__init__.py\n--- a/torchvision/ops/__init__.py\n+++ b/torchvision/ops/__init__.py\n@@ -1,4 +1,4 @@\n-from .boxes import nms, box_iou\n+from .boxes import nms, batched_nms, remove_small_boxes, clip_boxes_to_image, box_area, box_iou\n from .new_empty_tensor import _new_empty_tensor\n from .deform_conv import deform_conv2d, DeformConv2d\n from .roi_align import roi_align, RoIAlign\n@@ -14,7 +14,8 @@\n \n \n __all__ = [\n- 'deform_conv2d', 'DeformConv2d', 'nms', 'roi_align', 'RoIAlign', 'roi_pool',\n+ 'deform_conv2d', 'DeformConv2d', 'nms', 'batched_nms', 'remove_small_boxes',\n+ 'clip_boxes_to_image', 'box_area', 'box_iou', 'roi_align', 'RoIAlign', 'roi_pool',\n 'RoIPool', '_new_empty_tensor', 'ps_roi_align', 'PSRoIAlign', 'ps_roi_pool',\n 'PSRoIPool', 'MultiScaleRoIAlign', 'FeaturePyramidNetwork'\n ]\n", "issue": "Docs of some functions written are missing\n## \ud83d\udcda Documentation\r\n\r\nA simple issue, Docs are missing on the torchvision website for following functions written in torchvision.\r\n\r\nI guess we should add these docs on the webpage, as end-users will benefit from using these functions. \r\n\r\nMost people will not look at source code to find these functions but refer to docs.\r\n\r\nMissing docs that I found\r\n\r\n- [x] Image reading functions [here](https://github.com/pytorch/vision/blob/master/torchvision/io/image.py)\r\nWe have docs for video io functions, so maybe image should too be there.\r\n\r\n- [x] Torchvision ops from [boxes.py](https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py). Docs are added for NMS. but we are missing IoU, Box area and some classes. Partly fixed in #2642 \r\n\r\nPlease do let me know if some other docs or missing as well.\r\n\r\nAlso, I can raise a PR to fix these, please do let me know if it is needed!\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from ._video_opt import (\n Timebase,\n VideoMetaData,\n _HAS_VIDEO_OPT,\n _probe_video_from_file,\n _probe_video_from_memory,\n _read_video_from_file,\n _read_video_from_memory,\n _read_video_timestamps_from_file,\n _read_video_timestamps_from_memory,\n)\nfrom .video import (\n read_video,\n read_video_timestamps,\n write_video,\n)\n\n\n__all__ = [\n \"write_video\",\n \"read_video\",\n \"read_video_timestamps\",\n \"_read_video_from_file\",\n \"_read_video_timestamps_from_file\",\n \"_probe_video_from_file\",\n \"_read_video_from_memory\",\n \"_read_video_timestamps_from_memory\",\n \"_probe_video_from_memory\",\n \"_HAS_VIDEO_OPT\",\n \"_read_video_clip_from_memory\",\n \"_read_video_meta_data\",\n \"VideoMetaData\",\n \"Timebase\"\n]\n", "path": "torchvision/io/__init__.py"}, {"content": "from .boxes import nms, box_iou\nfrom .new_empty_tensor import _new_empty_tensor\nfrom .deform_conv import deform_conv2d, DeformConv2d\nfrom .roi_align import roi_align, RoIAlign\nfrom .roi_pool import roi_pool, RoIPool\nfrom .ps_roi_align import ps_roi_align, PSRoIAlign\nfrom .ps_roi_pool import ps_roi_pool, PSRoIPool\nfrom .poolers import MultiScaleRoIAlign\nfrom .feature_pyramid_network import FeaturePyramidNetwork\n\nfrom ._register_onnx_ops import _register_custom_op\n\n_register_custom_op()\n\n\n__all__ = [\n 'deform_conv2d', 'DeformConv2d', 'nms', 'roi_align', 'RoIAlign', 'roi_pool',\n 'RoIPool', '_new_empty_tensor', 'ps_roi_align', 'PSRoIAlign', 'ps_roi_pool',\n 'PSRoIPool', 'MultiScaleRoIAlign', 'FeaturePyramidNetwork'\n]\n", "path": "torchvision/ops/__init__.py"}], "after_files": [{"content": "from ._video_opt import (\n Timebase,\n VideoMetaData,\n _HAS_VIDEO_OPT,\n _probe_video_from_file,\n _probe_video_from_memory,\n _read_video_from_file,\n _read_video_from_memory,\n _read_video_timestamps_from_file,\n _read_video_timestamps_from_memory,\n)\nfrom .video import (\n read_video,\n read_video_timestamps,\n write_video,\n)\n\n__all__ = [\n \"write_video\",\n \"read_video\",\n \"read_video_timestamps\",\n \"_read_video_from_file\",\n \"_read_video_timestamps_from_file\",\n \"_probe_video_from_file\",\n \"_read_video_from_memory\",\n \"_read_video_timestamps_from_memory\",\n \"_probe_video_from_memory\",\n \"_HAS_VIDEO_OPT\",\n \"_read_video_clip_from_memory\",\n \"_read_video_meta_data\",\n \"VideoMetaData\",\n \"Timebase\"\n]\n", "path": "torchvision/io/__init__.py"}, {"content": "from .boxes import nms, batched_nms, remove_small_boxes, clip_boxes_to_image, box_area, box_iou\nfrom .new_empty_tensor import _new_empty_tensor\nfrom .deform_conv import deform_conv2d, DeformConv2d\nfrom .roi_align import roi_align, RoIAlign\nfrom .roi_pool import roi_pool, RoIPool\nfrom .ps_roi_align import ps_roi_align, PSRoIAlign\nfrom .ps_roi_pool import ps_roi_pool, PSRoIPool\nfrom .poolers import MultiScaleRoIAlign\nfrom .feature_pyramid_network import FeaturePyramidNetwork\n\nfrom ._register_onnx_ops import _register_custom_op\n\n_register_custom_op()\n\n\n__all__ = [\n 'deform_conv2d', 'DeformConv2d', 'nms', 'batched_nms', 'remove_small_boxes',\n 'clip_boxes_to_image', 'box_area', 'box_iou', 'roi_align', 'RoIAlign', 'roi_pool',\n 'RoIPool', '_new_empty_tensor', 'ps_roi_align', 'PSRoIAlign', 'ps_roi_pool',\n 'PSRoIPool', 'MultiScaleRoIAlign', 'FeaturePyramidNetwork'\n]\n", "path": "torchvision/ops/__init__.py"}]}
1,004
376
gh_patches_debug_30433
rasdani/github-patches
git_diff
nipy__nipype-2582
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- mrtrix3.ResponseSD - Handling of multiple b-values ### Summary When running ResponseSD() (and assuming EstimateFOD(), which has a similar input), current interface does not seem to handle multiple b-values (possibly due to the input `max_sh` defaulting to a single integer value). ### Actual behavior Get an error specifying the number of manually-defined lmax's does not match number of b-values (see last line). ```Command: mrconvert /home/tkai/Desktop/tmp2/genACTTractography/act_preproc_wf/MRConvert/dwi.mif /home/tkai/Desktop/tmp2/genACTTractography/act_preproc_wf/dwi2response/dwi2response-tmp-U774DL/dwi.mif -strides 0,0,0,1 -fslgrad /home/tkai/graham/scratch/WholeBrain/derivatives/prepdwi_0.0.6a/prepdwi/sub-5082/dwi/sub-5082_dwi_space-T1w_preproc.bvec /home/tkai/graham/scratch/WholeBrain/derivatives/prepdwi_0.0.6a/prepdwi/sub-5082/dwi/sub-5082_dwi_space-T1w_preproc.bval Command: dwi2mask /home/tkai/Desktop/tmp2/genACTTractography/act_preproc_wf/MRConvert/dwi.mif /home/tkai/Desktop/tmp2/genACTTractography/act_preproc_wf/dwi2response/dwi2response-tmp-U774DL/mask.mif Command: mrconvert /home/tkai/Desktop/tmp2/genACTTractography/act_preproc_wf/Generate5tt/5tt.mif /home/tkai/Desktop/tmp2/genACTTractography/act_preproc_wf/dwi2response/dwi2response-tmp-U774DL/5tt.mif dwi2response: Changing to temporary directory (/home/tkai/Desktop/tmp2/genACTTractography/act_preproc_wf/dwi2response/dwi2response-tmp-U774DL/) Command: 5ttcheck 5tt.mif dwi2response: [ERROR] Number of manually-defined lmax's (1) does not match number of b-values (3) ``` ### Expected behavior The lmax, if manually defined should allow for more than a single integer to match the number of b-values (e.g. -lmax 0,8,8 in this case), otherwise no default from the Nipype interface. At the moment, I've forked a copy of nipype and made a temporary solution with the input `max_sh` passing the expected input as a string, removing the default to run without error. ### How to replicate the behavior Use of multi-shell dwi data with mrt.ResponseSD and algorithm 'msmt_5tt'. ### Script/Workflow details Current pipeline code is stored at [https://github.com/kaitj/mrtpipelines](https://github.com/kaitj/mrtpipelines) ### Platform details: ``` python3 -c "import nipype; print(nipype.get_info()); print(nipype.__version__)" {'nibabel_version': '2.2.1', 'numpy_version': '1.14.3', 'nipype_version': '1.0.4-dev+g5a96ea5', 'commit_source': 'repository', 'sys_version': '3.5.2 (default, Nov 23 2017, 16:37:01) \n[GCC 5.4.0 20160609]', 'traits_version': '4.6.0', 'scipy_version': '1.1.0', 'pkg_path': '/home/tkai/git/nipype/nipype', 'sys_platform': 'linux', 'commit_hash': '5a96ea5', 'networkx_version': '2.1', 'sys_executable': '/usr/bin/python3'} 1.0.4-dev+g5a96ea5 ``` ### Execution environment Choose one - Container [Tag: ???] - My python environment inside container [Base Tag: ???] - My python environment outside container --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `nipype/interfaces/mrtrix3/preprocess.py` Content: ``` 1 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- 2 # vi: set ft=python sts=4 ts=4 sw=4 et: 3 # -*- coding: utf-8 -*- 4 from __future__ import (print_function, division, unicode_literals, 5 absolute_import) 6 7 import os.path as op 8 9 from ..base import (CommandLineInputSpec, CommandLine, traits, TraitedSpec, 10 File, isdefined, Undefined) 11 from .base import MRTrix3BaseInputSpec, MRTrix3Base 12 13 14 class ResponseSDInputSpec(MRTrix3BaseInputSpec): 15 algorithm = traits.Enum( 16 'msmt_5tt', 17 'dhollander', 18 'tournier', 19 'tax', 20 argstr='%s', 21 position=1, 22 mandatory=True, 23 desc='response estimation algorithm (multi-tissue)') 24 in_file = File( 25 exists=True, 26 argstr='%s', 27 position=-5, 28 mandatory=True, 29 desc='input DWI image') 30 mtt_file = File(argstr='%s', position=-4, desc='input 5tt image') 31 wm_file = File( 32 'wm.txt', 33 argstr='%s', 34 position=-3, 35 usedefault=True, 36 desc='output WM response text file') 37 gm_file = File( 38 argstr='%s', position=-2, desc='output GM response text file') 39 csf_file = File( 40 argstr='%s', position=-1, desc='output CSF response text file') 41 in_mask = File( 42 exists=True, argstr='-mask %s', desc='provide initial mask image') 43 max_sh = traits.Int( 44 8, usedefault=True, 45 argstr='-lmax %d', 46 desc='maximum harmonic degree of response function') 47 48 49 class ResponseSDOutputSpec(TraitedSpec): 50 wm_file = File(argstr='%s', desc='output WM response text file') 51 gm_file = File(argstr='%s', desc='output GM response text file') 52 csf_file = File(argstr='%s', desc='output CSF response text file') 53 54 55 class ResponseSD(MRTrix3Base): 56 """ 57 Estimate response function(s) for spherical deconvolution using the specified algorithm. 58 59 Example 60 ------- 61 62 >>> import nipype.interfaces.mrtrix3 as mrt 63 >>> resp = mrt.ResponseSD() 64 >>> resp.inputs.in_file = 'dwi.mif' 65 >>> resp.inputs.algorithm = 'tournier' 66 >>> resp.inputs.grad_fsl = ('bvecs', 'bvals') 67 >>> resp.cmdline # doctest: +ELLIPSIS 68 'dwi2response tournier -fslgrad bvecs bvals -lmax 8 dwi.mif wm.txt' 69 >>> resp.run() # doctest: +SKIP 70 """ 71 72 _cmd = 'dwi2response' 73 input_spec = ResponseSDInputSpec 74 output_spec = ResponseSDOutputSpec 75 76 def _list_outputs(self): 77 outputs = self.output_spec().get() 78 outputs['wm_file'] = op.abspath(self.inputs.wm_file) 79 if self.inputs.gm_file != Undefined: 80 outputs['gm_file'] = op.abspath(self.inputs.gm_file) 81 if self.inputs.csf_file != Undefined: 82 outputs['csf_file'] = op.abspath(self.inputs.csf_file) 83 return outputs 84 85 86 class ACTPrepareFSLInputSpec(CommandLineInputSpec): 87 in_file = File( 88 exists=True, 89 argstr='%s', 90 mandatory=True, 91 position=-2, 92 desc='input anatomical image') 93 94 out_file = File( 95 'act_5tt.mif', 96 argstr='%s', 97 mandatory=True, 98 position=-1, 99 usedefault=True, 100 desc='output file after processing') 101 102 103 class ACTPrepareFSLOutputSpec(TraitedSpec): 104 out_file = File(exists=True, desc='the output response file') 105 106 107 class ACTPrepareFSL(CommandLine): 108 """ 109 Generate anatomical information necessary for Anatomically 110 Constrained Tractography (ACT). 111 112 Example 113 ------- 114 115 >>> import nipype.interfaces.mrtrix3 as mrt 116 >>> prep = mrt.ACTPrepareFSL() 117 >>> prep.inputs.in_file = 'T1.nii.gz' 118 >>> prep.cmdline # doctest: +ELLIPSIS 119 'act_anat_prepare_fsl T1.nii.gz act_5tt.mif' 120 >>> prep.run() # doctest: +SKIP 121 """ 122 123 _cmd = 'act_anat_prepare_fsl' 124 input_spec = ACTPrepareFSLInputSpec 125 output_spec = ACTPrepareFSLOutputSpec 126 127 def _list_outputs(self): 128 outputs = self.output_spec().get() 129 outputs['out_file'] = op.abspath(self.inputs.out_file) 130 return outputs 131 132 133 class ReplaceFSwithFIRSTInputSpec(CommandLineInputSpec): 134 in_file = File( 135 exists=True, 136 argstr='%s', 137 mandatory=True, 138 position=-4, 139 desc='input anatomical image') 140 in_t1w = File( 141 exists=True, 142 argstr='%s', 143 mandatory=True, 144 position=-3, 145 desc='input T1 image') 146 in_config = File( 147 exists=True, 148 argstr='%s', 149 position=-2, 150 desc='connectome configuration file') 151 152 out_file = File( 153 'aparc+first.mif', 154 argstr='%s', 155 mandatory=True, 156 position=-1, 157 usedefault=True, 158 desc='output file after processing') 159 160 161 class ReplaceFSwithFIRSTOutputSpec(TraitedSpec): 162 out_file = File(exists=True, desc='the output response file') 163 164 165 class ReplaceFSwithFIRST(CommandLine): 166 """ 167 Replace deep gray matter structures segmented with FSL FIRST in a 168 FreeSurfer parcellation. 169 170 Example 171 ------- 172 173 >>> import nipype.interfaces.mrtrix3 as mrt 174 >>> prep = mrt.ReplaceFSwithFIRST() 175 >>> prep.inputs.in_file = 'aparc+aseg.nii' 176 >>> prep.inputs.in_t1w = 'T1.nii.gz' 177 >>> prep.inputs.in_config = 'mrtrix3_labelconfig.txt' 178 >>> prep.cmdline # doctest: +ELLIPSIS 179 'fs_parc_replace_sgm_first aparc+aseg.nii T1.nii.gz \ 180 mrtrix3_labelconfig.txt aparc+first.mif' 181 >>> prep.run() # doctest: +SKIP 182 """ 183 184 _cmd = 'fs_parc_replace_sgm_first' 185 input_spec = ReplaceFSwithFIRSTInputSpec 186 output_spec = ReplaceFSwithFIRSTOutputSpec 187 188 def _list_outputs(self): 189 outputs = self.output_spec().get() 190 outputs['out_file'] = op.abspath(self.inputs.out_file) 191 return outputs 192 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/nipype/interfaces/mrtrix3/preprocess.py b/nipype/interfaces/mrtrix3/preprocess.py --- a/nipype/interfaces/mrtrix3/preprocess.py +++ b/nipype/interfaces/mrtrix3/preprocess.py @@ -7,7 +7,7 @@ import os.path as op from ..base import (CommandLineInputSpec, CommandLine, traits, TraitedSpec, - File, isdefined, Undefined) + File, isdefined, Undefined, InputMultiObject) from .base import MRTrix3BaseInputSpec, MRTrix3Base @@ -40,10 +40,14 @@ argstr='%s', position=-1, desc='output CSF response text file') in_mask = File( exists=True, argstr='-mask %s', desc='provide initial mask image') - max_sh = traits.Int( - 8, usedefault=True, - argstr='-lmax %d', - desc='maximum harmonic degree of response function') + max_sh = InputMultiObject( + traits.Int, + value=[8], + usedefault=True, + argstr='-lmax %s', + sep=',', + desc=('maximum harmonic degree of response function - single value for ' + 'single-shell response, list for multi-shell response')) class ResponseSDOutputSpec(TraitedSpec): @@ -67,6 +71,11 @@ >>> resp.cmdline # doctest: +ELLIPSIS 'dwi2response tournier -fslgrad bvecs bvals -lmax 8 dwi.mif wm.txt' >>> resp.run() # doctest: +SKIP + + # We can also pass in multiple harmonic degrees in the case of multi-shell + >>> resp.inputs.max_sh = [6,8,10] + >>> resp.cmdline + 'dwi2response tournier -fslgrad bvecs bvals -lmax 6,8,10 dwi.mif wm.txt' """ _cmd = 'dwi2response'
{"golden_diff": "diff --git a/nipype/interfaces/mrtrix3/preprocess.py b/nipype/interfaces/mrtrix3/preprocess.py\n--- a/nipype/interfaces/mrtrix3/preprocess.py\n+++ b/nipype/interfaces/mrtrix3/preprocess.py\n@@ -7,7 +7,7 @@\n import os.path as op\n \n from ..base import (CommandLineInputSpec, CommandLine, traits, TraitedSpec,\n- File, isdefined, Undefined)\n+ File, isdefined, Undefined, InputMultiObject)\n from .base import MRTrix3BaseInputSpec, MRTrix3Base\n \n \n@@ -40,10 +40,14 @@\n argstr='%s', position=-1, desc='output CSF response text file')\n in_mask = File(\n exists=True, argstr='-mask %s', desc='provide initial mask image')\n- max_sh = traits.Int(\n- 8, usedefault=True,\n- argstr='-lmax %d',\n- desc='maximum harmonic degree of response function')\n+ max_sh = InputMultiObject(\n+ traits.Int,\n+ value=[8],\n+ usedefault=True,\n+ argstr='-lmax %s',\n+ sep=',',\n+ desc=('maximum harmonic degree of response function - single value for '\n+ 'single-shell response, list for multi-shell response'))\n \n \n class ResponseSDOutputSpec(TraitedSpec):\n@@ -67,6 +71,11 @@\n >>> resp.cmdline # doctest: +ELLIPSIS\n 'dwi2response tournier -fslgrad bvecs bvals -lmax 8 dwi.mif wm.txt'\n >>> resp.run() # doctest: +SKIP\n+\n+ # We can also pass in multiple harmonic degrees in the case of multi-shell\n+ >>> resp.inputs.max_sh = [6,8,10]\n+ >>> resp.cmdline\n+ 'dwi2response tournier -fslgrad bvecs bvals -lmax 6,8,10 dwi.mif wm.txt'\n \"\"\"\n \n _cmd = 'dwi2response'\n", "issue": "mrtrix3.ResponseSD - Handling of multiple b-values\n### Summary\r\nWhen running ResponseSD() (and assuming EstimateFOD(), which has a similar input), current interface does not seem to handle multiple b-values (possibly due to the input `max_sh` defaulting to a single integer value).\r\n\r\n### Actual behavior\r\nGet an error specifying the number of manually-defined lmax's does not match number of b-values (see last line).\r\n\r\n```Command: mrconvert /home/tkai/Desktop/tmp2/genACTTractography/act_preproc_wf/MRConvert/dwi.mif /home/tkai/Desktop/tmp2/genACTTractography/act_preproc_wf/dwi2response/dwi2response-tmp-U774DL/dwi.mif -strides 0,0,0,1 -fslgrad /home/tkai/graham/scratch/WholeBrain/derivatives/prepdwi_0.0.6a/prepdwi/sub-5082/dwi/sub-5082_dwi_space-T1w_preproc.bvec /home/tkai/graham/scratch/WholeBrain/derivatives/prepdwi_0.0.6a/prepdwi/sub-5082/dwi/sub-5082_dwi_space-T1w_preproc.bval\r\nCommand: dwi2mask /home/tkai/Desktop/tmp2/genACTTractography/act_preproc_wf/MRConvert/dwi.mif /home/tkai/Desktop/tmp2/genACTTractography/act_preproc_wf/dwi2response/dwi2response-tmp-U774DL/mask.mif\r\nCommand: mrconvert /home/tkai/Desktop/tmp2/genACTTractography/act_preproc_wf/Generate5tt/5tt.mif /home/tkai/Desktop/tmp2/genACTTractography/act_preproc_wf/dwi2response/dwi2response-tmp-U774DL/5tt.mif\r\ndwi2response: Changing to temporary directory (/home/tkai/Desktop/tmp2/genACTTractography/act_preproc_wf/dwi2response/dwi2response-tmp-U774DL/)\r\nCommand: 5ttcheck 5tt.mif\r\ndwi2response: [ERROR] Number of manually-defined lmax's (1) does not match number of b-values (3)\r\n```\r\n\r\n### Expected behavior\r\nThe lmax, if manually defined should allow for more than a single integer to match the number of b-values (e.g. -lmax 0,8,8 in this case), otherwise no default from the Nipype interface.\r\n\r\nAt the moment, I've forked a copy of nipype and made a temporary solution with the input `max_sh` passing the expected input as a string, removing the default to run without error.\r\n\r\n### How to replicate the behavior\r\nUse of multi-shell dwi data with mrt.ResponseSD and algorithm 'msmt_5tt'.\r\n\r\n### Script/Workflow details\r\n\r\nCurrent pipeline code is stored at [https://github.com/kaitj/mrtpipelines](https://github.com/kaitj/mrtpipelines)\r\n\r\n### Platform details:\r\n\r\n```\r\npython3 -c \"import nipype; print(nipype.get_info()); print(nipype.__version__)\"\r\n{'nibabel_version': '2.2.1', 'numpy_version': '1.14.3', 'nipype_version': '1.0.4-dev+g5a96ea5', 'commit_source': 'repository', 'sys_version': '3.5.2 (default, Nov 23 2017, 16:37:01) \\n[GCC 5.4.0 20160609]', 'traits_version': '4.6.0', 'scipy_version': '1.1.0', 'pkg_path': '/home/tkai/git/nipype/nipype', 'sys_platform': 'linux', 'commit_hash': '5a96ea5', 'networkx_version': '2.1', 'sys_executable': '/usr/bin/python3'}\r\n1.0.4-dev+g5a96ea5\r\n```\r\n\r\n### Execution environment\r\n\r\nChoose one\r\n- Container [Tag: ???]\r\n- My python environment inside container [Base Tag: ???]\r\n- My python environment outside container\r\n\n", "before_files": [{"content": "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n# -*- coding: utf-8 -*-\nfrom __future__ import (print_function, division, unicode_literals,\n absolute_import)\n\nimport os.path as op\n\nfrom ..base import (CommandLineInputSpec, CommandLine, traits, TraitedSpec,\n File, isdefined, Undefined)\nfrom .base import MRTrix3BaseInputSpec, MRTrix3Base\n\n\nclass ResponseSDInputSpec(MRTrix3BaseInputSpec):\n algorithm = traits.Enum(\n 'msmt_5tt',\n 'dhollander',\n 'tournier',\n 'tax',\n argstr='%s',\n position=1,\n mandatory=True,\n desc='response estimation algorithm (multi-tissue)')\n in_file = File(\n exists=True,\n argstr='%s',\n position=-5,\n mandatory=True,\n desc='input DWI image')\n mtt_file = File(argstr='%s', position=-4, desc='input 5tt image')\n wm_file = File(\n 'wm.txt',\n argstr='%s',\n position=-3,\n usedefault=True,\n desc='output WM response text file')\n gm_file = File(\n argstr='%s', position=-2, desc='output GM response text file')\n csf_file = File(\n argstr='%s', position=-1, desc='output CSF response text file')\n in_mask = File(\n exists=True, argstr='-mask %s', desc='provide initial mask image')\n max_sh = traits.Int(\n 8, usedefault=True,\n argstr='-lmax %d',\n desc='maximum harmonic degree of response function')\n\n\nclass ResponseSDOutputSpec(TraitedSpec):\n wm_file = File(argstr='%s', desc='output WM response text file')\n gm_file = File(argstr='%s', desc='output GM response text file')\n csf_file = File(argstr='%s', desc='output CSF response text file')\n\n\nclass ResponseSD(MRTrix3Base):\n \"\"\"\n Estimate response function(s) for spherical deconvolution using the specified algorithm.\n\n Example\n -------\n\n >>> import nipype.interfaces.mrtrix3 as mrt\n >>> resp = mrt.ResponseSD()\n >>> resp.inputs.in_file = 'dwi.mif'\n >>> resp.inputs.algorithm = 'tournier'\n >>> resp.inputs.grad_fsl = ('bvecs', 'bvals')\n >>> resp.cmdline # doctest: +ELLIPSIS\n 'dwi2response tournier -fslgrad bvecs bvals -lmax 8 dwi.mif wm.txt'\n >>> resp.run() # doctest: +SKIP\n \"\"\"\n\n _cmd = 'dwi2response'\n input_spec = ResponseSDInputSpec\n output_spec = ResponseSDOutputSpec\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs['wm_file'] = op.abspath(self.inputs.wm_file)\n if self.inputs.gm_file != Undefined:\n outputs['gm_file'] = op.abspath(self.inputs.gm_file)\n if self.inputs.csf_file != Undefined:\n outputs['csf_file'] = op.abspath(self.inputs.csf_file)\n return outputs\n\n\nclass ACTPrepareFSLInputSpec(CommandLineInputSpec):\n in_file = File(\n exists=True,\n argstr='%s',\n mandatory=True,\n position=-2,\n desc='input anatomical image')\n\n out_file = File(\n 'act_5tt.mif',\n argstr='%s',\n mandatory=True,\n position=-1,\n usedefault=True,\n desc='output file after processing')\n\n\nclass ACTPrepareFSLOutputSpec(TraitedSpec):\n out_file = File(exists=True, desc='the output response file')\n\n\nclass ACTPrepareFSL(CommandLine):\n \"\"\"\n Generate anatomical information necessary for Anatomically\n Constrained Tractography (ACT).\n\n Example\n -------\n\n >>> import nipype.interfaces.mrtrix3 as mrt\n >>> prep = mrt.ACTPrepareFSL()\n >>> prep.inputs.in_file = 'T1.nii.gz'\n >>> prep.cmdline # doctest: +ELLIPSIS\n 'act_anat_prepare_fsl T1.nii.gz act_5tt.mif'\n >>> prep.run() # doctest: +SKIP\n \"\"\"\n\n _cmd = 'act_anat_prepare_fsl'\n input_spec = ACTPrepareFSLInputSpec\n output_spec = ACTPrepareFSLOutputSpec\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs['out_file'] = op.abspath(self.inputs.out_file)\n return outputs\n\n\nclass ReplaceFSwithFIRSTInputSpec(CommandLineInputSpec):\n in_file = File(\n exists=True,\n argstr='%s',\n mandatory=True,\n position=-4,\n desc='input anatomical image')\n in_t1w = File(\n exists=True,\n argstr='%s',\n mandatory=True,\n position=-3,\n desc='input T1 image')\n in_config = File(\n exists=True,\n argstr='%s',\n position=-2,\n desc='connectome configuration file')\n\n out_file = File(\n 'aparc+first.mif',\n argstr='%s',\n mandatory=True,\n position=-1,\n usedefault=True,\n desc='output file after processing')\n\n\nclass ReplaceFSwithFIRSTOutputSpec(TraitedSpec):\n out_file = File(exists=True, desc='the output response file')\n\n\nclass ReplaceFSwithFIRST(CommandLine):\n \"\"\"\n Replace deep gray matter structures segmented with FSL FIRST in a\n FreeSurfer parcellation.\n\n Example\n -------\n\n >>> import nipype.interfaces.mrtrix3 as mrt\n >>> prep = mrt.ReplaceFSwithFIRST()\n >>> prep.inputs.in_file = 'aparc+aseg.nii'\n >>> prep.inputs.in_t1w = 'T1.nii.gz'\n >>> prep.inputs.in_config = 'mrtrix3_labelconfig.txt'\n >>> prep.cmdline # doctest: +ELLIPSIS\n 'fs_parc_replace_sgm_first aparc+aseg.nii T1.nii.gz \\\nmrtrix3_labelconfig.txt aparc+first.mif'\n >>> prep.run() # doctest: +SKIP\n \"\"\"\n\n _cmd = 'fs_parc_replace_sgm_first'\n input_spec = ReplaceFSwithFIRSTInputSpec\n output_spec = ReplaceFSwithFIRSTOutputSpec\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs['out_file'] = op.abspath(self.inputs.out_file)\n return outputs\n", "path": "nipype/interfaces/mrtrix3/preprocess.py"}], "after_files": [{"content": "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n# -*- coding: utf-8 -*-\nfrom __future__ import (print_function, division, unicode_literals,\n absolute_import)\n\nimport os.path as op\n\nfrom ..base import (CommandLineInputSpec, CommandLine, traits, TraitedSpec,\n File, isdefined, Undefined, InputMultiObject)\nfrom .base import MRTrix3BaseInputSpec, MRTrix3Base\n\n\nclass ResponseSDInputSpec(MRTrix3BaseInputSpec):\n algorithm = traits.Enum(\n 'msmt_5tt',\n 'dhollander',\n 'tournier',\n 'tax',\n argstr='%s',\n position=1,\n mandatory=True,\n desc='response estimation algorithm (multi-tissue)')\n in_file = File(\n exists=True,\n argstr='%s',\n position=-5,\n mandatory=True,\n desc='input DWI image')\n mtt_file = File(argstr='%s', position=-4, desc='input 5tt image')\n wm_file = File(\n 'wm.txt',\n argstr='%s',\n position=-3,\n usedefault=True,\n desc='output WM response text file')\n gm_file = File(\n argstr='%s', position=-2, desc='output GM response text file')\n csf_file = File(\n argstr='%s', position=-1, desc='output CSF response text file')\n in_mask = File(\n exists=True, argstr='-mask %s', desc='provide initial mask image')\n max_sh = InputMultiObject(\n traits.Int,\n value=[8],\n usedefault=True,\n argstr='-lmax %s',\n sep=',',\n desc=('maximum harmonic degree of response function - single value for '\n 'single-shell response, list for multi-shell response'))\n\n\nclass ResponseSDOutputSpec(TraitedSpec):\n wm_file = File(argstr='%s', desc='output WM response text file')\n gm_file = File(argstr='%s', desc='output GM response text file')\n csf_file = File(argstr='%s', desc='output CSF response text file')\n\n\nclass ResponseSD(MRTrix3Base):\n \"\"\"\n Estimate response function(s) for spherical deconvolution using the specified algorithm.\n\n Example\n -------\n\n >>> import nipype.interfaces.mrtrix3 as mrt\n >>> resp = mrt.ResponseSD()\n >>> resp.inputs.in_file = 'dwi.mif'\n >>> resp.inputs.algorithm = 'tournier'\n >>> resp.inputs.grad_fsl = ('bvecs', 'bvals')\n >>> resp.cmdline # doctest: +ELLIPSIS\n 'dwi2response tournier -fslgrad bvecs bvals -lmax 8 dwi.mif wm.txt'\n >>> resp.run() # doctest: +SKIP\n\n # We can also pass in multiple harmonic degrees in the case of multi-shell\n >>> resp.inputs.max_sh = [6,8,10]\n >>> resp.cmdline\n 'dwi2response tournier -fslgrad bvecs bvals -lmax 6,8,10 dwi.mif wm.txt'\n \"\"\"\n\n _cmd = 'dwi2response'\n input_spec = ResponseSDInputSpec\n output_spec = ResponseSDOutputSpec\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs['wm_file'] = op.abspath(self.inputs.wm_file)\n if self.inputs.gm_file != Undefined:\n outputs['gm_file'] = op.abspath(self.inputs.gm_file)\n if self.inputs.csf_file != Undefined:\n outputs['csf_file'] = op.abspath(self.inputs.csf_file)\n return outputs\n\n\nclass ACTPrepareFSLInputSpec(CommandLineInputSpec):\n in_file = File(\n exists=True,\n argstr='%s',\n mandatory=True,\n position=-2,\n desc='input anatomical image')\n\n out_file = File(\n 'act_5tt.mif',\n argstr='%s',\n mandatory=True,\n position=-1,\n usedefault=True,\n desc='output file after processing')\n\n\nclass ACTPrepareFSLOutputSpec(TraitedSpec):\n out_file = File(exists=True, desc='the output response file')\n\n\nclass ACTPrepareFSL(CommandLine):\n \"\"\"\n Generate anatomical information necessary for Anatomically\n Constrained Tractography (ACT).\n\n Example\n -------\n\n >>> import nipype.interfaces.mrtrix3 as mrt\n >>> prep = mrt.ACTPrepareFSL()\n >>> prep.inputs.in_file = 'T1.nii.gz'\n >>> prep.cmdline # doctest: +ELLIPSIS\n 'act_anat_prepare_fsl T1.nii.gz act_5tt.mif'\n >>> prep.run() # doctest: +SKIP\n \"\"\"\n\n _cmd = 'act_anat_prepare_fsl'\n input_spec = ACTPrepareFSLInputSpec\n output_spec = ACTPrepareFSLOutputSpec\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs['out_file'] = op.abspath(self.inputs.out_file)\n return outputs\n\n\nclass ReplaceFSwithFIRSTInputSpec(CommandLineInputSpec):\n in_file = File(\n exists=True,\n argstr='%s',\n mandatory=True,\n position=-4,\n desc='input anatomical image')\n in_t1w = File(\n exists=True,\n argstr='%s',\n mandatory=True,\n position=-3,\n desc='input T1 image')\n in_config = File(\n exists=True,\n argstr='%s',\n position=-2,\n desc='connectome configuration file')\n\n out_file = File(\n 'aparc+first.mif',\n argstr='%s',\n mandatory=True,\n position=-1,\n usedefault=True,\n desc='output file after processing')\n\n\nclass ReplaceFSwithFIRSTOutputSpec(TraitedSpec):\n out_file = File(exists=True, desc='the output response file')\n\n\nclass ReplaceFSwithFIRST(CommandLine):\n \"\"\"\n Replace deep gray matter structures segmented with FSL FIRST in a\n FreeSurfer parcellation.\n\n Example\n -------\n\n >>> import nipype.interfaces.mrtrix3 as mrt\n >>> prep = mrt.ReplaceFSwithFIRST()\n >>> prep.inputs.in_file = 'aparc+aseg.nii'\n >>> prep.inputs.in_t1w = 'T1.nii.gz'\n >>> prep.inputs.in_config = 'mrtrix3_labelconfig.txt'\n >>> prep.cmdline # doctest: +ELLIPSIS\n 'fs_parc_replace_sgm_first aparc+aseg.nii T1.nii.gz \\\nmrtrix3_labelconfig.txt aparc+first.mif'\n >>> prep.run() # doctest: +SKIP\n \"\"\"\n\n _cmd = 'fs_parc_replace_sgm_first'\n input_spec = ReplaceFSwithFIRSTInputSpec\n output_spec = ReplaceFSwithFIRSTOutputSpec\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs['out_file'] = op.abspath(self.inputs.out_file)\n return outputs\n", "path": "nipype/interfaces/mrtrix3/preprocess.py"}]}
3,154
469
gh_patches_debug_59180
rasdani/github-patches
git_diff
TheAlgorithms__Python-295
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ProjectEuler -- Problem 1 -- solv2.py -- Error For the Input ```1000``` I get ```233366.4```. The correct answer should be ```233168``` See [file](https://github.com/TheAlgorithms/Python/blob/master/Project%20Euler/Problem%2001/sol2.py) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `Project Euler/Problem 01/sol2.py` Content: ``` 1 ''' 2 Problem Statement: 3 If we list all the natural numbers below 10 that are multiples of 3 or 5, 4 we get 3,5,6 and 9. The sum of these multiples is 23. 5 Find the sum of all the multiples of 3 or 5 below N. 6 ''' 7 from __future__ import print_function 8 try: 9 raw_input # Python 2 10 except NameError: 11 raw_input = input # Python 3 12 n = int(raw_input().strip()) 13 sum = 0 14 terms = (n-1)/3 15 sum+= ((terms)*(6+(terms-1)*3))/2 #sum of an A.P. 16 terms = (n-1)/5 17 sum+= ((terms)*(10+(terms-1)*5))/2 18 terms = (n-1)/15 19 sum-= ((terms)*(30+(terms-1)*15))/2 20 print(sum) 21 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/Project Euler/Problem 01/sol2.py b/Project Euler/Problem 01/sol2.py --- a/Project Euler/Problem 01/sol2.py +++ b/Project Euler/Problem 01/sol2.py @@ -11,10 +11,10 @@ raw_input = input # Python 3 n = int(raw_input().strip()) sum = 0 -terms = (n-1)/3 -sum+= ((terms)*(6+(terms-1)*3))/2 #sum of an A.P. -terms = (n-1)/5 -sum+= ((terms)*(10+(terms-1)*5))/2 -terms = (n-1)/15 -sum-= ((terms)*(30+(terms-1)*15))/2 +terms = (n-1)//3 +sum+= ((terms)*(6+(terms-1)*3))//2 #sum of an A.P. +terms = (n-1)//5 +sum+= ((terms)*(10+(terms-1)*5))//2 +terms = (n-1)//15 +sum-= ((terms)*(30+(terms-1)*15))//2 print(sum)
{"golden_diff": "diff --git a/Project Euler/Problem 01/sol2.py b/Project Euler/Problem 01/sol2.py\n--- a/Project Euler/Problem 01/sol2.py\t\n+++ b/Project Euler/Problem 01/sol2.py\t\n@@ -11,10 +11,10 @@\n raw_input = input # Python 3\n n = int(raw_input().strip())\n sum = 0\n-terms = (n-1)/3\n-sum+= ((terms)*(6+(terms-1)*3))/2 #sum of an A.P.\n-terms = (n-1)/5\n-sum+= ((terms)*(10+(terms-1)*5))/2\n-terms = (n-1)/15\n-sum-= ((terms)*(30+(terms-1)*15))/2\n+terms = (n-1)//3\n+sum+= ((terms)*(6+(terms-1)*3))//2 #sum of an A.P.\n+terms = (n-1)//5\n+sum+= ((terms)*(10+(terms-1)*5))//2\n+terms = (n-1)//15\n+sum-= ((terms)*(30+(terms-1)*15))//2\n print(sum)\n", "issue": "ProjectEuler -- Problem 1 -- solv2.py -- Error\nFor the Input ```1000``` I get ```233366.4```. The correct answer should be ```233168``` \r\nSee [file](https://github.com/TheAlgorithms/Python/blob/master/Project%20Euler/Problem%2001/sol2.py)\n", "before_files": [{"content": "'''\nProblem Statement:\nIf we list all the natural numbers below 10 that are multiples of 3 or 5,\nwe get 3,5,6 and 9. The sum of these multiples is 23.\nFind the sum of all the multiples of 3 or 5 below N.\n'''\nfrom __future__ import print_function\ntry:\n raw_input # Python 2\nexcept NameError:\n raw_input = input # Python 3\nn = int(raw_input().strip())\nsum = 0\nterms = (n-1)/3\nsum+= ((terms)*(6+(terms-1)*3))/2 #sum of an A.P.\nterms = (n-1)/5\nsum+= ((terms)*(10+(terms-1)*5))/2\nterms = (n-1)/15\nsum-= ((terms)*(30+(terms-1)*15))/2\nprint(sum)\n", "path": "Project Euler/Problem 01/sol2.py"}], "after_files": [{"content": "'''\nProblem Statement:\nIf we list all the natural numbers below 10 that are multiples of 3 or 5,\nwe get 3,5,6 and 9. The sum of these multiples is 23.\nFind the sum of all the multiples of 3 or 5 below N.\n'''\nfrom __future__ import print_function\ntry:\n raw_input # Python 2\nexcept NameError:\n raw_input = input # Python 3\nn = int(raw_input().strip())\nsum = 0\nterms = (n-1)//3\nsum+= ((terms)*(6+(terms-1)*3))//2 #sum of an A.P.\nterms = (n-1)//5\nsum+= ((terms)*(10+(terms-1)*5))//2\nterms = (n-1)//15\nsum-= ((terms)*(30+(terms-1)*15))//2\nprint(sum)\n", "path": "Project Euler/Problem 01/sol2.py"}]}
579
277
gh_patches_debug_5315
rasdani/github-patches
git_diff
Flexget__Flexget-3935
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- BUG: Unhandled error in plugin convert_magnet: 'save_path must be set in add_torrent_params' ### Expected behaviour: <!--- Should be convert the magnet URI to torrent file ---> ### Actual behaviour: ### Steps to reproduce: - Step 1: flexget execute #### Config: ```yaml templates: torrent: transmission: host: localhost port: 9091 username: XXXXX password: XXXXXX tv_showrss: all_series: yes convert_magnet: yes seen: local download: /mnt/kodi/complete/peliculas/ template: torrent movies: template: torrent imdb: min_score: 5 min_votes: 100 min_year: 2020 reject_genres: - horror - porn - anime - xxx accept_languages: - english imdb_lookup: yes unique: field: - imdb_id - movie_name action: reject seen: local convert_magnet: yes quality: 2160p download: /mnt/kodi/complete/peliculas/ tasks: series_task: rss: url: http://showrss.info/user/XXXXX.rss?magnets=true&namespaces=true&name=clean&quality=null&re=null escape: yes link: - link - magneturl template: tv_showrss retry_failed: retry_time: 30 minutes # Base time in between retries retry_time_multiplier: 2 # Amount retry time will be multiplied by after each successive failure max_retries: 5 movies_task: rss: url: https://XXXXX.org/rssdd.php?categories=50;52 escape: yes link: - link - magneturl template: movies move-episodes: metainfo_series: yes require_field: series_name accept_all: yes thetvdb_lookup: yes all_series: parse_only: yes filesystem: path: /mnt/kodi/complete/peliculas/ regexp: '.*\.(avi|mkv|mp4|mpg)$' recursive: yes regexp: reject: - sample move: clean_source: 50 to: '/mnt/kodi/complete/series/{{series_name}}/' clean_source: 1000 along: extensions: - sub - srt ``` #### Log: <details> <summary>(click to expand)</summary> ``` 2023-05-28 12:27:57 CRITICAL task movies_task BUG: Unhandled error in plugin convert_magnet: 'save_path must be set in add_torrent_params' Traceback (most recent call last): File "/usr/lib/python3.10/threading.py", line 973, in _bootstrap self._bootstrap_inner() │ └ <function Thread._bootstrap_inner at 0x7f6954bddb40> └ <Thread(task_queue, started daemon 140090266555968)> File "/usr/lib/python3.10/threading.py", line 1016, in _bootstrap_inner self.run() │ └ <function Thread.run at 0x7f6954bdd870> └ <Thread(task_queue, started daemon 140090266555968)> File "/usr/lib/python3.10/threading.py", line 953, in run self._target(*self._args, **self._kwargs) │ │ │ │ │ └ {} │ │ │ │ └ <Thread(task_queue, started daemon 140090266555968)> │ │ │ └ () │ │ └ <Thread(task_queue, started daemon 140090266555968)> │ └ <bound method TaskQueue.run of <flexget.task_queue.TaskQueue object at 0x7f694ed09a50>> └ <Thread(task_queue, started daemon 140090266555968)> File "/usr/local/lib/python3.10/dist-packages/flexget/task_queue.py", line 46, in run self.current_task.execute() │ │ └ <function Task.execute at 0x7f6951d9dc60> │ └ <flexget.task.Task object at 0x7f694e9a7d30> └ <flexget.task_queue.TaskQueue object at 0x7f694ed09a50> File "/usr/local/lib/python3.10/dist-packages/flexget/task.py", line 87, in wrapper return func(self, *args, **kw) │ │ │ └ {} │ │ └ () │ └ <flexget.task.Task object at 0x7f694e9a7d30> └ <function Task.execute at 0x7f6951d9dbd0> File "/usr/local/lib/python3.10/dist-packages/flexget/task.py", line 725, in execute self._execute() │ └ <function Task._execute at 0x7f6951d9db40> └ <flexget.task.Task object at 0x7f694e9a7d30> File "/usr/local/lib/python3.10/dist-packages/flexget/task.py", line 694, in _execute self.__run_task_phase(phase) │ └ 'download' └ <flexget.task.Task object at 0x7f694e9a7d30> File "/usr/local/lib/python3.10/dist-packages/flexget/task.py", line 514, in __run_task_phase response = self.__run_plugin(plugin, phase, args) │ │ │ └ (<flexget.task.Task object at 0x7f694e9a7d30>, True) │ │ └ 'download' │ └ <PluginInfo(name=convert_magnet)> └ <flexget.task.Task object at 0x7f694e9a7d30> > File "/usr/local/lib/python3.10/dist-packages/flexget/task.py", line 547, in __run_plugin result = method(*args, **kwargs) │ │ └ {} │ └ (<flexget.task.Task object at 0x7f694e9a7d30>, True) └ <Event(name=plugin.convert_magnet.download,func=on_task_download,priority=130)> File "/usr/local/lib/python3.10/dist-packages/flexget/event.py", line 20, in __call__ return self.func(*args, **kwargs) │ │ │ └ {} │ │ └ (<flexget.task.Task object at 0x7f694e9a7d30>, True) │ └ <bound method ConvertMagnet.on_task_download of <flexget.components.bittorrent.convert_magnet.ConvertMagnet object at 0x7f694... └ <Event(name=plugin.convert_magnet.download,func=on_task_download,priority=130)> File "/usr/local/lib/python3.10/dist-packages/flexget/components/bittorrent/convert_magnet.py", line 102, in on_task_download torrent_file = self.magnet_to_torrent(entry['url'], converted_path, timeout) │ │ │ │ └ 30.0 │ │ │ └ '/home/wooltar/.flexget/converted' │ │ └ <Entry(title=Venom.Let.There.Be.Carnage.2021.2160p.UHD.BluRay.x265.10bit.HDR.TrueHD.7.1.Atmos-RARBG,state=accepted)> │ └ <function ConvertMagnet.magnet_to_torrent at 0x7f69500f1900> └ <flexget.components.bittorrent.convert_magnet.ConvertMagnet object at 0x7f694ecbee60> File "/usr/local/lib/python3.10/dist-packages/flexget/components/bittorrent/convert_magnet.py", line 47, in magnet_to_torrent handle = session.add_torrent(params) │ │ └ <libtorrent.add_torrent_params object at 0x7f69483267f0> │ └ <Boost.Python.function object at 0x7f6948036b80> └ <libtorrent.session object at 0x7f694ec13150>``` ``` </details> ### Additional information: - FlexGet version: 3.7.2 - Python version: 3.10.6 - Installation method: pip3 - Using daemon (yes/no): no - OS and version: Ubuntu 22.04.2 LTS - Link to crash log: --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `flexget/components/bittorrent/convert_magnet.py` Content: ``` 1 import os 2 import time 3 from urllib.parse import quote 4 5 from loguru import logger 6 7 from flexget import plugin 8 from flexget.event import event 9 from flexget.utils.pathscrub import pathscrub 10 from flexget.utils.tools import parse_timedelta 11 12 logger = logger.bind(name='convert_magnet') 13 14 15 class ConvertMagnet: 16 """Convert magnet only entries to a torrent file""" 17 18 schema = { 19 "oneOf": [ 20 # Allow convert_magnet: no form to turn off plugin altogether 21 {"type": "boolean"}, 22 { 23 "type": "object", 24 "properties": { 25 "timeout": {"type": "string", "format": "interval"}, 26 "force": {"type": "boolean"}, 27 }, 28 "additionalProperties": False, 29 }, 30 ] 31 } 32 33 def magnet_to_torrent(self, magnet_uri, destination_folder, timeout): 34 import libtorrent 35 36 params = libtorrent.parse_magnet_uri(magnet_uri) 37 session = libtorrent.session() 38 lt_version = [int(v) for v in libtorrent.version.split('.')] 39 if lt_version > [0, 16, 13, 0] and lt_version < [1, 1, 3, 0]: 40 # for some reason the info_hash needs to be bytes but it's a struct called sha1_hash 41 params['info_hash'] = params['info_hash'].to_bytes() 42 if lt_version < [1, 2]: 43 # for versions < 1.2 44 params['url'] = magnet_uri 45 else: 46 params.url = magnet_uri 47 handle = session.add_torrent(params) 48 logger.debug('Acquiring torrent metadata for magnet {}', magnet_uri) 49 timeout_value = timeout 50 while not handle.has_metadata(): 51 time.sleep(0.1) 52 timeout_value -= 0.1 53 if timeout_value <= 0: 54 raise plugin.PluginError(f'Timed out after {timeout} seconds trying to magnetize') 55 logger.debug('Metadata acquired') 56 torrent_info = handle.get_torrent_info() 57 torrent_file = libtorrent.create_torrent(torrent_info) 58 torrent_path = pathscrub( 59 os.path.join(destination_folder, torrent_info.name() + ".torrent") 60 ) 61 with open(torrent_path, "wb") as f: 62 f.write(libtorrent.bencode(torrent_file.generate())) 63 logger.debug('Torrent file wrote to {}', torrent_path) 64 return torrent_path 65 66 def prepare_config(self, config): 67 if not isinstance(config, dict): 68 config = {} 69 config.setdefault('timeout', '30 seconds') 70 config.setdefault('force', False) 71 return config 72 73 @plugin.priority(plugin.PRIORITY_FIRST) 74 def on_task_start(self, task, config): 75 if config is False: 76 return 77 try: 78 import libtorrent # noqa 79 except ImportError: 80 raise plugin.DependencyError( 81 'convert_magnet', 'libtorrent', 'libtorrent package required', logger 82 ) 83 84 @plugin.priority(130) 85 def on_task_download(self, task, config): 86 if config is False: 87 return 88 config = self.prepare_config(config) 89 # Create the conversion target directory 90 converted_path = os.path.join(task.manager.config_base, 'converted') 91 92 timeout = parse_timedelta(config['timeout']).total_seconds() 93 94 if not os.path.isdir(converted_path): 95 os.mkdir(converted_path) 96 97 for entry in task.accepted: 98 if entry['url'].startswith('magnet:'): 99 entry.setdefault('urls', [entry['url']]) 100 try: 101 logger.info('Converting entry {} magnet URI to a torrent file', entry['title']) 102 torrent_file = self.magnet_to_torrent(entry['url'], converted_path, timeout) 103 except (plugin.PluginError, TypeError) as e: 104 logger.error( 105 'Unable to convert Magnet URI for entry {}: {}', entry['title'], e 106 ) 107 if config['force']: 108 entry.fail('Magnet URI conversion failed') 109 continue 110 # Windows paths need an extra / prepended to them for url 111 if not torrent_file.startswith('/'): 112 torrent_file = '/' + torrent_file 113 entry['url'] = torrent_file 114 entry['file'] = torrent_file 115 # make sure it's first in the list because of how download plugin works 116 entry['urls'].insert(0, f'file://{quote(torrent_file)}') 117 118 119 @event('plugin.register') 120 def register_plugin(): 121 plugin.register(ConvertMagnet, 'convert_magnet', api_ver=2) 122 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/flexget/components/bittorrent/convert_magnet.py b/flexget/components/bittorrent/convert_magnet.py --- a/flexget/components/bittorrent/convert_magnet.py +++ b/flexget/components/bittorrent/convert_magnet.py @@ -44,6 +44,7 @@ params['url'] = magnet_uri else: params.url = magnet_uri + params.save_path = destination_folder handle = session.add_torrent(params) logger.debug('Acquiring torrent metadata for magnet {}', magnet_uri) timeout_value = timeout
{"golden_diff": "diff --git a/flexget/components/bittorrent/convert_magnet.py b/flexget/components/bittorrent/convert_magnet.py\n--- a/flexget/components/bittorrent/convert_magnet.py\n+++ b/flexget/components/bittorrent/convert_magnet.py\n@@ -44,6 +44,7 @@\n params['url'] = magnet_uri\n else:\n params.url = magnet_uri\n+ params.save_path = destination_folder\n handle = session.add_torrent(params)\n logger.debug('Acquiring torrent metadata for magnet {}', magnet_uri)\n timeout_value = timeout\n", "issue": " BUG: Unhandled error in plugin convert_magnet: 'save_path must be set in add_torrent_params'\n\r\n### Expected behaviour:\r\n\r\n<!---\r\nShould be convert the magnet URI to torrent file\r\n--->\r\n\r\n### Actual behaviour:\r\n\r\n### Steps to reproduce:\r\n- Step 1: flexget execute \r\n\r\n#### Config:\r\n```yaml\r\ntemplates:\r\n torrent:\r\n transmission:\r\n host: localhost\r\n port: 9091\r\n username: XXXXX\r\n password: XXXXXX\r\n\r\n tv_showrss:\r\n all_series: yes\r\n convert_magnet: yes\r\n seen: local\r\n download: /mnt/kodi/complete/peliculas/\r\n template: torrent\r\n\r\n\r\n movies:\r\n template: torrent\r\n imdb:\r\n min_score: 5\r\n min_votes: 100\r\n min_year: 2020\r\n reject_genres:\r\n - horror\r\n - porn\r\n - anime\r\n - xxx\r\n accept_languages:\r\n - english\r\n imdb_lookup: yes\r\n unique:\r\n field:\r\n - imdb_id\r\n - movie_name\r\n action: reject\r\n seen: local\r\n convert_magnet: yes\r\n quality: 2160p\r\n download: /mnt/kodi/complete/peliculas/\r\n\r\ntasks:\r\n series_task:\r\n rss:\r\n url: http://showrss.info/user/XXXXX.rss?magnets=true&namespaces=true&name=clean&quality=null&re=null\r\n escape: yes\r\n link:\r\n - link\r\n - magneturl\r\n template: tv_showrss\r\n retry_failed:\r\n retry_time: 30 minutes # Base time in between retries\r\n retry_time_multiplier: 2 # Amount retry time will be multiplied by after each successive failure\r\n max_retries: 5\r\n\r\n movies_task:\r\n rss:\r\n url: https://XXXXX.org/rssdd.php?categories=50;52\r\n escape: yes\r\n link:\r\n - link\r\n - magneturl\r\n template: movies\r\n \r\n\r\n move-episodes:\r\n metainfo_series: yes\r\n require_field: series_name\r\n accept_all: yes\r\n thetvdb_lookup: yes\r\n all_series:\r\n parse_only: yes\r\n filesystem:\r\n path: /mnt/kodi/complete/peliculas/\r\n regexp: '.*\\.(avi|mkv|mp4|mpg)$'\r\n recursive: yes\r\n regexp:\r\n reject:\r\n - sample\r\n move:\r\n clean_source: 50\r\n to: '/mnt/kodi/complete/series/{{series_name}}/'\r\n clean_source: 1000\r\n along:\r\n extensions:\r\n - sub\r\n - srt\r\n```\r\n \r\n#### Log:\r\n<details>\r\n <summary>(click to expand)</summary>\r\n\r\n```\r\n2023-05-28 12:27:57 CRITICAL task movies_task BUG: Unhandled error in plugin convert_magnet: 'save_path must be set in add_torrent_params'\r\nTraceback (most recent call last):\r\n\r\n File \"/usr/lib/python3.10/threading.py\", line 973, in _bootstrap\r\n self._bootstrap_inner()\r\n \u2502 \u2514 <function Thread._bootstrap_inner at 0x7f6954bddb40>\r\n \u2514 <Thread(task_queue, started daemon 140090266555968)>\r\n File \"/usr/lib/python3.10/threading.py\", line 1016, in _bootstrap_inner\r\n self.run()\r\n \u2502 \u2514 <function Thread.run at 0x7f6954bdd870>\r\n \u2514 <Thread(task_queue, started daemon 140090266555968)>\r\n File \"/usr/lib/python3.10/threading.py\", line 953, in run\r\n self._target(*self._args, **self._kwargs)\r\n \u2502 \u2502 \u2502 \u2502 \u2502 \u2514 {}\r\n \u2502 \u2502 \u2502 \u2502 \u2514 <Thread(task_queue, started daemon 140090266555968)>\r\n \u2502 \u2502 \u2502 \u2514 ()\r\n \u2502 \u2502 \u2514 <Thread(task_queue, started daemon 140090266555968)>\r\n \u2502 \u2514 <bound method TaskQueue.run of <flexget.task_queue.TaskQueue object at 0x7f694ed09a50>>\r\n \u2514 <Thread(task_queue, started daemon 140090266555968)>\r\n File \"/usr/local/lib/python3.10/dist-packages/flexget/task_queue.py\", line 46, in run\r\n self.current_task.execute()\r\n \u2502 \u2502 \u2514 <function Task.execute at 0x7f6951d9dc60>\r\n \u2502 \u2514 <flexget.task.Task object at 0x7f694e9a7d30>\r\n \u2514 <flexget.task_queue.TaskQueue object at 0x7f694ed09a50>\r\n File \"/usr/local/lib/python3.10/dist-packages/flexget/task.py\", line 87, in wrapper\r\n return func(self, *args, **kw)\r\n \u2502 \u2502 \u2502 \u2514 {}\r\n \u2502 \u2502 \u2514 ()\r\n \u2502 \u2514 <flexget.task.Task object at 0x7f694e9a7d30>\r\n \u2514 <function Task.execute at 0x7f6951d9dbd0>\r\n File \"/usr/local/lib/python3.10/dist-packages/flexget/task.py\", line 725, in execute\r\n self._execute()\r\n \u2502 \u2514 <function Task._execute at 0x7f6951d9db40>\r\n \u2514 <flexget.task.Task object at 0x7f694e9a7d30>\r\n File \"/usr/local/lib/python3.10/dist-packages/flexget/task.py\", line 694, in _execute\r\n self.__run_task_phase(phase)\r\n \u2502 \u2514 'download'\r\n \u2514 <flexget.task.Task object at 0x7f694e9a7d30>\r\n File \"/usr/local/lib/python3.10/dist-packages/flexget/task.py\", line 514, in __run_task_phase\r\n response = self.__run_plugin(plugin, phase, args)\r\n \u2502 \u2502 \u2502 \u2514 (<flexget.task.Task object at 0x7f694e9a7d30>, True)\r\n \u2502 \u2502 \u2514 'download'\r\n \u2502 \u2514 <PluginInfo(name=convert_magnet)>\r\n \u2514 <flexget.task.Task object at 0x7f694e9a7d30>\r\n> File \"/usr/local/lib/python3.10/dist-packages/flexget/task.py\", line 547, in __run_plugin\r\n result = method(*args, **kwargs)\r\n \u2502 \u2502 \u2514 {}\r\n \u2502 \u2514 (<flexget.task.Task object at 0x7f694e9a7d30>, True)\r\n \u2514 <Event(name=plugin.convert_magnet.download,func=on_task_download,priority=130)>\r\n File \"/usr/local/lib/python3.10/dist-packages/flexget/event.py\", line 20, in __call__\r\n return self.func(*args, **kwargs)\r\n \u2502 \u2502 \u2502 \u2514 {}\r\n \u2502 \u2502 \u2514 (<flexget.task.Task object at 0x7f694e9a7d30>, True)\r\n \u2502 \u2514 <bound method ConvertMagnet.on_task_download of <flexget.components.bittorrent.convert_magnet.ConvertMagnet object at 0x7f694...\r\n \u2514 <Event(name=plugin.convert_magnet.download,func=on_task_download,priority=130)>\r\n File \"/usr/local/lib/python3.10/dist-packages/flexget/components/bittorrent/convert_magnet.py\", line 102, in on_task_download\r\n torrent_file = self.magnet_to_torrent(entry['url'], converted_path, timeout)\r\n \u2502 \u2502 \u2502 \u2502 \u2514 30.0\r\n \u2502 \u2502 \u2502 \u2514 '/home/wooltar/.flexget/converted'\r\n \u2502 \u2502 \u2514 <Entry(title=Venom.Let.There.Be.Carnage.2021.2160p.UHD.BluRay.x265.10bit.HDR.TrueHD.7.1.Atmos-RARBG,state=accepted)>\r\n \u2502 \u2514 <function ConvertMagnet.magnet_to_torrent at 0x7f69500f1900>\r\n \u2514 <flexget.components.bittorrent.convert_magnet.ConvertMagnet object at 0x7f694ecbee60>\r\n File \"/usr/local/lib/python3.10/dist-packages/flexget/components/bittorrent/convert_magnet.py\", line 47, in magnet_to_torrent\r\n handle = session.add_torrent(params)\r\n \u2502 \u2502 \u2514 <libtorrent.add_torrent_params object at 0x7f69483267f0>\r\n \u2502 \u2514 <Boost.Python.function object at 0x7f6948036b80>\r\n \u2514 <libtorrent.session object at 0x7f694ec13150>```\r\n```\r\n</details>\r\n\r\n### Additional information:\r\n\r\n- FlexGet version: 3.7.2\r\n- Python version: 3.10.6\r\n- Installation method: pip3 \r\n- Using daemon (yes/no): no\r\n- OS and version: Ubuntu 22.04.2 LTS\r\n- Link to crash log:\r\n\r\n\n", "before_files": [{"content": "import os\nimport time\nfrom urllib.parse import quote\n\nfrom loguru import logger\n\nfrom flexget import plugin\nfrom flexget.event import event\nfrom flexget.utils.pathscrub import pathscrub\nfrom flexget.utils.tools import parse_timedelta\n\nlogger = logger.bind(name='convert_magnet')\n\n\nclass ConvertMagnet:\n \"\"\"Convert magnet only entries to a torrent file\"\"\"\n\n schema = {\n \"oneOf\": [\n # Allow convert_magnet: no form to turn off plugin altogether\n {\"type\": \"boolean\"},\n {\n \"type\": \"object\",\n \"properties\": {\n \"timeout\": {\"type\": \"string\", \"format\": \"interval\"},\n \"force\": {\"type\": \"boolean\"},\n },\n \"additionalProperties\": False,\n },\n ]\n }\n\n def magnet_to_torrent(self, magnet_uri, destination_folder, timeout):\n import libtorrent\n\n params = libtorrent.parse_magnet_uri(magnet_uri)\n session = libtorrent.session()\n lt_version = [int(v) for v in libtorrent.version.split('.')]\n if lt_version > [0, 16, 13, 0] and lt_version < [1, 1, 3, 0]:\n # for some reason the info_hash needs to be bytes but it's a struct called sha1_hash\n params['info_hash'] = params['info_hash'].to_bytes()\n if lt_version < [1, 2]:\n # for versions < 1.2\n params['url'] = magnet_uri\n else:\n params.url = magnet_uri\n handle = session.add_torrent(params)\n logger.debug('Acquiring torrent metadata for magnet {}', magnet_uri)\n timeout_value = timeout\n while not handle.has_metadata():\n time.sleep(0.1)\n timeout_value -= 0.1\n if timeout_value <= 0:\n raise plugin.PluginError(f'Timed out after {timeout} seconds trying to magnetize')\n logger.debug('Metadata acquired')\n torrent_info = handle.get_torrent_info()\n torrent_file = libtorrent.create_torrent(torrent_info)\n torrent_path = pathscrub(\n os.path.join(destination_folder, torrent_info.name() + \".torrent\")\n )\n with open(torrent_path, \"wb\") as f:\n f.write(libtorrent.bencode(torrent_file.generate()))\n logger.debug('Torrent file wrote to {}', torrent_path)\n return torrent_path\n\n def prepare_config(self, config):\n if not isinstance(config, dict):\n config = {}\n config.setdefault('timeout', '30 seconds')\n config.setdefault('force', False)\n return config\n\n @plugin.priority(plugin.PRIORITY_FIRST)\n def on_task_start(self, task, config):\n if config is False:\n return\n try:\n import libtorrent # noqa\n except ImportError:\n raise plugin.DependencyError(\n 'convert_magnet', 'libtorrent', 'libtorrent package required', logger\n )\n\n @plugin.priority(130)\n def on_task_download(self, task, config):\n if config is False:\n return\n config = self.prepare_config(config)\n # Create the conversion target directory\n converted_path = os.path.join(task.manager.config_base, 'converted')\n\n timeout = parse_timedelta(config['timeout']).total_seconds()\n\n if not os.path.isdir(converted_path):\n os.mkdir(converted_path)\n\n for entry in task.accepted:\n if entry['url'].startswith('magnet:'):\n entry.setdefault('urls', [entry['url']])\n try:\n logger.info('Converting entry {} magnet URI to a torrent file', entry['title'])\n torrent_file = self.magnet_to_torrent(entry['url'], converted_path, timeout)\n except (plugin.PluginError, TypeError) as e:\n logger.error(\n 'Unable to convert Magnet URI for entry {}: {}', entry['title'], e\n )\n if config['force']:\n entry.fail('Magnet URI conversion failed')\n continue\n # Windows paths need an extra / prepended to them for url\n if not torrent_file.startswith('/'):\n torrent_file = '/' + torrent_file\n entry['url'] = torrent_file\n entry['file'] = torrent_file\n # make sure it's first in the list because of how download plugin works\n entry['urls'].insert(0, f'file://{quote(torrent_file)}')\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(ConvertMagnet, 'convert_magnet', api_ver=2)\n", "path": "flexget/components/bittorrent/convert_magnet.py"}], "after_files": [{"content": "import os\nimport time\nfrom urllib.parse import quote\n\nfrom loguru import logger\n\nfrom flexget import plugin\nfrom flexget.event import event\nfrom flexget.utils.pathscrub import pathscrub\nfrom flexget.utils.tools import parse_timedelta\n\nlogger = logger.bind(name='convert_magnet')\n\n\nclass ConvertMagnet:\n \"\"\"Convert magnet only entries to a torrent file\"\"\"\n\n schema = {\n \"oneOf\": [\n # Allow convert_magnet: no form to turn off plugin altogether\n {\"type\": \"boolean\"},\n {\n \"type\": \"object\",\n \"properties\": {\n \"timeout\": {\"type\": \"string\", \"format\": \"interval\"},\n \"force\": {\"type\": \"boolean\"},\n },\n \"additionalProperties\": False,\n },\n ]\n }\n\n def magnet_to_torrent(self, magnet_uri, destination_folder, timeout):\n import libtorrent\n\n params = libtorrent.parse_magnet_uri(magnet_uri)\n session = libtorrent.session()\n lt_version = [int(v) for v in libtorrent.version.split('.')]\n if lt_version > [0, 16, 13, 0] and lt_version < [1, 1, 3, 0]:\n # for some reason the info_hash needs to be bytes but it's a struct called sha1_hash\n params['info_hash'] = params['info_hash'].to_bytes()\n if lt_version < [1, 2]:\n # for versions < 1.2\n params['url'] = magnet_uri\n else:\n params.url = magnet_uri\n params.save_path = destination_folder\n handle = session.add_torrent(params)\n logger.debug('Acquiring torrent metadata for magnet {}', magnet_uri)\n timeout_value = timeout\n while not handle.has_metadata():\n time.sleep(0.1)\n timeout_value -= 0.1\n if timeout_value <= 0:\n raise plugin.PluginError(f'Timed out after {timeout} seconds trying to magnetize')\n logger.debug('Metadata acquired')\n torrent_info = handle.get_torrent_info()\n torrent_file = libtorrent.create_torrent(torrent_info)\n torrent_path = pathscrub(\n os.path.join(destination_folder, torrent_info.name() + \".torrent\")\n )\n with open(torrent_path, \"wb\") as f:\n f.write(libtorrent.bencode(torrent_file.generate()))\n logger.debug('Torrent file wrote to {}', torrent_path)\n return torrent_path\n\n def prepare_config(self, config):\n if not isinstance(config, dict):\n config = {}\n config.setdefault('timeout', '30 seconds')\n config.setdefault('force', False)\n return config\n\n @plugin.priority(plugin.PRIORITY_FIRST)\n def on_task_start(self, task, config):\n if config is False:\n return\n try:\n import libtorrent # noqa\n except ImportError:\n raise plugin.DependencyError(\n 'convert_magnet', 'libtorrent', 'libtorrent package required', logger\n )\n\n @plugin.priority(130)\n def on_task_download(self, task, config):\n if config is False:\n return\n config = self.prepare_config(config)\n # Create the conversion target directory\n converted_path = os.path.join(task.manager.config_base, 'converted')\n\n timeout = parse_timedelta(config['timeout']).total_seconds()\n\n if not os.path.isdir(converted_path):\n os.mkdir(converted_path)\n\n for entry in task.accepted:\n if entry['url'].startswith('magnet:'):\n entry.setdefault('urls', [entry['url']])\n try:\n logger.info('Converting entry {} magnet URI to a torrent file', entry['title'])\n torrent_file = self.magnet_to_torrent(entry['url'], converted_path, timeout)\n except (plugin.PluginError, TypeError) as e:\n logger.error(\n 'Unable to convert Magnet URI for entry {}: {}', entry['title'], e\n )\n if config['force']:\n entry.fail('Magnet URI conversion failed')\n continue\n # Windows paths need an extra / prepended to them for url\n if not torrent_file.startswith('/'):\n torrent_file = '/' + torrent_file\n entry['url'] = torrent_file\n entry['file'] = torrent_file\n # make sure it's first in the list because of how download plugin works\n entry['urls'].insert(0, f'file://{quote(torrent_file)}')\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(ConvertMagnet, 'convert_magnet', api_ver=2)\n", "path": "flexget/components/bittorrent/convert_magnet.py"}]}
3,697
130
gh_patches_debug_20512
rasdani/github-patches
git_diff
conan-io__conan-15174
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [feature] Supress Conan 2 compatibility message ### What is your suggestion? When a recipe fails to build Conan 2 will print a migration note regarding v2 compatibility: ``` ********************************************************* Recipe 'conanfile.py (...)' cannot build its binary It is possible that this recipe is not Conan 2.0 ready If the recipe comes from ConanCenter check: https://conan.io/cci-v2.html If it is your recipe, check if it is updated to 2.0 ********************************************************* ``` This is confusing our developers though because they assume a Conan issue when in fact its a good old compiler failure right above that flashy message. Looking at the code I found now way to disable this - is there any plans to offer an option for that? ### Have you read the CONTRIBUTING guide? - [X] I've read the CONTRIBUTING guide --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `conan/cli/cli.py` Content: ``` 1 import importlib 2 import os 3 import pkgutil 4 import re 5 import signal 6 import sys 7 import textwrap 8 import traceback 9 from collections import defaultdict 10 from difflib import get_close_matches 11 from inspect import getmembers 12 13 from conan.api.conan_api import ConanAPI 14 from conan.api.output import ConanOutput, Color, cli_out_write, LEVEL_TRACE 15 from conan.cli.command import ConanSubCommand 16 from conan.cli.exit_codes import SUCCESS, ERROR_MIGRATION, ERROR_GENERAL, USER_CTRL_C, \ 17 ERROR_SIGTERM, USER_CTRL_BREAK, ERROR_INVALID_CONFIGURATION, ERROR_UNEXPECTED 18 from conan.internal.cache.home_paths import HomePaths 19 from conans import __version__ as client_version 20 from conan.errors import ConanException, ConanInvalidConfiguration, ConanMigrationError 21 from conans.util.files import exception_message_safe 22 23 24 class Cli: 25 """A single command of the conan application, with all the first level commands. Manages the 26 parsing of parameters and delegates functionality to the conan python api. It can also show the 27 help of the tool. 28 """ 29 30 def __init__(self, conan_api): 31 assert isinstance(conan_api, ConanAPI), \ 32 "Expected 'Conan' type, got '{}'".format(type(conan_api)) 33 self._conan_api = conan_api 34 self._groups = defaultdict(list) 35 self._commands = {} 36 37 def _add_commands(self): 38 conan_commands_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "commands") 39 for module in pkgutil.iter_modules([conan_commands_path]): 40 module_name = module[1] 41 self._add_command("conan.cli.commands.{}".format(module_name), module_name) 42 43 custom_commands_path = HomePaths(self._conan_api.cache_folder).custom_commands_path 44 if not os.path.isdir(custom_commands_path): 45 return 46 47 sys.path.append(custom_commands_path) 48 for module in pkgutil.iter_modules([custom_commands_path]): 49 module_name = module[1] 50 if module_name.startswith("cmd_"): 51 try: 52 self._add_command(module_name, module_name.replace("cmd_", "")) 53 except Exception as e: 54 ConanOutput().error("Error loading custom command " 55 "'{}.py': {}".format(module_name, e)) 56 # layers 57 for folder in os.listdir(custom_commands_path): 58 layer_folder = os.path.join(custom_commands_path, folder) 59 sys.path.append(layer_folder) 60 if not os.path.isdir(layer_folder): 61 continue 62 for module in pkgutil.iter_modules([layer_folder]): 63 module_name = module[1] 64 if module_name.startswith("cmd_"): 65 module_path = f"{folder}.{module_name}" 66 try: 67 self._add_command(module_path, module_name.replace("cmd_", ""), 68 package=folder) 69 except Exception as e: 70 ConanOutput().error(f"Error loading custom command {module_path}: {e}") 71 72 def _add_command(self, import_path, method_name, package=None): 73 try: 74 imported_module = importlib.import_module(import_path) 75 command_wrapper = getattr(imported_module, method_name) 76 if command_wrapper.doc: 77 name = f"{package}:{command_wrapper.name}" if package else command_wrapper.name 78 self._commands[name] = command_wrapper 79 self._groups[command_wrapper.group].append(name) 80 for name, value in getmembers(imported_module): 81 if isinstance(value, ConanSubCommand): 82 if name.startswith("{}_".format(method_name)): 83 command_wrapper.add_subcommand(value) 84 else: 85 raise ConanException("The name for the subcommand method should " 86 "begin with the main command name + '_'. " 87 "i.e. {}_<subcommand_name>".format(method_name)) 88 except AttributeError: 89 raise ConanException("There is no {} method defined in {}".format(method_name, 90 import_path)) 91 92 def _print_similar(self, command): 93 """ Looks for similar commands and prints them if found. 94 """ 95 output = ConanOutput() 96 matches = get_close_matches( 97 word=command, possibilities=self._commands.keys(), n=5, cutoff=0.75) 98 99 if len(matches) == 0: 100 return 101 102 if len(matches) > 1: 103 output.info("The most similar commands are") 104 else: 105 output.info("The most similar command is") 106 107 for match in matches: 108 output.info(" %s" % match) 109 110 output.writeln("") 111 112 def _output_help_cli(self): 113 """ 114 Prints a summary of all commands. 115 """ 116 max_len = max((len(c) for c in self._commands)) + 1 117 line_format = '{{: <{}}}'.format(max_len) 118 119 for group_name, comm_names in sorted(self._groups.items()): 120 cli_out_write("\n" + group_name + " commands", Color.BRIGHT_MAGENTA) 121 for name in comm_names: 122 # future-proof way to ensure tabular formatting 123 cli_out_write(line_format.format(name), Color.GREEN, endline="") 124 125 # Help will be all the lines up to the first empty one 126 docstring_lines = self._commands[name].doc.split('\n') 127 start = False 128 data = [] 129 for line in docstring_lines: 130 line = line.strip() 131 if not line: 132 if start: 133 break 134 start = True 135 continue 136 data.append(line) 137 138 txt = textwrap.fill(' '.join(data), 80, subsequent_indent=" " * (max_len + 2)) 139 cli_out_write(txt) 140 141 cli_out_write("") 142 cli_out_write('Type "conan <command> -h" for help', Color.BRIGHT_MAGENTA) 143 144 def run(self, *args): 145 """ Entry point for executing commands, dispatcher to class 146 methods 147 """ 148 output = ConanOutput() 149 self._add_commands() 150 try: 151 command_argument = args[0][0] 152 except IndexError: # No parameters 153 self._output_help_cli() 154 return 155 try: 156 command = self._commands[command_argument] 157 except KeyError as exc: 158 if command_argument in ["-v", "--version"]: 159 cli_out_write("Conan version %s" % client_version) 160 return 161 162 if command_argument in ["-h", "--help"]: 163 self._output_help_cli() 164 return 165 166 output.info("'%s' is not a Conan command. See 'conan --help'." % command_argument) 167 output.info("") 168 self._print_similar(command_argument) 169 raise ConanException("Unknown command %s" % str(exc)) 170 171 try: 172 command.run(self._conan_api, args[0][1:]) 173 except Exception as e: 174 # must be a local-import to get updated value 175 if ConanOutput.level_allowed(LEVEL_TRACE): 176 print(traceback.format_exc(), file=sys.stderr) 177 self._conan2_migrate_recipe_msg(e) 178 raise 179 180 @staticmethod 181 def _conan2_migrate_recipe_msg(exception): 182 message = str(exception) 183 184 result = re.search(r"Package '(.*)' not resolved: .*: Cannot load recipe", message) 185 if result: 186 pkg = result.group(1) 187 error = "*********************************************************\n" \ 188 f"Recipe '{pkg}' seems broken.\n" \ 189 f"It is possible that this recipe is not Conan 2.0 ready\n"\ 190 "If the recipe comes from ConanCenter, report it at https://github.com/conan-io/conan-center-index/issues\n" \ 191 "If it is your recipe, check if it is updated to 2.0\n" \ 192 "*********************************************************\n" 193 ConanOutput().writeln(error, fg=Color.BRIGHT_MAGENTA) 194 result = re.search(r"(.*): Error in build\(\) method, line", message) 195 if result: 196 pkg = result.group(1) 197 error = "*********************************************************\n" \ 198 f"Recipe '{pkg}' cannot build its binary\n" \ 199 f"It is possible that this recipe is not Conan 2.0 ready\n" \ 200 "If the recipe comes from ConanCenter, report it at https://github.com/conan-io/conan-center-index/issues\n" \ 201 "If it is your recipe, check if it is updated to 2.0\n" \ 202 "*********************************************************\n" 203 ConanOutput().writeln(error, fg=Color.BRIGHT_MAGENTA) 204 205 @staticmethod 206 def exception_exit_error(exception): 207 output = ConanOutput() 208 if exception is None: 209 return SUCCESS 210 if isinstance(exception, ConanInvalidConfiguration): 211 output.error(exception) 212 return ERROR_INVALID_CONFIGURATION 213 if isinstance(exception, ConanException): 214 output.error(exception) 215 return ERROR_GENERAL 216 if isinstance(exception, SystemExit): 217 if exception.code != 0: 218 output.error("Exiting with code: %d" % exception.code) 219 return exception.code 220 221 assert isinstance(exception, Exception) 222 output.error(traceback.format_exc()) 223 msg = exception_message_safe(exception) 224 output.error(msg) 225 return ERROR_UNEXPECTED 226 227 228 def main(args): 229 """ main entry point of the conan application, using a Command to 230 parse parameters 231 232 Exit codes for conan command: 233 234 0: Success (done) 235 1: General ConanException error (done) 236 2: Migration error 237 3: Ctrl+C 238 4: Ctrl+Break 239 5: SIGTERM 240 6: Invalid configuration (done) 241 """ 242 243 try: 244 conan_api = ConanAPI() 245 except ConanMigrationError: # Error migrating 246 sys.exit(ERROR_MIGRATION) 247 except ConanException as e: 248 sys.stderr.write("Error in Conan initialization: {}".format(e)) 249 sys.exit(ERROR_GENERAL) 250 251 def ctrl_c_handler(_, __): 252 print('You pressed Ctrl+C!') 253 sys.exit(USER_CTRL_C) 254 255 def sigterm_handler(_, __): 256 print('Received SIGTERM!') 257 sys.exit(ERROR_SIGTERM) 258 259 def ctrl_break_handler(_, __): 260 print('You pressed Ctrl+Break!') 261 sys.exit(USER_CTRL_BREAK) 262 263 signal.signal(signal.SIGINT, ctrl_c_handler) 264 signal.signal(signal.SIGTERM, sigterm_handler) 265 266 if sys.platform == 'win32': 267 signal.signal(signal.SIGBREAK, ctrl_break_handler) 268 269 cli = Cli(conan_api) 270 error = SUCCESS 271 try: 272 cli.run(args) 273 except BaseException as e: 274 error = cli.exception_exit_error(e) 275 sys.exit(error) 276 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/conan/cli/cli.py b/conan/cli/cli.py --- a/conan/cli/cli.py +++ b/conan/cli/cli.py @@ -191,16 +191,6 @@ "If it is your recipe, check if it is updated to 2.0\n" \ "*********************************************************\n" ConanOutput().writeln(error, fg=Color.BRIGHT_MAGENTA) - result = re.search(r"(.*): Error in build\(\) method, line", message) - if result: - pkg = result.group(1) - error = "*********************************************************\n" \ - f"Recipe '{pkg}' cannot build its binary\n" \ - f"It is possible that this recipe is not Conan 2.0 ready\n" \ - "If the recipe comes from ConanCenter, report it at https://github.com/conan-io/conan-center-index/issues\n" \ - "If it is your recipe, check if it is updated to 2.0\n" \ - "*********************************************************\n" - ConanOutput().writeln(error, fg=Color.BRIGHT_MAGENTA) @staticmethod def exception_exit_error(exception):
{"golden_diff": "diff --git a/conan/cli/cli.py b/conan/cli/cli.py\n--- a/conan/cli/cli.py\n+++ b/conan/cli/cli.py\n@@ -191,16 +191,6 @@\n \"If it is your recipe, check if it is updated to 2.0\\n\" \\\n \"*********************************************************\\n\"\n ConanOutput().writeln(error, fg=Color.BRIGHT_MAGENTA)\n- result = re.search(r\"(.*): Error in build\\(\\) method, line\", message)\n- if result:\n- pkg = result.group(1)\n- error = \"*********************************************************\\n\" \\\n- f\"Recipe '{pkg}' cannot build its binary\\n\" \\\n- f\"It is possible that this recipe is not Conan 2.0 ready\\n\" \\\n- \"If the recipe comes from ConanCenter, report it at https://github.com/conan-io/conan-center-index/issues\\n\" \\\n- \"If it is your recipe, check if it is updated to 2.0\\n\" \\\n- \"*********************************************************\\n\"\n- ConanOutput().writeln(error, fg=Color.BRIGHT_MAGENTA)\n \n @staticmethod\n def exception_exit_error(exception):\n", "issue": "[feature] Supress Conan 2 compatibility message\n### What is your suggestion?\r\n\r\nWhen a recipe fails to build Conan 2 will print a migration note regarding v2 compatibility:\r\n\r\n```\r\n*********************************************************\r\nRecipe 'conanfile.py (...)' cannot build its binary\r\nIt is possible that this recipe is not Conan 2.0 ready\r\nIf the recipe comes from ConanCenter check: https://conan.io/cci-v2.html\r\nIf it is your recipe, check if it is updated to 2.0\r\n*********************************************************\r\n```\r\n\r\nThis is confusing our developers though because they assume a Conan issue when in fact its a good old compiler failure right above that flashy message. Looking at the code I found now way to disable this - is there any plans to offer an option for that?\r\n\r\n### Have you read the CONTRIBUTING guide?\r\n\r\n- [X] I've read the CONTRIBUTING guide\n", "before_files": [{"content": "import importlib\nimport os\nimport pkgutil\nimport re\nimport signal\nimport sys\nimport textwrap\nimport traceback\nfrom collections import defaultdict\nfrom difflib import get_close_matches\nfrom inspect import getmembers\n\nfrom conan.api.conan_api import ConanAPI\nfrom conan.api.output import ConanOutput, Color, cli_out_write, LEVEL_TRACE\nfrom conan.cli.command import ConanSubCommand\nfrom conan.cli.exit_codes import SUCCESS, ERROR_MIGRATION, ERROR_GENERAL, USER_CTRL_C, \\\n ERROR_SIGTERM, USER_CTRL_BREAK, ERROR_INVALID_CONFIGURATION, ERROR_UNEXPECTED\nfrom conan.internal.cache.home_paths import HomePaths\nfrom conans import __version__ as client_version\nfrom conan.errors import ConanException, ConanInvalidConfiguration, ConanMigrationError\nfrom conans.util.files import exception_message_safe\n\n\nclass Cli:\n \"\"\"A single command of the conan application, with all the first level commands. Manages the\n parsing of parameters and delegates functionality to the conan python api. It can also show the\n help of the tool.\n \"\"\"\n\n def __init__(self, conan_api):\n assert isinstance(conan_api, ConanAPI), \\\n \"Expected 'Conan' type, got '{}'\".format(type(conan_api))\n self._conan_api = conan_api\n self._groups = defaultdict(list)\n self._commands = {}\n\n def _add_commands(self):\n conan_commands_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"commands\")\n for module in pkgutil.iter_modules([conan_commands_path]):\n module_name = module[1]\n self._add_command(\"conan.cli.commands.{}\".format(module_name), module_name)\n\n custom_commands_path = HomePaths(self._conan_api.cache_folder).custom_commands_path\n if not os.path.isdir(custom_commands_path):\n return\n\n sys.path.append(custom_commands_path)\n for module in pkgutil.iter_modules([custom_commands_path]):\n module_name = module[1]\n if module_name.startswith(\"cmd_\"):\n try:\n self._add_command(module_name, module_name.replace(\"cmd_\", \"\"))\n except Exception as e:\n ConanOutput().error(\"Error loading custom command \"\n \"'{}.py': {}\".format(module_name, e))\n # layers\n for folder in os.listdir(custom_commands_path):\n layer_folder = os.path.join(custom_commands_path, folder)\n sys.path.append(layer_folder)\n if not os.path.isdir(layer_folder):\n continue\n for module in pkgutil.iter_modules([layer_folder]):\n module_name = module[1]\n if module_name.startswith(\"cmd_\"):\n module_path = f\"{folder}.{module_name}\"\n try:\n self._add_command(module_path, module_name.replace(\"cmd_\", \"\"),\n package=folder)\n except Exception as e:\n ConanOutput().error(f\"Error loading custom command {module_path}: {e}\")\n\n def _add_command(self, import_path, method_name, package=None):\n try:\n imported_module = importlib.import_module(import_path)\n command_wrapper = getattr(imported_module, method_name)\n if command_wrapper.doc:\n name = f\"{package}:{command_wrapper.name}\" if package else command_wrapper.name\n self._commands[name] = command_wrapper\n self._groups[command_wrapper.group].append(name)\n for name, value in getmembers(imported_module):\n if isinstance(value, ConanSubCommand):\n if name.startswith(\"{}_\".format(method_name)):\n command_wrapper.add_subcommand(value)\n else:\n raise ConanException(\"The name for the subcommand method should \"\n \"begin with the main command name + '_'. \"\n \"i.e. {}_<subcommand_name>\".format(method_name))\n except AttributeError:\n raise ConanException(\"There is no {} method defined in {}\".format(method_name,\n import_path))\n\n def _print_similar(self, command):\n \"\"\" Looks for similar commands and prints them if found.\n \"\"\"\n output = ConanOutput()\n matches = get_close_matches(\n word=command, possibilities=self._commands.keys(), n=5, cutoff=0.75)\n\n if len(matches) == 0:\n return\n\n if len(matches) > 1:\n output.info(\"The most similar commands are\")\n else:\n output.info(\"The most similar command is\")\n\n for match in matches:\n output.info(\" %s\" % match)\n\n output.writeln(\"\")\n\n def _output_help_cli(self):\n \"\"\"\n Prints a summary of all commands.\n \"\"\"\n max_len = max((len(c) for c in self._commands)) + 1\n line_format = '{{: <{}}}'.format(max_len)\n\n for group_name, comm_names in sorted(self._groups.items()):\n cli_out_write(\"\\n\" + group_name + \" commands\", Color.BRIGHT_MAGENTA)\n for name in comm_names:\n # future-proof way to ensure tabular formatting\n cli_out_write(line_format.format(name), Color.GREEN, endline=\"\")\n\n # Help will be all the lines up to the first empty one\n docstring_lines = self._commands[name].doc.split('\\n')\n start = False\n data = []\n for line in docstring_lines:\n line = line.strip()\n if not line:\n if start:\n break\n start = True\n continue\n data.append(line)\n\n txt = textwrap.fill(' '.join(data), 80, subsequent_indent=\" \" * (max_len + 2))\n cli_out_write(txt)\n\n cli_out_write(\"\")\n cli_out_write('Type \"conan <command> -h\" for help', Color.BRIGHT_MAGENTA)\n\n def run(self, *args):\n \"\"\" Entry point for executing commands, dispatcher to class\n methods\n \"\"\"\n output = ConanOutput()\n self._add_commands()\n try:\n command_argument = args[0][0]\n except IndexError: # No parameters\n self._output_help_cli()\n return\n try:\n command = self._commands[command_argument]\n except KeyError as exc:\n if command_argument in [\"-v\", \"--version\"]:\n cli_out_write(\"Conan version %s\" % client_version)\n return\n\n if command_argument in [\"-h\", \"--help\"]:\n self._output_help_cli()\n return\n\n output.info(\"'%s' is not a Conan command. See 'conan --help'.\" % command_argument)\n output.info(\"\")\n self._print_similar(command_argument)\n raise ConanException(\"Unknown command %s\" % str(exc))\n\n try:\n command.run(self._conan_api, args[0][1:])\n except Exception as e:\n # must be a local-import to get updated value\n if ConanOutput.level_allowed(LEVEL_TRACE):\n print(traceback.format_exc(), file=sys.stderr)\n self._conan2_migrate_recipe_msg(e)\n raise\n\n @staticmethod\n def _conan2_migrate_recipe_msg(exception):\n message = str(exception)\n\n result = re.search(r\"Package '(.*)' not resolved: .*: Cannot load recipe\", message)\n if result:\n pkg = result.group(1)\n error = \"*********************************************************\\n\" \\\n f\"Recipe '{pkg}' seems broken.\\n\" \\\n f\"It is possible that this recipe is not Conan 2.0 ready\\n\"\\\n \"If the recipe comes from ConanCenter, report it at https://github.com/conan-io/conan-center-index/issues\\n\" \\\n \"If it is your recipe, check if it is updated to 2.0\\n\" \\\n \"*********************************************************\\n\"\n ConanOutput().writeln(error, fg=Color.BRIGHT_MAGENTA)\n result = re.search(r\"(.*): Error in build\\(\\) method, line\", message)\n if result:\n pkg = result.group(1)\n error = \"*********************************************************\\n\" \\\n f\"Recipe '{pkg}' cannot build its binary\\n\" \\\n f\"It is possible that this recipe is not Conan 2.0 ready\\n\" \\\n \"If the recipe comes from ConanCenter, report it at https://github.com/conan-io/conan-center-index/issues\\n\" \\\n \"If it is your recipe, check if it is updated to 2.0\\n\" \\\n \"*********************************************************\\n\"\n ConanOutput().writeln(error, fg=Color.BRIGHT_MAGENTA)\n\n @staticmethod\n def exception_exit_error(exception):\n output = ConanOutput()\n if exception is None:\n return SUCCESS\n if isinstance(exception, ConanInvalidConfiguration):\n output.error(exception)\n return ERROR_INVALID_CONFIGURATION\n if isinstance(exception, ConanException):\n output.error(exception)\n return ERROR_GENERAL\n if isinstance(exception, SystemExit):\n if exception.code != 0:\n output.error(\"Exiting with code: %d\" % exception.code)\n return exception.code\n\n assert isinstance(exception, Exception)\n output.error(traceback.format_exc())\n msg = exception_message_safe(exception)\n output.error(msg)\n return ERROR_UNEXPECTED\n\n\ndef main(args):\n \"\"\" main entry point of the conan application, using a Command to\n parse parameters\n\n Exit codes for conan command:\n\n 0: Success (done)\n 1: General ConanException error (done)\n 2: Migration error\n 3: Ctrl+C\n 4: Ctrl+Break\n 5: SIGTERM\n 6: Invalid configuration (done)\n \"\"\"\n\n try:\n conan_api = ConanAPI()\n except ConanMigrationError: # Error migrating\n sys.exit(ERROR_MIGRATION)\n except ConanException as e:\n sys.stderr.write(\"Error in Conan initialization: {}\".format(e))\n sys.exit(ERROR_GENERAL)\n\n def ctrl_c_handler(_, __):\n print('You pressed Ctrl+C!')\n sys.exit(USER_CTRL_C)\n\n def sigterm_handler(_, __):\n print('Received SIGTERM!')\n sys.exit(ERROR_SIGTERM)\n\n def ctrl_break_handler(_, __):\n print('You pressed Ctrl+Break!')\n sys.exit(USER_CTRL_BREAK)\n\n signal.signal(signal.SIGINT, ctrl_c_handler)\n signal.signal(signal.SIGTERM, sigterm_handler)\n\n if sys.platform == 'win32':\n signal.signal(signal.SIGBREAK, ctrl_break_handler)\n\n cli = Cli(conan_api)\n error = SUCCESS\n try:\n cli.run(args)\n except BaseException as e:\n error = cli.exception_exit_error(e)\n sys.exit(error)\n", "path": "conan/cli/cli.py"}], "after_files": [{"content": "import importlib\nimport os\nimport pkgutil\nimport re\nimport signal\nimport sys\nimport textwrap\nimport traceback\nfrom collections import defaultdict\nfrom difflib import get_close_matches\nfrom inspect import getmembers\n\nfrom conan.api.conan_api import ConanAPI\nfrom conan.api.output import ConanOutput, Color, cli_out_write, LEVEL_TRACE\nfrom conan.cli.command import ConanSubCommand\nfrom conan.cli.exit_codes import SUCCESS, ERROR_MIGRATION, ERROR_GENERAL, USER_CTRL_C, \\\n ERROR_SIGTERM, USER_CTRL_BREAK, ERROR_INVALID_CONFIGURATION, ERROR_UNEXPECTED\nfrom conan.internal.cache.home_paths import HomePaths\nfrom conans import __version__ as client_version\nfrom conan.errors import ConanException, ConanInvalidConfiguration, ConanMigrationError\nfrom conans.util.files import exception_message_safe\n\n\nclass Cli:\n \"\"\"A single command of the conan application, with all the first level commands. Manages the\n parsing of parameters and delegates functionality to the conan python api. It can also show the\n help of the tool.\n \"\"\"\n\n def __init__(self, conan_api):\n assert isinstance(conan_api, ConanAPI), \\\n \"Expected 'Conan' type, got '{}'\".format(type(conan_api))\n self._conan_api = conan_api\n self._groups = defaultdict(list)\n self._commands = {}\n\n def _add_commands(self):\n conan_commands_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"commands\")\n for module in pkgutil.iter_modules([conan_commands_path]):\n module_name = module[1]\n self._add_command(\"conan.cli.commands.{}\".format(module_name), module_name)\n\n custom_commands_path = HomePaths(self._conan_api.cache_folder).custom_commands_path\n if not os.path.isdir(custom_commands_path):\n return\n\n sys.path.append(custom_commands_path)\n for module in pkgutil.iter_modules([custom_commands_path]):\n module_name = module[1]\n if module_name.startswith(\"cmd_\"):\n try:\n self._add_command(module_name, module_name.replace(\"cmd_\", \"\"))\n except Exception as e:\n ConanOutput().error(\"Error loading custom command \"\n \"'{}.py': {}\".format(module_name, e))\n # layers\n for folder in os.listdir(custom_commands_path):\n layer_folder = os.path.join(custom_commands_path, folder)\n sys.path.append(layer_folder)\n if not os.path.isdir(layer_folder):\n continue\n for module in pkgutil.iter_modules([layer_folder]):\n module_name = module[1]\n if module_name.startswith(\"cmd_\"):\n module_path = f\"{folder}.{module_name}\"\n try:\n self._add_command(module_path, module_name.replace(\"cmd_\", \"\"),\n package=folder)\n except Exception as e:\n ConanOutput().error(f\"Error loading custom command {module_path}: {e}\")\n\n def _add_command(self, import_path, method_name, package=None):\n try:\n imported_module = importlib.import_module(import_path)\n command_wrapper = getattr(imported_module, method_name)\n if command_wrapper.doc:\n name = f\"{package}:{command_wrapper.name}\" if package else command_wrapper.name\n self._commands[name] = command_wrapper\n self._groups[command_wrapper.group].append(name)\n for name, value in getmembers(imported_module):\n if isinstance(value, ConanSubCommand):\n if name.startswith(\"{}_\".format(method_name)):\n command_wrapper.add_subcommand(value)\n else:\n raise ConanException(\"The name for the subcommand method should \"\n \"begin with the main command name + '_'. \"\n \"i.e. {}_<subcommand_name>\".format(method_name))\n except AttributeError:\n raise ConanException(\"There is no {} method defined in {}\".format(method_name,\n import_path))\n\n def _print_similar(self, command):\n \"\"\" Looks for similar commands and prints them if found.\n \"\"\"\n output = ConanOutput()\n matches = get_close_matches(\n word=command, possibilities=self._commands.keys(), n=5, cutoff=0.75)\n\n if len(matches) == 0:\n return\n\n if len(matches) > 1:\n output.info(\"The most similar commands are\")\n else:\n output.info(\"The most similar command is\")\n\n for match in matches:\n output.info(\" %s\" % match)\n\n output.writeln(\"\")\n\n def _output_help_cli(self):\n \"\"\"\n Prints a summary of all commands.\n \"\"\"\n max_len = max((len(c) for c in self._commands)) + 1\n line_format = '{{: <{}}}'.format(max_len)\n\n for group_name, comm_names in sorted(self._groups.items()):\n cli_out_write(\"\\n\" + group_name + \" commands\", Color.BRIGHT_MAGENTA)\n for name in comm_names:\n # future-proof way to ensure tabular formatting\n cli_out_write(line_format.format(name), Color.GREEN, endline=\"\")\n\n # Help will be all the lines up to the first empty one\n docstring_lines = self._commands[name].doc.split('\\n')\n start = False\n data = []\n for line in docstring_lines:\n line = line.strip()\n if not line:\n if start:\n break\n start = True\n continue\n data.append(line)\n\n txt = textwrap.fill(' '.join(data), 80, subsequent_indent=\" \" * (max_len + 2))\n cli_out_write(txt)\n\n cli_out_write(\"\")\n cli_out_write('Type \"conan <command> -h\" for help', Color.BRIGHT_MAGENTA)\n\n def run(self, *args):\n \"\"\" Entry point for executing commands, dispatcher to class\n methods\n \"\"\"\n output = ConanOutput()\n self._add_commands()\n try:\n command_argument = args[0][0]\n except IndexError: # No parameters\n self._output_help_cli()\n return\n try:\n command = self._commands[command_argument]\n except KeyError as exc:\n if command_argument in [\"-v\", \"--version\"]:\n cli_out_write(\"Conan version %s\" % client_version)\n return\n\n if command_argument in [\"-h\", \"--help\"]:\n self._output_help_cli()\n return\n\n output.info(\"'%s' is not a Conan command. See 'conan --help'.\" % command_argument)\n output.info(\"\")\n self._print_similar(command_argument)\n raise ConanException(\"Unknown command %s\" % str(exc))\n\n try:\n command.run(self._conan_api, args[0][1:])\n except Exception as e:\n # must be a local-import to get updated value\n if ConanOutput.level_allowed(LEVEL_TRACE):\n print(traceback.format_exc(), file=sys.stderr)\n self._conan2_migrate_recipe_msg(e)\n raise\n\n @staticmethod\n def _conan2_migrate_recipe_msg(exception):\n message = str(exception)\n\n result = re.search(r\"Package '(.*)' not resolved: .*: Cannot load recipe\", message)\n if result:\n pkg = result.group(1)\n error = \"*********************************************************\\n\" \\\n f\"Recipe '{pkg}' seems broken.\\n\" \\\n f\"It is possible that this recipe is not Conan 2.0 ready\\n\"\\\n \"If the recipe comes from ConanCenter, report it at https://github.com/conan-io/conan-center-index/issues\\n\" \\\n \"If it is your recipe, check if it is updated to 2.0\\n\" \\\n \"*********************************************************\\n\"\n ConanOutput().writeln(error, fg=Color.BRIGHT_MAGENTA)\n\n @staticmethod\n def exception_exit_error(exception):\n output = ConanOutput()\n if exception is None:\n return SUCCESS\n if isinstance(exception, ConanInvalidConfiguration):\n output.error(exception)\n return ERROR_INVALID_CONFIGURATION\n if isinstance(exception, ConanException):\n output.error(exception)\n return ERROR_GENERAL\n if isinstance(exception, SystemExit):\n if exception.code != 0:\n output.error(\"Exiting with code: %d\" % exception.code)\n return exception.code\n\n assert isinstance(exception, Exception)\n output.error(traceback.format_exc())\n msg = exception_message_safe(exception)\n output.error(msg)\n return ERROR_UNEXPECTED\n\n\ndef main(args):\n \"\"\" main entry point of the conan application, using a Command to\n parse parameters\n\n Exit codes for conan command:\n\n 0: Success (done)\n 1: General ConanException error (done)\n 2: Migration error\n 3: Ctrl+C\n 4: Ctrl+Break\n 5: SIGTERM\n 6: Invalid configuration (done)\n \"\"\"\n\n try:\n conan_api = ConanAPI()\n except ConanMigrationError: # Error migrating\n sys.exit(ERROR_MIGRATION)\n except ConanException as e:\n sys.stderr.write(\"Error in Conan initialization: {}\".format(e))\n sys.exit(ERROR_GENERAL)\n\n def ctrl_c_handler(_, __):\n print('You pressed Ctrl+C!')\n sys.exit(USER_CTRL_C)\n\n def sigterm_handler(_, __):\n print('Received SIGTERM!')\n sys.exit(ERROR_SIGTERM)\n\n def ctrl_break_handler(_, __):\n print('You pressed Ctrl+Break!')\n sys.exit(USER_CTRL_BREAK)\n\n signal.signal(signal.SIGINT, ctrl_c_handler)\n signal.signal(signal.SIGTERM, sigterm_handler)\n\n if sys.platform == 'win32':\n signal.signal(signal.SIGBREAK, ctrl_break_handler)\n\n cli = Cli(conan_api)\n error = SUCCESS\n try:\n cli.run(args)\n except BaseException as e:\n error = cli.exception_exit_error(e)\n sys.exit(error)\n", "path": "conan/cli/cli.py"}]}
3,405
256
gh_patches_debug_25555
rasdani/github-patches
git_diff
nipy__nipype-3444
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- TypeError: '<' not supported between instances of 'str' and 'int' ### Summary Hi, I have been following the tutorials and official documentation for nipype on registering two t1w MRIs using ANTs. 1) https://nipype.readthedocs.io/en/latest/users/examples/smri_ants_registration.html 2) https://nipype.readthedocs.io/en/1.0.3/interfaces/generated/interfaces.ants/registration.html However after following the tutorials and writing the script. When I initialize the registration `reg = Registration()` the script exits with error `TypeError: '<' not supported between instances of 'str' and 'int'`. I have not been able to find anything conclusive on this issue. I found a similar issue in this repository but it was not clear how to solve the problem (https://github.com/nipy/nipype/issues/3232). ### Actual behavior Initialization of Registration via `reg = Registration()` results in script exiting with error `TypeError: '<' not supported between instances of 'str' and 'int'` ### Expected behavior Script runs without throwing type error. ### How to replicate the behavior Initialize registration. The script stops. ### Script/Workflow details ``` import numpy as np from nipype.interfaces.ants import Registration from nipype.interfaces.ants import ANTS #establish paths template = '/home/sungurea/faisal-sandbox/students/sungurea/Part2-ScaledMR/MNI305/average305_t1_tal_lin.nii' subject = '/home/sungurea/faisal-sandbox/students/sungurea/Part1_RawMR/UK_BIOBANK_IMAGES_1000/1000106.mgz' #Registration reg = Registration() reg.inputs.fixed_image = template reg.inputs.moving_image = subject reg.inputs.output_transform_prefix = 'thisTransform' reg.inputs.output_warped_image = 'transformed.nii.gz' reg.inputs.output_transform_prefix = "output_" reg.inputs.transforms = ['Translation', 'Rigid', 'Affine', 'SyN'] reg.inputs.transform_parameters = [(0.1,), (0.1,), (0.1,), (0.2, 3.0, 0.0)] reg.inputs.number_of_iterations = ([[10000, 111110, 11110]] * 3 + [[100, 50, 30]]) reg.inputs.dimension = 3 reg.inputs.write_composite_transform = True reg.inputs.collapse_output_transforms = False reg.inputs.metric = ['Mattes'] * 3 + [['Mattes', 'CC']] reg.inputs.metric_weight = [1] * 3 + [[0.5, 0.5]] reg.inputs.radius_or_number_of_bins = [32] * 3 + [[32, 4]] reg.inputs.sampling_strategy = ['Regular'] * 3 + [[None, None]] reg.inputs.sampling_percentage = [0.3] * 3 + [[None, None]] reg.inputs.convergence_threshold = [1.e-8] * 3 + [-0.01] reg.inputs.convergence_window_size = [20] * 3 + [5] reg.inputs.smoothing_sigmas = [[4, 2, 1]] * 3 + [[1, 0.5, 0]] reg.inputs.sigma_units = ['vox'] * 4 reg.inputs.shrink_factors = [[6, 4, 2]] + [[3, 2, 1]] * 2 + [[4, 2, 1]] reg.inputs.use_estimate_learning_rate_once = [True] * 4 reg.inputs.use_histogram_matching = [False] * 3 + [True] reg.inputs.initial_moving_transform_com = True print(reg.cmdline) reg.run() ``` ### Traceback ``` Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/home/sungurea/ENV/lib/python3.9/site-packages/nipype/interfaces/ants/registration.py", line 1009, in __init__ super(Registration, self).__init__(**inputs) File "/home/sungurea/ENV/lib/python3.9/site-packages/nipype/interfaces/ants/base.py", line 73, in __init__ super(ANTSCommand, self).__init__(**inputs) File "/home/sungurea/ENV/lib/python3.9/site-packages/nipype/interfaces/base/core.py", line 627, in __init__ super(CommandLine, self).__init__(**inputs) File "/home/sungurea/ENV/lib/python3.9/site-packages/nipype/interfaces/base/core.py", line 197, in __init__ unavailable_traits = self._check_version_requirements( File "/home/sungurea/ENV/lib/python3.9/site-packages/nipype/interfaces/base/core.py", line 295, in _check_version_requirements if names and self.version: File "/home/sungurea/ENV/lib/python3.9/site-packages/nipype/interfaces/ants/base.py", line 121, in version return Info.version() File "/home/sungurea/ENV/lib/python3.9/site-packages/nipype/interfaces/base/core.py", line 1067, in version klass._version = klass.parse_version(raw_info) File "/home/sungurea/ENV/lib/python3.9/site-packages/nipype/interfaces/ants/base.py", line 47, in parse_version if "post" in v_string and LooseVersion(v_string) >= LooseVersion( File "/cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/python/3.9.6/lib/python3.9/distutils/version.py", line 70, in __ge__ c = self._cmp(other) File "/cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/python/3.9.6/lib/python3.9/distutils/version.py", line 341, in _cmp if self.version < other.version: TypeError: '<' not supported between instances of 'str' and 'int' ``` ### Platform details: <!-- Please run the following code from your shell and place the output between the triple ticks, below. python -c "import nipype; from pprint import pprint; pprint(nipype.get_info())" --> ``` {'commit_hash': 'b385720', 'commit_source': 'installation', 'networkx_version': '2.6.3', 'nibabel_version': '3.2.1', 'nipype_version': '1.7.0', 'numpy_version': '1.21.0', 'pkg_path': '/home/sungurea/ENV/lib/python3.9/site-packages/nipype', 'scipy_version': '1.7.0', 'sys_executable': '/home/sungurea/ENV/bin/python', 'sys_platform': 'linux', 'sys_version': '3.9.6 (default, Jul 12 2021, 18:23:59) \n[GCC 9.3.0]', 'traits_version': '6.2.0'} ``` ### Execution environment python 3.9 ANTs: - ANTs Version: v2.3.5.post79-gdb98de3 - Compiled: Jan 2 2022 15:47:47 Choose one Container [Venv] My python environment inside container [3.9] My python environment outside container --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `nipype/interfaces/ants/base.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- 3 # vi: set ft=python sts=4 ts=4 sw=4 et: 4 """The ants module provides basic functions for interfacing with ANTS tools.""" 5 import os 6 7 # Local imports 8 from ... import logging, LooseVersion 9 from ..base import CommandLine, CommandLineInputSpec, traits, isdefined, PackageInfo 10 11 iflogger = logging.getLogger("nipype.interface") 12 13 # -Using -1 gives primary responsibilty to ITKv4 to do the correct 14 # thread limitings. 15 # -Using 1 takes a very conservative approach to avoid overloading 16 # the computer (when running MultiProc) by forcing everything to 17 # single threaded. This can be a severe penalty for registration 18 # performance. 19 LOCAL_DEFAULT_NUMBER_OF_THREADS = 1 20 # -Using NSLOTS has the same behavior as ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS 21 # as long as ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS is not set. Otherwise 22 # ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS takes precidence. 23 # This behavior states that you the user explicitly specifies 24 # num_threads, then respect that no matter what SGE tries to limit. 25 PREFERED_ITKv4_THREAD_LIMIT_VARIABLE = "NSLOTS" 26 ALT_ITKv4_THREAD_LIMIT_VARIABLE = "ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS" 27 28 29 class Info(PackageInfo): 30 version_cmd = ( 31 os.path.join(os.getenv("ANTSPATH", ""), "antsRegistration") + " --version" 32 ) 33 34 @staticmethod 35 def parse_version(raw_info): 36 for line in raw_info.splitlines(): 37 if line.startswith("ANTs Version: "): 38 v_string = line.split()[2] 39 break 40 else: 41 return None 42 43 # -githash may or may not be appended 44 v_string = v_string.split("-")[0] 45 46 # 2.2.0-equivalent version string 47 if "post" in v_string and LooseVersion(v_string) >= LooseVersion( 48 "2.1.0.post789" 49 ): 50 return "2.2.0" 51 else: 52 return ".".join(v_string.split(".")[:3]) 53 54 55 class ANTSCommandInputSpec(CommandLineInputSpec): 56 """Base Input Specification for all ANTS Commands""" 57 58 num_threads = traits.Int( 59 LOCAL_DEFAULT_NUMBER_OF_THREADS, 60 usedefault=True, 61 nohash=True, 62 desc="Number of ITK threads to use", 63 ) 64 65 66 class ANTSCommand(CommandLine): 67 """Base class for ANTS interfaces""" 68 69 input_spec = ANTSCommandInputSpec 70 _num_threads = LOCAL_DEFAULT_NUMBER_OF_THREADS 71 72 def __init__(self, **inputs): 73 super(ANTSCommand, self).__init__(**inputs) 74 self.inputs.on_trait_change(self._num_threads_update, "num_threads") 75 76 if not isdefined(self.inputs.num_threads): 77 self.inputs.num_threads = self._num_threads 78 else: 79 self._num_threads_update() 80 81 def _num_threads_update(self): 82 self._num_threads = self.inputs.num_threads 83 # ONLY SET THE ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS if requested 84 # by the end user. The default setting did not allow for 85 # overwriting the default values. 86 # In ITKv4 (the version used for all ANTS programs), ITK respects 87 # the SGE controlled $NSLOTS environmental variable. 88 # If user specifies -1, then that indicates that the system 89 # default behavior should be the one specified by ITKv4 rules 90 # (i.e. respect SGE $NSLOTS or environmental variables of threads, or 91 # user environmental settings) 92 if self.inputs.num_threads == -1: 93 if ALT_ITKv4_THREAD_LIMIT_VARIABLE in self.inputs.environ: 94 del self.inputs.environ[ALT_ITKv4_THREAD_LIMIT_VARIABLE] 95 if PREFERED_ITKv4_THREAD_LIMIT_VARIABLE in self.inputs.environ: 96 del self.inputs.environ[PREFERED_ITKv4_THREAD_LIMIT_VARIABLE] 97 else: 98 self.inputs.environ.update( 99 {PREFERED_ITKv4_THREAD_LIMIT_VARIABLE: "%s" % self.inputs.num_threads} 100 ) 101 102 @staticmethod 103 def _format_xarray(val): 104 """Convenience method for converting input arrays [1,2,3] to 105 commandline format '1x2x3'""" 106 return "x".join([str(x) for x in val]) 107 108 @classmethod 109 def set_default_num_threads(cls, num_threads): 110 """Set the default number of threads for ITK calls 111 112 This method is used to set the default number of ITK threads for all 113 the ANTS interfaces. However, setting this will not update the output 114 type for any existing instances. For these, assign the 115 <instance>.inputs.num_threads 116 """ 117 cls._num_threads = num_threads 118 119 @property 120 def version(self): 121 return Info.version() 122 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/nipype/interfaces/ants/base.py b/nipype/interfaces/ants/base.py --- a/nipype/interfaces/ants/base.py +++ b/nipype/interfaces/ants/base.py @@ -3,9 +3,10 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """The ants module provides basic functions for interfacing with ANTS tools.""" import os +from packaging.version import Version, parse # Local imports -from ... import logging, LooseVersion +from ... import logging from ..base import CommandLine, CommandLineInputSpec, traits, isdefined, PackageInfo iflogger = logging.getLogger("nipype.interface") @@ -43,13 +44,16 @@ # -githash may or may not be appended v_string = v_string.split("-")[0] - # 2.2.0-equivalent version string - if "post" in v_string and LooseVersion(v_string) >= LooseVersion( - "2.1.0.post789" - ): - return "2.2.0" - else: - return ".".join(v_string.split(".")[:3]) + version = parse(v_string) + + # Known mislabeled versions + if version.is_postrelease: + if version.base_version == "2.1.0" and version.post >= 789: + return "2.2.0" + + # Unless we know of a specific reason to re-version, we will + # treat the base version (before pre/post/dev) as authoritative + return version.base_version class ANTSCommandInputSpec(CommandLineInputSpec):
{"golden_diff": "diff --git a/nipype/interfaces/ants/base.py b/nipype/interfaces/ants/base.py\n--- a/nipype/interfaces/ants/base.py\n+++ b/nipype/interfaces/ants/base.py\n@@ -3,9 +3,10 @@\n # vi: set ft=python sts=4 ts=4 sw=4 et:\n \"\"\"The ants module provides basic functions for interfacing with ANTS tools.\"\"\"\n import os\n+from packaging.version import Version, parse\n \n # Local imports\n-from ... import logging, LooseVersion\n+from ... import logging\n from ..base import CommandLine, CommandLineInputSpec, traits, isdefined, PackageInfo\n \n iflogger = logging.getLogger(\"nipype.interface\")\n@@ -43,13 +44,16 @@\n # -githash may or may not be appended\n v_string = v_string.split(\"-\")[0]\n \n- # 2.2.0-equivalent version string\n- if \"post\" in v_string and LooseVersion(v_string) >= LooseVersion(\n- \"2.1.0.post789\"\n- ):\n- return \"2.2.0\"\n- else:\n- return \".\".join(v_string.split(\".\")[:3])\n+ version = parse(v_string)\n+\n+ # Known mislabeled versions\n+ if version.is_postrelease:\n+ if version.base_version == \"2.1.0\" and version.post >= 789:\n+ return \"2.2.0\"\n+\n+ # Unless we know of a specific reason to re-version, we will\n+ # treat the base version (before pre/post/dev) as authoritative\n+ return version.base_version\n \n \n class ANTSCommandInputSpec(CommandLineInputSpec):\n", "issue": "TypeError: '<' not supported between instances of 'str' and 'int'\n### Summary\r\nHi,\r\n\r\nI have been following the tutorials and official documentation for nipype on registering two t1w MRIs using ANTs. \r\n\r\n1) https://nipype.readthedocs.io/en/latest/users/examples/smri_ants_registration.html\r\n2) https://nipype.readthedocs.io/en/1.0.3/interfaces/generated/interfaces.ants/registration.html\r\n\r\nHowever after following the tutorials and writing the script. When I initialize the registration\r\n `reg = Registration()` the script exits with error `TypeError: '<' not supported between instances of 'str' and 'int'`. I have not been able to find anything conclusive on this issue. I found a similar issue in this repository but it was not clear how to solve the problem (https://github.com/nipy/nipype/issues/3232).\r\n### Actual behavior\r\nInitialization of Registration via `reg = Registration()` results in script exiting with error `TypeError: '<' not supported between instances of 'str' and 'int'`\r\n### Expected behavior\r\nScript runs without throwing type error.\r\n### How to replicate the behavior\r\nInitialize registration. The script stops. \r\n### Script/Workflow details\r\n```\r\nimport numpy as np\r\nfrom nipype.interfaces.ants import Registration\r\nfrom nipype.interfaces.ants import ANTS\r\n\r\n#establish paths\r\ntemplate = '/home/sungurea/faisal-sandbox/students/sungurea/Part2-ScaledMR/MNI305/average305_t1_tal_lin.nii'\r\nsubject = '/home/sungurea/faisal-sandbox/students/sungurea/Part1_RawMR/UK_BIOBANK_IMAGES_1000/1000106.mgz'\r\n\r\n#Registration\r\nreg = Registration()\r\n\r\nreg.inputs.fixed_image = template\r\nreg.inputs.moving_image = subject\r\nreg.inputs.output_transform_prefix = 'thisTransform'\r\nreg.inputs.output_warped_image = 'transformed.nii.gz'\r\nreg.inputs.output_transform_prefix = \"output_\"\r\nreg.inputs.transforms = ['Translation', 'Rigid', 'Affine', 'SyN']\r\nreg.inputs.transform_parameters = [(0.1,), (0.1,), (0.1,), (0.2, 3.0, 0.0)]\r\nreg.inputs.number_of_iterations = ([[10000, 111110, 11110]] * 3 +\r\n [[100, 50, 30]])\r\nreg.inputs.dimension = 3\r\nreg.inputs.write_composite_transform = True\r\nreg.inputs.collapse_output_transforms = False\r\nreg.inputs.metric = ['Mattes'] * 3 + [['Mattes', 'CC']]\r\nreg.inputs.metric_weight = [1] * 3 + [[0.5, 0.5]]\r\nreg.inputs.radius_or_number_of_bins = [32] * 3 + [[32, 4]]\r\nreg.inputs.sampling_strategy = ['Regular'] * 3 + [[None, None]]\r\nreg.inputs.sampling_percentage = [0.3] * 3 + [[None, None]]\r\nreg.inputs.convergence_threshold = [1.e-8] * 3 + [-0.01]\r\nreg.inputs.convergence_window_size = [20] * 3 + [5]\r\nreg.inputs.smoothing_sigmas = [[4, 2, 1]] * 3 + [[1, 0.5, 0]]\r\nreg.inputs.sigma_units = ['vox'] * 4\r\nreg.inputs.shrink_factors = [[6, 4, 2]] + [[3, 2, 1]] * 2 + [[4, 2, 1]]\r\nreg.inputs.use_estimate_learning_rate_once = [True] * 4\r\nreg.inputs.use_histogram_matching = [False] * 3 + [True]\r\nreg.inputs.initial_moving_transform_com = True\r\nprint(reg.cmdline)\r\nreg.run()\r\n```\r\n\r\n### Traceback\r\n```\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/sungurea/ENV/lib/python3.9/site-packages/nipype/interfaces/ants/registration.py\", line 1009, in __init__\r\n super(Registration, self).__init__(**inputs)\r\n File \"/home/sungurea/ENV/lib/python3.9/site-packages/nipype/interfaces/ants/base.py\", line 73, in __init__\r\n super(ANTSCommand, self).__init__(**inputs)\r\n File \"/home/sungurea/ENV/lib/python3.9/site-packages/nipype/interfaces/base/core.py\", line 627, in __init__\r\n super(CommandLine, self).__init__(**inputs)\r\n File \"/home/sungurea/ENV/lib/python3.9/site-packages/nipype/interfaces/base/core.py\", line 197, in __init__\r\n unavailable_traits = self._check_version_requirements(\r\n File \"/home/sungurea/ENV/lib/python3.9/site-packages/nipype/interfaces/base/core.py\", line 295, in _check_version_requirements\r\n if names and self.version:\r\n File \"/home/sungurea/ENV/lib/python3.9/site-packages/nipype/interfaces/ants/base.py\", line 121, in version\r\n return Info.version()\r\n File \"/home/sungurea/ENV/lib/python3.9/site-packages/nipype/interfaces/base/core.py\", line 1067, in version\r\n klass._version = klass.parse_version(raw_info)\r\n File \"/home/sungurea/ENV/lib/python3.9/site-packages/nipype/interfaces/ants/base.py\", line 47, in parse_version\r\n if \"post\" in v_string and LooseVersion(v_string) >= LooseVersion(\r\n File \"/cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/python/3.9.6/lib/python3.9/distutils/version.py\", line 70, in __ge__\r\n c = self._cmp(other)\r\n File \"/cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/python/3.9.6/lib/python3.9/distutils/version.py\", line 341, in _cmp\r\n if self.version < other.version:\r\nTypeError: '<' not supported between instances of 'str' and 'int'\r\n\r\n```\r\n### Platform details:\r\n\r\n<!-- Please run the following code from your shell and place the output between the triple ticks, below.\r\npython -c \"import nipype; from pprint import pprint; pprint(nipype.get_info())\"\r\n-->\r\n\r\n```\r\n{'commit_hash': 'b385720',\r\n 'commit_source': 'installation',\r\n 'networkx_version': '2.6.3',\r\n 'nibabel_version': '3.2.1',\r\n 'nipype_version': '1.7.0',\r\n 'numpy_version': '1.21.0',\r\n 'pkg_path': '/home/sungurea/ENV/lib/python3.9/site-packages/nipype',\r\n 'scipy_version': '1.7.0',\r\n 'sys_executable': '/home/sungurea/ENV/bin/python',\r\n 'sys_platform': 'linux',\r\n 'sys_version': '3.9.6 (default, Jul 12 2021, 18:23:59) \\n[GCC 9.3.0]',\r\n 'traits_version': '6.2.0'}\r\n```\r\n\r\n### Execution environment\r\npython 3.9\r\nANTs:\r\n- ANTs Version: v2.3.5.post79-gdb98de3\r\n- Compiled: Jan 2 2022 15:47:47\r\n\r\nChoose one\r\nContainer [Venv]\r\nMy python environment inside container [3.9]\r\nMy python environment outside container\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"The ants module provides basic functions for interfacing with ANTS tools.\"\"\"\nimport os\n\n# Local imports\nfrom ... import logging, LooseVersion\nfrom ..base import CommandLine, CommandLineInputSpec, traits, isdefined, PackageInfo\n\niflogger = logging.getLogger(\"nipype.interface\")\n\n# -Using -1 gives primary responsibilty to ITKv4 to do the correct\n# thread limitings.\n# -Using 1 takes a very conservative approach to avoid overloading\n# the computer (when running MultiProc) by forcing everything to\n# single threaded. This can be a severe penalty for registration\n# performance.\nLOCAL_DEFAULT_NUMBER_OF_THREADS = 1\n# -Using NSLOTS has the same behavior as ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS\n# as long as ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS is not set. Otherwise\n# ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS takes precidence.\n# This behavior states that you the user explicitly specifies\n# num_threads, then respect that no matter what SGE tries to limit.\nPREFERED_ITKv4_THREAD_LIMIT_VARIABLE = \"NSLOTS\"\nALT_ITKv4_THREAD_LIMIT_VARIABLE = \"ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS\"\n\n\nclass Info(PackageInfo):\n version_cmd = (\n os.path.join(os.getenv(\"ANTSPATH\", \"\"), \"antsRegistration\") + \" --version\"\n )\n\n @staticmethod\n def parse_version(raw_info):\n for line in raw_info.splitlines():\n if line.startswith(\"ANTs Version: \"):\n v_string = line.split()[2]\n break\n else:\n return None\n\n # -githash may or may not be appended\n v_string = v_string.split(\"-\")[0]\n\n # 2.2.0-equivalent version string\n if \"post\" in v_string and LooseVersion(v_string) >= LooseVersion(\n \"2.1.0.post789\"\n ):\n return \"2.2.0\"\n else:\n return \".\".join(v_string.split(\".\")[:3])\n\n\nclass ANTSCommandInputSpec(CommandLineInputSpec):\n \"\"\"Base Input Specification for all ANTS Commands\"\"\"\n\n num_threads = traits.Int(\n LOCAL_DEFAULT_NUMBER_OF_THREADS,\n usedefault=True,\n nohash=True,\n desc=\"Number of ITK threads to use\",\n )\n\n\nclass ANTSCommand(CommandLine):\n \"\"\"Base class for ANTS interfaces\"\"\"\n\n input_spec = ANTSCommandInputSpec\n _num_threads = LOCAL_DEFAULT_NUMBER_OF_THREADS\n\n def __init__(self, **inputs):\n super(ANTSCommand, self).__init__(**inputs)\n self.inputs.on_trait_change(self._num_threads_update, \"num_threads\")\n\n if not isdefined(self.inputs.num_threads):\n self.inputs.num_threads = self._num_threads\n else:\n self._num_threads_update()\n\n def _num_threads_update(self):\n self._num_threads = self.inputs.num_threads\n # ONLY SET THE ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS if requested\n # by the end user. The default setting did not allow for\n # overwriting the default values.\n # In ITKv4 (the version used for all ANTS programs), ITK respects\n # the SGE controlled $NSLOTS environmental variable.\n # If user specifies -1, then that indicates that the system\n # default behavior should be the one specified by ITKv4 rules\n # (i.e. respect SGE $NSLOTS or environmental variables of threads, or\n # user environmental settings)\n if self.inputs.num_threads == -1:\n if ALT_ITKv4_THREAD_LIMIT_VARIABLE in self.inputs.environ:\n del self.inputs.environ[ALT_ITKv4_THREAD_LIMIT_VARIABLE]\n if PREFERED_ITKv4_THREAD_LIMIT_VARIABLE in self.inputs.environ:\n del self.inputs.environ[PREFERED_ITKv4_THREAD_LIMIT_VARIABLE]\n else:\n self.inputs.environ.update(\n {PREFERED_ITKv4_THREAD_LIMIT_VARIABLE: \"%s\" % self.inputs.num_threads}\n )\n\n @staticmethod\n def _format_xarray(val):\n \"\"\"Convenience method for converting input arrays [1,2,3] to\n commandline format '1x2x3'\"\"\"\n return \"x\".join([str(x) for x in val])\n\n @classmethod\n def set_default_num_threads(cls, num_threads):\n \"\"\"Set the default number of threads for ITK calls\n\n This method is used to set the default number of ITK threads for all\n the ANTS interfaces. However, setting this will not update the output\n type for any existing instances. For these, assign the\n <instance>.inputs.num_threads\n \"\"\"\n cls._num_threads = num_threads\n\n @property\n def version(self):\n return Info.version()\n", "path": "nipype/interfaces/ants/base.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"The ants module provides basic functions for interfacing with ANTS tools.\"\"\"\nimport os\nfrom packaging.version import Version, parse\n\n# Local imports\nfrom ... import logging\nfrom ..base import CommandLine, CommandLineInputSpec, traits, isdefined, PackageInfo\n\niflogger = logging.getLogger(\"nipype.interface\")\n\n# -Using -1 gives primary responsibilty to ITKv4 to do the correct\n# thread limitings.\n# -Using 1 takes a very conservative approach to avoid overloading\n# the computer (when running MultiProc) by forcing everything to\n# single threaded. This can be a severe penalty for registration\n# performance.\nLOCAL_DEFAULT_NUMBER_OF_THREADS = 1\n# -Using NSLOTS has the same behavior as ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS\n# as long as ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS is not set. Otherwise\n# ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS takes precidence.\n# This behavior states that you the user explicitly specifies\n# num_threads, then respect that no matter what SGE tries to limit.\nPREFERED_ITKv4_THREAD_LIMIT_VARIABLE = \"NSLOTS\"\nALT_ITKv4_THREAD_LIMIT_VARIABLE = \"ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS\"\n\n\nclass Info(PackageInfo):\n version_cmd = (\n os.path.join(os.getenv(\"ANTSPATH\", \"\"), \"antsRegistration\") + \" --version\"\n )\n\n @staticmethod\n def parse_version(raw_info):\n for line in raw_info.splitlines():\n if line.startswith(\"ANTs Version: \"):\n v_string = line.split()[2]\n break\n else:\n return None\n\n # -githash may or may not be appended\n v_string = v_string.split(\"-\")[0]\n\n version = parse(v_string)\n\n # Known mislabeled versions\n if version.is_postrelease:\n if version.base_version == \"2.1.0\" and version.post >= 789:\n return \"2.2.0\"\n\n # Unless we know of a specific reason to re-version, we will\n # treat the base version (before pre/post/dev) as authoritative\n return version.base_version\n\n\nclass ANTSCommandInputSpec(CommandLineInputSpec):\n \"\"\"Base Input Specification for all ANTS Commands\"\"\"\n\n num_threads = traits.Int(\n LOCAL_DEFAULT_NUMBER_OF_THREADS,\n usedefault=True,\n nohash=True,\n desc=\"Number of ITK threads to use\",\n )\n\n\nclass ANTSCommand(CommandLine):\n \"\"\"Base class for ANTS interfaces\"\"\"\n\n input_spec = ANTSCommandInputSpec\n _num_threads = LOCAL_DEFAULT_NUMBER_OF_THREADS\n\n def __init__(self, **inputs):\n super(ANTSCommand, self).__init__(**inputs)\n self.inputs.on_trait_change(self._num_threads_update, \"num_threads\")\n\n if not isdefined(self.inputs.num_threads):\n self.inputs.num_threads = self._num_threads\n else:\n self._num_threads_update()\n\n def _num_threads_update(self):\n self._num_threads = self.inputs.num_threads\n # ONLY SET THE ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS if requested\n # by the end user. The default setting did not allow for\n # overwriting the default values.\n # In ITKv4 (the version used for all ANTS programs), ITK respects\n # the SGE controlled $NSLOTS environmental variable.\n # If user specifies -1, then that indicates that the system\n # default behavior should be the one specified by ITKv4 rules\n # (i.e. respect SGE $NSLOTS or environmental variables of threads, or\n # user environmental settings)\n if self.inputs.num_threads == -1:\n if ALT_ITKv4_THREAD_LIMIT_VARIABLE in self.inputs.environ:\n del self.inputs.environ[ALT_ITKv4_THREAD_LIMIT_VARIABLE]\n if PREFERED_ITKv4_THREAD_LIMIT_VARIABLE in self.inputs.environ:\n del self.inputs.environ[PREFERED_ITKv4_THREAD_LIMIT_VARIABLE]\n else:\n self.inputs.environ.update(\n {PREFERED_ITKv4_THREAD_LIMIT_VARIABLE: \"%s\" % self.inputs.num_threads}\n )\n\n @staticmethod\n def _format_xarray(val):\n \"\"\"Convenience method for converting input arrays [1,2,3] to\n commandline format '1x2x3'\"\"\"\n return \"x\".join([str(x) for x in val])\n\n @classmethod\n def set_default_num_threads(cls, num_threads):\n \"\"\"Set the default number of threads for ITK calls\n\n This method is used to set the default number of ITK threads for all\n the ANTS interfaces. However, setting this will not update the output\n type for any existing instances. For these, assign the\n <instance>.inputs.num_threads\n \"\"\"\n cls._num_threads = num_threads\n\n @property\n def version(self):\n return Info.version()\n", "path": "nipype/interfaces/ants/base.py"}]}
3,291
373
gh_patches_debug_18615
rasdani/github-patches
git_diff
vyperlang__vyper-555
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Log topic and data allow byte array longer than 32 bytes. ### What's your issue about? When packing data/topic for log, if the the actual argument is a byte array variable, there is no check for the actual length of the variable. e.g., ``` MyLog: __log__({arg1: indexed(bytes<=2000)}) @public def foo(): a: bytes<=100 log.MyLog(a) ``` This program should be rejected by is not. ### How can it be fixed? Add check in event_sig, pack_arg_by_32 and pack_logging_topic. #### Cute Animal Picture ![image](https://user-images.githubusercontent.com/5641590/33631206-753fab08-d9cf-11e7-89b4-f2d71f844453.png) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `viper/signatures/event_signature.py` Content: ``` 1 from viper.types import get_size_of_type, canonicalize_type, parse_type, \ 2 ByteArrayType 3 from viper.utils import sha3, is_varname_valid, bytes_to_int 4 import ast 5 from viper.function_signature import VariableRecord 6 from viper.exceptions import InvalidTypeException, VariableDeclarationException 7 8 9 # Event signature object 10 class EventSignature(): 11 def __init__(self, name, args, indexed_list, event_id, sig): 12 self.name = name 13 self.args = args 14 self.indexed_list = indexed_list 15 self.sig = sig 16 self.event_id = event_id 17 18 # Get a signature from an event declaration 19 @classmethod 20 def from_declaration(cls, code): 21 name = code.target.id 22 pos = 0 23 # Determine the arguments, expects something of the form def foo(arg1: num, arg2: num ... 24 args = [] 25 indexed_list = [] 26 topics_count = 1 27 if code.annotation.args: 28 keys = code.annotation.args[0].keys 29 values = code.annotation.args[0].values 30 for i in range(len(keys)): 31 typ = values[i] 32 arg = keys[i].id 33 if isinstance(typ, ast.Call): 34 # Check to see if argument is a topic 35 if typ.func.id == 'indexed': 36 typ = values[i].args[0] 37 indexed_list.append(True) 38 topics_count += 1 39 else: 40 raise VariableDeclarationException("Only indexed keyword is allowed", arg) 41 else: 42 if hasattr(typ, 'left') and typ.left.id == 'bytes' and typ.comparators[0].n > 32: 43 raise VariableDeclarationException("Can only log a maximum of 32 bytes at a time.") 44 indexed_list.append(False) 45 if topics_count > 4: 46 raise VariableDeclarationException("Maximum of 3 topics {} given".format(topics_count - 1), arg) 47 if not isinstance(arg, str): 48 raise VariableDeclarationException("Argument name invalid", arg) 49 if not typ: 50 raise InvalidTypeException("Argument must have type", arg) 51 if not is_varname_valid(arg): 52 raise VariableDeclarationException("Argument name invalid or reserved: " + arg, arg) 53 if arg in (x.name for x in args): 54 raise VariableDeclarationException("Duplicate function argument name: " + arg, arg) 55 parsed_type = parse_type(typ, None) 56 args.append(VariableRecord(arg, pos, parsed_type, False)) 57 if isinstance(parsed_type, ByteArrayType): 58 pos += 32 59 else: 60 pos += get_size_of_type(parsed_type) * 32 61 sig = name + '(' + ','.join([canonicalize_type(arg.typ, True) for arg in args]) + ')' 62 event_id = bytes_to_int(sha3(bytes(sig, 'utf-8'))) 63 return cls(name, args, indexed_list, event_id, sig) 64 65 def to_abi_dict(self): 66 return { 67 "name": self.name, 68 "inputs": [{"type": canonicalize_type(arg.typ, True), "name": arg.name, "indexed": self.indexed_list[pos]} for pos, arg in enumerate(self.args)] if self.args else [], 69 "anonymous": False, 70 "type": "event" 71 } 72 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/viper/signatures/event_signature.py b/viper/signatures/event_signature.py --- a/viper/signatures/event_signature.py +++ b/viper/signatures/event_signature.py @@ -39,9 +39,9 @@ else: raise VariableDeclarationException("Only indexed keyword is allowed", arg) else: - if hasattr(typ, 'left') and typ.left.id == 'bytes' and typ.comparators[0].n > 32: - raise VariableDeclarationException("Can only log a maximum of 32 bytes at a time.") indexed_list.append(False) + if hasattr(typ, 'left') and typ.left.id == 'bytes' and typ.comparators[0].n > 32: + raise VariableDeclarationException("Can only log a maximum of 32 bytes at a time.") if topics_count > 4: raise VariableDeclarationException("Maximum of 3 topics {} given".format(topics_count - 1), arg) if not isinstance(arg, str):
{"golden_diff": "diff --git a/viper/signatures/event_signature.py b/viper/signatures/event_signature.py\n--- a/viper/signatures/event_signature.py\n+++ b/viper/signatures/event_signature.py\n@@ -39,9 +39,9 @@\n else:\n raise VariableDeclarationException(\"Only indexed keyword is allowed\", arg)\n else:\n- if hasattr(typ, 'left') and typ.left.id == 'bytes' and typ.comparators[0].n > 32:\n- raise VariableDeclarationException(\"Can only log a maximum of 32 bytes at a time.\")\n indexed_list.append(False)\n+ if hasattr(typ, 'left') and typ.left.id == 'bytes' and typ.comparators[0].n > 32:\n+ raise VariableDeclarationException(\"Can only log a maximum of 32 bytes at a time.\")\n if topics_count > 4:\n raise VariableDeclarationException(\"Maximum of 3 topics {} given\".format(topics_count - 1), arg)\n if not isinstance(arg, str):\n", "issue": "Log topic and data allow byte array longer than 32 bytes.\n### What's your issue about?\r\nWhen packing data/topic for log, if the the actual argument is a byte array variable, there is no check for the actual length of the variable.\r\ne.g.,\r\n```\r\nMyLog: __log__({arg1: indexed(bytes<=2000)})\r\n\r\n@public\r\ndef foo():\r\n a: bytes<=100\r\n log.MyLog(a)\r\n```\r\nThis program should be rejected by is not.\r\n\r\n### How can it be fixed?\r\n\r\nAdd check in event_sig, pack_arg_by_32 and pack_logging_topic.\r\n\r\n#### Cute Animal Picture\r\n![image](https://user-images.githubusercontent.com/5641590/33631206-753fab08-d9cf-11e7-89b4-f2d71f844453.png)\r\n\r\n\n", "before_files": [{"content": "from viper.types import get_size_of_type, canonicalize_type, parse_type, \\\n ByteArrayType\nfrom viper.utils import sha3, is_varname_valid, bytes_to_int\nimport ast\nfrom viper.function_signature import VariableRecord\nfrom viper.exceptions import InvalidTypeException, VariableDeclarationException\n\n\n# Event signature object\nclass EventSignature():\n def __init__(self, name, args, indexed_list, event_id, sig):\n self.name = name\n self.args = args\n self.indexed_list = indexed_list\n self.sig = sig\n self.event_id = event_id\n\n # Get a signature from an event declaration\n @classmethod\n def from_declaration(cls, code):\n name = code.target.id\n pos = 0\n # Determine the arguments, expects something of the form def foo(arg1: num, arg2: num ...\n args = []\n indexed_list = []\n topics_count = 1\n if code.annotation.args:\n keys = code.annotation.args[0].keys\n values = code.annotation.args[0].values\n for i in range(len(keys)):\n typ = values[i]\n arg = keys[i].id\n if isinstance(typ, ast.Call):\n # Check to see if argument is a topic\n if typ.func.id == 'indexed':\n typ = values[i].args[0]\n indexed_list.append(True)\n topics_count += 1\n else:\n raise VariableDeclarationException(\"Only indexed keyword is allowed\", arg)\n else:\n if hasattr(typ, 'left') and typ.left.id == 'bytes' and typ.comparators[0].n > 32:\n raise VariableDeclarationException(\"Can only log a maximum of 32 bytes at a time.\")\n indexed_list.append(False)\n if topics_count > 4:\n raise VariableDeclarationException(\"Maximum of 3 topics {} given\".format(topics_count - 1), arg)\n if not isinstance(arg, str):\n raise VariableDeclarationException(\"Argument name invalid\", arg)\n if not typ:\n raise InvalidTypeException(\"Argument must have type\", arg)\n if not is_varname_valid(arg):\n raise VariableDeclarationException(\"Argument name invalid or reserved: \" + arg, arg)\n if arg in (x.name for x in args):\n raise VariableDeclarationException(\"Duplicate function argument name: \" + arg, arg)\n parsed_type = parse_type(typ, None)\n args.append(VariableRecord(arg, pos, parsed_type, False))\n if isinstance(parsed_type, ByteArrayType):\n pos += 32\n else:\n pos += get_size_of_type(parsed_type) * 32\n sig = name + '(' + ','.join([canonicalize_type(arg.typ, True) for arg in args]) + ')'\n event_id = bytes_to_int(sha3(bytes(sig, 'utf-8')))\n return cls(name, args, indexed_list, event_id, sig)\n\n def to_abi_dict(self):\n return {\n \"name\": self.name,\n \"inputs\": [{\"type\": canonicalize_type(arg.typ, True), \"name\": arg.name, \"indexed\": self.indexed_list[pos]} for pos, arg in enumerate(self.args)] if self.args else [],\n \"anonymous\": False,\n \"type\": \"event\"\n }\n", "path": "viper/signatures/event_signature.py"}], "after_files": [{"content": "from viper.types import get_size_of_type, canonicalize_type, parse_type, \\\n ByteArrayType\nfrom viper.utils import sha3, is_varname_valid, bytes_to_int\nimport ast\nfrom viper.function_signature import VariableRecord\nfrom viper.exceptions import InvalidTypeException, VariableDeclarationException\n\n\n# Event signature object\nclass EventSignature():\n def __init__(self, name, args, indexed_list, event_id, sig):\n self.name = name\n self.args = args\n self.indexed_list = indexed_list\n self.sig = sig\n self.event_id = event_id\n\n # Get a signature from an event declaration\n @classmethod\n def from_declaration(cls, code):\n name = code.target.id\n pos = 0\n # Determine the arguments, expects something of the form def foo(arg1: num, arg2: num ...\n args = []\n indexed_list = []\n topics_count = 1\n if code.annotation.args:\n keys = code.annotation.args[0].keys\n values = code.annotation.args[0].values\n for i in range(len(keys)):\n typ = values[i]\n arg = keys[i].id\n if isinstance(typ, ast.Call):\n # Check to see if argument is a topic\n if typ.func.id == 'indexed':\n typ = values[i].args[0]\n indexed_list.append(True)\n topics_count += 1\n else:\n raise VariableDeclarationException(\"Only indexed keyword is allowed\", arg)\n else:\n indexed_list.append(False)\n if hasattr(typ, 'left') and typ.left.id == 'bytes' and typ.comparators[0].n > 32:\n raise VariableDeclarationException(\"Can only log a maximum of 32 bytes at a time.\")\n if topics_count > 4:\n raise VariableDeclarationException(\"Maximum of 3 topics {} given\".format(topics_count - 1), arg)\n if not isinstance(arg, str):\n raise VariableDeclarationException(\"Argument name invalid\", arg)\n if not typ:\n raise InvalidTypeException(\"Argument must have type\", arg)\n if not is_varname_valid(arg):\n raise VariableDeclarationException(\"Argument name invalid or reserved: \" + arg, arg)\n if arg in (x.name for x in args):\n raise VariableDeclarationException(\"Duplicate function argument name: \" + arg, arg)\n parsed_type = parse_type(typ, None)\n args.append(VariableRecord(arg, pos, parsed_type, False))\n if isinstance(parsed_type, ByteArrayType):\n pos += 32\n else:\n pos += get_size_of_type(parsed_type) * 32\n sig = name + '(' + ','.join([canonicalize_type(arg.typ, True) for arg in args]) + ')'\n event_id = bytes_to_int(sha3(bytes(sig, 'utf-8')))\n return cls(name, args, indexed_list, event_id, sig)\n\n def to_abi_dict(self):\n return {\n \"name\": self.name,\n \"inputs\": [{\"type\": canonicalize_type(arg.typ, True), \"name\": arg.name, \"indexed\": self.indexed_list[pos]} for pos, arg in enumerate(self.args)] if self.args else [],\n \"anonymous\": False,\n \"type\": \"event\"\n }\n", "path": "viper/signatures/event_signature.py"}]}
1,295
222
gh_patches_debug_22575
rasdani/github-patches
git_diff
evennia__evennia-1977
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [BUG] validatorfuncs.duration() returns inaccurate datetime.timedelta #### Describe the bug `validatorfuncs.duration()` produces incorrect `datetime.timedelta` #### To Reproduce See https://github.com/evennia/evennia/pull/1967#pullrequestreview-301630347 ```python validatorfuncs.duration('1d 2s 3m 4h 5w 5y') ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `evennia/utils/validatorfuncs.py` Content: ``` 1 """ 2 Contains all the validation functions. 3 4 All validation functions must have a checker (probably a session) and entry arg. 5 6 They can employ more paramters at your leisure. 7 8 9 """ 10 11 import re as _re 12 import pytz as _pytz 13 import datetime as _dt 14 from django.core.exceptions import ValidationError as _error 15 from django.core.validators import validate_email as _val_email 16 from evennia.utils.ansi import strip_ansi 17 from evennia.utils.utils import string_partial_matching as _partial 18 19 _TZ_DICT = {str(tz): _pytz.timezone(tz) for tz in _pytz.common_timezones} 20 21 22 def text(entry, option_key="Text", **kwargs): 23 try: 24 return str(entry) 25 except Exception as err: 26 raise ValueError(f"Input could not be converted to text ({err})") 27 28 29 def color(entry, option_key="Color", **kwargs): 30 """ 31 The color should be just a color character, so 'r' if red color is desired. 32 """ 33 if not entry: 34 raise ValueError(f"Nothing entered for a {option_key}!") 35 test_str = strip_ansi(f"|{entry}|n") 36 if test_str: 37 raise ValueError(f"'{entry}' is not a valid {option_key}.") 38 return entry 39 40 41 def datetime(entry, option_key="Datetime", account=None, from_tz=None, **kwargs): 42 """ 43 Process a datetime string in standard forms while accounting for the inputter's timezone. 44 45 Args: 46 entry (str): A date string from a user. 47 option_key (str): Name to display this datetime as. 48 account (AccountDB): The Account performing this lookup. Unless from_tz is provided, 49 account's timezone will be used (if found) for local time and convert the results 50 to UTC. 51 from_tz (pytz): An instance of pytz from the user. If not provided, defaults to whatever 52 the Account uses. If neither one is provided, defaults to UTC. 53 54 Returns: 55 datetime in utc. 56 """ 57 if not entry: 58 raise ValueError(f"No {option_key} entered!") 59 if not from_tz: 60 from_tz = _pytz.UTC 61 utc = _pytz.UTC 62 now = _dt.datetime.utcnow().replace(tzinfo=utc) 63 cur_year = now.strftime("%Y") 64 split_time = entry.split(" ") 65 if len(split_time) == 3: 66 entry = f"{split_time[0]} {split_time[1]} {split_time[2]} {cur_year}" 67 elif len(split_time) == 4: 68 entry = f"{split_time[0]} {split_time[1]} {split_time[2]} {split_time[3]}" 69 else: 70 raise ValueError( 71 f"{option_key} must be entered in a 24-hour format such as: {now.strftime('%b %d %H:%M')}" 72 ) 73 try: 74 local = _dt.datetime.strptime(entry, "%b %d %H:%M %Y") 75 except ValueError: 76 raise ValueError( 77 f"{option_key} must be entered in a 24-hour format such as: {now.strftime('%b %d %H:%M')}" 78 ) 79 local_tz = from_tz.localize(local) 80 return local_tz.astimezone(utc) 81 82 83 def duration(entry, option_key="Duration", **kwargs): 84 """ 85 Take a string and derive a datetime timedelta from it. 86 87 Args: 88 entry (string): This is a string from user-input. The intended format is, for example: "5d 2w 90s" for 89 'five days, two weeks, and ninety seconds.' Invalid sections are ignored. 90 option_key (str): Name to display this query as. 91 92 Returns: 93 timedelta 94 95 """ 96 time_string = entry.lower().split(" ") 97 seconds = 0 98 minutes = 0 99 hours = 0 100 days = 0 101 weeks = 0 102 103 for interval in time_string: 104 if _re.match(r"^[\d]+s$", interval): 105 seconds = +int(interval.rstrip("s")) 106 elif _re.match(r"^[\d]+m$", interval): 107 minutes = +int(interval.rstrip("m")) 108 elif _re.match(r"^[\d]+h$", interval): 109 hours = +int(interval.rstrip("h")) 110 elif _re.match(r"^[\d]+d$", interval): 111 days = +int(interval.rstrip("d")) 112 elif _re.match(r"^[\d]+w$", interval): 113 weeks = +int(interval.rstrip("w")) 114 elif _re.match(r"^[\d]+y$", interval): 115 days = +int(interval.rstrip("y")) * 365 116 else: 117 raise ValueError(f"Could not convert section '{interval}' to a {option_key}.") 118 119 return _dt.timedelta(days, seconds, 0, 0, minutes, hours, weeks) 120 121 122 def future(entry, option_key="Future Datetime", from_tz=None, **kwargs): 123 time = datetime(entry, option_key, from_tz=from_tz) 124 if time < _dt.datetime.utcnow().replace(tzinfo=_dt.timezone.utc): 125 raise ValueError(f"That {option_key} is in the past! Must give a Future datetime!") 126 return time 127 128 129 def signed_integer(entry, option_key="Signed Integer", **kwargs): 130 if not entry: 131 raise ValueError(f"Must enter a whole number for {option_key}!") 132 try: 133 num = int(entry) 134 except ValueError: 135 raise ValueError(f"Could not convert '{entry}' to a whole number for {option_key}!") 136 return num 137 138 139 def positive_integer(entry, option_key="Positive Integer", **kwargs): 140 num = signed_integer(entry, option_key) 141 if not num >= 1: 142 raise ValueError(f"Must enter a whole number greater than 0 for {option_key}!") 143 return num 144 145 146 def unsigned_integer(entry, option_key="Unsigned Integer", **kwargs): 147 num = signed_integer(entry, option_key) 148 if not num >= 0: 149 raise ValueError(f"{option_key} must be a whole number greater than or equal to 0!") 150 return num 151 152 153 def boolean(entry, option_key="True/False", **kwargs): 154 """ 155 Simplest check in computer logic, right? This will take user input to flick the switch on or off 156 Args: 157 entry (str): A value such as True, On, Enabled, Disabled, False, 0, or 1. 158 option_key (str): What kind of Boolean we are setting. What Option is this for? 159 160 Returns: 161 Boolean 162 """ 163 error = f"Must enter 0 (false) or 1 (true) for {option_key}. Also accepts True, False, On, Off, Yes, No, Enabled, and Disabled" 164 if not isinstance(entry, str): 165 raise ValueError(error) 166 entry = entry.upper() 167 if entry in ("1", "TRUE", "ON", "ENABLED", "ENABLE", "YES"): 168 return True 169 if entry in ("0", "FALSE", "OFF", "DISABLED", "DISABLE", "NO"): 170 return False 171 raise ValueError(error) 172 173 174 def timezone(entry, option_key="Timezone", **kwargs): 175 """ 176 Takes user input as string, and partial matches a Timezone. 177 178 Args: 179 entry (str): The name of the Timezone. 180 option_key (str): What this Timezone is used for. 181 182 Returns: 183 A PYTZ timezone. 184 """ 185 if not entry: 186 raise ValueError(f"No {option_key} entered!") 187 found = _partial(list(_TZ_DICT.keys()), entry, ret_index=False) 188 if len(found) > 1: 189 raise ValueError( 190 f"That matched: {', '.join(str(t) for t in found)}. Please be more specific!" 191 ) 192 if found: 193 return _TZ_DICT[found[0]] 194 raise ValueError(f"Could not find timezone '{entry}' for {option_key}!") 195 196 197 def email(entry, option_key="Email Address", **kwargs): 198 if not entry: 199 raise ValueError("Email address field empty!") 200 try: 201 _val_email(str(entry)) # offloading the hard work to Django! 202 except _error: 203 raise ValueError(f"That isn't a valid {option_key}!") 204 return entry 205 206 207 def lock(entry, option_key="locks", access_options=None, **kwargs): 208 entry = entry.strip() 209 if not entry: 210 raise ValueError(f"No {option_key} entered to set!") 211 for locksetting in entry.split(";"): 212 access_type, lockfunc = locksetting.split(":", 1) 213 if not access_type: 214 raise ValueError("Must enter an access type!") 215 if access_options: 216 if access_type not in access_options: 217 raise ValueError(f"Access type must be one of: {', '.join(access_options)}") 218 if not lockfunc: 219 raise ValueError("Lock func not entered.") 220 return entry 221 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/evennia/utils/validatorfuncs.py b/evennia/utils/validatorfuncs.py --- a/evennia/utils/validatorfuncs.py +++ b/evennia/utils/validatorfuncs.py @@ -102,17 +102,17 @@ for interval in time_string: if _re.match(r"^[\d]+s$", interval): - seconds = +int(interval.rstrip("s")) + seconds += int(interval.rstrip("s")) elif _re.match(r"^[\d]+m$", interval): - minutes = +int(interval.rstrip("m")) + minutes += int(interval.rstrip("m")) elif _re.match(r"^[\d]+h$", interval): - hours = +int(interval.rstrip("h")) + hours += int(interval.rstrip("h")) elif _re.match(r"^[\d]+d$", interval): - days = +int(interval.rstrip("d")) + days += int(interval.rstrip("d")) elif _re.match(r"^[\d]+w$", interval): - weeks = +int(interval.rstrip("w")) + weeks += int(interval.rstrip("w")) elif _re.match(r"^[\d]+y$", interval): - days = +int(interval.rstrip("y")) * 365 + days += int(interval.rstrip("y")) * 365 else: raise ValueError(f"Could not convert section '{interval}' to a {option_key}.")
{"golden_diff": "diff --git a/evennia/utils/validatorfuncs.py b/evennia/utils/validatorfuncs.py\n--- a/evennia/utils/validatorfuncs.py\n+++ b/evennia/utils/validatorfuncs.py\n@@ -102,17 +102,17 @@\n \n for interval in time_string:\n if _re.match(r\"^[\\d]+s$\", interval):\n- seconds = +int(interval.rstrip(\"s\"))\n+ seconds += int(interval.rstrip(\"s\"))\n elif _re.match(r\"^[\\d]+m$\", interval):\n- minutes = +int(interval.rstrip(\"m\"))\n+ minutes += int(interval.rstrip(\"m\"))\n elif _re.match(r\"^[\\d]+h$\", interval):\n- hours = +int(interval.rstrip(\"h\"))\n+ hours += int(interval.rstrip(\"h\"))\n elif _re.match(r\"^[\\d]+d$\", interval):\n- days = +int(interval.rstrip(\"d\"))\n+ days += int(interval.rstrip(\"d\"))\n elif _re.match(r\"^[\\d]+w$\", interval):\n- weeks = +int(interval.rstrip(\"w\"))\n+ weeks += int(interval.rstrip(\"w\"))\n elif _re.match(r\"^[\\d]+y$\", interval):\n- days = +int(interval.rstrip(\"y\")) * 365\n+ days += int(interval.rstrip(\"y\")) * 365\n else:\n raise ValueError(f\"Could not convert section '{interval}' to a {option_key}.\")\n", "issue": "[BUG] validatorfuncs.duration() returns inaccurate datetime.timedelta\n#### Describe the bug\r\n`validatorfuncs.duration()` produces incorrect `datetime.timedelta`\r\n\r\n#### To Reproduce\r\nSee https://github.com/evennia/evennia/pull/1967#pullrequestreview-301630347\r\n\r\n```python\r\nvalidatorfuncs.duration('1d 2s 3m 4h 5w 5y')\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\nContains all the validation functions.\n\nAll validation functions must have a checker (probably a session) and entry arg.\n\nThey can employ more paramters at your leisure.\n\n\n\"\"\"\n\nimport re as _re\nimport pytz as _pytz\nimport datetime as _dt\nfrom django.core.exceptions import ValidationError as _error\nfrom django.core.validators import validate_email as _val_email\nfrom evennia.utils.ansi import strip_ansi\nfrom evennia.utils.utils import string_partial_matching as _partial\n\n_TZ_DICT = {str(tz): _pytz.timezone(tz) for tz in _pytz.common_timezones}\n\n\ndef text(entry, option_key=\"Text\", **kwargs):\n try:\n return str(entry)\n except Exception as err:\n raise ValueError(f\"Input could not be converted to text ({err})\")\n\n\ndef color(entry, option_key=\"Color\", **kwargs):\n \"\"\"\n The color should be just a color character, so 'r' if red color is desired.\n \"\"\"\n if not entry:\n raise ValueError(f\"Nothing entered for a {option_key}!\")\n test_str = strip_ansi(f\"|{entry}|n\")\n if test_str:\n raise ValueError(f\"'{entry}' is not a valid {option_key}.\")\n return entry\n\n\ndef datetime(entry, option_key=\"Datetime\", account=None, from_tz=None, **kwargs):\n \"\"\"\n Process a datetime string in standard forms while accounting for the inputter's timezone.\n\n Args:\n entry (str): A date string from a user.\n option_key (str): Name to display this datetime as.\n account (AccountDB): The Account performing this lookup. Unless from_tz is provided,\n account's timezone will be used (if found) for local time and convert the results\n to UTC.\n from_tz (pytz): An instance of pytz from the user. If not provided, defaults to whatever\n the Account uses. If neither one is provided, defaults to UTC.\n\n Returns:\n datetime in utc.\n \"\"\"\n if not entry:\n raise ValueError(f\"No {option_key} entered!\")\n if not from_tz:\n from_tz = _pytz.UTC\n utc = _pytz.UTC\n now = _dt.datetime.utcnow().replace(tzinfo=utc)\n cur_year = now.strftime(\"%Y\")\n split_time = entry.split(\" \")\n if len(split_time) == 3:\n entry = f\"{split_time[0]} {split_time[1]} {split_time[2]} {cur_year}\"\n elif len(split_time) == 4:\n entry = f\"{split_time[0]} {split_time[1]} {split_time[2]} {split_time[3]}\"\n else:\n raise ValueError(\n f\"{option_key} must be entered in a 24-hour format such as: {now.strftime('%b %d %H:%M')}\"\n )\n try:\n local = _dt.datetime.strptime(entry, \"%b %d %H:%M %Y\")\n except ValueError:\n raise ValueError(\n f\"{option_key} must be entered in a 24-hour format such as: {now.strftime('%b %d %H:%M')}\"\n )\n local_tz = from_tz.localize(local)\n return local_tz.astimezone(utc)\n\n\ndef duration(entry, option_key=\"Duration\", **kwargs):\n \"\"\"\n Take a string and derive a datetime timedelta from it.\n\n Args:\n entry (string): This is a string from user-input. The intended format is, for example: \"5d 2w 90s\" for\n 'five days, two weeks, and ninety seconds.' Invalid sections are ignored.\n option_key (str): Name to display this query as.\n\n Returns:\n timedelta\n\n \"\"\"\n time_string = entry.lower().split(\" \")\n seconds = 0\n minutes = 0\n hours = 0\n days = 0\n weeks = 0\n\n for interval in time_string:\n if _re.match(r\"^[\\d]+s$\", interval):\n seconds = +int(interval.rstrip(\"s\"))\n elif _re.match(r\"^[\\d]+m$\", interval):\n minutes = +int(interval.rstrip(\"m\"))\n elif _re.match(r\"^[\\d]+h$\", interval):\n hours = +int(interval.rstrip(\"h\"))\n elif _re.match(r\"^[\\d]+d$\", interval):\n days = +int(interval.rstrip(\"d\"))\n elif _re.match(r\"^[\\d]+w$\", interval):\n weeks = +int(interval.rstrip(\"w\"))\n elif _re.match(r\"^[\\d]+y$\", interval):\n days = +int(interval.rstrip(\"y\")) * 365\n else:\n raise ValueError(f\"Could not convert section '{interval}' to a {option_key}.\")\n\n return _dt.timedelta(days, seconds, 0, 0, minutes, hours, weeks)\n\n\ndef future(entry, option_key=\"Future Datetime\", from_tz=None, **kwargs):\n time = datetime(entry, option_key, from_tz=from_tz)\n if time < _dt.datetime.utcnow().replace(tzinfo=_dt.timezone.utc):\n raise ValueError(f\"That {option_key} is in the past! Must give a Future datetime!\")\n return time\n\n\ndef signed_integer(entry, option_key=\"Signed Integer\", **kwargs):\n if not entry:\n raise ValueError(f\"Must enter a whole number for {option_key}!\")\n try:\n num = int(entry)\n except ValueError:\n raise ValueError(f\"Could not convert '{entry}' to a whole number for {option_key}!\")\n return num\n\n\ndef positive_integer(entry, option_key=\"Positive Integer\", **kwargs):\n num = signed_integer(entry, option_key)\n if not num >= 1:\n raise ValueError(f\"Must enter a whole number greater than 0 for {option_key}!\")\n return num\n\n\ndef unsigned_integer(entry, option_key=\"Unsigned Integer\", **kwargs):\n num = signed_integer(entry, option_key)\n if not num >= 0:\n raise ValueError(f\"{option_key} must be a whole number greater than or equal to 0!\")\n return num\n\n\ndef boolean(entry, option_key=\"True/False\", **kwargs):\n \"\"\"\n Simplest check in computer logic, right? This will take user input to flick the switch on or off\n Args:\n entry (str): A value such as True, On, Enabled, Disabled, False, 0, or 1.\n option_key (str): What kind of Boolean we are setting. What Option is this for?\n\n Returns:\n Boolean\n \"\"\"\n error = f\"Must enter 0 (false) or 1 (true) for {option_key}. Also accepts True, False, On, Off, Yes, No, Enabled, and Disabled\"\n if not isinstance(entry, str):\n raise ValueError(error)\n entry = entry.upper()\n if entry in (\"1\", \"TRUE\", \"ON\", \"ENABLED\", \"ENABLE\", \"YES\"):\n return True\n if entry in (\"0\", \"FALSE\", \"OFF\", \"DISABLED\", \"DISABLE\", \"NO\"):\n return False\n raise ValueError(error)\n\n\ndef timezone(entry, option_key=\"Timezone\", **kwargs):\n \"\"\"\n Takes user input as string, and partial matches a Timezone.\n\n Args:\n entry (str): The name of the Timezone.\n option_key (str): What this Timezone is used for.\n\n Returns:\n A PYTZ timezone.\n \"\"\"\n if not entry:\n raise ValueError(f\"No {option_key} entered!\")\n found = _partial(list(_TZ_DICT.keys()), entry, ret_index=False)\n if len(found) > 1:\n raise ValueError(\n f\"That matched: {', '.join(str(t) for t in found)}. Please be more specific!\"\n )\n if found:\n return _TZ_DICT[found[0]]\n raise ValueError(f\"Could not find timezone '{entry}' for {option_key}!\")\n\n\ndef email(entry, option_key=\"Email Address\", **kwargs):\n if not entry:\n raise ValueError(\"Email address field empty!\")\n try:\n _val_email(str(entry)) # offloading the hard work to Django!\n except _error:\n raise ValueError(f\"That isn't a valid {option_key}!\")\n return entry\n\n\ndef lock(entry, option_key=\"locks\", access_options=None, **kwargs):\n entry = entry.strip()\n if not entry:\n raise ValueError(f\"No {option_key} entered to set!\")\n for locksetting in entry.split(\";\"):\n access_type, lockfunc = locksetting.split(\":\", 1)\n if not access_type:\n raise ValueError(\"Must enter an access type!\")\n if access_options:\n if access_type not in access_options:\n raise ValueError(f\"Access type must be one of: {', '.join(access_options)}\")\n if not lockfunc:\n raise ValueError(\"Lock func not entered.\")\n return entry\n", "path": "evennia/utils/validatorfuncs.py"}], "after_files": [{"content": "\"\"\"\nContains all the validation functions.\n\nAll validation functions must have a checker (probably a session) and entry arg.\n\nThey can employ more paramters at your leisure.\n\n\n\"\"\"\n\nimport re as _re\nimport pytz as _pytz\nimport datetime as _dt\nfrom django.core.exceptions import ValidationError as _error\nfrom django.core.validators import validate_email as _val_email\nfrom evennia.utils.ansi import strip_ansi\nfrom evennia.utils.utils import string_partial_matching as _partial\n\n_TZ_DICT = {str(tz): _pytz.timezone(tz) for tz in _pytz.common_timezones}\n\n\ndef text(entry, option_key=\"Text\", **kwargs):\n try:\n return str(entry)\n except Exception as err:\n raise ValueError(f\"Input could not be converted to text ({err})\")\n\n\ndef color(entry, option_key=\"Color\", **kwargs):\n \"\"\"\n The color should be just a color character, so 'r' if red color is desired.\n \"\"\"\n if not entry:\n raise ValueError(f\"Nothing entered for a {option_key}!\")\n test_str = strip_ansi(f\"|{entry}|n\")\n if test_str:\n raise ValueError(f\"'{entry}' is not a valid {option_key}.\")\n return entry\n\n\ndef datetime(entry, option_key=\"Datetime\", account=None, from_tz=None, **kwargs):\n \"\"\"\n Process a datetime string in standard forms while accounting for the inputter's timezone.\n\n Args:\n entry (str): A date string from a user.\n option_key (str): Name to display this datetime as.\n account (AccountDB): The Account performing this lookup. Unless from_tz is provided,\n account's timezone will be used (if found) for local time and convert the results\n to UTC.\n from_tz (pytz): An instance of pytz from the user. If not provided, defaults to whatever\n the Account uses. If neither one is provided, defaults to UTC.\n\n Returns:\n datetime in utc.\n \"\"\"\n if not entry:\n raise ValueError(f\"No {option_key} entered!\")\n if not from_tz:\n from_tz = _pytz.UTC\n utc = _pytz.UTC\n now = _dt.datetime.utcnow().replace(tzinfo=utc)\n cur_year = now.strftime(\"%Y\")\n split_time = entry.split(\" \")\n if len(split_time) == 3:\n entry = f\"{split_time[0]} {split_time[1]} {split_time[2]} {cur_year}\"\n elif len(split_time) == 4:\n entry = f\"{split_time[0]} {split_time[1]} {split_time[2]} {split_time[3]}\"\n else:\n raise ValueError(\n f\"{option_key} must be entered in a 24-hour format such as: {now.strftime('%b %d %H:%M')}\"\n )\n try:\n local = _dt.datetime.strptime(entry, \"%b %d %H:%M %Y\")\n except ValueError:\n raise ValueError(\n f\"{option_key} must be entered in a 24-hour format such as: {now.strftime('%b %d %H:%M')}\"\n )\n local_tz = from_tz.localize(local)\n return local_tz.astimezone(utc)\n\n\ndef duration(entry, option_key=\"Duration\", **kwargs):\n \"\"\"\n Take a string and derive a datetime timedelta from it.\n\n Args:\n entry (string): This is a string from user-input. The intended format is, for example: \"5d 2w 90s\" for\n 'five days, two weeks, and ninety seconds.' Invalid sections are ignored.\n option_key (str): Name to display this query as.\n\n Returns:\n timedelta\n\n \"\"\"\n time_string = entry.lower().split(\" \")\n seconds = 0\n minutes = 0\n hours = 0\n days = 0\n weeks = 0\n\n for interval in time_string:\n if _re.match(r\"^[\\d]+s$\", interval):\n seconds += int(interval.rstrip(\"s\"))\n elif _re.match(r\"^[\\d]+m$\", interval):\n minutes += int(interval.rstrip(\"m\"))\n elif _re.match(r\"^[\\d]+h$\", interval):\n hours += int(interval.rstrip(\"h\"))\n elif _re.match(r\"^[\\d]+d$\", interval):\n days += int(interval.rstrip(\"d\"))\n elif _re.match(r\"^[\\d]+w$\", interval):\n weeks += int(interval.rstrip(\"w\"))\n elif _re.match(r\"^[\\d]+y$\", interval):\n days += int(interval.rstrip(\"y\")) * 365\n else:\n raise ValueError(f\"Could not convert section '{interval}' to a {option_key}.\")\n\n return _dt.timedelta(days, seconds, 0, 0, minutes, hours, weeks)\n\n\ndef future(entry, option_key=\"Future Datetime\", from_tz=None, **kwargs):\n time = datetime(entry, option_key, from_tz=from_tz)\n if time < _dt.datetime.utcnow().replace(tzinfo=_dt.timezone.utc):\n raise ValueError(f\"That {option_key} is in the past! Must give a Future datetime!\")\n return time\n\n\ndef signed_integer(entry, option_key=\"Signed Integer\", **kwargs):\n if not entry:\n raise ValueError(f\"Must enter a whole number for {option_key}!\")\n try:\n num = int(entry)\n except ValueError:\n raise ValueError(f\"Could not convert '{entry}' to a whole number for {option_key}!\")\n return num\n\n\ndef positive_integer(entry, option_key=\"Positive Integer\", **kwargs):\n num = signed_integer(entry, option_key)\n if not num >= 1:\n raise ValueError(f\"Must enter a whole number greater than 0 for {option_key}!\")\n return num\n\n\ndef unsigned_integer(entry, option_key=\"Unsigned Integer\", **kwargs):\n num = signed_integer(entry, option_key)\n if not num >= 0:\n raise ValueError(f\"{option_key} must be a whole number greater than or equal to 0!\")\n return num\n\n\ndef boolean(entry, option_key=\"True/False\", **kwargs):\n \"\"\"\n Simplest check in computer logic, right? This will take user input to flick the switch on or off\n Args:\n entry (str): A value such as True, On, Enabled, Disabled, False, 0, or 1.\n option_key (str): What kind of Boolean we are setting. What Option is this for?\n\n Returns:\n Boolean\n \"\"\"\n error = f\"Must enter 0 (false) or 1 (true) for {option_key}. Also accepts True, False, On, Off, Yes, No, Enabled, and Disabled\"\n if not isinstance(entry, str):\n raise ValueError(error)\n entry = entry.upper()\n if entry in (\"1\", \"TRUE\", \"ON\", \"ENABLED\", \"ENABLE\", \"YES\"):\n return True\n if entry in (\"0\", \"FALSE\", \"OFF\", \"DISABLED\", \"DISABLE\", \"NO\"):\n return False\n raise ValueError(error)\n\n\ndef timezone(entry, option_key=\"Timezone\", **kwargs):\n \"\"\"\n Takes user input as string, and partial matches a Timezone.\n\n Args:\n entry (str): The name of the Timezone.\n option_key (str): What this Timezone is used for.\n\n Returns:\n A PYTZ timezone.\n \"\"\"\n if not entry:\n raise ValueError(f\"No {option_key} entered!\")\n found = _partial(list(_TZ_DICT.keys()), entry, ret_index=False)\n if len(found) > 1:\n raise ValueError(\n f\"That matched: {', '.join(str(t) for t in found)}. Please be more specific!\"\n )\n if found:\n return _TZ_DICT[found[0]]\n raise ValueError(f\"Could not find timezone '{entry}' for {option_key}!\")\n\n\ndef email(entry, option_key=\"Email Address\", **kwargs):\n if not entry:\n raise ValueError(\"Email address field empty!\")\n try:\n _val_email(str(entry)) # offloading the hard work to Django!\n except _error:\n raise ValueError(f\"That isn't a valid {option_key}!\")\n return entry\n\n\ndef lock(entry, option_key=\"locks\", access_options=None, **kwargs):\n entry = entry.strip()\n if not entry:\n raise ValueError(f\"No {option_key} entered to set!\")\n for locksetting in entry.split(\";\"):\n access_type, lockfunc = locksetting.split(\":\", 1)\n if not access_type:\n raise ValueError(\"Must enter an access type!\")\n if access_options:\n if access_type not in access_options:\n raise ValueError(f\"Access type must be one of: {', '.join(access_options)}\")\n if not lockfunc:\n raise ValueError(\"Lock func not entered.\")\n return entry\n", "path": "evennia/utils/validatorfuncs.py"}]}
2,844
312
gh_patches_debug_39389
rasdani/github-patches
git_diff
ivy-llc__ivy-13622
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- gather --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ivy/functional/backends/paddle/general.py` Content: ``` 1 """Collection of Paddle general functions, wrapped to fit Ivy syntax and signature.""" 2 # global 3 from functools import reduce 4 from numbers import Number 5 from operator import mul 6 from typing import Optional, Union, Sequence, Callable, List, Tuple 7 import paddle 8 import numpy as np 9 10 # local 11 import ivy 12 from ivy.utils.exceptions import IvyNotImplementedException 13 from ivy.func_wrapper import with_unsupported_device_and_dtypes 14 from . import backend_version 15 import multiprocessing as _multiprocessing 16 from .elementwise import _elementwise_helper 17 18 19 @with_unsupported_device_and_dtypes( 20 {"2.4.2 and below": {"cpu": ("uint16", "bfloat16")}}, backend_version 21 ) 22 def is_native_array(x, /, *, exclusive=False): 23 if isinstance(x, paddle.Tensor): 24 if exclusive and not x.stop_gradient: 25 return False 26 return True 27 return False 28 29 30 @with_unsupported_device_and_dtypes( 31 {"2.4.2 and below": {"cpu": ("uint16", "bfloat16")}}, backend_version 32 ) 33 def array_equal(x0: paddle.Tensor, x1: paddle.Tensor, /) -> bool: 34 return bool(ivy.all(ivy.equal(x0, x1))) 35 36 37 def container_types(): 38 return [] 39 40 41 def current_backend_str() -> str: 42 return "paddle" 43 44 45 @with_unsupported_device_and_dtypes( 46 {"2.4.2 and below": {"cpu": ("uint16", "bfloat16")}}, backend_version 47 ) 48 def get_item(x: paddle.Tensor, query: Union[paddle.Tensor, Tuple]) -> paddle.Tensor: 49 # regular queries x[idx_1,idx_2,...,idx_i] 50 if isinstance(query, tuple): 51 if x.dtype in [paddle.int8, paddle.int16, paddle.uint8, paddle.float16]: 52 return x.cast("float32").__getitem__(query).cast(x.dtype) 53 return x.__getitem__(query) 54 55 if not ivy.is_native_array(query): 56 query = paddle.to_tensor(query, dtype="int64") 57 58 # masked queries x[bool_1,bool_2,...,bool_i] 59 if query.dtype == paddle.bool: 60 if x.dtype in [ 61 paddle.int8, 62 paddle.int16, 63 paddle.uint8, 64 paddle.float16, 65 paddle.complex64, 66 paddle.complex128, 67 paddle.bool, 68 ]: 69 if paddle.is_complex(x): 70 return paddle.complex( 71 paddle.masked_select(x.real(), query), 72 paddle.masked_select(x.imag(), query), 73 ) 74 return paddle.masked_select(x.cast("float32"), query).cast(x.dtype) 75 return paddle.masked_select(x, query) 76 77 query = query.cast("int64") 78 # array queries idx = Tensor(idx_1,idx_2,...,idx_i), x[idx] 79 if x.dtype in [ 80 paddle.int8, 81 paddle.int16, 82 paddle.uint8, 83 paddle.float16, 84 paddle.complex64, 85 paddle.complex128, 86 paddle.bool, 87 ]: 88 if paddle.is_complex(x): 89 return paddle.complex( 90 x.real().__getitem__(query), x.imag().__getitem__(query) 91 ) 92 return x.cast("float32").__getitem__(query).cast(x.dtype) 93 return x.__getitem__(query) 94 95 96 @with_unsupported_device_and_dtypes( 97 {"2.4.2 and below": {"cpu": ("uint16", "bfloat16")}}, backend_version 98 ) 99 def to_numpy( 100 x: Union[paddle.Tensor, List[paddle.Tensor]], /, *, copy: bool = True 101 ) -> Union[np.ndarray, List[np.ndarray]]: 102 if isinstance(x, (float, int, bool)): 103 return x 104 elif isinstance(x, np.ndarray): 105 if copy: 106 return x.copy() 107 else: 108 return x 109 elif paddle.is_tensor(x): 110 if copy: 111 return np.array(x) 112 else: 113 return np.asarray(x) 114 elif isinstance(x, list): 115 return [ivy.to_numpy(u) for u in x] 116 raise ivy.utils.exceptions.IvyException("Expected a Paddle Tensor.") 117 118 119 @with_unsupported_device_and_dtypes( 120 {"2.4.2 and below": {"cpu": ("uint16", "bfloat16")}}, backend_version 121 ) 122 def to_scalar(x: paddle.Tensor, /) -> Number: 123 if isinstance(x, (Number, complex)): 124 return x 125 return x.item() 126 127 128 @with_unsupported_device_and_dtypes( 129 {"2.4.2 and below": {"cpu": ("uint16", "bfloat16")}}, backend_version 130 ) 131 def to_list(x: paddle.Tensor, /) -> list: 132 return x.tolist() 133 134 135 @with_unsupported_device_and_dtypes( 136 { 137 "2.4.2 and below": { 138 "cpu": ( 139 "uint16", 140 "bfloat16", 141 "int8", 142 "int16", 143 "int32", 144 "int64", 145 "float16", 146 "complex64", 147 "complex128", 148 "bool", 149 ) 150 } 151 }, 152 backend_version, 153 ) 154 def gather( 155 params: paddle.Tensor, 156 indices: paddle.Tensor, 157 /, 158 *, 159 axis: Optional[int] = -1, 160 batch_dims: Optional[int] = 0, 161 out: Optional[paddle.Tensor] = None, 162 ) -> paddle.Tensor: 163 axis = axis % paddle.Tensor.ndimension(params) 164 batch_dims = batch_dims % paddle.Tensor.ndimension(params) 165 ivy.utils.assertions.check_gather_input_valid(params, indices, axis, batch_dims) 166 if batch_dims == 0: 167 result = paddle.gather(params, paddle.reshape(indices, shape=[-1]), axis=axis) 168 else: 169 params_list = [p for p in params] 170 indices_list = [i for i in indices] 171 for b in range(1, batch_dims): 172 params_list = [p1 for p in params_list for p1 in p] 173 indices_list = [i1 for i in indices_list for i1 in i] 174 result = [] 175 for p, i in zip(params_list, indices_list): 176 result.append( 177 paddle.gather(p, paddle.reshape(i, shape=[-1]), axis=axis - batch_dims) 178 ) 179 result = paddle.concat(result, axis=0) 180 new_shape = ( 181 params.shape[:axis] + indices.shape[batch_dims:] + params.shape[axis + 1 :] 182 ) 183 return paddle.reshape(result, shape=new_shape) 184 185 186 @with_unsupported_device_and_dtypes( 187 {"2.4.2 and below": {"cpu": ("uint16", "bfloat16")}}, backend_version 188 ) 189 def gather_nd( 190 params: paddle.Tensor, 191 indices: paddle.Tensor, 192 /, 193 *, 194 batch_dims: Optional[int] = 0, 195 out: Optional[paddle.Tensor] = None, 196 ) -> paddle.Tensor: 197 raise IvyNotImplementedException() 198 199 200 @with_unsupported_device_and_dtypes( 201 {"2.4.2 and below": {"cpu": ("uint16", "bfloat16")}}, backend_version 202 ) 203 def get_num_dims( 204 x: paddle.Tensor, /, *, as_array: bool = False 205 ) -> Union[paddle.Tensor, int]: 206 return paddle.to_tensor(x.ndim) if as_array else x.ndim 207 208 209 def inplace_arrays_supported(): 210 # there are some operations that support inplace updates 211 # but it's not supported in all functions 212 return False 213 214 215 @with_unsupported_device_and_dtypes( 216 {"2.4.2 and below": {"cpu": ("uint16", "bfloat16")}}, backend_version 217 ) 218 def inplace_decrement( 219 x: Union[ivy.Array, paddle.Tensor], 220 val: Union[ivy.Array, paddle.Tensor], 221 ) -> ivy.Array: 222 (x_native, val_native), _ = ivy.args_to_native(x, val) 223 x_native -= val_native 224 if ivy.is_ivy_array(x): 225 x.data = x_native 226 else: 227 x = ivy.Array(x_native) 228 return x 229 230 231 @with_unsupported_device_and_dtypes( 232 {"2.4.2 and below": {"cpu": ("uint16", "bfloat16")}}, backend_version 233 ) 234 def inplace_increment( 235 x: Union[ivy.Array, paddle.Tensor], 236 val: Union[ivy.Array, paddle.Tensor], 237 ) -> ivy.Array: 238 (x_native, val_native), _ = ivy.args_to_native(x, val) 239 x_native += val_native 240 if ivy.is_ivy_array(x): 241 x.data = x_native 242 else: 243 x = ivy.Array(x_native) 244 return x 245 246 247 @with_unsupported_device_and_dtypes( 248 {"2.4.2 and below": {"cpu": ("uint16", "bfloat16")}}, backend_version 249 ) 250 def inplace_update( 251 x: Union[ivy.Array, paddle.Tensor], 252 val: Union[ivy.Array, paddle.Tensor], 253 /, 254 *, 255 ensure_in_backend: bool = False, 256 keep_input_dtype: bool = False, 257 ) -> ivy.Array: 258 if ivy.is_array(x) and ivy.is_array(val): 259 (x_native, val_native), _ = ivy.args_to_native(x, val) 260 261 if val_native.shape == x_native.shape: 262 if x_native.dtype != val_native.dtype: 263 x_native = x_native.astype(val_native.dtype) 264 paddle.assign(val_native, x_native) 265 else: 266 x_native = val_native 267 if ivy.is_ivy_array(x): 268 x.data = x_native 269 else: 270 x = ivy.Array(x_native) 271 return x 272 else: 273 return val 274 275 276 @with_unsupported_device_and_dtypes( 277 {"2.4.2 and below": {"cpu": ("uint16", "bfloat16")}}, backend_version 278 ) 279 def inplace_variables_supported(): 280 return False 281 282 283 def multiprocessing(context=None): 284 return ( 285 _multiprocessing if context is None else _multiprocessing.get_context(context) 286 ) 287 288 289 @with_unsupported_device_and_dtypes( 290 {"2.4.2 and below": {"cpu": ("uint16", "bfloat16")}}, backend_version 291 ) 292 def scatter_flat( 293 indices: paddle.Tensor, 294 updates: paddle.Tensor, 295 /, 296 *, 297 size: Optional[int] = None, 298 reduction: str = "sum", 299 out: Optional[paddle.Tensor] = None, 300 ): 301 raise IvyNotImplementedException() 302 303 304 @with_unsupported_device_and_dtypes( 305 {"2.4.2 and below": {"cpu": ("uint16", "bfloat16")}}, backend_version 306 ) 307 def scatter_nd( 308 indices: paddle.Tensor, 309 updates: paddle.Tensor, 310 /, 311 shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None, 312 *, 313 reduction: str = "sum", 314 out: Optional[paddle.Tensor] = None, 315 ) -> paddle.Tensor: 316 raise IvyNotImplementedException() 317 318 319 @with_unsupported_device_and_dtypes( 320 {"2.4.2 and below": {"cpu": ("uint16", "bfloat16")}}, backend_version 321 ) 322 def shape( 323 x: paddle.Tensor, /, *, as_array: bool = False 324 ) -> Union[ivy.Shape, ivy.Array]: 325 if as_array: 326 return ivy.array(x.shape, dtype=ivy.default_int_dtype()) 327 else: 328 return ivy.Shape(x.shape) 329 330 331 @with_unsupported_device_and_dtypes( 332 {"2.4.2 and below": {"cpu": ("uint16", "bfloat16")}}, backend_version 333 ) 334 def vmap( 335 func: Callable, 336 in_axes: Union[int, Sequence[int], Sequence[None]] = 0, 337 out_axes: Optional[int] = 0, 338 ) -> Callable: 339 raise IvyNotImplementedException() 340 341 342 def isin( 343 elements: paddle.Tensor, 344 test_elements: paddle.Tensor, 345 /, 346 *, 347 assume_unique: Optional[bool] = False, 348 invert: Optional[bool] = False, 349 ) -> paddle.Tensor: 350 raise IvyNotImplementedException() 351 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ivy/functional/backends/paddle/general.py b/ivy/functional/backends/paddle/general.py --- a/ivy/functional/backends/paddle/general.py +++ b/ivy/functional/backends/paddle/general.py @@ -133,23 +133,7 @@ @with_unsupported_device_and_dtypes( - { - "2.4.2 and below": { - "cpu": ( - "uint16", - "bfloat16", - "int8", - "int16", - "int32", - "int64", - "float16", - "complex64", - "complex128", - "bool", - ) - } - }, - backend_version, + {"2.4.2 and below": {"cpu": ("uint16", "bfloat16")}}, backend_version ) def gather( params: paddle.Tensor, @@ -160,27 +144,50 @@ batch_dims: Optional[int] = 0, out: Optional[paddle.Tensor] = None, ) -> paddle.Tensor: - axis = axis % paddle.Tensor.ndimension(params) - batch_dims = batch_dims % paddle.Tensor.ndimension(params) + + + def _gather(params1): + with ivy.ArrayMode(False): + if batch_dims == 0: + result = paddle.gather(params1, ivy.reshape(indices, shape=[-1]), axis=axis) + #inputs are unstacked batch_dims times because paddle.gather does not support batch_dims + else: + params1_list = ivy.unstack(params1, axis=0) + indices_list = ivy.unstack(indices, axis=0) + for b in range(1, batch_dims): + params1_list = [p2 for p1 in params1_list for p2 in ivy.unstack(p1, axis=0)] + indices_list = [i2 for i1 in indices_list for i2 in ivy.unstack(i1, axis=0)] + result = [] + for p, i in zip(params1_list, indices_list): + result.append( + paddle.gather(p, ivy.reshape(i, shape=[-1]), axis=axis - batch_dims) + ) + result = ivy.concat(result, axis=0) + new_shape = ( + params1.shape[:axis] + indices.shape[batch_dims:] + params1.shape[axis + 1 :] + ) + return ivy.reshape(result, shape=new_shape) + + + axis = axis % params.ndim + batch_dims = batch_dims % params.ndim ivy.utils.assertions.check_gather_input_valid(params, indices, axis, batch_dims) - if batch_dims == 0: - result = paddle.gather(params, paddle.reshape(indices, shape=[-1]), axis=axis) - else: - params_list = [p for p in params] - indices_list = [i for i in indices] - for b in range(1, batch_dims): - params_list = [p1 for p in params_list for p1 in p] - indices_list = [i1 for i in indices_list for i1 in i] - result = [] - for p, i in zip(params_list, indices_list): - result.append( - paddle.gather(p, paddle.reshape(i, shape=[-1]), axis=axis - batch_dims) + if params.dtype in [ + paddle.int8, + paddle.int16, + paddle.float16, + paddle.complex64, + paddle.complex128, + paddle.bool, + ]: + if paddle.is_complex(params): + return paddle.complex( + _gather(params.real()), + _gather(params.imag()) ) - result = paddle.concat(result, axis=0) - new_shape = ( - params.shape[:axis] + indices.shape[batch_dims:] + params.shape[axis + 1 :] - ) - return paddle.reshape(result, shape=new_shape) + return _gather(params.cast("float32")).cast(params.dtype) + return _gather(params) + @with_unsupported_device_and_dtypes(
{"golden_diff": "diff --git a/ivy/functional/backends/paddle/general.py b/ivy/functional/backends/paddle/general.py\n--- a/ivy/functional/backends/paddle/general.py\n+++ b/ivy/functional/backends/paddle/general.py\n@@ -133,23 +133,7 @@\n \n \n @with_unsupported_device_and_dtypes(\n- {\n- \"2.4.2 and below\": {\n- \"cpu\": (\n- \"uint16\",\n- \"bfloat16\",\n- \"int8\",\n- \"int16\",\n- \"int32\",\n- \"int64\",\n- \"float16\",\n- \"complex64\",\n- \"complex128\",\n- \"bool\",\n- )\n- }\n- },\n- backend_version,\n+ {\"2.4.2 and below\": {\"cpu\": (\"uint16\", \"bfloat16\")}}, backend_version\n )\n def gather(\n params: paddle.Tensor,\n@@ -160,27 +144,50 @@\n batch_dims: Optional[int] = 0,\n out: Optional[paddle.Tensor] = None,\n ) -> paddle.Tensor:\n- axis = axis % paddle.Tensor.ndimension(params)\n- batch_dims = batch_dims % paddle.Tensor.ndimension(params)\n+ \n+\n+ def _gather(params1):\n+ with ivy.ArrayMode(False):\n+ if batch_dims == 0:\n+ result = paddle.gather(params1, ivy.reshape(indices, shape=[-1]), axis=axis)\n+ #inputs are unstacked batch_dims times because paddle.gather does not support batch_dims\n+ else:\n+ params1_list = ivy.unstack(params1, axis=0)\n+ indices_list = ivy.unstack(indices, axis=0)\n+ for b in range(1, batch_dims):\n+ params1_list = [p2 for p1 in params1_list for p2 in ivy.unstack(p1, axis=0)]\n+ indices_list = [i2 for i1 in indices_list for i2 in ivy.unstack(i1, axis=0)]\n+ result = []\n+ for p, i in zip(params1_list, indices_list):\n+ result.append(\n+ paddle.gather(p, ivy.reshape(i, shape=[-1]), axis=axis - batch_dims)\n+ )\n+ result = ivy.concat(result, axis=0)\n+ new_shape = (\n+ params1.shape[:axis] + indices.shape[batch_dims:] + params1.shape[axis + 1 :]\n+ )\n+ return ivy.reshape(result, shape=new_shape)\n+ \n+ \n+ axis = axis % params.ndim\n+ batch_dims = batch_dims % params.ndim\n ivy.utils.assertions.check_gather_input_valid(params, indices, axis, batch_dims)\n- if batch_dims == 0:\n- result = paddle.gather(params, paddle.reshape(indices, shape=[-1]), axis=axis)\n- else:\n- params_list = [p for p in params]\n- indices_list = [i for i in indices]\n- for b in range(1, batch_dims):\n- params_list = [p1 for p in params_list for p1 in p]\n- indices_list = [i1 for i in indices_list for i1 in i]\n- result = []\n- for p, i in zip(params_list, indices_list):\n- result.append(\n- paddle.gather(p, paddle.reshape(i, shape=[-1]), axis=axis - batch_dims)\n+ if params.dtype in [\n+ paddle.int8,\n+ paddle.int16,\n+ paddle.float16,\n+ paddle.complex64,\n+ paddle.complex128,\n+ paddle.bool,\n+ ]:\n+ if paddle.is_complex(params):\n+ return paddle.complex(\n+ _gather(params.real()),\n+ _gather(params.imag())\n )\n- result = paddle.concat(result, axis=0)\n- new_shape = (\n- params.shape[:axis] + indices.shape[batch_dims:] + params.shape[axis + 1 :]\n- )\n- return paddle.reshape(result, shape=new_shape)\n+ return _gather(params.cast(\"float32\")).cast(params.dtype)\n+ return _gather(params)\n+ \n \n \n @with_unsupported_device_and_dtypes(\n", "issue": "gather\n\n", "before_files": [{"content": "\"\"\"Collection of Paddle general functions, wrapped to fit Ivy syntax and signature.\"\"\"\n# global\nfrom functools import reduce\nfrom numbers import Number\nfrom operator import mul\nfrom typing import Optional, Union, Sequence, Callable, List, Tuple\nimport paddle\nimport numpy as np\n\n# local\nimport ivy\nfrom ivy.utils.exceptions import IvyNotImplementedException\nfrom ivy.func_wrapper import with_unsupported_device_and_dtypes\nfrom . import backend_version\nimport multiprocessing as _multiprocessing\nfrom .elementwise import _elementwise_helper\n\n\n@with_unsupported_device_and_dtypes(\n {\"2.4.2 and below\": {\"cpu\": (\"uint16\", \"bfloat16\")}}, backend_version\n)\ndef is_native_array(x, /, *, exclusive=False):\n if isinstance(x, paddle.Tensor):\n if exclusive and not x.stop_gradient:\n return False\n return True\n return False\n\n\n@with_unsupported_device_and_dtypes(\n {\"2.4.2 and below\": {\"cpu\": (\"uint16\", \"bfloat16\")}}, backend_version\n)\ndef array_equal(x0: paddle.Tensor, x1: paddle.Tensor, /) -> bool:\n return bool(ivy.all(ivy.equal(x0, x1)))\n\n\ndef container_types():\n return []\n\n\ndef current_backend_str() -> str:\n return \"paddle\"\n\n\n@with_unsupported_device_and_dtypes(\n {\"2.4.2 and below\": {\"cpu\": (\"uint16\", \"bfloat16\")}}, backend_version\n)\ndef get_item(x: paddle.Tensor, query: Union[paddle.Tensor, Tuple]) -> paddle.Tensor:\n # regular queries x[idx_1,idx_2,...,idx_i]\n if isinstance(query, tuple):\n if x.dtype in [paddle.int8, paddle.int16, paddle.uint8, paddle.float16]:\n return x.cast(\"float32\").__getitem__(query).cast(x.dtype)\n return x.__getitem__(query)\n\n if not ivy.is_native_array(query):\n query = paddle.to_tensor(query, dtype=\"int64\")\n\n # masked queries x[bool_1,bool_2,...,bool_i]\n if query.dtype == paddle.bool:\n if x.dtype in [\n paddle.int8,\n paddle.int16,\n paddle.uint8,\n paddle.float16,\n paddle.complex64,\n paddle.complex128,\n paddle.bool,\n ]:\n if paddle.is_complex(x):\n return paddle.complex(\n paddle.masked_select(x.real(), query),\n paddle.masked_select(x.imag(), query),\n )\n return paddle.masked_select(x.cast(\"float32\"), query).cast(x.dtype)\n return paddle.masked_select(x, query)\n\n query = query.cast(\"int64\")\n # array queries idx = Tensor(idx_1,idx_2,...,idx_i), x[idx]\n if x.dtype in [\n paddle.int8,\n paddle.int16,\n paddle.uint8,\n paddle.float16,\n paddle.complex64,\n paddle.complex128,\n paddle.bool,\n ]:\n if paddle.is_complex(x):\n return paddle.complex(\n x.real().__getitem__(query), x.imag().__getitem__(query)\n )\n return x.cast(\"float32\").__getitem__(query).cast(x.dtype)\n return x.__getitem__(query)\n\n\n@with_unsupported_device_and_dtypes(\n {\"2.4.2 and below\": {\"cpu\": (\"uint16\", \"bfloat16\")}}, backend_version\n)\ndef to_numpy(\n x: Union[paddle.Tensor, List[paddle.Tensor]], /, *, copy: bool = True\n) -> Union[np.ndarray, List[np.ndarray]]:\n if isinstance(x, (float, int, bool)):\n return x\n elif isinstance(x, np.ndarray):\n if copy:\n return x.copy()\n else:\n return x\n elif paddle.is_tensor(x):\n if copy:\n return np.array(x)\n else:\n return np.asarray(x)\n elif isinstance(x, list):\n return [ivy.to_numpy(u) for u in x]\n raise ivy.utils.exceptions.IvyException(\"Expected a Paddle Tensor.\")\n\n\n@with_unsupported_device_and_dtypes(\n {\"2.4.2 and below\": {\"cpu\": (\"uint16\", \"bfloat16\")}}, backend_version\n)\ndef to_scalar(x: paddle.Tensor, /) -> Number:\n if isinstance(x, (Number, complex)):\n return x\n return x.item()\n\n\n@with_unsupported_device_and_dtypes(\n {\"2.4.2 and below\": {\"cpu\": (\"uint16\", \"bfloat16\")}}, backend_version\n)\ndef to_list(x: paddle.Tensor, /) -> list:\n return x.tolist()\n\n\n@with_unsupported_device_and_dtypes(\n {\n \"2.4.2 and below\": {\n \"cpu\": (\n \"uint16\",\n \"bfloat16\",\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"float16\",\n \"complex64\",\n \"complex128\",\n \"bool\",\n )\n }\n },\n backend_version,\n)\ndef gather(\n params: paddle.Tensor,\n indices: paddle.Tensor,\n /,\n *,\n axis: Optional[int] = -1,\n batch_dims: Optional[int] = 0,\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n axis = axis % paddle.Tensor.ndimension(params)\n batch_dims = batch_dims % paddle.Tensor.ndimension(params)\n ivy.utils.assertions.check_gather_input_valid(params, indices, axis, batch_dims)\n if batch_dims == 0:\n result = paddle.gather(params, paddle.reshape(indices, shape=[-1]), axis=axis)\n else:\n params_list = [p for p in params]\n indices_list = [i for i in indices]\n for b in range(1, batch_dims):\n params_list = [p1 for p in params_list for p1 in p]\n indices_list = [i1 for i in indices_list for i1 in i]\n result = []\n for p, i in zip(params_list, indices_list):\n result.append(\n paddle.gather(p, paddle.reshape(i, shape=[-1]), axis=axis - batch_dims)\n )\n result = paddle.concat(result, axis=0)\n new_shape = (\n params.shape[:axis] + indices.shape[batch_dims:] + params.shape[axis + 1 :]\n )\n return paddle.reshape(result, shape=new_shape)\n\n\n@with_unsupported_device_and_dtypes(\n {\"2.4.2 and below\": {\"cpu\": (\"uint16\", \"bfloat16\")}}, backend_version\n)\ndef gather_nd(\n params: paddle.Tensor,\n indices: paddle.Tensor,\n /,\n *,\n batch_dims: Optional[int] = 0,\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n raise IvyNotImplementedException()\n\n\n@with_unsupported_device_and_dtypes(\n {\"2.4.2 and below\": {\"cpu\": (\"uint16\", \"bfloat16\")}}, backend_version\n)\ndef get_num_dims(\n x: paddle.Tensor, /, *, as_array: bool = False\n) -> Union[paddle.Tensor, int]:\n return paddle.to_tensor(x.ndim) if as_array else x.ndim\n\n\ndef inplace_arrays_supported():\n # there are some operations that support inplace updates\n # but it's not supported in all functions\n return False\n\n\n@with_unsupported_device_and_dtypes(\n {\"2.4.2 and below\": {\"cpu\": (\"uint16\", \"bfloat16\")}}, backend_version\n)\ndef inplace_decrement(\n x: Union[ivy.Array, paddle.Tensor],\n val: Union[ivy.Array, paddle.Tensor],\n) -> ivy.Array:\n (x_native, val_native), _ = ivy.args_to_native(x, val)\n x_native -= val_native\n if ivy.is_ivy_array(x):\n x.data = x_native\n else:\n x = ivy.Array(x_native)\n return x\n\n\n@with_unsupported_device_and_dtypes(\n {\"2.4.2 and below\": {\"cpu\": (\"uint16\", \"bfloat16\")}}, backend_version\n)\ndef inplace_increment(\n x: Union[ivy.Array, paddle.Tensor],\n val: Union[ivy.Array, paddle.Tensor],\n) -> ivy.Array:\n (x_native, val_native), _ = ivy.args_to_native(x, val)\n x_native += val_native\n if ivy.is_ivy_array(x):\n x.data = x_native\n else:\n x = ivy.Array(x_native)\n return x\n\n\n@with_unsupported_device_and_dtypes(\n {\"2.4.2 and below\": {\"cpu\": (\"uint16\", \"bfloat16\")}}, backend_version\n)\ndef inplace_update(\n x: Union[ivy.Array, paddle.Tensor],\n val: Union[ivy.Array, paddle.Tensor],\n /,\n *,\n ensure_in_backend: bool = False,\n keep_input_dtype: bool = False,\n) -> ivy.Array:\n if ivy.is_array(x) and ivy.is_array(val):\n (x_native, val_native), _ = ivy.args_to_native(x, val)\n\n if val_native.shape == x_native.shape:\n if x_native.dtype != val_native.dtype:\n x_native = x_native.astype(val_native.dtype)\n paddle.assign(val_native, x_native)\n else:\n x_native = val_native\n if ivy.is_ivy_array(x):\n x.data = x_native\n else:\n x = ivy.Array(x_native)\n return x\n else:\n return val\n\n\n@with_unsupported_device_and_dtypes(\n {\"2.4.2 and below\": {\"cpu\": (\"uint16\", \"bfloat16\")}}, backend_version\n)\ndef inplace_variables_supported():\n return False\n\n\ndef multiprocessing(context=None):\n return (\n _multiprocessing if context is None else _multiprocessing.get_context(context)\n )\n\n\n@with_unsupported_device_and_dtypes(\n {\"2.4.2 and below\": {\"cpu\": (\"uint16\", \"bfloat16\")}}, backend_version\n)\ndef scatter_flat(\n indices: paddle.Tensor,\n updates: paddle.Tensor,\n /,\n *,\n size: Optional[int] = None,\n reduction: str = \"sum\",\n out: Optional[paddle.Tensor] = None,\n):\n raise IvyNotImplementedException()\n\n\n@with_unsupported_device_and_dtypes(\n {\"2.4.2 and below\": {\"cpu\": (\"uint16\", \"bfloat16\")}}, backend_version\n)\ndef scatter_nd(\n indices: paddle.Tensor,\n updates: paddle.Tensor,\n /,\n shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,\n *,\n reduction: str = \"sum\",\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n raise IvyNotImplementedException()\n\n\n@with_unsupported_device_and_dtypes(\n {\"2.4.2 and below\": {\"cpu\": (\"uint16\", \"bfloat16\")}}, backend_version\n)\ndef shape(\n x: paddle.Tensor, /, *, as_array: bool = False\n) -> Union[ivy.Shape, ivy.Array]:\n if as_array:\n return ivy.array(x.shape, dtype=ivy.default_int_dtype())\n else:\n return ivy.Shape(x.shape)\n\n\n@with_unsupported_device_and_dtypes(\n {\"2.4.2 and below\": {\"cpu\": (\"uint16\", \"bfloat16\")}}, backend_version\n)\ndef vmap(\n func: Callable,\n in_axes: Union[int, Sequence[int], Sequence[None]] = 0,\n out_axes: Optional[int] = 0,\n) -> Callable:\n raise IvyNotImplementedException()\n\n\ndef isin(\n elements: paddle.Tensor,\n test_elements: paddle.Tensor,\n /,\n *,\n assume_unique: Optional[bool] = False,\n invert: Optional[bool] = False,\n) -> paddle.Tensor:\n raise IvyNotImplementedException()\n", "path": "ivy/functional/backends/paddle/general.py"}], "after_files": [{"content": "\"\"\"Collection of Paddle general functions, wrapped to fit Ivy syntax and signature.\"\"\"\n# global\nfrom functools import reduce\nfrom numbers import Number\nfrom operator import mul\nfrom typing import Optional, Union, Sequence, Callable, List, Tuple\nimport paddle\nimport numpy as np\n\n# local\nimport ivy\nfrom ivy.utils.exceptions import IvyNotImplementedException\nfrom ivy.func_wrapper import with_unsupported_device_and_dtypes\nfrom . import backend_version\nimport multiprocessing as _multiprocessing\nfrom .elementwise import _elementwise_helper\n\n\n@with_unsupported_device_and_dtypes(\n {\"2.4.2 and below\": {\"cpu\": (\"uint16\", \"bfloat16\")}}, backend_version\n)\ndef is_native_array(x, /, *, exclusive=False):\n if isinstance(x, paddle.Tensor):\n if exclusive and not x.stop_gradient:\n return False\n return True\n return False\n\n\n@with_unsupported_device_and_dtypes(\n {\"2.4.2 and below\": {\"cpu\": (\"uint16\", \"bfloat16\")}}, backend_version\n)\ndef array_equal(x0: paddle.Tensor, x1: paddle.Tensor, /) -> bool:\n return bool(ivy.all(ivy.equal(x0, x1)))\n\n\ndef container_types():\n return []\n\n\ndef current_backend_str() -> str:\n return \"paddle\"\n\n\n@with_unsupported_device_and_dtypes(\n {\"2.4.2 and below\": {\"cpu\": (\"uint16\", \"bfloat16\")}}, backend_version\n)\ndef get_item(x: paddle.Tensor, query: Union[paddle.Tensor, Tuple]) -> paddle.Tensor:\n # regular queries x[idx_1,idx_2,...,idx_i]\n if isinstance(query, tuple):\n if x.dtype in [paddle.int8, paddle.int16, paddle.uint8, paddle.float16]:\n return x.cast(\"float32\").__getitem__(query).cast(x.dtype)\n return x.__getitem__(query)\n\n if not ivy.is_native_array(query):\n query = paddle.to_tensor(query, dtype=\"int64\")\n\n # masked queries x[bool_1,bool_2,...,bool_i]\n if query.dtype == paddle.bool:\n if x.dtype in [\n paddle.int8,\n paddle.int16,\n paddle.uint8,\n paddle.float16,\n paddle.complex64,\n paddle.complex128,\n paddle.bool,\n ]:\n if paddle.is_complex(x):\n return paddle.complex(\n paddle.masked_select(x.real(), query),\n paddle.masked_select(x.imag(), query),\n )\n return paddle.masked_select(x.cast(\"float32\"), query).cast(x.dtype)\n return paddle.masked_select(x, query)\n\n query = query.cast(\"int64\")\n # array queries idx = Tensor(idx_1,idx_2,...,idx_i), x[idx]\n if x.dtype in [\n paddle.int8,\n paddle.int16,\n paddle.uint8,\n paddle.float16,\n paddle.complex64,\n paddle.complex128,\n paddle.bool,\n ]:\n if paddle.is_complex(x):\n return paddle.complex(\n x.real().__getitem__(query), x.imag().__getitem__(query)\n )\n return x.cast(\"float32\").__getitem__(query).cast(x.dtype)\n return x.__getitem__(query)\n\n\n@with_unsupported_device_and_dtypes(\n {\"2.4.2 and below\": {\"cpu\": (\"uint16\", \"bfloat16\")}}, backend_version\n)\ndef to_numpy(\n x: Union[paddle.Tensor, List[paddle.Tensor]], /, *, copy: bool = True\n) -> Union[np.ndarray, List[np.ndarray]]:\n if isinstance(x, (float, int, bool)):\n return x\n elif isinstance(x, np.ndarray):\n if copy:\n return x.copy()\n else:\n return x\n elif paddle.is_tensor(x):\n if copy:\n return np.array(x)\n else:\n return np.asarray(x)\n elif isinstance(x, list):\n return [ivy.to_numpy(u) for u in x]\n raise ivy.utils.exceptions.IvyException(\"Expected a Paddle Tensor.\")\n\n\n@with_unsupported_device_and_dtypes(\n {\"2.4.2 and below\": {\"cpu\": (\"uint16\", \"bfloat16\")}}, backend_version\n)\ndef to_scalar(x: paddle.Tensor, /) -> Number:\n if isinstance(x, (Number, complex)):\n return x\n return x.item()\n\n\n@with_unsupported_device_and_dtypes(\n {\"2.4.2 and below\": {\"cpu\": (\"uint16\", \"bfloat16\")}}, backend_version\n)\ndef to_list(x: paddle.Tensor, /) -> list:\n return x.tolist()\n\n\n@with_unsupported_device_and_dtypes(\n {\"2.4.2 and below\": {\"cpu\": (\"uint16\", \"bfloat16\")}}, backend_version\n)\ndef gather(\n params: paddle.Tensor,\n indices: paddle.Tensor,\n /,\n *,\n axis: Optional[int] = -1,\n batch_dims: Optional[int] = 0,\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n \n\n def _gather(params1):\n with ivy.ArrayMode(False):\n if batch_dims == 0:\n result = paddle.gather(params1, ivy.reshape(indices, shape=[-1]), axis=axis)\n #inputs are unstacked batch_dims times because paddle.gather does not support batch_dims\n else:\n params1_list = ivy.unstack(params1, axis=0)\n indices_list = ivy.unstack(indices, axis=0)\n for b in range(1, batch_dims):\n params1_list = [p2 for p1 in params1_list for p2 in ivy.unstack(p1, axis=0)]\n indices_list = [i2 for i1 in indices_list for i2 in ivy.unstack(i1, axis=0)]\n result = []\n for p, i in zip(params1_list, indices_list):\n result.append(\n paddle.gather(p, ivy.reshape(i, shape=[-1]), axis=axis - batch_dims)\n )\n result = ivy.concat(result, axis=0)\n new_shape = (\n params1.shape[:axis] + indices.shape[batch_dims:] + params1.shape[axis + 1 :]\n )\n return ivy.reshape(result, shape=new_shape)\n \n \n axis = axis % params.ndim\n batch_dims = batch_dims % params.ndim\n ivy.utils.assertions.check_gather_input_valid(params, indices, axis, batch_dims)\n if params.dtype in [\n paddle.int8,\n paddle.int16,\n paddle.float16,\n paddle.complex64,\n paddle.complex128,\n paddle.bool,\n ]:\n if paddle.is_complex(params):\n return paddle.complex(\n _gather(params.real()),\n _gather(params.imag())\n )\n return _gather(params.cast(\"float32\")).cast(params.dtype)\n return _gather(params)\n \n\n\n@with_unsupported_device_and_dtypes(\n {\"2.4.2 and below\": {\"cpu\": (\"uint16\", \"bfloat16\")}}, backend_version\n)\ndef gather_nd(\n params: paddle.Tensor,\n indices: paddle.Tensor,\n /,\n *,\n batch_dims: Optional[int] = 0,\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n raise IvyNotImplementedException()\n\n\n@with_unsupported_device_and_dtypes(\n {\"2.4.2 and below\": {\"cpu\": (\"uint16\", \"bfloat16\")}}, backend_version\n)\ndef get_num_dims(\n x: paddle.Tensor, /, *, as_array: bool = False\n) -> Union[paddle.Tensor, int]:\n return paddle.to_tensor(x.ndim) if as_array else x.ndim\n\n\ndef inplace_arrays_supported():\n # there are some operations that support inplace updates\n # but it's not supported in all functions\n return False\n\n\n@with_unsupported_device_and_dtypes(\n {\"2.4.2 and below\": {\"cpu\": (\"uint16\", \"bfloat16\")}}, backend_version\n)\ndef inplace_decrement(\n x: Union[ivy.Array, paddle.Tensor],\n val: Union[ivy.Array, paddle.Tensor],\n) -> ivy.Array:\n (x_native, val_native), _ = ivy.args_to_native(x, val)\n x_native -= val_native\n if ivy.is_ivy_array(x):\n x.data = x_native\n else:\n x = ivy.Array(x_native)\n return x\n\n\n@with_unsupported_device_and_dtypes(\n {\"2.4.2 and below\": {\"cpu\": (\"uint16\", \"bfloat16\")}}, backend_version\n)\ndef inplace_increment(\n x: Union[ivy.Array, paddle.Tensor],\n val: Union[ivy.Array, paddle.Tensor],\n) -> ivy.Array:\n (x_native, val_native), _ = ivy.args_to_native(x, val)\n x_native += val_native\n if ivy.is_ivy_array(x):\n x.data = x_native\n else:\n x = ivy.Array(x_native)\n return x\n\n\n@with_unsupported_device_and_dtypes(\n {\"2.4.2 and below\": {\"cpu\": (\"uint16\", \"bfloat16\")}}, backend_version\n)\ndef inplace_update(\n x: Union[ivy.Array, paddle.Tensor],\n val: Union[ivy.Array, paddle.Tensor],\n /,\n *,\n ensure_in_backend: bool = False,\n keep_input_dtype: bool = False,\n) -> ivy.Array:\n if ivy.is_array(x) and ivy.is_array(val):\n (x_native, val_native), _ = ivy.args_to_native(x, val)\n\n if val_native.shape == x_native.shape:\n if x_native.dtype != val_native.dtype:\n x_native = x_native.astype(val_native.dtype)\n paddle.assign(val_native, x_native)\n else:\n x_native = val_native\n if ivy.is_ivy_array(x):\n x.data = x_native\n else:\n x = ivy.Array(x_native)\n return x\n else:\n return val\n\n\n@with_unsupported_device_and_dtypes(\n {\"2.4.2 and below\": {\"cpu\": (\"uint16\", \"bfloat16\")}}, backend_version\n)\ndef inplace_variables_supported():\n return False\n\n\ndef multiprocessing(context=None):\n return (\n _multiprocessing if context is None else _multiprocessing.get_context(context)\n )\n\n\n@with_unsupported_device_and_dtypes(\n {\"2.4.2 and below\": {\"cpu\": (\"uint16\", \"bfloat16\")}}, backend_version\n)\ndef scatter_flat(\n indices: paddle.Tensor,\n updates: paddle.Tensor,\n /,\n *,\n size: Optional[int] = None,\n reduction: str = \"sum\",\n out: Optional[paddle.Tensor] = None,\n):\n raise IvyNotImplementedException()\n\n\n@with_unsupported_device_and_dtypes(\n {\"2.4.2 and below\": {\"cpu\": (\"uint16\", \"bfloat16\")}}, backend_version\n)\ndef scatter_nd(\n indices: paddle.Tensor,\n updates: paddle.Tensor,\n /,\n shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,\n *,\n reduction: str = \"sum\",\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n raise IvyNotImplementedException()\n\n\n@with_unsupported_device_and_dtypes(\n {\"2.4.2 and below\": {\"cpu\": (\"uint16\", \"bfloat16\")}}, backend_version\n)\ndef shape(\n x: paddle.Tensor, /, *, as_array: bool = False\n) -> Union[ivy.Shape, ivy.Array]:\n if as_array:\n return ivy.array(x.shape, dtype=ivy.default_int_dtype())\n else:\n return ivy.Shape(x.shape)\n\n\n@with_unsupported_device_and_dtypes(\n {\"2.4.2 and below\": {\"cpu\": (\"uint16\", \"bfloat16\")}}, backend_version\n)\ndef vmap(\n func: Callable,\n in_axes: Union[int, Sequence[int], Sequence[None]] = 0,\n out_axes: Optional[int] = 0,\n) -> Callable:\n raise IvyNotImplementedException()\n\n\ndef isin(\n elements: paddle.Tensor,\n test_elements: paddle.Tensor,\n /,\n *,\n assume_unique: Optional[bool] = False,\n invert: Optional[bool] = False,\n) -> paddle.Tensor:\n raise IvyNotImplementedException()\n", "path": "ivy/functional/backends/paddle/general.py"}]}
3,872
954
gh_patches_debug_21110
rasdani/github-patches
git_diff
iterative__dvc-1978
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- logger: still ignoring the context of the progress bar version: `0.40.0+6408b5` When trying to push to an SSH with the `ask_password` option set to `True`: ``` # [############ ] 40% Collecting informationEnter a private key passphrase or a password for host 'localhost' port '22' user 'mroutis': ``` This behavior should be handle at: https://github.com/iterative/dvc/blob/6408b58b8daddc297467453bcd130c07b09cd46b/dvc/logger.py#L134-L140 It should be tested under `tests/unit/test_logger.py` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `dvc/logger.py` Content: ``` 1 """Manages logging configuration for dvc repo.""" 2 3 from __future__ import unicode_literals 4 5 from dvc.utils.compat import str, StringIO 6 7 import logging 8 import logging.handlers 9 import logging.config 10 import colorama 11 12 13 class ExcludeErrorsFilter(logging.Filter): 14 def filter(self, record): 15 return record.levelno < logging.ERROR 16 17 18 class ColorFormatter(logging.Formatter): 19 """Enable color support when logging to a terminal that supports it. 20 21 Color support on Windows versions that do not support ANSI color codes is 22 enabled by use of the colorama__ library. 23 See the colorama documentation for details. 24 25 __ https://pypi.python.org/pypi/colorama 26 27 For records containing `exc_info`, it will use a custom `_walk_exc` to 28 retrieve the whole tracebak. 29 """ 30 31 color_code = { 32 "DEBUG": colorama.Fore.BLUE, 33 "INFO": "", 34 "WARNING": colorama.Fore.YELLOW, 35 "ERROR": colorama.Fore.RED, 36 "CRITICAL": colorama.Fore.RED, 37 } 38 39 footer = ( 40 "{yellow}Having any troubles?{nc}." 41 " Hit us up at {blue}https://dvc.org/support{nc}," 42 " we are always happy to help!" 43 ).format( 44 blue=colorama.Fore.BLUE, 45 nc=colorama.Fore.RESET, 46 yellow=colorama.Fore.YELLOW, 47 ) 48 49 def format(self, record): 50 if record.levelname == "INFO": 51 return record.msg 52 53 if record.levelname == "ERROR" or record.levelname == "CRITICAL": 54 exception, stack_trace = self._parse_exc(record.exc_info) 55 56 return ( 57 "{color}{levelname}{nc}: {description}" 58 "{stack_trace}\n" 59 "\n" 60 "{footer}" 61 ).format( 62 color=self.color_code.get(record.levelname, ""), 63 nc=colorama.Fore.RESET, 64 levelname=record.levelname, 65 description=self._description(record.msg, exception), 66 msg=record.msg, 67 stack_trace=stack_trace, 68 footer=self.footer, 69 ) 70 71 return "{color}{levelname}{nc}: {msg}".format( 72 color=self.color_code.get(record.levelname, ""), 73 nc=colorama.Fore.RESET, 74 levelname=record.levelname, 75 msg=record.msg, 76 ) 77 78 def _description(self, message, exception): 79 description = "" 80 81 if exception and message: 82 description = "{message} - {exception}" 83 elif exception: 84 description = "{exception}" 85 elif message: 86 description = "{message}" 87 88 return description.format(message=message, exception=exception) 89 90 def _walk_exc(self, exc_info): 91 import traceback 92 93 buffer = StringIO() 94 95 traceback.print_exception(*exc_info, file=buffer) 96 97 exc = exc_info[1] 98 tb = buffer.getvalue() 99 100 exc_list = [str(exc)] 101 tb_list = [tb] 102 103 # NOTE: parsing chained exceptions. See dvc/exceptions.py for more info 104 while hasattr(exc, "cause") and exc.cause: 105 exc_list.append(str(exc.cause)) 106 if hasattr(exc, "cause_tb") and exc.cause_tb: 107 tb_list.insert(0, str(exc.cause_tb)) 108 exc = exc.cause 109 110 return exc_list, tb_list 111 112 def _parse_exc(self, exc_info): 113 if not exc_info: 114 return (None, "") 115 116 exc_list, tb_list = self._walk_exc(exc_info) 117 118 exception = ": ".join(exc_list) 119 120 if logging.getLogger("dvc").getEffectiveLevel() == logging.DEBUG: 121 stack_trace = ( 122 "\n" "{red}{line}{nc}\n" "{stack_trace}" "{red}{line}{nc}" 123 ).format( 124 red=colorama.Fore.RED, 125 nc=colorama.Fore.RESET, 126 line="-" * 60, 127 stack_trace="\n".join(tb_list), 128 ) 129 else: 130 stack_trace = "" 131 132 return (exception, stack_trace) 133 134 def _progress_aware(self): 135 """Add a new line if progress bar hasn't finished""" 136 from dvc.progress import progress 137 138 if not progress.is_finished: 139 progress._print() 140 progress.clearln() 141 142 143 def setup(level=logging.INFO): 144 colorama.init() 145 146 logging.config.dictConfig( 147 { 148 "version": 1, 149 "filters": {"exclude_errors": {"()": ExcludeErrorsFilter}}, 150 "formatters": {"color": {"()": ColorFormatter}}, 151 "handlers": { 152 "console": { 153 "class": "logging.StreamHandler", 154 "level": "DEBUG", 155 "formatter": "color", 156 "stream": "ext://sys.stdout", 157 "filters": ["exclude_errors"], 158 }, 159 "console_errors": { 160 "class": "logging.StreamHandler", 161 "level": "ERROR", 162 "formatter": "color", 163 "stream": "ext://sys.stderr", 164 }, 165 }, 166 "loggers": { 167 "dvc": { 168 "level": level, 169 "handlers": ["console", "console_errors"], 170 } 171 }, 172 } 173 ) 174 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/dvc/logger.py b/dvc/logger.py --- a/dvc/logger.py +++ b/dvc/logger.py @@ -47,6 +47,9 @@ ) def format(self, record): + if self._is_visible(record): + self._progress_aware() + if record.levelname == "INFO": return record.msg @@ -75,6 +78,12 @@ msg=record.msg, ) + def _current_level(self): + return logging.getLogger("dvc").getEffectiveLevel() + + def _is_visible(self, record): + return record.levelno >= self._current_level() + def _description(self, message, exception): description = "" @@ -117,7 +126,7 @@ exception = ": ".join(exc_list) - if logging.getLogger("dvc").getEffectiveLevel() == logging.DEBUG: + if self._current_level() == logging.DEBUG: stack_trace = ( "\n" "{red}{line}{nc}\n" "{stack_trace}" "{red}{line}{nc}" ).format(
{"golden_diff": "diff --git a/dvc/logger.py b/dvc/logger.py\n--- a/dvc/logger.py\n+++ b/dvc/logger.py\n@@ -47,6 +47,9 @@\n )\n \n def format(self, record):\n+ if self._is_visible(record):\n+ self._progress_aware()\n+\n if record.levelname == \"INFO\":\n return record.msg\n \n@@ -75,6 +78,12 @@\n msg=record.msg,\n )\n \n+ def _current_level(self):\n+ return logging.getLogger(\"dvc\").getEffectiveLevel()\n+\n+ def _is_visible(self, record):\n+ return record.levelno >= self._current_level()\n+\n def _description(self, message, exception):\n description = \"\"\n \n@@ -117,7 +126,7 @@\n \n exception = \": \".join(exc_list)\n \n- if logging.getLogger(\"dvc\").getEffectiveLevel() == logging.DEBUG:\n+ if self._current_level() == logging.DEBUG:\n stack_trace = (\n \"\\n\" \"{red}{line}{nc}\\n\" \"{stack_trace}\" \"{red}{line}{nc}\"\n ).format(\n", "issue": "logger: still ignoring the context of the progress bar\nversion: `0.40.0+6408b5`\r\n\r\nWhen trying to push to an SSH with the `ask_password` option set to `True`:\r\n```\r\n# [############ ] 40% Collecting informationEnter a private key passphrase or a password for host 'localhost' port '22' user 'mroutis':\r\n```\r\n\r\nThis behavior should be handle at: https://github.com/iterative/dvc/blob/6408b58b8daddc297467453bcd130c07b09cd46b/dvc/logger.py#L134-L140\r\n\r\nIt should be tested under `tests/unit/test_logger.py`\n", "before_files": [{"content": "\"\"\"Manages logging configuration for dvc repo.\"\"\"\n\nfrom __future__ import unicode_literals\n\nfrom dvc.utils.compat import str, StringIO\n\nimport logging\nimport logging.handlers\nimport logging.config\nimport colorama\n\n\nclass ExcludeErrorsFilter(logging.Filter):\n def filter(self, record):\n return record.levelno < logging.ERROR\n\n\nclass ColorFormatter(logging.Formatter):\n \"\"\"Enable color support when logging to a terminal that supports it.\n\n Color support on Windows versions that do not support ANSI color codes is\n enabled by use of the colorama__ library.\n See the colorama documentation for details.\n\n __ https://pypi.python.org/pypi/colorama\n\n For records containing `exc_info`, it will use a custom `_walk_exc` to\n retrieve the whole tracebak.\n \"\"\"\n\n color_code = {\n \"DEBUG\": colorama.Fore.BLUE,\n \"INFO\": \"\",\n \"WARNING\": colorama.Fore.YELLOW,\n \"ERROR\": colorama.Fore.RED,\n \"CRITICAL\": colorama.Fore.RED,\n }\n\n footer = (\n \"{yellow}Having any troubles?{nc}.\"\n \" Hit us up at {blue}https://dvc.org/support{nc},\"\n \" we are always happy to help!\"\n ).format(\n blue=colorama.Fore.BLUE,\n nc=colorama.Fore.RESET,\n yellow=colorama.Fore.YELLOW,\n )\n\n def format(self, record):\n if record.levelname == \"INFO\":\n return record.msg\n\n if record.levelname == \"ERROR\" or record.levelname == \"CRITICAL\":\n exception, stack_trace = self._parse_exc(record.exc_info)\n\n return (\n \"{color}{levelname}{nc}: {description}\"\n \"{stack_trace}\\n\"\n \"\\n\"\n \"{footer}\"\n ).format(\n color=self.color_code.get(record.levelname, \"\"),\n nc=colorama.Fore.RESET,\n levelname=record.levelname,\n description=self._description(record.msg, exception),\n msg=record.msg,\n stack_trace=stack_trace,\n footer=self.footer,\n )\n\n return \"{color}{levelname}{nc}: {msg}\".format(\n color=self.color_code.get(record.levelname, \"\"),\n nc=colorama.Fore.RESET,\n levelname=record.levelname,\n msg=record.msg,\n )\n\n def _description(self, message, exception):\n description = \"\"\n\n if exception and message:\n description = \"{message} - {exception}\"\n elif exception:\n description = \"{exception}\"\n elif message:\n description = \"{message}\"\n\n return description.format(message=message, exception=exception)\n\n def _walk_exc(self, exc_info):\n import traceback\n\n buffer = StringIO()\n\n traceback.print_exception(*exc_info, file=buffer)\n\n exc = exc_info[1]\n tb = buffer.getvalue()\n\n exc_list = [str(exc)]\n tb_list = [tb]\n\n # NOTE: parsing chained exceptions. See dvc/exceptions.py for more info\n while hasattr(exc, \"cause\") and exc.cause:\n exc_list.append(str(exc.cause))\n if hasattr(exc, \"cause_tb\") and exc.cause_tb:\n tb_list.insert(0, str(exc.cause_tb))\n exc = exc.cause\n\n return exc_list, tb_list\n\n def _parse_exc(self, exc_info):\n if not exc_info:\n return (None, \"\")\n\n exc_list, tb_list = self._walk_exc(exc_info)\n\n exception = \": \".join(exc_list)\n\n if logging.getLogger(\"dvc\").getEffectiveLevel() == logging.DEBUG:\n stack_trace = (\n \"\\n\" \"{red}{line}{nc}\\n\" \"{stack_trace}\" \"{red}{line}{nc}\"\n ).format(\n red=colorama.Fore.RED,\n nc=colorama.Fore.RESET,\n line=\"-\" * 60,\n stack_trace=\"\\n\".join(tb_list),\n )\n else:\n stack_trace = \"\"\n\n return (exception, stack_trace)\n\n def _progress_aware(self):\n \"\"\"Add a new line if progress bar hasn't finished\"\"\"\n from dvc.progress import progress\n\n if not progress.is_finished:\n progress._print()\n progress.clearln()\n\n\ndef setup(level=logging.INFO):\n colorama.init()\n\n logging.config.dictConfig(\n {\n \"version\": 1,\n \"filters\": {\"exclude_errors\": {\"()\": ExcludeErrorsFilter}},\n \"formatters\": {\"color\": {\"()\": ColorFormatter}},\n \"handlers\": {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"level\": \"DEBUG\",\n \"formatter\": \"color\",\n \"stream\": \"ext://sys.stdout\",\n \"filters\": [\"exclude_errors\"],\n },\n \"console_errors\": {\n \"class\": \"logging.StreamHandler\",\n \"level\": \"ERROR\",\n \"formatter\": \"color\",\n \"stream\": \"ext://sys.stderr\",\n },\n },\n \"loggers\": {\n \"dvc\": {\n \"level\": level,\n \"handlers\": [\"console\", \"console_errors\"],\n }\n },\n }\n )\n", "path": "dvc/logger.py"}], "after_files": [{"content": "\"\"\"Manages logging configuration for dvc repo.\"\"\"\n\nfrom __future__ import unicode_literals\n\nfrom dvc.utils.compat import str, StringIO\n\nimport logging\nimport logging.handlers\nimport logging.config\nimport colorama\n\n\nclass ExcludeErrorsFilter(logging.Filter):\n def filter(self, record):\n return record.levelno < logging.ERROR\n\n\nclass ColorFormatter(logging.Formatter):\n \"\"\"Enable color support when logging to a terminal that supports it.\n\n Color support on Windows versions that do not support ANSI color codes is\n enabled by use of the colorama__ library.\n See the colorama documentation for details.\n\n __ https://pypi.python.org/pypi/colorama\n\n For records containing `exc_info`, it will use a custom `_walk_exc` to\n retrieve the whole tracebak.\n \"\"\"\n\n color_code = {\n \"DEBUG\": colorama.Fore.BLUE,\n \"INFO\": \"\",\n \"WARNING\": colorama.Fore.YELLOW,\n \"ERROR\": colorama.Fore.RED,\n \"CRITICAL\": colorama.Fore.RED,\n }\n\n footer = (\n \"{yellow}Having any troubles?{nc}.\"\n \" Hit us up at {blue}https://dvc.org/support{nc},\"\n \" we are always happy to help!\"\n ).format(\n blue=colorama.Fore.BLUE,\n nc=colorama.Fore.RESET,\n yellow=colorama.Fore.YELLOW,\n )\n\n def format(self, record):\n if self._is_visible(record):\n self._progress_aware()\n\n if record.levelname == \"INFO\":\n return record.msg\n\n if record.levelname == \"ERROR\" or record.levelname == \"CRITICAL\":\n exception, stack_trace = self._parse_exc(record.exc_info)\n\n return (\n \"{color}{levelname}{nc}: {description}\"\n \"{stack_trace}\\n\"\n \"\\n\"\n \"{footer}\"\n ).format(\n color=self.color_code.get(record.levelname, \"\"),\n nc=colorama.Fore.RESET,\n levelname=record.levelname,\n description=self._description(record.msg, exception),\n msg=record.msg,\n stack_trace=stack_trace,\n footer=self.footer,\n )\n\n return \"{color}{levelname}{nc}: {msg}\".format(\n color=self.color_code.get(record.levelname, \"\"),\n nc=colorama.Fore.RESET,\n levelname=record.levelname,\n msg=record.msg,\n )\n\n def _current_level(self):\n return logging.getLogger(\"dvc\").getEffectiveLevel()\n\n def _is_visible(self, record):\n return record.levelno >= self._current_level()\n\n def _description(self, message, exception):\n description = \"\"\n\n if exception and message:\n description = \"{message} - {exception}\"\n elif exception:\n description = \"{exception}\"\n elif message:\n description = \"{message}\"\n\n return description.format(message=message, exception=exception)\n\n def _walk_exc(self, exc_info):\n import traceback\n\n buffer = StringIO()\n\n traceback.print_exception(*exc_info, file=buffer)\n\n exc = exc_info[1]\n tb = buffer.getvalue()\n\n exc_list = [str(exc)]\n tb_list = [tb]\n\n # NOTE: parsing chained exceptions. See dvc/exceptions.py for more info\n while hasattr(exc, \"cause\") and exc.cause:\n exc_list.append(str(exc.cause))\n if hasattr(exc, \"cause_tb\") and exc.cause_tb:\n tb_list.insert(0, str(exc.cause_tb))\n exc = exc.cause\n\n return exc_list, tb_list\n\n def _parse_exc(self, exc_info):\n if not exc_info:\n return (None, \"\")\n\n exc_list, tb_list = self._walk_exc(exc_info)\n\n exception = \": \".join(exc_list)\n\n if self._current_level() == logging.DEBUG:\n stack_trace = (\n \"\\n\" \"{red}{line}{nc}\\n\" \"{stack_trace}\" \"{red}{line}{nc}\"\n ).format(\n red=colorama.Fore.RED,\n nc=colorama.Fore.RESET,\n line=\"-\" * 60,\n stack_trace=\"\\n\".join(tb_list),\n )\n else:\n stack_trace = \"\"\n\n return (exception, stack_trace)\n\n def _progress_aware(self):\n \"\"\"Add a new line if progress bar hasn't finished\"\"\"\n from dvc.progress import progress\n\n if not progress.is_finished:\n progress._print()\n progress.clearln()\n\n\ndef setup(level=logging.INFO):\n colorama.init()\n\n logging.config.dictConfig(\n {\n \"version\": 1,\n \"filters\": {\"exclude_errors\": {\"()\": ExcludeErrorsFilter}},\n \"formatters\": {\"color\": {\"()\": ColorFormatter}},\n \"handlers\": {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"level\": \"DEBUG\",\n \"formatter\": \"color\",\n \"stream\": \"ext://sys.stdout\",\n \"filters\": [\"exclude_errors\"],\n },\n \"console_errors\": {\n \"class\": \"logging.StreamHandler\",\n \"level\": \"ERROR\",\n \"formatter\": \"color\",\n \"stream\": \"ext://sys.stderr\",\n },\n },\n \"loggers\": {\n \"dvc\": {\n \"level\": level,\n \"handlers\": [\"console\", \"console_errors\"],\n }\n },\n }\n )\n", "path": "dvc/logger.py"}]}
1,931
251
gh_patches_debug_17451
rasdani/github-patches
git_diff
cal-itp__benefits-950
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Make buttons use title-case ## Acceptance Criteria - [ ] All buttons are using title case ## Additional context This is according to the design in Figma --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `benefits/core/views.py` Content: ``` 1 """ 2 The core application: view definition for the root of the webapp. 3 """ 4 from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound, HttpResponseServerError 5 from django.shortcuts import redirect 6 from django.template import loader 7 from django.template.response import TemplateResponse 8 from django.urls import reverse 9 from django.utils.translation import gettext as _ 10 11 from . import models, session, viewmodels 12 from .middleware import pageview_decorator 13 14 ROUTE_INDEX = "core:index" 15 ROUTE_ELIGIBILITY = "eligibility:index" 16 ROUTE_HELP = "core:help" 17 18 TEMPLATE_PAGE = "core/page.html" 19 TEMPLATE_AGENCY = "core/agency_index.html" 20 TEMPLATE_HELP = "core/help.html" 21 22 23 @pageview_decorator 24 def index(request): 25 """View handler for the main entry page.""" 26 session.reset(request) 27 28 agencies = models.TransitAgency.all_active() 29 30 if len(agencies) == 1: 31 agency = agencies[0] 32 return redirect(agency.index_url) 33 34 # generate a button to the landing page for each active agency 35 buttons = [viewmodels.Button.outline_primary(text=a.short_name, url=a.index_url) for a in agencies] 36 buttons[0].classes.append("mt-3") 37 buttons[0].label = _("core.pages.index.chooseprovider") 38 39 page = viewmodels.Page( 40 title=_("core.pages.index.title"), 41 content_title=_("core.pages.index.content_title"), 42 buttons=buttons, 43 classes="home", 44 ) 45 46 return TemplateResponse(request, TEMPLATE_PAGE, page.context_dict()) 47 48 49 @pageview_decorator 50 def agency_index(request, agency): 51 """View handler for an agency entry page.""" 52 session.reset(request) 53 session.update(request, agency=agency, origin=agency.index_url) 54 55 if len(agency.eligibility_verifiers.all()) == 1: 56 return redirect(reverse(ROUTE_ELIGIBILITY)) 57 58 button = viewmodels.Button.primary(text=_("core.pages.index.continue"), url=reverse(ROUTE_ELIGIBILITY)) 59 button.label = _("core.pages.agency_index.button.label") 60 61 page = viewmodels.Page( 62 title=_("core.pages.agency_index.title"), 63 content_title=_("core.pages.agency_index.content_title"), 64 button=button, 65 classes="home", 66 ) 67 68 help_page = reverse(ROUTE_HELP) 69 context_dict = {**page.context_dict(), **{"info_link": f"{help_page}#about"}} 70 71 return TemplateResponse(request, TEMPLATE_AGENCY, context_dict) 72 73 74 @pageview_decorator 75 def agency_public_key(request, agency): 76 """View handler returns an agency's public key as plain text.""" 77 return HttpResponse(agency.public_key_data, content_type="text/plain") 78 79 80 @pageview_decorator 81 def help(request): 82 """View handler for the help page.""" 83 if session.active_agency(request): 84 agency = session.agency(request) 85 buttons = viewmodels.Button.agency_contact_links(agency) 86 else: 87 buttons = [btn for a in models.TransitAgency.all_active() for btn in viewmodels.Button.agency_contact_links(a)] 88 89 buttons.append(viewmodels.Button.home(request, _("core.buttons.back"))) 90 91 page = viewmodels.Page( 92 title=_("core.buttons.help"), 93 content_title=_("core.buttons.help"), 94 buttons=buttons, 95 ) 96 97 return TemplateResponse(request, TEMPLATE_HELP, page.context_dict()) 98 99 100 @pageview_decorator 101 def bad_request(request, exception, template_name="400.html"): 102 """View handler for HTTP 400 Bad Request responses.""" 103 if session.active_agency(request): 104 session.update(request, origin=session.agency(request).index_url) 105 else: 106 session.update(request, origin=reverse(ROUTE_INDEX)) 107 108 home = viewmodels.Button.home(request) 109 page = viewmodels.ErrorPage.server_error(button=home) 110 t = loader.get_template(template_name) 111 112 return HttpResponseBadRequest(t.render(page.context_dict())) 113 114 115 @pageview_decorator 116 def csrf_failure(request, reason): 117 """ 118 View handler for CSRF_FAILURE_VIEW with custom data. 119 """ 120 if session.active_agency(request): 121 session.update(request, origin=session.agency(request).index_url) 122 else: 123 session.update(request, origin=reverse(ROUTE_INDEX)) 124 125 home = viewmodels.Button.home(request) 126 page = viewmodels.ErrorPage.not_found(button=home, path=request.path) 127 t = loader.get_template("400.html") 128 129 return HttpResponseNotFound(t.render(page.context_dict())) 130 131 132 @pageview_decorator 133 def page_not_found(request, exception, template_name="404.html"): 134 """View handler for HTTP 404 Not Found responses.""" 135 if session.active_agency(request): 136 session.update(request, origin=session.agency(request).index_url) 137 else: 138 session.update(request, origin=reverse(ROUTE_INDEX)) 139 140 home = viewmodels.Button.home(request) 141 # show a more user-friendly message instead of not_found 142 page = viewmodels.ErrorPage.user_error(button=home, path=request.path) 143 t = loader.get_template(template_name) 144 145 return HttpResponseNotFound(t.render(page.context_dict())) 146 147 148 @pageview_decorator 149 def server_error(request, template_name="500.html"): 150 """View handler for HTTP 500 Server Error responses.""" 151 if session.active_agency(request): 152 session.update(request, origin=session.agency(request).index_url) 153 else: 154 session.update(request, origin=reverse(ROUTE_INDEX)) 155 156 home = viewmodels.Button.home(request) 157 page = viewmodels.ErrorPage.server_error(button=home) 158 t = loader.get_template(template_name) 159 160 return HttpResponseServerError(t.render(page.context_dict())) 161 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/benefits/core/views.py b/benefits/core/views.py --- a/benefits/core/views.py +++ b/benefits/core/views.py @@ -56,19 +56,15 @@ return redirect(reverse(ROUTE_ELIGIBILITY)) button = viewmodels.Button.primary(text=_("core.pages.index.continue"), url=reverse(ROUTE_ELIGIBILITY)) - button.label = _("core.pages.agency_index.button.label") page = viewmodels.Page( title=_("core.pages.agency_index.title"), - content_title=_("core.pages.agency_index.content_title"), + content_title=_("core.pages.agency_index.mst_cc.content_title"), button=button, classes="home", ) - help_page = reverse(ROUTE_HELP) - context_dict = {**page.context_dict(), **{"info_link": f"{help_page}#about"}} - - return TemplateResponse(request, TEMPLATE_AGENCY, context_dict) + return TemplateResponse(request, TEMPLATE_AGENCY, page.context_dict()) @pageview_decorator
{"golden_diff": "diff --git a/benefits/core/views.py b/benefits/core/views.py\n--- a/benefits/core/views.py\n+++ b/benefits/core/views.py\n@@ -56,19 +56,15 @@\n return redirect(reverse(ROUTE_ELIGIBILITY))\n \n button = viewmodels.Button.primary(text=_(\"core.pages.index.continue\"), url=reverse(ROUTE_ELIGIBILITY))\n- button.label = _(\"core.pages.agency_index.button.label\")\n \n page = viewmodels.Page(\n title=_(\"core.pages.agency_index.title\"),\n- content_title=_(\"core.pages.agency_index.content_title\"),\n+ content_title=_(\"core.pages.agency_index.mst_cc.content_title\"),\n button=button,\n classes=\"home\",\n )\n \n- help_page = reverse(ROUTE_HELP)\n- context_dict = {**page.context_dict(), **{\"info_link\": f\"{help_page}#about\"}}\n-\n- return TemplateResponse(request, TEMPLATE_AGENCY, context_dict)\n+ return TemplateResponse(request, TEMPLATE_AGENCY, page.context_dict())\n \n \n @pageview_decorator\n", "issue": "Make buttons use title-case\n## Acceptance Criteria\r\n- [ ] All buttons are using title case\r\n\r\n## Additional context\r\nThis is according to the design in Figma\n", "before_files": [{"content": "\"\"\"\nThe core application: view definition for the root of the webapp.\n\"\"\"\nfrom django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound, HttpResponseServerError\nfrom django.shortcuts import redirect\nfrom django.template import loader\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse\nfrom django.utils.translation import gettext as _\n\nfrom . import models, session, viewmodels\nfrom .middleware import pageview_decorator\n\nROUTE_INDEX = \"core:index\"\nROUTE_ELIGIBILITY = \"eligibility:index\"\nROUTE_HELP = \"core:help\"\n\nTEMPLATE_PAGE = \"core/page.html\"\nTEMPLATE_AGENCY = \"core/agency_index.html\"\nTEMPLATE_HELP = \"core/help.html\"\n\n\n@pageview_decorator\ndef index(request):\n \"\"\"View handler for the main entry page.\"\"\"\n session.reset(request)\n\n agencies = models.TransitAgency.all_active()\n\n if len(agencies) == 1:\n agency = agencies[0]\n return redirect(agency.index_url)\n\n # generate a button to the landing page for each active agency\n buttons = [viewmodels.Button.outline_primary(text=a.short_name, url=a.index_url) for a in agencies]\n buttons[0].classes.append(\"mt-3\")\n buttons[0].label = _(\"core.pages.index.chooseprovider\")\n\n page = viewmodels.Page(\n title=_(\"core.pages.index.title\"),\n content_title=_(\"core.pages.index.content_title\"),\n buttons=buttons,\n classes=\"home\",\n )\n\n return TemplateResponse(request, TEMPLATE_PAGE, page.context_dict())\n\n\n@pageview_decorator\ndef agency_index(request, agency):\n \"\"\"View handler for an agency entry page.\"\"\"\n session.reset(request)\n session.update(request, agency=agency, origin=agency.index_url)\n\n if len(agency.eligibility_verifiers.all()) == 1:\n return redirect(reverse(ROUTE_ELIGIBILITY))\n\n button = viewmodels.Button.primary(text=_(\"core.pages.index.continue\"), url=reverse(ROUTE_ELIGIBILITY))\n button.label = _(\"core.pages.agency_index.button.label\")\n\n page = viewmodels.Page(\n title=_(\"core.pages.agency_index.title\"),\n content_title=_(\"core.pages.agency_index.content_title\"),\n button=button,\n classes=\"home\",\n )\n\n help_page = reverse(ROUTE_HELP)\n context_dict = {**page.context_dict(), **{\"info_link\": f\"{help_page}#about\"}}\n\n return TemplateResponse(request, TEMPLATE_AGENCY, context_dict)\n\n\n@pageview_decorator\ndef agency_public_key(request, agency):\n \"\"\"View handler returns an agency's public key as plain text.\"\"\"\n return HttpResponse(agency.public_key_data, content_type=\"text/plain\")\n\n\n@pageview_decorator\ndef help(request):\n \"\"\"View handler for the help page.\"\"\"\n if session.active_agency(request):\n agency = session.agency(request)\n buttons = viewmodels.Button.agency_contact_links(agency)\n else:\n buttons = [btn for a in models.TransitAgency.all_active() for btn in viewmodels.Button.agency_contact_links(a)]\n\n buttons.append(viewmodels.Button.home(request, _(\"core.buttons.back\")))\n\n page = viewmodels.Page(\n title=_(\"core.buttons.help\"),\n content_title=_(\"core.buttons.help\"),\n buttons=buttons,\n )\n\n return TemplateResponse(request, TEMPLATE_HELP, page.context_dict())\n\n\n@pageview_decorator\ndef bad_request(request, exception, template_name=\"400.html\"):\n \"\"\"View handler for HTTP 400 Bad Request responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.server_error(button=home)\n t = loader.get_template(template_name)\n\n return HttpResponseBadRequest(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef csrf_failure(request, reason):\n \"\"\"\n View handler for CSRF_FAILURE_VIEW with custom data.\n \"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.not_found(button=home, path=request.path)\n t = loader.get_template(\"400.html\")\n\n return HttpResponseNotFound(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef page_not_found(request, exception, template_name=\"404.html\"):\n \"\"\"View handler for HTTP 404 Not Found responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n # show a more user-friendly message instead of not_found\n page = viewmodels.ErrorPage.user_error(button=home, path=request.path)\n t = loader.get_template(template_name)\n\n return HttpResponseNotFound(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef server_error(request, template_name=\"500.html\"):\n \"\"\"View handler for HTTP 500 Server Error responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.server_error(button=home)\n t = loader.get_template(template_name)\n\n return HttpResponseServerError(t.render(page.context_dict()))\n", "path": "benefits/core/views.py"}], "after_files": [{"content": "\"\"\"\nThe core application: view definition for the root of the webapp.\n\"\"\"\nfrom django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound, HttpResponseServerError\nfrom django.shortcuts import redirect\nfrom django.template import loader\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse\nfrom django.utils.translation import gettext as _\n\nfrom . import models, session, viewmodels\nfrom .middleware import pageview_decorator\n\nROUTE_INDEX = \"core:index\"\nROUTE_ELIGIBILITY = \"eligibility:index\"\nROUTE_HELP = \"core:help\"\n\nTEMPLATE_PAGE = \"core/page.html\"\nTEMPLATE_AGENCY = \"core/agency_index.html\"\nTEMPLATE_HELP = \"core/help.html\"\n\n\n@pageview_decorator\ndef index(request):\n \"\"\"View handler for the main entry page.\"\"\"\n session.reset(request)\n\n agencies = models.TransitAgency.all_active()\n\n if len(agencies) == 1:\n agency = agencies[0]\n return redirect(agency.index_url)\n\n # generate a button to the landing page for each active agency\n buttons = [viewmodels.Button.outline_primary(text=a.short_name, url=a.index_url) for a in agencies]\n buttons[0].classes.append(\"mt-3\")\n buttons[0].label = _(\"core.pages.index.chooseprovider\")\n\n page = viewmodels.Page(\n title=_(\"core.pages.index.title\"),\n content_title=_(\"core.pages.index.content_title\"),\n buttons=buttons,\n classes=\"home\",\n )\n\n return TemplateResponse(request, TEMPLATE_PAGE, page.context_dict())\n\n\n@pageview_decorator\ndef agency_index(request, agency):\n \"\"\"View handler for an agency entry page.\"\"\"\n session.reset(request)\n session.update(request, agency=agency, origin=agency.index_url)\n\n if len(agency.eligibility_verifiers.all()) == 1:\n return redirect(reverse(ROUTE_ELIGIBILITY))\n\n button = viewmodels.Button.primary(text=_(\"core.pages.index.continue\"), url=reverse(ROUTE_ELIGIBILITY))\n\n page = viewmodels.Page(\n title=_(\"core.pages.agency_index.title\"),\n content_title=_(\"core.pages.agency_index.mst_cc.content_title\"),\n button=button,\n classes=\"home\",\n )\n\n return TemplateResponse(request, TEMPLATE_AGENCY, page.context_dict())\n\n\n@pageview_decorator\ndef agency_public_key(request, agency):\n \"\"\"View handler returns an agency's public key as plain text.\"\"\"\n return HttpResponse(agency.public_key_data, content_type=\"text/plain\")\n\n\n@pageview_decorator\ndef help(request):\n \"\"\"View handler for the help page.\"\"\"\n if session.active_agency(request):\n agency = session.agency(request)\n buttons = viewmodels.Button.agency_contact_links(agency)\n else:\n buttons = [btn for a in models.TransitAgency.all_active() for btn in viewmodels.Button.agency_contact_links(a)]\n\n buttons.append(viewmodels.Button.home(request, _(\"core.buttons.back\")))\n\n page = viewmodels.Page(\n title=_(\"core.buttons.help\"),\n content_title=_(\"core.buttons.help\"),\n buttons=buttons,\n )\n\n return TemplateResponse(request, TEMPLATE_HELP, page.context_dict())\n\n\n@pageview_decorator\ndef bad_request(request, exception, template_name=\"400.html\"):\n \"\"\"View handler for HTTP 400 Bad Request responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.server_error(button=home)\n t = loader.get_template(template_name)\n\n return HttpResponseBadRequest(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef csrf_failure(request, reason):\n \"\"\"\n View handler for CSRF_FAILURE_VIEW with custom data.\n \"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.not_found(button=home, path=request.path)\n t = loader.get_template(\"400.html\")\n\n return HttpResponseNotFound(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef page_not_found(request, exception, template_name=\"404.html\"):\n \"\"\"View handler for HTTP 404 Not Found responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n # show a more user-friendly message instead of not_found\n page = viewmodels.ErrorPage.user_error(button=home, path=request.path)\n t = loader.get_template(template_name)\n\n return HttpResponseNotFound(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef server_error(request, template_name=\"500.html\"):\n \"\"\"View handler for HTTP 500 Server Error responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.server_error(button=home)\n t = loader.get_template(template_name)\n\n return HttpResponseServerError(t.render(page.context_dict()))\n", "path": "benefits/core/views.py"}]}
1,851
229
gh_patches_debug_41484
rasdani/github-patches
git_diff
freqtrade__freqtrade-3040
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Warning log level when VolumePairList and a blacklist are associated Hi, Given i have set a VolumePairList and a blacklist containing ` [BNB/USDT]` When the bot refresh the markets Then I have a warning message `2020-02-28 16:01:47,568 - freqtrade.pairlist.IPairList - WARNING - Pair BNB/USDT in your blacklist. Removing it from whitelist...` I understand a warning in the case i have put in your whitelist and blacklist the same market (has it can be human mistake), but in this case i don't understand why. Maybe a make an error in my config file ? Many tks. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `freqtrade/pairlist/pairlistmanager.py` Content: ``` 1 """ 2 Static List provider 3 4 Provides lists as configured in config.json 5 6 """ 7 import logging 8 from typing import Dict, List 9 10 from cachetools import TTLCache, cached 11 12 from freqtrade.exceptions import OperationalException 13 from freqtrade.pairlist.IPairList import IPairList 14 from freqtrade.resolvers import PairListResolver 15 16 logger = logging.getLogger(__name__) 17 18 19 class PairListManager(): 20 21 def __init__(self, exchange, config: dict) -> None: 22 self._exchange = exchange 23 self._config = config 24 self._whitelist = self._config['exchange'].get('pair_whitelist') 25 self._blacklist = self._config['exchange'].get('pair_blacklist', []) 26 self._pairlists: List[IPairList] = [] 27 self._tickers_needed = False 28 for pl in self._config.get('pairlists', None): 29 if 'method' not in pl: 30 logger.warning(f"No method in {pl}") 31 continue 32 pairl = PairListResolver.load_pairlist(pl.get('method'), 33 exchange=exchange, 34 pairlistmanager=self, 35 config=config, 36 pairlistconfig=pl, 37 pairlist_pos=len(self._pairlists) 38 ) 39 self._tickers_needed = pairl.needstickers or self._tickers_needed 40 self._pairlists.append(pairl) 41 42 if not self._pairlists: 43 raise OperationalException("No Pairlist defined!") 44 45 @property 46 def whitelist(self) -> List[str]: 47 """ 48 Has the current whitelist 49 """ 50 return self._whitelist 51 52 @property 53 def blacklist(self) -> List[str]: 54 """ 55 Has the current blacklist 56 -> no need to overwrite in subclasses 57 """ 58 return self._blacklist 59 60 @property 61 def name_list(self) -> List[str]: 62 """ 63 Get list of loaded pairlists names 64 """ 65 return [p.name for p in self._pairlists] 66 67 def short_desc(self) -> List[Dict]: 68 """ 69 List of short_desc for each pairlist 70 """ 71 return [{p.name: p.short_desc()} for p in self._pairlists] 72 73 @cached(TTLCache(maxsize=1, ttl=1800)) 74 def _get_cached_tickers(self): 75 return self._exchange.get_tickers() 76 77 def refresh_pairlist(self) -> None: 78 """ 79 Run pairlist through all configured pairlists. 80 """ 81 82 pairlist = self._whitelist.copy() 83 84 # tickers should be cached to avoid calling the exchange on each call. 85 tickers: Dict = {} 86 if self._tickers_needed: 87 tickers = self._get_cached_tickers() 88 89 # Process all pairlists in chain 90 for pl in self._pairlists: 91 pairlist = pl.filter_pairlist(pairlist, tickers) 92 93 # Validation against blacklist happens after the pairlists to ensure blacklist is respected. 94 pairlist = IPairList.verify_blacklist(pairlist, self.blacklist) 95 96 self._whitelist = pairlist 97 ``` Path: `freqtrade/pairlist/IPairList.py` Content: ``` 1 """ 2 Static List provider 3 4 Provides lists as configured in config.json 5 6 """ 7 import logging 8 from abc import ABC, abstractmethod, abstractproperty 9 from copy import deepcopy 10 from typing import Any, Dict, List 11 12 from freqtrade.exchange import market_is_active 13 14 logger = logging.getLogger(__name__) 15 16 17 class IPairList(ABC): 18 19 def __init__(self, exchange, pairlistmanager, 20 config: Dict[str, Any], pairlistconfig: Dict[str, Any], 21 pairlist_pos: int) -> None: 22 """ 23 :param exchange: Exchange instance 24 :param pairlistmanager: Instanciating Pairlist manager 25 :param config: Global bot configuration 26 :param pairlistconfig: Configuration for this pairlist - can be empty. 27 :param pairlist_pos: Position of the filter in the pairlist-filter-list 28 """ 29 self._exchange = exchange 30 self._pairlistmanager = pairlistmanager 31 self._config = config 32 self._pairlistconfig = pairlistconfig 33 self._pairlist_pos = pairlist_pos 34 35 @property 36 def name(self) -> str: 37 """ 38 Gets name of the class 39 -> no need to overwrite in subclasses 40 """ 41 return self.__class__.__name__ 42 43 @abstractproperty 44 def needstickers(self) -> bool: 45 """ 46 Boolean property defining if tickers are necessary. 47 If no Pairlist requries tickers, an empty List is passed 48 as tickers argument to filter_pairlist 49 """ 50 51 @abstractmethod 52 def short_desc(self) -> str: 53 """ 54 Short whitelist method description - used for startup-messages 55 -> Please overwrite in subclasses 56 """ 57 58 @abstractmethod 59 def filter_pairlist(self, pairlist: List[str], tickers: Dict) -> List[str]: 60 """ 61 Filters and sorts pairlist and returns the whitelist again. 62 Called on each bot iteration - please use internal caching if necessary 63 -> Please overwrite in subclasses 64 :param pairlist: pairlist to filter or sort 65 :param tickers: Tickers (from exchange.get_tickers()). May be cached. 66 :return: new whitelist 67 """ 68 69 @staticmethod 70 def verify_blacklist(pairlist: List[str], blacklist: List[str]) -> List[str]: 71 """ 72 Verify and remove items from pairlist - returning a filtered pairlist. 73 """ 74 for pair in deepcopy(pairlist): 75 if pair in blacklist: 76 logger.warning(f"Pair {pair} in your blacklist. Removing it from whitelist...") 77 pairlist.remove(pair) 78 return pairlist 79 80 def _verify_blacklist(self, pairlist: List[str]) -> List[str]: 81 """ 82 Proxy method to verify_blacklist for easy access for child classes. 83 """ 84 return IPairList.verify_blacklist(pairlist, self._pairlistmanager.blacklist) 85 86 def _whitelist_for_active_markets(self, pairlist: List[str]) -> List[str]: 87 """ 88 Check available markets and remove pair from whitelist if necessary 89 :param whitelist: the sorted list of pairs the user might want to trade 90 :return: the list of pairs the user wants to trade without those unavailable or 91 black_listed 92 """ 93 markets = self._exchange.markets 94 95 sanitized_whitelist: List[str] = [] 96 for pair in pairlist: 97 # pair is not in the generated dynamic market or has the wrong stake currency 98 if pair not in markets: 99 logger.warning(f"Pair {pair} is not compatible with exchange " 100 f"{self._exchange.name}. Removing it from whitelist..") 101 continue 102 103 if self._exchange.get_pair_quote_currency(pair) != self._config['stake_currency']: 104 logger.warning(f"Pair {pair} is not compatible with your stake currency " 105 f"{self._config['stake_currency']}. Removing it from whitelist..") 106 continue 107 108 # Check if market is active 109 market = markets[pair] 110 if not market_is_active(market): 111 logger.info(f"Ignoring {pair} from whitelist. Market is not active.") 112 continue 113 if pair not in sanitized_whitelist: 114 sanitized_whitelist.append(pair) 115 116 sanitized_whitelist = self._verify_blacklist(sanitized_whitelist) 117 # We need to remove pairs that are unknown 118 return sanitized_whitelist 119 ``` Path: `freqtrade/pairlist/VolumePairList.py` Content: ``` 1 """ 2 Volume PairList provider 3 4 Provides lists as configured in config.json 5 6 """ 7 import logging 8 from datetime import datetime 9 from typing import Any, Dict, List 10 11 from freqtrade.exceptions import OperationalException 12 from freqtrade.pairlist.IPairList import IPairList 13 14 logger = logging.getLogger(__name__) 15 16 SORT_VALUES = ['askVolume', 'bidVolume', 'quoteVolume'] 17 18 19 class VolumePairList(IPairList): 20 21 def __init__(self, exchange, pairlistmanager, config: Dict[str, Any], pairlistconfig: dict, 22 pairlist_pos: int) -> None: 23 super().__init__(exchange, pairlistmanager, config, pairlistconfig, pairlist_pos) 24 25 if 'number_assets' not in self._pairlistconfig: 26 raise OperationalException( 27 f'`number_assets` not specified. Please check your configuration ' 28 'for "pairlist.config.number_assets"') 29 self._number_pairs = self._pairlistconfig['number_assets'] 30 self._sort_key = self._pairlistconfig.get('sort_key', 'quoteVolume') 31 self._min_value = self._pairlistconfig.get('min_value', 0) 32 self.refresh_period = self._pairlistconfig.get('refresh_period', 1800) 33 34 if not self._exchange.exchange_has('fetchTickers'): 35 raise OperationalException( 36 'Exchange does not support dynamic whitelist.' 37 'Please edit your config and restart the bot' 38 ) 39 if not self._validate_keys(self._sort_key): 40 raise OperationalException( 41 f'key {self._sort_key} not in {SORT_VALUES}') 42 self._last_refresh = 0 43 44 @property 45 def needstickers(self) -> bool: 46 """ 47 Boolean property defining if tickers are necessary. 48 If no Pairlist requries tickers, an empty List is passed 49 as tickers argument to filter_pairlist 50 """ 51 return True 52 53 def _validate_keys(self, key): 54 return key in SORT_VALUES 55 56 def short_desc(self) -> str: 57 """ 58 Short whitelist method description - used for startup-messages 59 """ 60 return f"{self.name} - top {self._pairlistconfig['number_assets']} volume pairs." 61 62 def filter_pairlist(self, pairlist: List[str], tickers: Dict) -> List[str]: 63 """ 64 Filters and sorts pairlist and returns the whitelist again. 65 Called on each bot iteration - please use internal caching if necessary 66 :param pairlist: pairlist to filter or sort 67 :param tickers: Tickers (from exchange.get_tickers()). May be cached. 68 :return: new whitelist 69 """ 70 # Generate dynamic whitelist 71 if self._last_refresh + self.refresh_period < datetime.now().timestamp(): 72 self._last_refresh = int(datetime.now().timestamp()) 73 return self._gen_pair_whitelist(pairlist, 74 tickers, 75 self._config['stake_currency'], 76 self._sort_key, 77 self._min_value 78 ) 79 else: 80 return pairlist 81 82 def _gen_pair_whitelist(self, pairlist: List[str], tickers: Dict, 83 base_currency: str, key: str, min_val: int) -> List[str]: 84 """ 85 Updates the whitelist with with a dynamically generated list 86 :param base_currency: base currency as str 87 :param key: sort key (defaults to 'quoteVolume') 88 :param tickers: Tickers (from exchange.get_tickers()). 89 :return: List of pairs 90 """ 91 92 if self._pairlist_pos == 0: 93 # If VolumePairList is the first in the list, use fresh pairlist 94 # Check if pair quote currency equals to the stake currency. 95 filtered_tickers = [v for k, v in tickers.items() 96 if (self._exchange.get_pair_quote_currency(k) == base_currency 97 and v[key] is not None)] 98 else: 99 # If other pairlist is in front, use the incomming pairlist. 100 filtered_tickers = [v for k, v in tickers.items() if k in pairlist] 101 102 if min_val > 0: 103 filtered_tickers = list(filter(lambda t: t[key] > min_val, filtered_tickers)) 104 105 sorted_tickers = sorted(filtered_tickers, reverse=True, key=lambda t: t[key]) 106 107 # Validate whitelist to only have active market pairs 108 pairs = self._whitelist_for_active_markets([s['symbol'] for s in sorted_tickers]) 109 pairs = self._verify_blacklist(pairs) 110 # Limit to X number of pairs 111 pairs = pairs[:self._number_pairs] 112 logger.info(f"Searching {self._number_pairs} pairs: {pairs}") 113 114 return pairs 115 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/freqtrade/pairlist/IPairList.py b/freqtrade/pairlist/IPairList.py --- a/freqtrade/pairlist/IPairList.py +++ b/freqtrade/pairlist/IPairList.py @@ -67,21 +67,37 @@ """ @staticmethod - def verify_blacklist(pairlist: List[str], blacklist: List[str]) -> List[str]: + def verify_blacklist(pairlist: List[str], blacklist: List[str], + aswarning: bool) -> List[str]: """ Verify and remove items from pairlist - returning a filtered pairlist. + Logs a warning or info depending on `aswarning`. + Pairlists explicitly using this method shall use `aswarning=False`! + :param pairlist: Pairlist to validate + :param blacklist: Blacklist to validate pairlist against + :param aswarning: Log message as Warning or info + :return: pairlist - blacklisted pairs """ for pair in deepcopy(pairlist): if pair in blacklist: - logger.warning(f"Pair {pair} in your blacklist. Removing it from whitelist...") + if aswarning: + logger.warning(f"Pair {pair} in your blacklist. Removing it from whitelist...") + else: + logger.info(f"Pair {pair} in your blacklist. Removing it from whitelist...") pairlist.remove(pair) return pairlist - def _verify_blacklist(self, pairlist: List[str]) -> List[str]: + def _verify_blacklist(self, pairlist: List[str], aswarning: bool = True) -> List[str]: """ Proxy method to verify_blacklist for easy access for child classes. + Logs a warning or info depending on `aswarning`. + Pairlists explicitly using this method shall use aswarning=False! + :param pairlist: Pairlist to validate + :param aswarning: Log message as Warning or info. + :return: pairlist - blacklisted pairs """ - return IPairList.verify_blacklist(pairlist, self._pairlistmanager.blacklist) + return IPairList.verify_blacklist(pairlist, self._pairlistmanager.blacklist, + aswarning=aswarning) def _whitelist_for_active_markets(self, pairlist: List[str]) -> List[str]: """ @@ -113,6 +129,5 @@ if pair not in sanitized_whitelist: sanitized_whitelist.append(pair) - sanitized_whitelist = self._verify_blacklist(sanitized_whitelist) # We need to remove pairs that are unknown return sanitized_whitelist diff --git a/freqtrade/pairlist/VolumePairList.py b/freqtrade/pairlist/VolumePairList.py --- a/freqtrade/pairlist/VolumePairList.py +++ b/freqtrade/pairlist/VolumePairList.py @@ -106,7 +106,7 @@ # Validate whitelist to only have active market pairs pairs = self._whitelist_for_active_markets([s['symbol'] for s in sorted_tickers]) - pairs = self._verify_blacklist(pairs) + pairs = self._verify_blacklist(pairs, aswarning=False) # Limit to X number of pairs pairs = pairs[:self._number_pairs] logger.info(f"Searching {self._number_pairs} pairs: {pairs}") diff --git a/freqtrade/pairlist/pairlistmanager.py b/freqtrade/pairlist/pairlistmanager.py --- a/freqtrade/pairlist/pairlistmanager.py +++ b/freqtrade/pairlist/pairlistmanager.py @@ -91,6 +91,6 @@ pairlist = pl.filter_pairlist(pairlist, tickers) # Validation against blacklist happens after the pairlists to ensure blacklist is respected. - pairlist = IPairList.verify_blacklist(pairlist, self.blacklist) + pairlist = IPairList.verify_blacklist(pairlist, self.blacklist, True) self._whitelist = pairlist
{"golden_diff": "diff --git a/freqtrade/pairlist/IPairList.py b/freqtrade/pairlist/IPairList.py\n--- a/freqtrade/pairlist/IPairList.py\n+++ b/freqtrade/pairlist/IPairList.py\n@@ -67,21 +67,37 @@\n \"\"\"\n \n @staticmethod\n- def verify_blacklist(pairlist: List[str], blacklist: List[str]) -> List[str]:\n+ def verify_blacklist(pairlist: List[str], blacklist: List[str],\n+ aswarning: bool) -> List[str]:\n \"\"\"\n Verify and remove items from pairlist - returning a filtered pairlist.\n+ Logs a warning or info depending on `aswarning`.\n+ Pairlists explicitly using this method shall use `aswarning=False`!\n+ :param pairlist: Pairlist to validate\n+ :param blacklist: Blacklist to validate pairlist against\n+ :param aswarning: Log message as Warning or info\n+ :return: pairlist - blacklisted pairs\n \"\"\"\n for pair in deepcopy(pairlist):\n if pair in blacklist:\n- logger.warning(f\"Pair {pair} in your blacklist. Removing it from whitelist...\")\n+ if aswarning:\n+ logger.warning(f\"Pair {pair} in your blacklist. Removing it from whitelist...\")\n+ else:\n+ logger.info(f\"Pair {pair} in your blacklist. Removing it from whitelist...\")\n pairlist.remove(pair)\n return pairlist\n \n- def _verify_blacklist(self, pairlist: List[str]) -> List[str]:\n+ def _verify_blacklist(self, pairlist: List[str], aswarning: bool = True) -> List[str]:\n \"\"\"\n Proxy method to verify_blacklist for easy access for child classes.\n+ Logs a warning or info depending on `aswarning`.\n+ Pairlists explicitly using this method shall use aswarning=False!\n+ :param pairlist: Pairlist to validate\n+ :param aswarning: Log message as Warning or info.\n+ :return: pairlist - blacklisted pairs\n \"\"\"\n- return IPairList.verify_blacklist(pairlist, self._pairlistmanager.blacklist)\n+ return IPairList.verify_blacklist(pairlist, self._pairlistmanager.blacklist,\n+ aswarning=aswarning)\n \n def _whitelist_for_active_markets(self, pairlist: List[str]) -> List[str]:\n \"\"\"\n@@ -113,6 +129,5 @@\n if pair not in sanitized_whitelist:\n sanitized_whitelist.append(pair)\n \n- sanitized_whitelist = self._verify_blacklist(sanitized_whitelist)\n # We need to remove pairs that are unknown\n return sanitized_whitelist\ndiff --git a/freqtrade/pairlist/VolumePairList.py b/freqtrade/pairlist/VolumePairList.py\n--- a/freqtrade/pairlist/VolumePairList.py\n+++ b/freqtrade/pairlist/VolumePairList.py\n@@ -106,7 +106,7 @@\n \n # Validate whitelist to only have active market pairs\n pairs = self._whitelist_for_active_markets([s['symbol'] for s in sorted_tickers])\n- pairs = self._verify_blacklist(pairs)\n+ pairs = self._verify_blacklist(pairs, aswarning=False)\n # Limit to X number of pairs\n pairs = pairs[:self._number_pairs]\n logger.info(f\"Searching {self._number_pairs} pairs: {pairs}\")\ndiff --git a/freqtrade/pairlist/pairlistmanager.py b/freqtrade/pairlist/pairlistmanager.py\n--- a/freqtrade/pairlist/pairlistmanager.py\n+++ b/freqtrade/pairlist/pairlistmanager.py\n@@ -91,6 +91,6 @@\n pairlist = pl.filter_pairlist(pairlist, tickers)\n \n # Validation against blacklist happens after the pairlists to ensure blacklist is respected.\n- pairlist = IPairList.verify_blacklist(pairlist, self.blacklist)\n+ pairlist = IPairList.verify_blacklist(pairlist, self.blacklist, True)\n \n self._whitelist = pairlist\n", "issue": "Warning log level when VolumePairList and a blacklist are associated\nHi,\r\n\r\nGiven i have set a VolumePairList and a blacklist containing ` [BNB/USDT]`\r\nWhen the bot refresh the markets\r\nThen I have a warning message\r\n`2020-02-28 16:01:47,568 - freqtrade.pairlist.IPairList - WARNING - Pair BNB/USDT in your blacklist. Removing it from whitelist...`\r\n\r\nI understand a warning in the case i have put in your whitelist and blacklist the same market (has it can be human mistake), but in this case i don't understand why.\r\n\r\nMaybe a make an error in my config file ?\r\n\r\nMany tks.\n", "before_files": [{"content": "\"\"\"\nStatic List provider\n\nProvides lists as configured in config.json\n\n \"\"\"\nimport logging\nfrom typing import Dict, List\n\nfrom cachetools import TTLCache, cached\n\nfrom freqtrade.exceptions import OperationalException\nfrom freqtrade.pairlist.IPairList import IPairList\nfrom freqtrade.resolvers import PairListResolver\n\nlogger = logging.getLogger(__name__)\n\n\nclass PairListManager():\n\n def __init__(self, exchange, config: dict) -> None:\n self._exchange = exchange\n self._config = config\n self._whitelist = self._config['exchange'].get('pair_whitelist')\n self._blacklist = self._config['exchange'].get('pair_blacklist', [])\n self._pairlists: List[IPairList] = []\n self._tickers_needed = False\n for pl in self._config.get('pairlists', None):\n if 'method' not in pl:\n logger.warning(f\"No method in {pl}\")\n continue\n pairl = PairListResolver.load_pairlist(pl.get('method'),\n exchange=exchange,\n pairlistmanager=self,\n config=config,\n pairlistconfig=pl,\n pairlist_pos=len(self._pairlists)\n )\n self._tickers_needed = pairl.needstickers or self._tickers_needed\n self._pairlists.append(pairl)\n\n if not self._pairlists:\n raise OperationalException(\"No Pairlist defined!\")\n\n @property\n def whitelist(self) -> List[str]:\n \"\"\"\n Has the current whitelist\n \"\"\"\n return self._whitelist\n\n @property\n def blacklist(self) -> List[str]:\n \"\"\"\n Has the current blacklist\n -> no need to overwrite in subclasses\n \"\"\"\n return self._blacklist\n\n @property\n def name_list(self) -> List[str]:\n \"\"\"\n Get list of loaded pairlists names\n \"\"\"\n return [p.name for p in self._pairlists]\n\n def short_desc(self) -> List[Dict]:\n \"\"\"\n List of short_desc for each pairlist\n \"\"\"\n return [{p.name: p.short_desc()} for p in self._pairlists]\n\n @cached(TTLCache(maxsize=1, ttl=1800))\n def _get_cached_tickers(self):\n return self._exchange.get_tickers()\n\n def refresh_pairlist(self) -> None:\n \"\"\"\n Run pairlist through all configured pairlists.\n \"\"\"\n\n pairlist = self._whitelist.copy()\n\n # tickers should be cached to avoid calling the exchange on each call.\n tickers: Dict = {}\n if self._tickers_needed:\n tickers = self._get_cached_tickers()\n\n # Process all pairlists in chain\n for pl in self._pairlists:\n pairlist = pl.filter_pairlist(pairlist, tickers)\n\n # Validation against blacklist happens after the pairlists to ensure blacklist is respected.\n pairlist = IPairList.verify_blacklist(pairlist, self.blacklist)\n\n self._whitelist = pairlist\n", "path": "freqtrade/pairlist/pairlistmanager.py"}, {"content": "\"\"\"\nStatic List provider\n\nProvides lists as configured in config.json\n\n \"\"\"\nimport logging\nfrom abc import ABC, abstractmethod, abstractproperty\nfrom copy import deepcopy\nfrom typing import Any, Dict, List\n\nfrom freqtrade.exchange import market_is_active\n\nlogger = logging.getLogger(__name__)\n\n\nclass IPairList(ABC):\n\n def __init__(self, exchange, pairlistmanager,\n config: Dict[str, Any], pairlistconfig: Dict[str, Any],\n pairlist_pos: int) -> None:\n \"\"\"\n :param exchange: Exchange instance\n :param pairlistmanager: Instanciating Pairlist manager\n :param config: Global bot configuration\n :param pairlistconfig: Configuration for this pairlist - can be empty.\n :param pairlist_pos: Position of the filter in the pairlist-filter-list\n \"\"\"\n self._exchange = exchange\n self._pairlistmanager = pairlistmanager\n self._config = config\n self._pairlistconfig = pairlistconfig\n self._pairlist_pos = pairlist_pos\n\n @property\n def name(self) -> str:\n \"\"\"\n Gets name of the class\n -> no need to overwrite in subclasses\n \"\"\"\n return self.__class__.__name__\n\n @abstractproperty\n def needstickers(self) -> bool:\n \"\"\"\n Boolean property defining if tickers are necessary.\n If no Pairlist requries tickers, an empty List is passed\n as tickers argument to filter_pairlist\n \"\"\"\n\n @abstractmethod\n def short_desc(self) -> str:\n \"\"\"\n Short whitelist method description - used for startup-messages\n -> Please overwrite in subclasses\n \"\"\"\n\n @abstractmethod\n def filter_pairlist(self, pairlist: List[str], tickers: Dict) -> List[str]:\n \"\"\"\n Filters and sorts pairlist and returns the whitelist again.\n Called on each bot iteration - please use internal caching if necessary\n -> Please overwrite in subclasses\n :param pairlist: pairlist to filter or sort\n :param tickers: Tickers (from exchange.get_tickers()). May be cached.\n :return: new whitelist\n \"\"\"\n\n @staticmethod\n def verify_blacklist(pairlist: List[str], blacklist: List[str]) -> List[str]:\n \"\"\"\n Verify and remove items from pairlist - returning a filtered pairlist.\n \"\"\"\n for pair in deepcopy(pairlist):\n if pair in blacklist:\n logger.warning(f\"Pair {pair} in your blacklist. Removing it from whitelist...\")\n pairlist.remove(pair)\n return pairlist\n\n def _verify_blacklist(self, pairlist: List[str]) -> List[str]:\n \"\"\"\n Proxy method to verify_blacklist for easy access for child classes.\n \"\"\"\n return IPairList.verify_blacklist(pairlist, self._pairlistmanager.blacklist)\n\n def _whitelist_for_active_markets(self, pairlist: List[str]) -> List[str]:\n \"\"\"\n Check available markets and remove pair from whitelist if necessary\n :param whitelist: the sorted list of pairs the user might want to trade\n :return: the list of pairs the user wants to trade without those unavailable or\n black_listed\n \"\"\"\n markets = self._exchange.markets\n\n sanitized_whitelist: List[str] = []\n for pair in pairlist:\n # pair is not in the generated dynamic market or has the wrong stake currency\n if pair not in markets:\n logger.warning(f\"Pair {pair} is not compatible with exchange \"\n f\"{self._exchange.name}. Removing it from whitelist..\")\n continue\n\n if self._exchange.get_pair_quote_currency(pair) != self._config['stake_currency']:\n logger.warning(f\"Pair {pair} is not compatible with your stake currency \"\n f\"{self._config['stake_currency']}. Removing it from whitelist..\")\n continue\n\n # Check if market is active\n market = markets[pair]\n if not market_is_active(market):\n logger.info(f\"Ignoring {pair} from whitelist. Market is not active.\")\n continue\n if pair not in sanitized_whitelist:\n sanitized_whitelist.append(pair)\n\n sanitized_whitelist = self._verify_blacklist(sanitized_whitelist)\n # We need to remove pairs that are unknown\n return sanitized_whitelist\n", "path": "freqtrade/pairlist/IPairList.py"}, {"content": "\"\"\"\nVolume PairList provider\n\nProvides lists as configured in config.json\n\n \"\"\"\nimport logging\nfrom datetime import datetime\nfrom typing import Any, Dict, List\n\nfrom freqtrade.exceptions import OperationalException\nfrom freqtrade.pairlist.IPairList import IPairList\n\nlogger = logging.getLogger(__name__)\n\nSORT_VALUES = ['askVolume', 'bidVolume', 'quoteVolume']\n\n\nclass VolumePairList(IPairList):\n\n def __init__(self, exchange, pairlistmanager, config: Dict[str, Any], pairlistconfig: dict,\n pairlist_pos: int) -> None:\n super().__init__(exchange, pairlistmanager, config, pairlistconfig, pairlist_pos)\n\n if 'number_assets' not in self._pairlistconfig:\n raise OperationalException(\n f'`number_assets` not specified. Please check your configuration '\n 'for \"pairlist.config.number_assets\"')\n self._number_pairs = self._pairlistconfig['number_assets']\n self._sort_key = self._pairlistconfig.get('sort_key', 'quoteVolume')\n self._min_value = self._pairlistconfig.get('min_value', 0)\n self.refresh_period = self._pairlistconfig.get('refresh_period', 1800)\n\n if not self._exchange.exchange_has('fetchTickers'):\n raise OperationalException(\n 'Exchange does not support dynamic whitelist.'\n 'Please edit your config and restart the bot'\n )\n if not self._validate_keys(self._sort_key):\n raise OperationalException(\n f'key {self._sort_key} not in {SORT_VALUES}')\n self._last_refresh = 0\n\n @property\n def needstickers(self) -> bool:\n \"\"\"\n Boolean property defining if tickers are necessary.\n If no Pairlist requries tickers, an empty List is passed\n as tickers argument to filter_pairlist\n \"\"\"\n return True\n\n def _validate_keys(self, key):\n return key in SORT_VALUES\n\n def short_desc(self) -> str:\n \"\"\"\n Short whitelist method description - used for startup-messages\n \"\"\"\n return f\"{self.name} - top {self._pairlistconfig['number_assets']} volume pairs.\"\n\n def filter_pairlist(self, pairlist: List[str], tickers: Dict) -> List[str]:\n \"\"\"\n Filters and sorts pairlist and returns the whitelist again.\n Called on each bot iteration - please use internal caching if necessary\n :param pairlist: pairlist to filter or sort\n :param tickers: Tickers (from exchange.get_tickers()). May be cached.\n :return: new whitelist\n \"\"\"\n # Generate dynamic whitelist\n if self._last_refresh + self.refresh_period < datetime.now().timestamp():\n self._last_refresh = int(datetime.now().timestamp())\n return self._gen_pair_whitelist(pairlist,\n tickers,\n self._config['stake_currency'],\n self._sort_key,\n self._min_value\n )\n else:\n return pairlist\n\n def _gen_pair_whitelist(self, pairlist: List[str], tickers: Dict,\n base_currency: str, key: str, min_val: int) -> List[str]:\n \"\"\"\n Updates the whitelist with with a dynamically generated list\n :param base_currency: base currency as str\n :param key: sort key (defaults to 'quoteVolume')\n :param tickers: Tickers (from exchange.get_tickers()).\n :return: List of pairs\n \"\"\"\n\n if self._pairlist_pos == 0:\n # If VolumePairList is the first in the list, use fresh pairlist\n # Check if pair quote currency equals to the stake currency.\n filtered_tickers = [v for k, v in tickers.items()\n if (self._exchange.get_pair_quote_currency(k) == base_currency\n and v[key] is not None)]\n else:\n # If other pairlist is in front, use the incomming pairlist.\n filtered_tickers = [v for k, v in tickers.items() if k in pairlist]\n\n if min_val > 0:\n filtered_tickers = list(filter(lambda t: t[key] > min_val, filtered_tickers))\n\n sorted_tickers = sorted(filtered_tickers, reverse=True, key=lambda t: t[key])\n\n # Validate whitelist to only have active market pairs\n pairs = self._whitelist_for_active_markets([s['symbol'] for s in sorted_tickers])\n pairs = self._verify_blacklist(pairs)\n # Limit to X number of pairs\n pairs = pairs[:self._number_pairs]\n logger.info(f\"Searching {self._number_pairs} pairs: {pairs}\")\n\n return pairs\n", "path": "freqtrade/pairlist/VolumePairList.py"}], "after_files": [{"content": "\"\"\"\nStatic List provider\n\nProvides lists as configured in config.json\n\n \"\"\"\nimport logging\nfrom typing import Dict, List\n\nfrom cachetools import TTLCache, cached\n\nfrom freqtrade.exceptions import OperationalException\nfrom freqtrade.pairlist.IPairList import IPairList\nfrom freqtrade.resolvers import PairListResolver\n\nlogger = logging.getLogger(__name__)\n\n\nclass PairListManager():\n\n def __init__(self, exchange, config: dict) -> None:\n self._exchange = exchange\n self._config = config\n self._whitelist = self._config['exchange'].get('pair_whitelist')\n self._blacklist = self._config['exchange'].get('pair_blacklist', [])\n self._pairlists: List[IPairList] = []\n self._tickers_needed = False\n for pl in self._config.get('pairlists', None):\n if 'method' not in pl:\n logger.warning(f\"No method in {pl}\")\n continue\n pairl = PairListResolver.load_pairlist(pl.get('method'),\n exchange=exchange,\n pairlistmanager=self,\n config=config,\n pairlistconfig=pl,\n pairlist_pos=len(self._pairlists)\n )\n self._tickers_needed = pairl.needstickers or self._tickers_needed\n self._pairlists.append(pairl)\n\n if not self._pairlists:\n raise OperationalException(\"No Pairlist defined!\")\n\n @property\n def whitelist(self) -> List[str]:\n \"\"\"\n Has the current whitelist\n \"\"\"\n return self._whitelist\n\n @property\n def blacklist(self) -> List[str]:\n \"\"\"\n Has the current blacklist\n -> no need to overwrite in subclasses\n \"\"\"\n return self._blacklist\n\n @property\n def name_list(self) -> List[str]:\n \"\"\"\n Get list of loaded pairlists names\n \"\"\"\n return [p.name for p in self._pairlists]\n\n def short_desc(self) -> List[Dict]:\n \"\"\"\n List of short_desc for each pairlist\n \"\"\"\n return [{p.name: p.short_desc()} for p in self._pairlists]\n\n @cached(TTLCache(maxsize=1, ttl=1800))\n def _get_cached_tickers(self):\n return self._exchange.get_tickers()\n\n def refresh_pairlist(self) -> None:\n \"\"\"\n Run pairlist through all configured pairlists.\n \"\"\"\n\n pairlist = self._whitelist.copy()\n\n # tickers should be cached to avoid calling the exchange on each call.\n tickers: Dict = {}\n if self._tickers_needed:\n tickers = self._get_cached_tickers()\n\n # Process all pairlists in chain\n for pl in self._pairlists:\n pairlist = pl.filter_pairlist(pairlist, tickers)\n\n # Validation against blacklist happens after the pairlists to ensure blacklist is respected.\n pairlist = IPairList.verify_blacklist(pairlist, self.blacklist, True)\n\n self._whitelist = pairlist\n", "path": "freqtrade/pairlist/pairlistmanager.py"}, {"content": "\"\"\"\nStatic List provider\n\nProvides lists as configured in config.json\n\n \"\"\"\nimport logging\nfrom abc import ABC, abstractmethod, abstractproperty\nfrom copy import deepcopy\nfrom typing import Any, Dict, List\n\nfrom freqtrade.exchange import market_is_active\n\nlogger = logging.getLogger(__name__)\n\n\nclass IPairList(ABC):\n\n def __init__(self, exchange, pairlistmanager,\n config: Dict[str, Any], pairlistconfig: Dict[str, Any],\n pairlist_pos: int) -> None:\n \"\"\"\n :param exchange: Exchange instance\n :param pairlistmanager: Instanciating Pairlist manager\n :param config: Global bot configuration\n :param pairlistconfig: Configuration for this pairlist - can be empty.\n :param pairlist_pos: Position of the filter in the pairlist-filter-list\n \"\"\"\n self._exchange = exchange\n self._pairlistmanager = pairlistmanager\n self._config = config\n self._pairlistconfig = pairlistconfig\n self._pairlist_pos = pairlist_pos\n\n @property\n def name(self) -> str:\n \"\"\"\n Gets name of the class\n -> no need to overwrite in subclasses\n \"\"\"\n return self.__class__.__name__\n\n @abstractproperty\n def needstickers(self) -> bool:\n \"\"\"\n Boolean property defining if tickers are necessary.\n If no Pairlist requries tickers, an empty List is passed\n as tickers argument to filter_pairlist\n \"\"\"\n\n @abstractmethod\n def short_desc(self) -> str:\n \"\"\"\n Short whitelist method description - used for startup-messages\n -> Please overwrite in subclasses\n \"\"\"\n\n @abstractmethod\n def filter_pairlist(self, pairlist: List[str], tickers: Dict) -> List[str]:\n \"\"\"\n Filters and sorts pairlist and returns the whitelist again.\n Called on each bot iteration - please use internal caching if necessary\n -> Please overwrite in subclasses\n :param pairlist: pairlist to filter or sort\n :param tickers: Tickers (from exchange.get_tickers()). May be cached.\n :return: new whitelist\n \"\"\"\n\n @staticmethod\n def verify_blacklist(pairlist: List[str], blacklist: List[str],\n aswarning: bool) -> List[str]:\n \"\"\"\n Verify and remove items from pairlist - returning a filtered pairlist.\n Logs a warning or info depending on `aswarning`.\n Pairlists explicitly using this method shall use `aswarning=False`!\n :param pairlist: Pairlist to validate\n :param blacklist: Blacklist to validate pairlist against\n :param aswarning: Log message as Warning or info\n :return: pairlist - blacklisted pairs\n \"\"\"\n for pair in deepcopy(pairlist):\n if pair in blacklist:\n if aswarning:\n logger.warning(f\"Pair {pair} in your blacklist. Removing it from whitelist...\")\n else:\n logger.info(f\"Pair {pair} in your blacklist. Removing it from whitelist...\")\n pairlist.remove(pair)\n return pairlist\n\n def _verify_blacklist(self, pairlist: List[str], aswarning: bool = True) -> List[str]:\n \"\"\"\n Proxy method to verify_blacklist for easy access for child classes.\n Logs a warning or info depending on `aswarning`.\n Pairlists explicitly using this method shall use aswarning=False!\n :param pairlist: Pairlist to validate\n :param aswarning: Log message as Warning or info.\n :return: pairlist - blacklisted pairs\n \"\"\"\n return IPairList.verify_blacklist(pairlist, self._pairlistmanager.blacklist,\n aswarning=aswarning)\n\n def _whitelist_for_active_markets(self, pairlist: List[str]) -> List[str]:\n \"\"\"\n Check available markets and remove pair from whitelist if necessary\n :param whitelist: the sorted list of pairs the user might want to trade\n :return: the list of pairs the user wants to trade without those unavailable or\n black_listed\n \"\"\"\n markets = self._exchange.markets\n\n sanitized_whitelist: List[str] = []\n for pair in pairlist:\n # pair is not in the generated dynamic market or has the wrong stake currency\n if pair not in markets:\n logger.warning(f\"Pair {pair} is not compatible with exchange \"\n f\"{self._exchange.name}. Removing it from whitelist..\")\n continue\n\n if self._exchange.get_pair_quote_currency(pair) != self._config['stake_currency']:\n logger.warning(f\"Pair {pair} is not compatible with your stake currency \"\n f\"{self._config['stake_currency']}. Removing it from whitelist..\")\n continue\n\n # Check if market is active\n market = markets[pair]\n if not market_is_active(market):\n logger.info(f\"Ignoring {pair} from whitelist. Market is not active.\")\n continue\n if pair not in sanitized_whitelist:\n sanitized_whitelist.append(pair)\n\n # We need to remove pairs that are unknown\n return sanitized_whitelist\n", "path": "freqtrade/pairlist/IPairList.py"}, {"content": "\"\"\"\nVolume PairList provider\n\nProvides lists as configured in config.json\n\n \"\"\"\nimport logging\nfrom datetime import datetime\nfrom typing import Any, Dict, List\n\nfrom freqtrade.exceptions import OperationalException\nfrom freqtrade.pairlist.IPairList import IPairList\n\nlogger = logging.getLogger(__name__)\n\nSORT_VALUES = ['askVolume', 'bidVolume', 'quoteVolume']\n\n\nclass VolumePairList(IPairList):\n\n def __init__(self, exchange, pairlistmanager, config: Dict[str, Any], pairlistconfig: dict,\n pairlist_pos: int) -> None:\n super().__init__(exchange, pairlistmanager, config, pairlistconfig, pairlist_pos)\n\n if 'number_assets' not in self._pairlistconfig:\n raise OperationalException(\n f'`number_assets` not specified. Please check your configuration '\n 'for \"pairlist.config.number_assets\"')\n self._number_pairs = self._pairlistconfig['number_assets']\n self._sort_key = self._pairlistconfig.get('sort_key', 'quoteVolume')\n self._min_value = self._pairlistconfig.get('min_value', 0)\n self.refresh_period = self._pairlistconfig.get('refresh_period', 1800)\n\n if not self._exchange.exchange_has('fetchTickers'):\n raise OperationalException(\n 'Exchange does not support dynamic whitelist.'\n 'Please edit your config and restart the bot'\n )\n if not self._validate_keys(self._sort_key):\n raise OperationalException(\n f'key {self._sort_key} not in {SORT_VALUES}')\n self._last_refresh = 0\n\n @property\n def needstickers(self) -> bool:\n \"\"\"\n Boolean property defining if tickers are necessary.\n If no Pairlist requries tickers, an empty List is passed\n as tickers argument to filter_pairlist\n \"\"\"\n return True\n\n def _validate_keys(self, key):\n return key in SORT_VALUES\n\n def short_desc(self) -> str:\n \"\"\"\n Short whitelist method description - used for startup-messages\n \"\"\"\n return f\"{self.name} - top {self._pairlistconfig['number_assets']} volume pairs.\"\n\n def filter_pairlist(self, pairlist: List[str], tickers: Dict) -> List[str]:\n \"\"\"\n Filters and sorts pairlist and returns the whitelist again.\n Called on each bot iteration - please use internal caching if necessary\n :param pairlist: pairlist to filter or sort\n :param tickers: Tickers (from exchange.get_tickers()). May be cached.\n :return: new whitelist\n \"\"\"\n # Generate dynamic whitelist\n if self._last_refresh + self.refresh_period < datetime.now().timestamp():\n self._last_refresh = int(datetime.now().timestamp())\n return self._gen_pair_whitelist(pairlist,\n tickers,\n self._config['stake_currency'],\n self._sort_key,\n self._min_value\n )\n else:\n return pairlist\n\n def _gen_pair_whitelist(self, pairlist: List[str], tickers: Dict,\n base_currency: str, key: str, min_val: int) -> List[str]:\n \"\"\"\n Updates the whitelist with with a dynamically generated list\n :param base_currency: base currency as str\n :param key: sort key (defaults to 'quoteVolume')\n :param tickers: Tickers (from exchange.get_tickers()).\n :return: List of pairs\n \"\"\"\n\n if self._pairlist_pos == 0:\n # If VolumePairList is the first in the list, use fresh pairlist\n # Check if pair quote currency equals to the stake currency.\n filtered_tickers = [v for k, v in tickers.items()\n if (self._exchange.get_pair_quote_currency(k) == base_currency\n and v[key] is not None)]\n else:\n # If other pairlist is in front, use the incomming pairlist.\n filtered_tickers = [v for k, v in tickers.items() if k in pairlist]\n\n if min_val > 0:\n filtered_tickers = list(filter(lambda t: t[key] > min_val, filtered_tickers))\n\n sorted_tickers = sorted(filtered_tickers, reverse=True, key=lambda t: t[key])\n\n # Validate whitelist to only have active market pairs\n pairs = self._whitelist_for_active_markets([s['symbol'] for s in sorted_tickers])\n pairs = self._verify_blacklist(pairs, aswarning=False)\n # Limit to X number of pairs\n pairs = pairs[:self._number_pairs]\n logger.info(f\"Searching {self._number_pairs} pairs: {pairs}\")\n\n return pairs\n", "path": "freqtrade/pairlist/VolumePairList.py"}]}
3,729
895
gh_patches_debug_1433
rasdani/github-patches
git_diff
translate__translate-3603
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- po2ts fails with ascii encode error on py2 (should use utf-8) Test file: [octave.zip](https://github.com/translate/translate/files/870288/octave.zip) ``` $ po2ts octave.po oct.ts processing 1 files... po2ts: WARNING: Error processing: input octave.po, output oct.ts, template None: 'ascii' codec can't encode characters in position 187-188: ordinal not in range(128) [###########################################] 100% $ python --version Python 2.7.12 ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `translate/convert/po2ts.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 # 4 # Copyright 2004-2006 Zuza Software Foundation 5 # 6 # This file is part of translate. 7 # 8 # translate is free software; you can redistribute it and/or modify 9 # it under the terms of the GNU General Public License as published by 10 # the Free Software Foundation; either version 2 of the License, or 11 # (at your option) any later version. 12 # 13 # translate is distributed in the hope that it will be useful, 14 # but WITHOUT ANY WARRANTY; without even the implied warranty of 15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 # GNU General Public License for more details. 17 # 18 # You should have received a copy of the GNU General Public License 19 # along with this program; if not, see <http://www.gnu.org/licenses/>. 20 21 """Convert Gettext PO localization files to Qt Linguist (.ts) files. 22 23 See: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/ts2po.html 24 for examples and usage instructions. 25 """ 26 27 from translate.storage import po, ts 28 29 30 class po2ts(object): 31 32 def convertstore(self, inputstore, templatefile=None, context=None): 33 """converts a .po file to .ts format (using a template .ts file if given)""" 34 if templatefile is None: 35 tsfile = ts.QtTsParser() 36 else: 37 tsfile = ts.QtTsParser(templatefile) 38 for inputunit in inputstore.units: 39 if inputunit.isheader() or inputunit.isblank(): 40 continue 41 source = inputunit.source 42 translation = inputunit.target 43 comment = inputunit.getnotes("translator") 44 transtype = None 45 if not inputunit.istranslated(): 46 transtype = "unfinished" 47 elif inputunit.getnotes("developer") == "(obsolete)": 48 transtype = "obsolete" 49 if isinstance(source, bytes): 50 source = source.decode("utf-8") 51 if isinstance(translation, bytes): 52 translation = translation.decode("utf-8") 53 for sourcelocation in inputunit.getlocations(): 54 if context is None: 55 if "#" in sourcelocation: 56 contextname = sourcelocation[:sourcelocation.find("#")] 57 else: 58 contextname = sourcelocation 59 else: 60 contextname = context 61 tsfile.addtranslation(contextname, source, translation, comment, transtype, createifmissing=True) 62 return tsfile.getxml() 63 64 65 def convertpo(inputfile, outputfile, templatefile, context): 66 """reads in stdin using fromfileclass, converts using convertorclass, writes to stdout""" 67 inputstore = po.pofile(inputfile) 68 if inputstore.isempty(): 69 return 0 70 convertor = po2ts() 71 outputstring = convertor.convertstore(inputstore, templatefile, context) 72 outputfile.write(outputstring) 73 return 1 74 75 76 def main(argv=None): 77 from translate.convert import convert 78 formats = {"po": ("ts", convertpo), ("po", "ts"): ("ts", convertpo)} 79 parser = convert.ConvertOptionParser(formats, usepots=False, usetemplates=True, description=__doc__) 80 parser.add_option("-c", "--context", dest="context", default=None, 81 help="use supplied context instead of the one in the .po file comment") 82 parser.passthrough.append("context") 83 parser.run(argv) 84 85 86 if __name__ == '__main__': 87 main() 88 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/translate/convert/po2ts.py b/translate/convert/po2ts.py --- a/translate/convert/po2ts.py +++ b/translate/convert/po2ts.py @@ -69,7 +69,7 @@ return 0 convertor = po2ts() outputstring = convertor.convertstore(inputstore, templatefile, context) - outputfile.write(outputstring) + outputfile.write(outputstring.encode('utf-8')) return 1
{"golden_diff": "diff --git a/translate/convert/po2ts.py b/translate/convert/po2ts.py\n--- a/translate/convert/po2ts.py\n+++ b/translate/convert/po2ts.py\n@@ -69,7 +69,7 @@\n return 0\n convertor = po2ts()\n outputstring = convertor.convertstore(inputstore, templatefile, context)\n- outputfile.write(outputstring)\n+ outputfile.write(outputstring.encode('utf-8'))\n return 1\n", "issue": "po2ts fails with ascii encode error on py2 (should use utf-8)\nTest file:\r\n[octave.zip](https://github.com/translate/translate/files/870288/octave.zip)\r\n\r\n```\r\n$ po2ts octave.po oct.ts\r\nprocessing 1 files...\r\npo2ts: WARNING: Error processing: input octave.po, output oct.ts, template None: 'ascii' codec can't encode characters in position 187-188: ordinal not in range(128)\r\n[###########################################] 100%\r\n\r\n$ python --version\r\nPython 2.7.12\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2004-2006 Zuza Software Foundation\n#\n# This file is part of translate.\n#\n# translate is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# translate is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Convert Gettext PO localization files to Qt Linguist (.ts) files.\n\nSee: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/ts2po.html\nfor examples and usage instructions.\n\"\"\"\n\nfrom translate.storage import po, ts\n\n\nclass po2ts(object):\n\n def convertstore(self, inputstore, templatefile=None, context=None):\n \"\"\"converts a .po file to .ts format (using a template .ts file if given)\"\"\"\n if templatefile is None:\n tsfile = ts.QtTsParser()\n else:\n tsfile = ts.QtTsParser(templatefile)\n for inputunit in inputstore.units:\n if inputunit.isheader() or inputunit.isblank():\n continue\n source = inputunit.source\n translation = inputunit.target\n comment = inputunit.getnotes(\"translator\")\n transtype = None\n if not inputunit.istranslated():\n transtype = \"unfinished\"\n elif inputunit.getnotes(\"developer\") == \"(obsolete)\":\n transtype = \"obsolete\"\n if isinstance(source, bytes):\n source = source.decode(\"utf-8\")\n if isinstance(translation, bytes):\n translation = translation.decode(\"utf-8\")\n for sourcelocation in inputunit.getlocations():\n if context is None:\n if \"#\" in sourcelocation:\n contextname = sourcelocation[:sourcelocation.find(\"#\")]\n else:\n contextname = sourcelocation\n else:\n contextname = context\n tsfile.addtranslation(contextname, source, translation, comment, transtype, createifmissing=True)\n return tsfile.getxml()\n\n\ndef convertpo(inputfile, outputfile, templatefile, context):\n \"\"\"reads in stdin using fromfileclass, converts using convertorclass, writes to stdout\"\"\"\n inputstore = po.pofile(inputfile)\n if inputstore.isempty():\n return 0\n convertor = po2ts()\n outputstring = convertor.convertstore(inputstore, templatefile, context)\n outputfile.write(outputstring)\n return 1\n\n\ndef main(argv=None):\n from translate.convert import convert\n formats = {\"po\": (\"ts\", convertpo), (\"po\", \"ts\"): (\"ts\", convertpo)}\n parser = convert.ConvertOptionParser(formats, usepots=False, usetemplates=True, description=__doc__)\n parser.add_option(\"-c\", \"--context\", dest=\"context\", default=None,\n help=\"use supplied context instead of the one in the .po file comment\")\n parser.passthrough.append(\"context\")\n parser.run(argv)\n\n\nif __name__ == '__main__':\n main()\n", "path": "translate/convert/po2ts.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2004-2006 Zuza Software Foundation\n#\n# This file is part of translate.\n#\n# translate is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# translate is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Convert Gettext PO localization files to Qt Linguist (.ts) files.\n\nSee: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/ts2po.html\nfor examples and usage instructions.\n\"\"\"\n\nfrom translate.storage import po, ts\n\n\nclass po2ts(object):\n\n def convertstore(self, inputstore, templatefile=None, context=None):\n \"\"\"converts a .po file to .ts format (using a template .ts file if given)\"\"\"\n if templatefile is None:\n tsfile = ts.QtTsParser()\n else:\n tsfile = ts.QtTsParser(templatefile)\n for inputunit in inputstore.units:\n if inputunit.isheader() or inputunit.isblank():\n continue\n source = inputunit.source\n translation = inputunit.target\n comment = inputunit.getnotes(\"translator\")\n transtype = None\n if not inputunit.istranslated():\n transtype = \"unfinished\"\n elif inputunit.getnotes(\"developer\") == \"(obsolete)\":\n transtype = \"obsolete\"\n if isinstance(source, bytes):\n source = source.decode(\"utf-8\")\n if isinstance(translation, bytes):\n translation = translation.decode(\"utf-8\")\n for sourcelocation in inputunit.getlocations():\n if context is None:\n if \"#\" in sourcelocation:\n contextname = sourcelocation[:sourcelocation.find(\"#\")]\n else:\n contextname = sourcelocation\n else:\n contextname = context\n tsfile.addtranslation(contextname, source, translation, comment, transtype, createifmissing=True)\n return tsfile.getxml()\n\n\ndef convertpo(inputfile, outputfile, templatefile, context):\n \"\"\"reads in stdin using fromfileclass, converts using convertorclass, writes to stdout\"\"\"\n inputstore = po.pofile(inputfile)\n if inputstore.isempty():\n return 0\n convertor = po2ts()\n outputstring = convertor.convertstore(inputstore, templatefile, context)\n outputfile.write(outputstring.encode('utf-8'))\n return 1\n\n\ndef main(argv=None):\n from translate.convert import convert\n formats = {\"po\": (\"ts\", convertpo), (\"po\", \"ts\"): (\"ts\", convertpo)}\n parser = convert.ConvertOptionParser(formats, usepots=False, usetemplates=True, description=__doc__)\n parser.add_option(\"-c\", \"--context\", dest=\"context\", default=None,\n help=\"use supplied context instead of the one in the .po file comment\")\n parser.passthrough.append(\"context\")\n parser.run(argv)\n\n\nif __name__ == '__main__':\n main()\n", "path": "translate/convert/po2ts.py"}]}
1,316
115
gh_patches_debug_29838
rasdani/github-patches
git_diff
gammapy__gammapy-1601
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Incorrect results for SkyPointSource The SkyPointSource spatial model gives a wrong npred. eg: `spatial_model = SkyGaussian( lon_0='0.2 deg', lat_0='0.1 deg', sigma='0.01 deg', ) spectral_model = PowerLaw( index=3, amplitude='1e-11 cm-2 s-1 TeV-1', reference='1 TeV', ) sky_model = SkyModel( spatial_model=spatial_model, spectral_model=spectral_model, )' 'evaluator = MapEvaluator(sky_model, exposure=exposure_map)' 'npred = evaluator.compute_npred()' 'print(npred.sum())` gives `3846` predicted events in total. `spatial_model1 = SkyPointSource( lon_0='0.2 deg', lat_0='0.1 deg', ) spectral_model = PowerLaw( index=3, amplitude='1e-11 cm-2 s-1 TeV-1', reference='1 TeV', ) sky_model1 = SkyModel( spatial_model=spatial_model1, spectral_model=spectral_model, )' `evaluator = MapEvaluator(sky_model1, exposure=exposure_map)` `npred = evaluator.compute_npred()` `print(npred.sum())` gives `0.0004824` predicted events in total (for the detailed code, see https://gist.github.com/AtreyeeS/d5161ebe30f29c1a19b34757a1916301) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `gammapy/image/models/new.py` Content: ``` 1 # Licensed under a 3-clause BSD style license - see LICENSE.rst 2 """ 3 Spatial models for astrophysical gamma-ray sources. 4 """ 5 from __future__ import absolute_import, division, print_function, unicode_literals 6 import copy 7 import abc 8 import numpy as np 9 import astropy.units as u 10 from astropy.coordinates.angle_utilities import angular_separation 11 from astropy.coordinates import Angle, Longitude, Latitude 12 from ...extern import six 13 from ...utils.modeling import Parameter, ParameterList 14 from ...maps import Map 15 16 __all__ = [ 17 'SkySpatialModel', 18 'SkyPointSource', 19 'SkyGaussian', 20 'SkyDisk', 21 'SkyShell', 22 'SkyDiffuseConstant', 23 'SkyDiffuseMap', 24 ] 25 26 27 @six.add_metaclass(abc.ABCMeta) 28 class SkySpatialModel(object): 29 """SkySpatial model base class. 30 """ 31 32 def __str__(self): 33 ss = self.__class__.__name__ 34 ss += '\n\nParameters: \n\n\t' 35 36 table = self.parameters.to_table() 37 ss += '\n\t'.join(table.pformat()) 38 39 if self.parameters.covariance is not None: 40 ss += '\n\nCovariance: \n\n\t' 41 covar = self.parameters.covariance_to_table() 42 ss += '\n\t'.join(covar.pformat()) 43 return ss 44 45 def __call__(self, lon, lat): 46 """Call evaluate method""" 47 kwargs = dict() 48 for par in self.parameters.parameters: 49 kwargs[par.name] = par.quantity 50 51 return self.evaluate(lon, lat, **kwargs) 52 53 def copy(self): 54 """A deep copy.""" 55 return copy.deepcopy(self) 56 57 58 class SkyPointSource(SkySpatialModel): 59 r"""Point Source. 60 61 .. math:: 62 63 \phi(lon, lat) = \delta{(lon - lon_0, lat - lat_0)} 64 65 A tolerance of 1 arcsecond is accepted for numerical stability 66 67 Parameters 68 ---------- 69 lon_0 : `~astropy.coordinates.Longitude` 70 :math:`lon_0` 71 lat_0 : `~astropy.coordinates.Latitude` 72 :math:`lat_0` 73 """ 74 75 def __init__(self, lon_0, lat_0): 76 self.parameters = ParameterList([ 77 Parameter('lon_0', Longitude(lon_0)), 78 Parameter('lat_0', Latitude(lat_0)) 79 ]) 80 81 @staticmethod 82 def evaluate(lon, lat, lon_0, lat_0): 83 """Evaluate the model (static function).""" 84 85 wrapval=lon_0 + 180*u.deg 86 lon = Angle(lon).wrap_at(wrapval) 87 88 _, grad_lon = np.gradient(lon) 89 grad_lat, _ = np.gradient(lat) 90 lon_diff = np.abs((lon - lon_0) / grad_lon) 91 lat_diff = np.abs((lat - lat_0) / grad_lat) 92 93 lon_val = np.select([lon_diff < 1], [1 - lon_diff], 0) 94 lat_val = np.select([lat_diff < 1], [1 - lat_diff], 0) 95 val = lon_val * lat_val 96 return val * u.Unit('sr-1') 97 98 99 class SkyGaussian(SkySpatialModel): 100 r"""Two-dimensional symmetric Gaussian model. 101 102 .. math:: 103 104 \phi(lon, lat) = \frac{1}{2\pi\sigma^2} \exp{\left(-\frac{1}{2} 105 \frac{\theta^2}{\sigma^2}\right)} 106 107 where :math:`\theta` is the sky separation 108 109 Parameters 110 ---------- 111 lon_0 : `~astropy.coordinates.Longitude` 112 :math:`lon_0` 113 lat_0 : `~astropy.coordinates.Latitude` 114 :math:`lat_0` 115 sigma : `~astropy.coordinates.Angle` 116 :math:`\sigma` 117 """ 118 119 def __init__(self, lon_0, lat_0, sigma): 120 self.parameters = ParameterList([ 121 Parameter('lon_0', Longitude(lon_0)), 122 Parameter('lat_0', Latitude(lat_0)), 123 Parameter('sigma', Angle(sigma)) 124 ]) 125 126 @staticmethod 127 def evaluate(lon, lat, lon_0, lat_0, sigma): 128 """Evaluate the model (static function).""" 129 sep = angular_separation(lon, lat, lon_0, lat_0) 130 sep = sep.to('rad').value 131 sigma = sigma.to('rad').value 132 133 norm = 1 / (2 * np.pi * sigma ** 2) 134 exponent = -0.5 * (sep / sigma) ** 2 135 val = norm * np.exp(exponent) 136 137 return val * u.Unit('sr-1') 138 139 140 class SkyDisk(SkySpatialModel): 141 r"""Constant radial disk model. 142 143 .. math:: 144 145 \phi(lon, lat) = \frac{1}{2 \pi (1 - \cos{r}) } \cdot 146 \begin{cases} 147 1 & \text{for } \theta \leq r_0 \\ 148 0 & \text{for } \theta < r_0 149 \end{cases} 150 151 where :math:`\theta` is the sky separation 152 153 Parameters 154 ---------- 155 lon_0 : `~astropy.coordinates.Longitude` 156 :math:`lon_0` 157 lat_0 : `~astropy.coordinates.Latitude` 158 :math:`lat_0` 159 r_0 : `~astropy.coordinates.Angle` 160 :math:`r_0` 161 """ 162 163 def __init__(self, lon_0, lat_0, r_0): 164 self.parameters = ParameterList([ 165 Parameter('lon_0', Longitude(lon_0)), 166 Parameter('lat_0', Latitude(lat_0)), 167 Parameter('r_0', Angle(r_0)) 168 ]) 169 170 @staticmethod 171 def evaluate(lon, lat, lon_0, lat_0, r_0): 172 """Evaluate the model (static function).""" 173 sep = angular_separation(lon, lat, lon_0, lat_0) 174 sep = sep.to('rad').value 175 r_0 = r_0.to('rad').value 176 177 norm = 1. / (2 * np.pi * (1 - np.cos(r_0))) 178 val = np.where(sep <= r_0, norm, 0) 179 180 return val * u.Unit('sr-1') 181 182 183 class SkyShell(SkySpatialModel): 184 r"""Shell model 185 186 .. math:: 187 188 \phi(lon, lat) = \frac{3}{2 \pi (r_{out}^3 - r_{in}^3)} \cdot 189 \begin{cases} 190 \sqrt{r_{out}^2 - \theta^2} - \sqrt{r_{in}^2 - \theta^2} & 191 \text{for } \theta \lt r_{in} \\ 192 \sqrt{r_{out}^2 - \theta^2} & 193 \text{for } r_{in} \leq \theta \lt r_{out} \\ 194 0 & \text{for } \theta > r_{out} 195 \end{cases} 196 197 where :math:`\theta` is the sky separation and :math:`r_out = r_in` + width 198 199 Note that the normalization is a small angle approximation, 200 although that approximation is still very good even for 10 deg radius shells. 201 202 Parameters 203 ---------- 204 lon_0 : `~astropy.coordinates.Longitude` 205 :math:`lon_0` 206 lat_0 : `~astropy.coordinates.Latitude` 207 :math:`lat_0` 208 radius : `~astropy.coordinates.Angle` 209 Inner radius, :math:`r_{in}` 210 width : `~astropy.coordinates.Angle` 211 Shell width 212 """ 213 214 def __init__(self, lon_0, lat_0, radius, width): 215 self.parameters = ParameterList([ 216 Parameter('lon_0', Longitude(lon_0)), 217 Parameter('lat_0', Latitude(lat_0)), 218 Parameter('radius', Angle(radius)), 219 Parameter('width', Angle(width)) 220 ]) 221 222 @staticmethod 223 def evaluate(lon, lat, lon_0, lat_0, radius, width): 224 """Evaluate the model (static function).""" 225 sep = angular_separation(lon, lat, lon_0, lat_0) 226 sep = sep.to('rad').value 227 r_i = radius.to('rad').value 228 r_o = (radius + width).to('rad').value 229 230 norm = 3 / (2 * np.pi * (r_o ** 3 - r_i ** 3)) 231 232 with np.errstate(invalid='ignore'): 233 val_out = np.sqrt(r_o ** 2 - sep ** 2) 234 val_in = val_out - np.sqrt(r_i ** 2 - sep ** 2) 235 val = np.select([sep < r_i, sep < r_o], [val_in, val_out]) 236 237 return norm * val * u.Unit('sr-1') 238 239 240 class SkyDiffuseConstant(SkySpatialModel): 241 """Spatially constant (isotropic) spatial model. 242 243 Parameters 244 ---------- 245 value : `~astropy.units.Quantity` 246 Value 247 """ 248 249 def __init__(self, value=1): 250 self.parameters = ParameterList([ 251 Parameter('value', value), 252 ]) 253 254 @staticmethod 255 def evaluate(lon, lat, value): 256 # TODO: try fitting this -> probably the interface doesn't work?! 257 return value 258 259 260 class SkyDiffuseMap(SkySpatialModel): 261 """Spatial sky map template model. 262 263 At the moment only support 2D maps. 264 TODO: support maps with an energy axis here or in a separate class? 265 TODO: should we cache some interpolator object for efficiency? 266 267 Parameters 268 ---------- 269 map : `~gammapy.map.Map` 270 Map template 271 norm : `~astropy.units.Quantity` 272 Norm parameter (multiplied with map values) 273 meta : dict, optional 274 Meta information, meta['filename'] will be used for serialization 275 """ 276 277 def __init__(self, map, norm=1, meta=None): 278 self._map = map 279 self.parameters = ParameterList([ 280 Parameter('norm', norm), 281 ]) 282 self.meta = dict() if meta is None else meta 283 284 @classmethod 285 def read(cls, filename, **kwargs): 286 """Read spatial template model from FITS image. 287 288 Parameters 289 ---------- 290 filename : str 291 FITS image filename. 292 """ 293 template = Map.read(filename, **kwargs) 294 return cls(template) 295 296 def evaluate(self, lon, lat, norm): 297 coord = dict( 298 lon=lon.to('deg').value, 299 lat=lat.to('deg').value, 300 ) 301 val = self._map.interp_by_coord(coord, fill_value=0) 302 # TODO: use map unit? self._map.unit 303 return norm * val * u.Unit('sr-1') 304 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/gammapy/image/models/new.py b/gammapy/image/models/new.py --- a/gammapy/image/models/new.py +++ b/gammapy/image/models/new.py @@ -82,7 +82,7 @@ def evaluate(lon, lat, lon_0, lat_0): """Evaluate the model (static function).""" - wrapval=lon_0 + 180*u.deg + wrapval = lon_0 + 180 * u.deg lon = Angle(lon).wrap_at(wrapval) _, grad_lon = np.gradient(lon) @@ -90,10 +90,10 @@ lon_diff = np.abs((lon - lon_0) / grad_lon) lat_diff = np.abs((lat - lat_0) / grad_lat) - lon_val = np.select([lon_diff < 1], [1 - lon_diff], 0) - lat_val = np.select([lat_diff < 1], [1 - lat_diff], 0) + lon_val = np.select([lon_diff < 1], [1 - lon_diff], 0) / np.abs(grad_lon) + lat_val = np.select([lat_diff < 1], [1 - lat_diff], 0) / np.abs(grad_lat) val = lon_val * lat_val - return val * u.Unit('sr-1') + return val.to('sr-1') class SkyGaussian(SkySpatialModel): @@ -189,7 +189,7 @@ \begin{cases} \sqrt{r_{out}^2 - \theta^2} - \sqrt{r_{in}^2 - \theta^2} & \text{for } \theta \lt r_{in} \\ - \sqrt{r_{out}^2 - \theta^2} & + \sqrt{r_{out}^2 - \theta^2} & \text{for } r_{in} \leq \theta \lt r_{out} \\ 0 & \text{for } \theta > r_{out} \end{cases}
{"golden_diff": "diff --git a/gammapy/image/models/new.py b/gammapy/image/models/new.py\n--- a/gammapy/image/models/new.py\n+++ b/gammapy/image/models/new.py\n@@ -82,7 +82,7 @@\n def evaluate(lon, lat, lon_0, lat_0):\n \"\"\"Evaluate the model (static function).\"\"\"\n \n- wrapval=lon_0 + 180*u.deg\n+ wrapval = lon_0 + 180 * u.deg\n lon = Angle(lon).wrap_at(wrapval)\n \n _, grad_lon = np.gradient(lon)\n@@ -90,10 +90,10 @@\n lon_diff = np.abs((lon - lon_0) / grad_lon)\n lat_diff = np.abs((lat - lat_0) / grad_lat)\n \n- lon_val = np.select([lon_diff < 1], [1 - lon_diff], 0)\n- lat_val = np.select([lat_diff < 1], [1 - lat_diff], 0)\n+ lon_val = np.select([lon_diff < 1], [1 - lon_diff], 0) / np.abs(grad_lon)\n+ lat_val = np.select([lat_diff < 1], [1 - lat_diff], 0) / np.abs(grad_lat)\n val = lon_val * lat_val\n- return val * u.Unit('sr-1')\n+ return val.to('sr-1')\n \n \n class SkyGaussian(SkySpatialModel):\n@@ -189,7 +189,7 @@\n \\begin{cases}\n \\sqrt{r_{out}^2 - \\theta^2} - \\sqrt{r_{in}^2 - \\theta^2} &\n \\text{for } \\theta \\lt r_{in} \\\\\n- \\sqrt{r_{out}^2 - \\theta^2} & \n+ \\sqrt{r_{out}^2 - \\theta^2} &\n \\text{for } r_{in} \\leq \\theta \\lt r_{out} \\\\\n 0 & \\text{for } \\theta > r_{out}\n \\end{cases}\n", "issue": "Incorrect results for SkyPointSource\nThe SkyPointSource spatial model gives a wrong npred.\r\n\r\neg:\r\n\r\n`spatial_model = SkyGaussian(\r\n lon_0='0.2 deg',\r\n lat_0='0.1 deg',\r\n sigma='0.01 deg',\r\n)\r\nspectral_model = PowerLaw(\r\n index=3,\r\n amplitude='1e-11 cm-2 s-1 TeV-1',\r\n reference='1 TeV',\r\n)\r\nsky_model = SkyModel(\r\n spatial_model=spatial_model,\r\n spectral_model=spectral_model,\r\n)'\r\n'evaluator = MapEvaluator(sky_model, exposure=exposure_map)'\r\n'npred = evaluator.compute_npred()'\r\n'print(npred.sum())`\r\n\r\ngives `3846` predicted events in total.\r\n\r\n`spatial_model1 = SkyPointSource(\r\n lon_0='0.2 deg',\r\n lat_0='0.1 deg',\r\n)\r\nspectral_model = PowerLaw(\r\n index=3,\r\n amplitude='1e-11 cm-2 s-1 TeV-1',\r\n reference='1 TeV',\r\n)\r\nsky_model1 = SkyModel(\r\n spatial_model=spatial_model1,\r\n spectral_model=spectral_model,\r\n)'\r\n`evaluator = MapEvaluator(sky_model1, exposure=exposure_map)`\r\n`npred = evaluator.compute_npred()`\r\n`print(npred.sum())`\r\n\r\ngives `0.0004824` predicted events in total\r\n\r\n\r\n\r\n(for the detailed code, see https://gist.github.com/AtreyeeS/d5161ebe30f29c1a19b34757a1916301)\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"\nSpatial models for astrophysical gamma-ray sources.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport copy\nimport abc\nimport numpy as np\nimport astropy.units as u\nfrom astropy.coordinates.angle_utilities import angular_separation\nfrom astropy.coordinates import Angle, Longitude, Latitude\nfrom ...extern import six\nfrom ...utils.modeling import Parameter, ParameterList\nfrom ...maps import Map\n\n__all__ = [\n 'SkySpatialModel',\n 'SkyPointSource',\n 'SkyGaussian',\n 'SkyDisk',\n 'SkyShell',\n 'SkyDiffuseConstant',\n 'SkyDiffuseMap',\n]\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass SkySpatialModel(object):\n \"\"\"SkySpatial model base class.\n \"\"\"\n\n def __str__(self):\n ss = self.__class__.__name__\n ss += '\\n\\nParameters: \\n\\n\\t'\n\n table = self.parameters.to_table()\n ss += '\\n\\t'.join(table.pformat())\n\n if self.parameters.covariance is not None:\n ss += '\\n\\nCovariance: \\n\\n\\t'\n covar = self.parameters.covariance_to_table()\n ss += '\\n\\t'.join(covar.pformat())\n return ss\n\n def __call__(self, lon, lat):\n \"\"\"Call evaluate method\"\"\"\n kwargs = dict()\n for par in self.parameters.parameters:\n kwargs[par.name] = par.quantity\n\n return self.evaluate(lon, lat, **kwargs)\n\n def copy(self):\n \"\"\"A deep copy.\"\"\"\n return copy.deepcopy(self)\n\n\nclass SkyPointSource(SkySpatialModel):\n r\"\"\"Point Source.\n\n .. math::\n\n \\phi(lon, lat) = \\delta{(lon - lon_0, lat - lat_0)}\n\n A tolerance of 1 arcsecond is accepted for numerical stability\n\n Parameters\n ----------\n lon_0 : `~astropy.coordinates.Longitude`\n :math:`lon_0`\n lat_0 : `~astropy.coordinates.Latitude`\n :math:`lat_0`\n \"\"\"\n\n def __init__(self, lon_0, lat_0):\n self.parameters = ParameterList([\n Parameter('lon_0', Longitude(lon_0)),\n Parameter('lat_0', Latitude(lat_0))\n ])\n\n @staticmethod\n def evaluate(lon, lat, lon_0, lat_0):\n \"\"\"Evaluate the model (static function).\"\"\"\n\n wrapval=lon_0 + 180*u.deg\n lon = Angle(lon).wrap_at(wrapval)\n\n _, grad_lon = np.gradient(lon)\n grad_lat, _ = np.gradient(lat)\n lon_diff = np.abs((lon - lon_0) / grad_lon)\n lat_diff = np.abs((lat - lat_0) / grad_lat)\n\n lon_val = np.select([lon_diff < 1], [1 - lon_diff], 0)\n lat_val = np.select([lat_diff < 1], [1 - lat_diff], 0)\n val = lon_val * lat_val\n return val * u.Unit('sr-1')\n\n\nclass SkyGaussian(SkySpatialModel):\n r\"\"\"Two-dimensional symmetric Gaussian model.\n\n .. math::\n\n \\phi(lon, lat) = \\frac{1}{2\\pi\\sigma^2} \\exp{\\left(-\\frac{1}{2}\n \\frac{\\theta^2}{\\sigma^2}\\right)}\n\n where :math:`\\theta` is the sky separation\n\n Parameters\n ----------\n lon_0 : `~astropy.coordinates.Longitude`\n :math:`lon_0`\n lat_0 : `~astropy.coordinates.Latitude`\n :math:`lat_0`\n sigma : `~astropy.coordinates.Angle`\n :math:`\\sigma`\n \"\"\"\n\n def __init__(self, lon_0, lat_0, sigma):\n self.parameters = ParameterList([\n Parameter('lon_0', Longitude(lon_0)),\n Parameter('lat_0', Latitude(lat_0)),\n Parameter('sigma', Angle(sigma))\n ])\n\n @staticmethod\n def evaluate(lon, lat, lon_0, lat_0, sigma):\n \"\"\"Evaluate the model (static function).\"\"\"\n sep = angular_separation(lon, lat, lon_0, lat_0)\n sep = sep.to('rad').value\n sigma = sigma.to('rad').value\n\n norm = 1 / (2 * np.pi * sigma ** 2)\n exponent = -0.5 * (sep / sigma) ** 2\n val = norm * np.exp(exponent)\n\n return val * u.Unit('sr-1')\n\n\nclass SkyDisk(SkySpatialModel):\n r\"\"\"Constant radial disk model.\n\n .. math::\n\n \\phi(lon, lat) = \\frac{1}{2 \\pi (1 - \\cos{r}) } \\cdot\n \\begin{cases}\n 1 & \\text{for } \\theta \\leq r_0 \\\\\n 0 & \\text{for } \\theta < r_0\n \\end{cases}\n\n where :math:`\\theta` is the sky separation\n\n Parameters\n ----------\n lon_0 : `~astropy.coordinates.Longitude`\n :math:`lon_0`\n lat_0 : `~astropy.coordinates.Latitude`\n :math:`lat_0`\n r_0 : `~astropy.coordinates.Angle`\n :math:`r_0`\n \"\"\"\n\n def __init__(self, lon_0, lat_0, r_0):\n self.parameters = ParameterList([\n Parameter('lon_0', Longitude(lon_0)),\n Parameter('lat_0', Latitude(lat_0)),\n Parameter('r_0', Angle(r_0))\n ])\n\n @staticmethod\n def evaluate(lon, lat, lon_0, lat_0, r_0):\n \"\"\"Evaluate the model (static function).\"\"\"\n sep = angular_separation(lon, lat, lon_0, lat_0)\n sep = sep.to('rad').value\n r_0 = r_0.to('rad').value\n\n norm = 1. / (2 * np.pi * (1 - np.cos(r_0)))\n val = np.where(sep <= r_0, norm, 0)\n\n return val * u.Unit('sr-1')\n\n\nclass SkyShell(SkySpatialModel):\n r\"\"\"Shell model\n\n .. math::\n\n \\phi(lon, lat) = \\frac{3}{2 \\pi (r_{out}^3 - r_{in}^3)} \\cdot\n \\begin{cases}\n \\sqrt{r_{out}^2 - \\theta^2} - \\sqrt{r_{in}^2 - \\theta^2} &\n \\text{for } \\theta \\lt r_{in} \\\\\n \\sqrt{r_{out}^2 - \\theta^2} & \n \\text{for } r_{in} \\leq \\theta \\lt r_{out} \\\\\n 0 & \\text{for } \\theta > r_{out}\n \\end{cases}\n\n where :math:`\\theta` is the sky separation and :math:`r_out = r_in` + width\n\n Note that the normalization is a small angle approximation,\n although that approximation is still very good even for 10 deg radius shells.\n\n Parameters\n ----------\n lon_0 : `~astropy.coordinates.Longitude`\n :math:`lon_0`\n lat_0 : `~astropy.coordinates.Latitude`\n :math:`lat_0`\n radius : `~astropy.coordinates.Angle`\n Inner radius, :math:`r_{in}`\n width : `~astropy.coordinates.Angle`\n Shell width\n \"\"\"\n\n def __init__(self, lon_0, lat_0, radius, width):\n self.parameters = ParameterList([\n Parameter('lon_0', Longitude(lon_0)),\n Parameter('lat_0', Latitude(lat_0)),\n Parameter('radius', Angle(radius)),\n Parameter('width', Angle(width))\n ])\n\n @staticmethod\n def evaluate(lon, lat, lon_0, lat_0, radius, width):\n \"\"\"Evaluate the model (static function).\"\"\"\n sep = angular_separation(lon, lat, lon_0, lat_0)\n sep = sep.to('rad').value\n r_i = radius.to('rad').value\n r_o = (radius + width).to('rad').value\n\n norm = 3 / (2 * np.pi * (r_o ** 3 - r_i ** 3))\n\n with np.errstate(invalid='ignore'):\n val_out = np.sqrt(r_o ** 2 - sep ** 2)\n val_in = val_out - np.sqrt(r_i ** 2 - sep ** 2)\n val = np.select([sep < r_i, sep < r_o], [val_in, val_out])\n\n return norm * val * u.Unit('sr-1')\n\n\nclass SkyDiffuseConstant(SkySpatialModel):\n \"\"\"Spatially constant (isotropic) spatial model.\n\n Parameters\n ----------\n value : `~astropy.units.Quantity`\n Value\n \"\"\"\n\n def __init__(self, value=1):\n self.parameters = ParameterList([\n Parameter('value', value),\n ])\n\n @staticmethod\n def evaluate(lon, lat, value):\n # TODO: try fitting this -> probably the interface doesn't work?!\n return value\n\n\nclass SkyDiffuseMap(SkySpatialModel):\n \"\"\"Spatial sky map template model.\n\n At the moment only support 2D maps.\n TODO: support maps with an energy axis here or in a separate class?\n TODO: should we cache some interpolator object for efficiency?\n\n Parameters\n ----------\n map : `~gammapy.map.Map`\n Map template\n norm : `~astropy.units.Quantity`\n Norm parameter (multiplied with map values)\n meta : dict, optional\n Meta information, meta['filename'] will be used for serialization\n \"\"\"\n\n def __init__(self, map, norm=1, meta=None):\n self._map = map\n self.parameters = ParameterList([\n Parameter('norm', norm),\n ])\n self.meta = dict() if meta is None else meta\n\n @classmethod\n def read(cls, filename, **kwargs):\n \"\"\"Read spatial template model from FITS image.\n\n Parameters\n ----------\n filename : str\n FITS image filename.\n \"\"\"\n template = Map.read(filename, **kwargs)\n return cls(template)\n\n def evaluate(self, lon, lat, norm):\n coord = dict(\n lon=lon.to('deg').value,\n lat=lat.to('deg').value,\n )\n val = self._map.interp_by_coord(coord, fill_value=0)\n # TODO: use map unit? self._map.unit\n return norm * val * u.Unit('sr-1')\n", "path": "gammapy/image/models/new.py"}], "after_files": [{"content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"\nSpatial models for astrophysical gamma-ray sources.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport copy\nimport abc\nimport numpy as np\nimport astropy.units as u\nfrom astropy.coordinates.angle_utilities import angular_separation\nfrom astropy.coordinates import Angle, Longitude, Latitude\nfrom ...extern import six\nfrom ...utils.modeling import Parameter, ParameterList\nfrom ...maps import Map\n\n__all__ = [\n 'SkySpatialModel',\n 'SkyPointSource',\n 'SkyGaussian',\n 'SkyDisk',\n 'SkyShell',\n 'SkyDiffuseConstant',\n 'SkyDiffuseMap',\n]\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass SkySpatialModel(object):\n \"\"\"SkySpatial model base class.\n \"\"\"\n\n def __str__(self):\n ss = self.__class__.__name__\n ss += '\\n\\nParameters: \\n\\n\\t'\n\n table = self.parameters.to_table()\n ss += '\\n\\t'.join(table.pformat())\n\n if self.parameters.covariance is not None:\n ss += '\\n\\nCovariance: \\n\\n\\t'\n covar = self.parameters.covariance_to_table()\n ss += '\\n\\t'.join(covar.pformat())\n return ss\n\n def __call__(self, lon, lat):\n \"\"\"Call evaluate method\"\"\"\n kwargs = dict()\n for par in self.parameters.parameters:\n kwargs[par.name] = par.quantity\n\n return self.evaluate(lon, lat, **kwargs)\n\n def copy(self):\n \"\"\"A deep copy.\"\"\"\n return copy.deepcopy(self)\n\n\nclass SkyPointSource(SkySpatialModel):\n r\"\"\"Point Source.\n\n .. math::\n\n \\phi(lon, lat) = \\delta{(lon - lon_0, lat - lat_0)}\n\n A tolerance of 1 arcsecond is accepted for numerical stability\n\n Parameters\n ----------\n lon_0 : `~astropy.coordinates.Longitude`\n :math:`lon_0`\n lat_0 : `~astropy.coordinates.Latitude`\n :math:`lat_0`\n \"\"\"\n\n def __init__(self, lon_0, lat_0):\n self.parameters = ParameterList([\n Parameter('lon_0', Longitude(lon_0)),\n Parameter('lat_0', Latitude(lat_0))\n ])\n\n @staticmethod\n def evaluate(lon, lat, lon_0, lat_0):\n \"\"\"Evaluate the model (static function).\"\"\"\n\n wrapval = lon_0 + 180 * u.deg\n lon = Angle(lon).wrap_at(wrapval)\n\n _, grad_lon = np.gradient(lon)\n grad_lat, _ = np.gradient(lat)\n lon_diff = np.abs((lon - lon_0) / grad_lon)\n lat_diff = np.abs((lat - lat_0) / grad_lat)\n\n lon_val = np.select([lon_diff < 1], [1 - lon_diff], 0) / np.abs(grad_lon)\n lat_val = np.select([lat_diff < 1], [1 - lat_diff], 0) / np.abs(grad_lat)\n val = lon_val * lat_val\n return val.to('sr-1')\n\n\nclass SkyGaussian(SkySpatialModel):\n r\"\"\"Two-dimensional symmetric Gaussian model.\n\n .. math::\n\n \\phi(lon, lat) = \\frac{1}{2\\pi\\sigma^2} \\exp{\\left(-\\frac{1}{2}\n \\frac{\\theta^2}{\\sigma^2}\\right)}\n\n where :math:`\\theta` is the sky separation\n\n Parameters\n ----------\n lon_0 : `~astropy.coordinates.Longitude`\n :math:`lon_0`\n lat_0 : `~astropy.coordinates.Latitude`\n :math:`lat_0`\n sigma : `~astropy.coordinates.Angle`\n :math:`\\sigma`\n \"\"\"\n\n def __init__(self, lon_0, lat_0, sigma):\n self.parameters = ParameterList([\n Parameter('lon_0', Longitude(lon_0)),\n Parameter('lat_0', Latitude(lat_0)),\n Parameter('sigma', Angle(sigma))\n ])\n\n @staticmethod\n def evaluate(lon, lat, lon_0, lat_0, sigma):\n \"\"\"Evaluate the model (static function).\"\"\"\n sep = angular_separation(lon, lat, lon_0, lat_0)\n sep = sep.to('rad').value\n sigma = sigma.to('rad').value\n\n norm = 1 / (2 * np.pi * sigma ** 2)\n exponent = -0.5 * (sep / sigma) ** 2\n val = norm * np.exp(exponent)\n\n return val * u.Unit('sr-1')\n\n\nclass SkyDisk(SkySpatialModel):\n r\"\"\"Constant radial disk model.\n\n .. math::\n\n \\phi(lon, lat) = \\frac{1}{2 \\pi (1 - \\cos{r}) } \\cdot\n \\begin{cases}\n 1 & \\text{for } \\theta \\leq r_0 \\\\\n 0 & \\text{for } \\theta < r_0\n \\end{cases}\n\n where :math:`\\theta` is the sky separation\n\n Parameters\n ----------\n lon_0 : `~astropy.coordinates.Longitude`\n :math:`lon_0`\n lat_0 : `~astropy.coordinates.Latitude`\n :math:`lat_0`\n r_0 : `~astropy.coordinates.Angle`\n :math:`r_0`\n \"\"\"\n\n def __init__(self, lon_0, lat_0, r_0):\n self.parameters = ParameterList([\n Parameter('lon_0', Longitude(lon_0)),\n Parameter('lat_0', Latitude(lat_0)),\n Parameter('r_0', Angle(r_0))\n ])\n\n @staticmethod\n def evaluate(lon, lat, lon_0, lat_0, r_0):\n \"\"\"Evaluate the model (static function).\"\"\"\n sep = angular_separation(lon, lat, lon_0, lat_0)\n sep = sep.to('rad').value\n r_0 = r_0.to('rad').value\n\n norm = 1. / (2 * np.pi * (1 - np.cos(r_0)))\n val = np.where(sep <= r_0, norm, 0)\n\n return val * u.Unit('sr-1')\n\n\nclass SkyShell(SkySpatialModel):\n r\"\"\"Shell model\n\n .. math::\n\n \\phi(lon, lat) = \\frac{3}{2 \\pi (r_{out}^3 - r_{in}^3)} \\cdot\n \\begin{cases}\n \\sqrt{r_{out}^2 - \\theta^2} - \\sqrt{r_{in}^2 - \\theta^2} &\n \\text{for } \\theta \\lt r_{in} \\\\\n \\sqrt{r_{out}^2 - \\theta^2} &\n \\text{for } r_{in} \\leq \\theta \\lt r_{out} \\\\\n 0 & \\text{for } \\theta > r_{out}\n \\end{cases}\n\n where :math:`\\theta` is the sky separation and :math:`r_out = r_in` + width\n\n Note that the normalization is a small angle approximation,\n although that approximation is still very good even for 10 deg radius shells.\n\n Parameters\n ----------\n lon_0 : `~astropy.coordinates.Longitude`\n :math:`lon_0`\n lat_0 : `~astropy.coordinates.Latitude`\n :math:`lat_0`\n radius : `~astropy.coordinates.Angle`\n Inner radius, :math:`r_{in}`\n width : `~astropy.coordinates.Angle`\n Shell width\n \"\"\"\n\n def __init__(self, lon_0, lat_0, radius, width):\n self.parameters = ParameterList([\n Parameter('lon_0', Longitude(lon_0)),\n Parameter('lat_0', Latitude(lat_0)),\n Parameter('radius', Angle(radius)),\n Parameter('width', Angle(width))\n ])\n\n @staticmethod\n def evaluate(lon, lat, lon_0, lat_0, radius, width):\n \"\"\"Evaluate the model (static function).\"\"\"\n sep = angular_separation(lon, lat, lon_0, lat_0)\n sep = sep.to('rad').value\n r_i = radius.to('rad').value\n r_o = (radius + width).to('rad').value\n\n norm = 3 / (2 * np.pi * (r_o ** 3 - r_i ** 3))\n\n with np.errstate(invalid='ignore'):\n val_out = np.sqrt(r_o ** 2 - sep ** 2)\n val_in = val_out - np.sqrt(r_i ** 2 - sep ** 2)\n val = np.select([sep < r_i, sep < r_o], [val_in, val_out])\n\n return norm * val * u.Unit('sr-1')\n\n\nclass SkyDiffuseConstant(SkySpatialModel):\n \"\"\"Spatially constant (isotropic) spatial model.\n\n Parameters\n ----------\n value : `~astropy.units.Quantity`\n Value\n \"\"\"\n\n def __init__(self, value=1):\n self.parameters = ParameterList([\n Parameter('value', value),\n ])\n\n @staticmethod\n def evaluate(lon, lat, value):\n # TODO: try fitting this -> probably the interface doesn't work?!\n return value\n\n\nclass SkyDiffuseMap(SkySpatialModel):\n \"\"\"Spatial sky map template model.\n\n At the moment only support 2D maps.\n TODO: support maps with an energy axis here or in a separate class?\n TODO: should we cache some interpolator object for efficiency?\n\n Parameters\n ----------\n map : `~gammapy.map.Map`\n Map template\n norm : `~astropy.units.Quantity`\n Norm parameter (multiplied with map values)\n meta : dict, optional\n Meta information, meta['filename'] will be used for serialization\n \"\"\"\n\n def __init__(self, map, norm=1, meta=None):\n self._map = map\n self.parameters = ParameterList([\n Parameter('norm', norm),\n ])\n self.meta = dict() if meta is None else meta\n\n @classmethod\n def read(cls, filename, **kwargs):\n \"\"\"Read spatial template model from FITS image.\n\n Parameters\n ----------\n filename : str\n FITS image filename.\n \"\"\"\n template = Map.read(filename, **kwargs)\n return cls(template)\n\n def evaluate(self, lon, lat, norm):\n coord = dict(\n lon=lon.to('deg').value,\n lat=lat.to('deg').value,\n )\n val = self._map.interp_by_coord(coord, fill_value=0)\n # TODO: use map unit? self._map.unit\n return norm * val * u.Unit('sr-1')\n", "path": "gammapy/image/models/new.py"}]}
3,860
480
gh_patches_debug_29754
rasdani/github-patches
git_diff
conan-io__conan-7690
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [question] CLI command to discover the cmake_find_package filename from the Conan package name? The `cmake_find_package` generator writes a Find Module file that defines a target in a namespace. Each of these three names (filename, namespace, target) are [configurable](https://github.com/conan-io/conan/issues/7254) in `cpp_info`. For example, ```python self.cpp_info.filename['cmake_find_package'] = 'BoostFile' self.cpp_info.names['cmake_find_package'] = 'BoostNamespace' self.cpp_info.name = 'BoostTarget' ``` will generate a file named `FindBoostFile.cmake` that is used like this: ```cmake find_package(BoostFile) target_link_libraries(${target} BoostNamespace::BoostTarget) ``` Is there a CLI command that can print out these values given a package reference? Something like ``` $ conan inspect -a cpp_info.names.cmake_find_package boost/1.73.0@ ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `conans/client/generators/markdown.py` Content: ``` 1 import os 2 import textwrap 3 4 from jinja2 import DictLoader 5 from jinja2 import Environment 6 from conans.model import Generator 7 import datetime 8 9 10 11 render_cpp_info = textwrap.dedent(""" 12 {% macro join_list_sources(items) -%} 13 ``{{ "``, ``".join(items) }}`` 14 {%- endmacro %} 15 16 {% macro render_cpp_info(cpp_info) -%} 17 {%- if cpp_info.requires is iterable and cpp_info.requires %} 18 * Requires: {{ join_list_sources(cpp_info.requires) }} 19 {%- endif %} 20 {%- if cpp_info.libs %} 21 * Libraries: {{ join_list_sources(cpp_info.libs) }} 22 {%- endif %} 23 {%- if cpp_info.system_libs %} 24 * Systems libs: {{ join_list_sources(cpp_info.system_libs) }} 25 {%- endif %} 26 {%- if cpp_info.defines %} 27 * Preprocessor definitions: {{ join_list_sources(cpp_info.defines) }} 28 {%- endif %} 29 {%- if cpp_info.cflags %} 30 * C_FLAGS: {{ join_list_sources(cpp_info.cflags) }} 31 {%- endif %} 32 {%- if cpp_info.cxxflags %} 33 * CXX_FLAGS: {{ join_list_sources(cpp_info.cxxflags) }} 34 {%- endif %} 35 {%- if cpp_info.build_modules %} 36 * Build modules (see [below](#build-modules)): {{ join_list_sources(cpp_info.build_modules) }} 37 {%- endif %} 38 {%- endmacro %} 39 """) 40 41 generator_cmake_tpl = textwrap.dedent(""" 42 ### Generator ``cmake`` 43 44 Add these lines to your *CMakeLists.txt*: 45 46 ```cmake 47 include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake) 48 conan_basic_setup(TARGETS) 49 50 target_link_libraries(<library_name> CONAN_PKG::{{ cpp_info.get_name("cmake") }}) 51 ``` 52 """) 53 54 generator_cmake_find_package_tpl = textwrap.dedent(""" 55 ### Generator ``cmake_find_package`` 56 57 Add these lines to your *CMakeLists.txt*: 58 59 ```cmake 60 find_package({{ cpp_info.get_filename("cmake_find_package") }}) 61 62 # Use the global target 63 target_link_libraries(<library_name> {{ cpp_info.get_name("cmake_find_package") }}::{{ cpp_info.get_name("cmake_find_package") }}) 64 {% if cpp_info.components %} 65 # Or link just one of its components 66 {% for cmp_name, cmp_cpp_info in cpp_info.components.items() -%} 67 target_link_libraries(<library_name> {{ cpp_info.get_name("cmake_find_package") }}::{{ cmp_cpp_info.get_name("cmake_find_package") }}) 68 {% endfor %} 69 {%- endif %} 70 ``` 71 72 Remember to adjust your build system settings to match the binaries you are linking with. You can 73 use the [CMake build helper](https://docs.conan.io/en/latest/reference/build_helpers/cmake.html) and 74 the ``cmake`` generator from a *conanfile.py* or the new [toolchain paradigm](https://docs.conan.io/en/latest/creating_packages/toolchains.html). 75 """) 76 77 generator_pkg_config_tpl = textwrap.dedent(""" 78 ### Generator ``pkg_config`` 79 80 This package provides one *pkg-config* file ``{{ cpp_info.get_filename('pkg_config') }}.pc`` with 81 all the information from the library 82 {% if cpp_info.components -%} 83 and another file for each of its components: 84 {%- for cmp_name, cmp_cpp_info in cpp_info.components.items() -%} 85 ``{{ cmp_cpp_info.get_filename('pkg_config') }}.pc``{% if not loop.last %},{% endif %} 86 {%- endfor -%} 87 {%- endif -%}. 88 Use your *pkg-config* tool as usual to consume the information provided by the Conan package. 89 """) 90 91 requirement_tpl = textwrap.dedent(""" 92 {% from 'render_cpp_info' import render_cpp_info %} 93 94 # {{ cpp_info.name }}/{{ cpp_info.version }} 95 96 --- 97 **Note.-** If this package belongs to ConanCenter, you can find more information [here](https://conan.io/center/{{ cpp_info.name }}/{{ cpp_info.version }}/). 98 99 --- 100 101 {% if requires or required_by %} 102 Graph of dependencies: 103 {% if requires %} 104 * ``{{ cpp_info.name }}`` requires: 105 {% for dep_name, dep_cpp_info in requires -%} 106 [{{ dep_name }}/{{ dep_cpp_info.version }}]({{ dep_name }}.md){% if not loop.last %}, {% endif %} 107 {%- endfor -%} 108 {%- endif %} 109 {%- if required_by %} 110 * ``{{ cpp_info.name }}`` is required by: 111 {%- for dep_name, dep_cpp_info in required_by %} 112 [{{ dep_name }}/{{ dep_cpp_info.version }}]({{ dep_name }}.md){% if not loop.last %}, {% endif %} 113 {%- endfor %} 114 {%- endif %} 115 {% endif %} 116 117 Information published by ``{{ cpp_info.name }}`` to consumers: 118 119 {%- if cpp_info.includedirs %} 120 * Headers (see [below](#header-files)) 121 {%- endif %} 122 {% if cpp_info.components %} 123 {% for cmp_name, cmp_cpp_info in cpp_info.components.items() %} 124 * Component ``{{ cpp_info.name }}::{{ cmp_name }}``: 125 {{ render_cpp_info(cmp_cpp_info)|indent(width=2) }} 126 {%- endfor %} 127 {% else %} 128 {{ render_cpp_info(cpp_info)|indent(width=0) }} 129 {% endif %} 130 131 132 ## Generators 133 134 Read below how to use this package using different 135 [generators](https://docs.conan.io/en/latest/reference/generators.html). In order to use 136 these generators they have to be listed in the _conanfile.py_ file or using the command 137 line argument ``--generator/-g`` in the ``conan install`` command. 138 139 140 {% include 'generator_cmake' %} 141 {% include 'generator_cmake_find_package' %} 142 {% include 'generator_pkg_config_tpl' %} 143 144 --- 145 ## Header files 146 147 List of header files exposed by this package. Use them in your ``#include`` directives: 148 149 ```cpp 150 {%- for header in headers %} 151 {{ header }} 152 {%- endfor %} 153 ``` 154 155 {%- if cpp_info.build_modules %} 156 --- 157 ## Build modules 158 159 Modules exported by this recipe. They are automatically included when using Conan generators: 160 161 {% for name, build_module in build_modules %} 162 **{{ name }}** 163 ``` 164 {{ build_module }} 165 ``` 166 {% endfor %} 167 {% endif %} 168 169 --- 170 --- 171 Conan **{{ conan_version }}**. JFrog LTD. [https://conan.io](https://conan.io). Autogenerated {{ now.strftime('%Y-%m-%d %H:%M:%S') }}. 172 """) 173 174 175 class MarkdownGenerator(Generator): 176 177 def _list_headers(self, cpp_info): 178 rootpath = cpp_info.rootpath 179 for include_dir in cpp_info.includedirs: 180 for root, _, files in os.walk(os.path.join(cpp_info.rootpath, include_dir)): 181 for f in files: 182 yield os.path.relpath(os.path.join(root, f), os.path.join(rootpath, include_dir)) 183 184 def _list_requires(self, cpp_info): 185 return [(it, self.conanfile.deps_cpp_info[it]) for it in cpp_info.public_deps] 186 187 def _list_required_by(self, cpp_info): 188 for other_name, other_cpp_info in self.conanfile.deps_cpp_info.dependencies: 189 if cpp_info.name in other_cpp_info.public_deps: 190 yield other_name, other_cpp_info 191 192 def _read_build_modules(self, cpp_info): 193 for build_module in cpp_info.build_modules: 194 filename = os.path.join(cpp_info.rootpath, build_module) 195 yield build_module, open(filename, 'r').read() 196 197 @property 198 def filename(self): 199 pass 200 201 @property 202 def content(self): 203 dict_loader = DictLoader({ 204 'render_cpp_info': render_cpp_info, 205 'package.md': requirement_tpl, 206 'generator_cmake': generator_cmake_tpl, 207 'generator_cmake_find_package': generator_cmake_find_package_tpl, 208 'generator_pkg_config_tpl': generator_pkg_config_tpl, 209 }) 210 env = Environment(loader=dict_loader) 211 template = env.get_template('package.md') 212 213 from conans import __version__ as conan_version 214 ret = {} 215 for name, cpp_info in self.conanfile.deps_cpp_info.dependencies: 216 ret["{}.md".format(name)] = template.render( 217 cpp_info=cpp_info, 218 headers=self._list_headers(cpp_info), 219 requires=list(self._list_requires(cpp_info)), 220 required_by=list(self._list_required_by(cpp_info)), 221 build_modules=self._read_build_modules(cpp_info), 222 conan_version=conan_version, 223 now=datetime.datetime.now() 224 ) 225 return ret 226 ``` Path: `conans/client/generators/json_generator.py` Content: ``` 1 import json 2 3 from conans.model import Generator 4 5 6 def serialize_cpp_info(cpp_info): 7 keys = [ 8 "version", 9 "description", 10 "rootpath", 11 "sysroot", 12 "include_paths", "lib_paths", "bin_paths", "build_paths", "res_paths", 13 "libs", 14 "system_libs", 15 "defines", "cflags", "cxxflags", "sharedlinkflags", "exelinkflags", 16 "frameworks", "framework_paths" 17 ] 18 res = {} 19 for key in keys: 20 res[key] = getattr(cpp_info, key) 21 res["cppflags"] = cpp_info.cxxflags # Backwards compatibility 22 return res 23 24 25 def serialize_user_info(user_info): 26 res = {} 27 for key, value in user_info.items(): 28 res[key] = value.vars 29 return res 30 31 32 class JsonGenerator(Generator): 33 @property 34 def filename(self): 35 return "conanbuildinfo.json" 36 37 @property 38 def content(self): 39 info = {} 40 info["deps_env_info"] = self.deps_env_info.vars 41 info["deps_user_info"] = serialize_user_info(self.deps_user_info) 42 info["dependencies"] = self.get_dependencies_info() 43 info["settings"] = self.get_settings() 44 info["options"] = self.get_options() 45 if self._user_info_build: 46 info["user_info_build"] = serialize_user_info(self._user_info_build) 47 48 return json.dumps(info, indent=2) 49 50 def get_dependencies_info(self): 51 res = [] 52 for depname, cpp_info in self.deps_build_info.dependencies: 53 serialized_info = serialize_cpp_info(cpp_info) 54 serialized_info["name"] = depname 55 for cfg, cfg_cpp_info in cpp_info.configs.items(): 56 serialized_info.setdefault("configs", {})[cfg] = serialize_cpp_info(cfg_cpp_info) 57 res.append(serialized_info) 58 return res 59 60 def get_settings(self): 61 settings = {} 62 for key, value in self.settings.items(): 63 settings[key] = value 64 return settings 65 66 def get_options(self): 67 options = {} 68 for req in self.conanfile.requires: 69 options[req] = {} 70 for key, value in self.conanfile.options[req].items(): 71 options[req][key] = value 72 return options 73 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/conans/client/generators/json_generator.py b/conans/client/generators/json_generator.py --- a/conans/client/generators/json_generator.py +++ b/conans/client/generators/json_generator.py @@ -13,7 +13,7 @@ "libs", "system_libs", "defines", "cflags", "cxxflags", "sharedlinkflags", "exelinkflags", - "frameworks", "framework_paths" + "frameworks", "framework_paths", "names", "filenames" ] res = {} for key in keys: diff --git a/conans/client/generators/markdown.py b/conans/client/generators/markdown.py --- a/conans/client/generators/markdown.py +++ b/conans/client/generators/markdown.py @@ -53,18 +53,21 @@ generator_cmake_find_package_tpl = textwrap.dedent(""" ### Generator ``cmake_find_package`` + {% set cmake_find_package_name = cpp_info.get_name("cmake_find_package") %} + {% set cmake_find_package_filename = cpp_info.get_filename("cmake_find_package") %} + Generates the file Find{{ cmake_find_package_filename }}.cmake Add these lines to your *CMakeLists.txt*: ```cmake - find_package({{ cpp_info.get_filename("cmake_find_package") }}) + find_package({{ cmake_find_package_filename }}) # Use the global target - target_link_libraries(<library_name> {{ cpp_info.get_name("cmake_find_package") }}::{{ cpp_info.get_name("cmake_find_package") }}) + target_link_libraries(<library_name> {{ cmake_find_package_name }}::{{ cmake_find_package_name }}) {% if cpp_info.components %} # Or link just one of its components {% for cmp_name, cmp_cpp_info in cpp_info.components.items() -%} - target_link_libraries(<library_name> {{ cpp_info.get_name("cmake_find_package") }}::{{ cmp_cpp_info.get_name("cmake_find_package") }}) + target_link_libraries(<library_name> {{ cmake_find_package_name }}::{{ cmp_cpp_info.get_name("cmake_find_package") }}) {% endfor %} {%- endif %} ```
{"golden_diff": "diff --git a/conans/client/generators/json_generator.py b/conans/client/generators/json_generator.py\n--- a/conans/client/generators/json_generator.py\n+++ b/conans/client/generators/json_generator.py\n@@ -13,7 +13,7 @@\n \"libs\",\n \"system_libs\",\n \"defines\", \"cflags\", \"cxxflags\", \"sharedlinkflags\", \"exelinkflags\",\n- \"frameworks\", \"framework_paths\"\n+ \"frameworks\", \"framework_paths\", \"names\", \"filenames\"\n ]\n res = {}\n for key in keys:\ndiff --git a/conans/client/generators/markdown.py b/conans/client/generators/markdown.py\n--- a/conans/client/generators/markdown.py\n+++ b/conans/client/generators/markdown.py\n@@ -53,18 +53,21 @@\n \n generator_cmake_find_package_tpl = textwrap.dedent(\"\"\"\n ### Generator ``cmake_find_package``\n+ {% set cmake_find_package_name = cpp_info.get_name(\"cmake_find_package\") %}\n+ {% set cmake_find_package_filename = cpp_info.get_filename(\"cmake_find_package\") %}\n+ Generates the file Find{{ cmake_find_package_filename }}.cmake\n \n Add these lines to your *CMakeLists.txt*:\n \n ```cmake\n- find_package({{ cpp_info.get_filename(\"cmake_find_package\") }})\n+ find_package({{ cmake_find_package_filename }})\n \n # Use the global target\n- target_link_libraries(<library_name> {{ cpp_info.get_name(\"cmake_find_package\") }}::{{ cpp_info.get_name(\"cmake_find_package\") }})\n+ target_link_libraries(<library_name> {{ cmake_find_package_name }}::{{ cmake_find_package_name }})\n {% if cpp_info.components %}\n # Or link just one of its components\n {% for cmp_name, cmp_cpp_info in cpp_info.components.items() -%}\n- target_link_libraries(<library_name> {{ cpp_info.get_name(\"cmake_find_package\") }}::{{ cmp_cpp_info.get_name(\"cmake_find_package\") }})\n+ target_link_libraries(<library_name> {{ cmake_find_package_name }}::{{ cmp_cpp_info.get_name(\"cmake_find_package\") }})\n {% endfor %}\n {%- endif %}\n ```\n", "issue": "[question] CLI command to discover the cmake_find_package filename from the Conan package name?\nThe `cmake_find_package` generator writes a Find Module file that defines a target in a namespace. Each of these three names (filename, namespace, target) are [configurable](https://github.com/conan-io/conan/issues/7254) in `cpp_info`. For example,\r\n\r\n```python\r\n self.cpp_info.filename['cmake_find_package'] = 'BoostFile'\r\n self.cpp_info.names['cmake_find_package'] = 'BoostNamespace'\r\n self.cpp_info.name = 'BoostTarget'\r\n```\r\n\r\nwill generate a file named `FindBoostFile.cmake` that is used like this:\r\n\r\n```cmake\r\nfind_package(BoostFile)\r\ntarget_link_libraries(${target} BoostNamespace::BoostTarget)\r\n```\r\n\r\nIs there a CLI command that can print out these values given a package reference? Something like\r\n\r\n```\r\n$ conan inspect -a cpp_info.names.cmake_find_package boost/1.73.0@\r\n```\n", "before_files": [{"content": "import os\nimport textwrap\n\nfrom jinja2 import DictLoader\nfrom jinja2 import Environment\nfrom conans.model import Generator\nimport datetime\n\n\n\nrender_cpp_info = textwrap.dedent(\"\"\"\n {% macro join_list_sources(items) -%}\n ``{{ \"``, ``\".join(items) }}``\n {%- endmacro %}\n\n {% macro render_cpp_info(cpp_info) -%}\n {%- if cpp_info.requires is iterable and cpp_info.requires %}\n * Requires: {{ join_list_sources(cpp_info.requires) }}\n {%- endif %}\n {%- if cpp_info.libs %}\n * Libraries: {{ join_list_sources(cpp_info.libs) }}\n {%- endif %}\n {%- if cpp_info.system_libs %}\n * Systems libs: {{ join_list_sources(cpp_info.system_libs) }}\n {%- endif %}\n {%- if cpp_info.defines %}\n * Preprocessor definitions: {{ join_list_sources(cpp_info.defines) }}\n {%- endif %}\n {%- if cpp_info.cflags %}\n * C_FLAGS: {{ join_list_sources(cpp_info.cflags) }}\n {%- endif %}\n {%- if cpp_info.cxxflags %}\n * CXX_FLAGS: {{ join_list_sources(cpp_info.cxxflags) }}\n {%- endif %}\n {%- if cpp_info.build_modules %}\n * Build modules (see [below](#build-modules)): {{ join_list_sources(cpp_info.build_modules) }}\n {%- endif %}\n {%- endmacro %}\n\"\"\")\n\ngenerator_cmake_tpl = textwrap.dedent(\"\"\"\n ### Generator ``cmake``\n\n Add these lines to your *CMakeLists.txt*:\n\n ```cmake\n include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)\n conan_basic_setup(TARGETS)\n\n target_link_libraries(<library_name> CONAN_PKG::{{ cpp_info.get_name(\"cmake\") }})\n ```\n\"\"\")\n\ngenerator_cmake_find_package_tpl = textwrap.dedent(\"\"\"\n ### Generator ``cmake_find_package``\n\n Add these lines to your *CMakeLists.txt*:\n\n ```cmake\n find_package({{ cpp_info.get_filename(\"cmake_find_package\") }})\n\n # Use the global target\n target_link_libraries(<library_name> {{ cpp_info.get_name(\"cmake_find_package\") }}::{{ cpp_info.get_name(\"cmake_find_package\") }})\n {% if cpp_info.components %}\n # Or link just one of its components\n {% for cmp_name, cmp_cpp_info in cpp_info.components.items() -%}\n target_link_libraries(<library_name> {{ cpp_info.get_name(\"cmake_find_package\") }}::{{ cmp_cpp_info.get_name(\"cmake_find_package\") }})\n {% endfor %}\n {%- endif %}\n ```\n\n Remember to adjust your build system settings to match the binaries you are linking with. You can\n use the [CMake build helper](https://docs.conan.io/en/latest/reference/build_helpers/cmake.html) and\n the ``cmake`` generator from a *conanfile.py* or the new [toolchain paradigm](https://docs.conan.io/en/latest/creating_packages/toolchains.html).\n\"\"\")\n\ngenerator_pkg_config_tpl = textwrap.dedent(\"\"\"\n ### Generator ``pkg_config``\n\n This package provides one *pkg-config* file ``{{ cpp_info.get_filename('pkg_config') }}.pc`` with\n all the information from the library\n {% if cpp_info.components -%}\n and another file for each of its components:\n {%- for cmp_name, cmp_cpp_info in cpp_info.components.items() -%}\n ``{{ cmp_cpp_info.get_filename('pkg_config') }}.pc``{% if not loop.last %},{% endif %}\n {%- endfor -%}\n {%- endif -%}.\n Use your *pkg-config* tool as usual to consume the information provided by the Conan package.\n\"\"\")\n\nrequirement_tpl = textwrap.dedent(\"\"\"\n {% from 'render_cpp_info' import render_cpp_info %}\n\n # {{ cpp_info.name }}/{{ cpp_info.version }}\n\n ---\n **Note.-** If this package belongs to ConanCenter, you can find more information [here](https://conan.io/center/{{ cpp_info.name }}/{{ cpp_info.version }}/).\n\n ---\n\n {% if requires or required_by %}\n Graph of dependencies:\n {% if requires %}\n * ``{{ cpp_info.name }}`` requires:\n {% for dep_name, dep_cpp_info in requires -%}\n [{{ dep_name }}/{{ dep_cpp_info.version }}]({{ dep_name }}.md){% if not loop.last %}, {% endif %}\n {%- endfor -%}\n {%- endif %}\n {%- if required_by %}\n * ``{{ cpp_info.name }}`` is required by:\n {%- for dep_name, dep_cpp_info in required_by %}\n [{{ dep_name }}/{{ dep_cpp_info.version }}]({{ dep_name }}.md){% if not loop.last %}, {% endif %}\n {%- endfor %}\n {%- endif %}\n {% endif %}\n\n Information published by ``{{ cpp_info.name }}`` to consumers:\n\n {%- if cpp_info.includedirs %}\n * Headers (see [below](#header-files))\n {%- endif %}\n {% if cpp_info.components %}\n {% for cmp_name, cmp_cpp_info in cpp_info.components.items() %}\n * Component ``{{ cpp_info.name }}::{{ cmp_name }}``:\n {{ render_cpp_info(cmp_cpp_info)|indent(width=2) }}\n {%- endfor %}\n {% else %}\n {{ render_cpp_info(cpp_info)|indent(width=0) }}\n {% endif %}\n\n\n ## Generators\n\n Read below how to use this package using different\n [generators](https://docs.conan.io/en/latest/reference/generators.html). In order to use\n these generators they have to be listed in the _conanfile.py_ file or using the command\n line argument ``--generator/-g`` in the ``conan install`` command.\n\n\n {% include 'generator_cmake' %}\n {% include 'generator_cmake_find_package' %}\n {% include 'generator_pkg_config_tpl' %}\n\n ---\n ## Header files\n\n List of header files exposed by this package. Use them in your ``#include`` directives:\n\n ```cpp\n {%- for header in headers %}\n {{ header }}\n {%- endfor %}\n ```\n\n {%- if cpp_info.build_modules %}\n ---\n ## Build modules\n\n Modules exported by this recipe. They are automatically included when using Conan generators:\n\n {% for name, build_module in build_modules %}\n **{{ name }}**\n ```\n {{ build_module }}\n ```\n {% endfor %}\n {% endif %}\n\n ---\n ---\n Conan **{{ conan_version }}**. JFrog LTD. [https://conan.io](https://conan.io). Autogenerated {{ now.strftime('%Y-%m-%d %H:%M:%S') }}.\n\"\"\")\n\n\nclass MarkdownGenerator(Generator):\n\n def _list_headers(self, cpp_info):\n rootpath = cpp_info.rootpath\n for include_dir in cpp_info.includedirs:\n for root, _, files in os.walk(os.path.join(cpp_info.rootpath, include_dir)):\n for f in files:\n yield os.path.relpath(os.path.join(root, f), os.path.join(rootpath, include_dir))\n\n def _list_requires(self, cpp_info):\n return [(it, self.conanfile.deps_cpp_info[it]) for it in cpp_info.public_deps]\n\n def _list_required_by(self, cpp_info):\n for other_name, other_cpp_info in self.conanfile.deps_cpp_info.dependencies:\n if cpp_info.name in other_cpp_info.public_deps:\n yield other_name, other_cpp_info\n\n def _read_build_modules(self, cpp_info):\n for build_module in cpp_info.build_modules:\n filename = os.path.join(cpp_info.rootpath, build_module)\n yield build_module, open(filename, 'r').read()\n\n @property\n def filename(self):\n pass\n\n @property\n def content(self):\n dict_loader = DictLoader({\n 'render_cpp_info': render_cpp_info,\n 'package.md': requirement_tpl,\n 'generator_cmake': generator_cmake_tpl,\n 'generator_cmake_find_package': generator_cmake_find_package_tpl,\n 'generator_pkg_config_tpl': generator_pkg_config_tpl,\n })\n env = Environment(loader=dict_loader)\n template = env.get_template('package.md')\n\n from conans import __version__ as conan_version\n ret = {}\n for name, cpp_info in self.conanfile.deps_cpp_info.dependencies:\n ret[\"{}.md\".format(name)] = template.render(\n cpp_info=cpp_info,\n headers=self._list_headers(cpp_info),\n requires=list(self._list_requires(cpp_info)),\n required_by=list(self._list_required_by(cpp_info)),\n build_modules=self._read_build_modules(cpp_info),\n conan_version=conan_version,\n now=datetime.datetime.now()\n )\n return ret\n", "path": "conans/client/generators/markdown.py"}, {"content": "import json\n\nfrom conans.model import Generator\n\n\ndef serialize_cpp_info(cpp_info):\n keys = [\n \"version\",\n \"description\",\n \"rootpath\",\n \"sysroot\",\n \"include_paths\", \"lib_paths\", \"bin_paths\", \"build_paths\", \"res_paths\",\n \"libs\",\n \"system_libs\",\n \"defines\", \"cflags\", \"cxxflags\", \"sharedlinkflags\", \"exelinkflags\",\n \"frameworks\", \"framework_paths\"\n ]\n res = {}\n for key in keys:\n res[key] = getattr(cpp_info, key)\n res[\"cppflags\"] = cpp_info.cxxflags # Backwards compatibility\n return res\n\n\ndef serialize_user_info(user_info):\n res = {}\n for key, value in user_info.items():\n res[key] = value.vars\n return res\n\n\nclass JsonGenerator(Generator):\n @property\n def filename(self):\n return \"conanbuildinfo.json\"\n\n @property\n def content(self):\n info = {}\n info[\"deps_env_info\"] = self.deps_env_info.vars\n info[\"deps_user_info\"] = serialize_user_info(self.deps_user_info)\n info[\"dependencies\"] = self.get_dependencies_info()\n info[\"settings\"] = self.get_settings()\n info[\"options\"] = self.get_options()\n if self._user_info_build:\n info[\"user_info_build\"] = serialize_user_info(self._user_info_build)\n\n return json.dumps(info, indent=2)\n\n def get_dependencies_info(self):\n res = []\n for depname, cpp_info in self.deps_build_info.dependencies:\n serialized_info = serialize_cpp_info(cpp_info)\n serialized_info[\"name\"] = depname\n for cfg, cfg_cpp_info in cpp_info.configs.items():\n serialized_info.setdefault(\"configs\", {})[cfg] = serialize_cpp_info(cfg_cpp_info)\n res.append(serialized_info)\n return res\n\n def get_settings(self):\n settings = {}\n for key, value in self.settings.items():\n settings[key] = value\n return settings\n\n def get_options(self):\n options = {}\n for req in self.conanfile.requires:\n options[req] = {}\n for key, value in self.conanfile.options[req].items():\n options[req][key] = value\n return options\n", "path": "conans/client/generators/json_generator.py"}], "after_files": [{"content": "import os\nimport textwrap\n\nfrom jinja2 import DictLoader\nfrom jinja2 import Environment\nfrom conans.model import Generator\nimport datetime\n\n\n\nrender_cpp_info = textwrap.dedent(\"\"\"\n {% macro join_list_sources(items) -%}\n ``{{ \"``, ``\".join(items) }}``\n {%- endmacro %}\n\n {% macro render_cpp_info(cpp_info) -%}\n {%- if cpp_info.requires is iterable and cpp_info.requires %}\n * Requires: {{ join_list_sources(cpp_info.requires) }}\n {%- endif %}\n {%- if cpp_info.libs %}\n * Libraries: {{ join_list_sources(cpp_info.libs) }}\n {%- endif %}\n {%- if cpp_info.system_libs %}\n * Systems libs: {{ join_list_sources(cpp_info.system_libs) }}\n {%- endif %}\n {%- if cpp_info.defines %}\n * Preprocessor definitions: {{ join_list_sources(cpp_info.defines) }}\n {%- endif %}\n {%- if cpp_info.cflags %}\n * C_FLAGS: {{ join_list_sources(cpp_info.cflags) }}\n {%- endif %}\n {%- if cpp_info.cxxflags %}\n * CXX_FLAGS: {{ join_list_sources(cpp_info.cxxflags) }}\n {%- endif %}\n {%- if cpp_info.build_modules %}\n * Build modules (see [below](#build-modules)): {{ join_list_sources(cpp_info.build_modules) }}\n {%- endif %}\n {%- endmacro %}\n\"\"\")\n\ngenerator_cmake_tpl = textwrap.dedent(\"\"\"\n ### Generator ``cmake``\n\n Add these lines to your *CMakeLists.txt*:\n\n ```cmake\n include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)\n conan_basic_setup(TARGETS)\n\n target_link_libraries(<library_name> CONAN_PKG::{{ cpp_info.get_name(\"cmake\") }})\n ```\n\"\"\")\n\ngenerator_cmake_find_package_tpl = textwrap.dedent(\"\"\"\n ### Generator ``cmake_find_package``\n {% set cmake_find_package_name = cpp_info.get_name(\"cmake_find_package\") %}\n {% set cmake_find_package_filename = cpp_info.get_filename(\"cmake_find_package\") %}\n Generates the file Find{{ cmake_find_package_filename }}.cmake\n\n Add these lines to your *CMakeLists.txt*:\n\n ```cmake\n find_package({{ cmake_find_package_filename }})\n\n # Use the global target\n target_link_libraries(<library_name> {{ cmake_find_package_name }}::{{ cmake_find_package_name }})\n {% if cpp_info.components %}\n # Or link just one of its components\n {% for cmp_name, cmp_cpp_info in cpp_info.components.items() -%}\n target_link_libraries(<library_name> {{ cmake_find_package_name }}::{{ cmp_cpp_info.get_name(\"cmake_find_package\") }})\n {% endfor %}\n {%- endif %}\n ```\n\n Remember to adjust your build system settings to match the binaries you are linking with. You can\n use the [CMake build helper](https://docs.conan.io/en/latest/reference/build_helpers/cmake.html) and\n the ``cmake`` generator from a *conanfile.py* or the new [toolchain paradigm](https://docs.conan.io/en/latest/creating_packages/toolchains.html).\n\"\"\")\n\ngenerator_pkg_config_tpl = textwrap.dedent(\"\"\"\n ### Generator ``pkg_config``\n\n This package provides one *pkg-config* file ``{{ cpp_info.get_filename('pkg_config') }}.pc`` with\n all the information from the library\n {% if cpp_info.components -%}\n and another file for each of its components:\n {%- for cmp_name, cmp_cpp_info in cpp_info.components.items() -%}\n ``{{ cmp_cpp_info.get_filename('pkg_config') }}.pc``{% if not loop.last %},{% endif %}\n {%- endfor -%}\n {%- endif -%}.\n Use your *pkg-config* tool as usual to consume the information provided by the Conan package.\n\"\"\")\n\nrequirement_tpl = textwrap.dedent(\"\"\"\n {% from 'render_cpp_info' import render_cpp_info %}\n\n # {{ cpp_info.name }}/{{ cpp_info.version }}\n\n ---\n **Note.-** If this package belongs to ConanCenter, you can find more information [here](https://conan.io/center/{{ cpp_info.name }}/{{ cpp_info.version }}/).\n\n ---\n\n {% if requires or required_by %}\n Graph of dependencies:\n {% if requires %}\n * ``{{ cpp_info.name }}`` requires:\n {% for dep_name, dep_cpp_info in requires -%}\n [{{ dep_name }}/{{ dep_cpp_info.version }}]({{ dep_name }}.md){% if not loop.last %}, {% endif %}\n {%- endfor -%}\n {%- endif %}\n {%- if required_by %}\n * ``{{ cpp_info.name }}`` is required by:\n {%- for dep_name, dep_cpp_info in required_by %}\n [{{ dep_name }}/{{ dep_cpp_info.version }}]({{ dep_name }}.md){% if not loop.last %}, {% endif %}\n {%- endfor %}\n {%- endif %}\n {% endif %}\n\n Information published by ``{{ cpp_info.name }}`` to consumers:\n\n {%- if cpp_info.includedirs %}\n * Headers (see [below](#header-files))\n {%- endif %}\n {% if cpp_info.components %}\n {% for cmp_name, cmp_cpp_info in cpp_info.components.items() %}\n * Component ``{{ cpp_info.name }}::{{ cmp_name }}``:\n {{ render_cpp_info(cmp_cpp_info)|indent(width=2) }}\n {%- endfor %}\n {% else %}\n {{ render_cpp_info(cpp_info)|indent(width=0) }}\n {% endif %}\n\n\n ## Generators\n\n Read below how to use this package using different\n [generators](https://docs.conan.io/en/latest/reference/generators.html). In order to use\n these generators they have to be listed in the _conanfile.py_ file or using the command\n line argument ``--generator/-g`` in the ``conan install`` command.\n\n\n {% include 'generator_cmake' %}\n {% include 'generator_cmake_find_package' %}\n {% include 'generator_pkg_config_tpl' %}\n\n ---\n ## Header files\n\n List of header files exposed by this package. Use them in your ``#include`` directives:\n\n ```cpp\n {%- for header in headers %}\n {{ header }}\n {%- endfor %}\n ```\n\n {%- if cpp_info.build_modules %}\n ---\n ## Build modules\n\n Modules exported by this recipe. They are automatically included when using Conan generators:\n\n {% for name, build_module in build_modules %}\n **{{ name }}**\n ```\n {{ build_module }}\n ```\n {% endfor %}\n {% endif %}\n\n ---\n ---\n Conan **{{ conan_version }}**. JFrog LTD. [https://conan.io](https://conan.io). Autogenerated {{ now.strftime('%Y-%m-%d %H:%M:%S') }}.\n\"\"\")\n\n\nclass MarkdownGenerator(Generator):\n\n def _list_headers(self, cpp_info):\n rootpath = cpp_info.rootpath\n for include_dir in cpp_info.includedirs:\n for root, _, files in os.walk(os.path.join(cpp_info.rootpath, include_dir)):\n for f in files:\n yield os.path.relpath(os.path.join(root, f), os.path.join(rootpath, include_dir))\n\n def _list_requires(self, cpp_info):\n return [(it, self.conanfile.deps_cpp_info[it]) for it in cpp_info.public_deps]\n\n def _list_required_by(self, cpp_info):\n for other_name, other_cpp_info in self.conanfile.deps_cpp_info.dependencies:\n if cpp_info.name in other_cpp_info.public_deps:\n yield other_name, other_cpp_info\n\n def _read_build_modules(self, cpp_info):\n for build_module in cpp_info.build_modules:\n filename = os.path.join(cpp_info.rootpath, build_module)\n yield build_module, open(filename, 'r').read()\n\n @property\n def filename(self):\n pass\n\n @property\n def content(self):\n dict_loader = DictLoader({\n 'render_cpp_info': render_cpp_info,\n 'package.md': requirement_tpl,\n 'generator_cmake': generator_cmake_tpl,\n 'generator_cmake_find_package': generator_cmake_find_package_tpl,\n 'generator_pkg_config_tpl': generator_pkg_config_tpl,\n })\n env = Environment(loader=dict_loader)\n template = env.get_template('package.md')\n\n from conans import __version__ as conan_version\n ret = {}\n for name, cpp_info in self.conanfile.deps_cpp_info.dependencies:\n ret[\"{}.md\".format(name)] = template.render(\n cpp_info=cpp_info,\n headers=self._list_headers(cpp_info),\n requires=list(self._list_requires(cpp_info)),\n required_by=list(self._list_required_by(cpp_info)),\n build_modules=self._read_build_modules(cpp_info),\n conan_version=conan_version,\n now=datetime.datetime.now()\n )\n return ret\n", "path": "conans/client/generators/markdown.py"}, {"content": "import json\n\nfrom conans.model import Generator\n\n\ndef serialize_cpp_info(cpp_info):\n keys = [\n \"version\",\n \"description\",\n \"rootpath\",\n \"sysroot\",\n \"include_paths\", \"lib_paths\", \"bin_paths\", \"build_paths\", \"res_paths\",\n \"libs\",\n \"system_libs\",\n \"defines\", \"cflags\", \"cxxflags\", \"sharedlinkflags\", \"exelinkflags\",\n \"frameworks\", \"framework_paths\", \"names\", \"filenames\"\n ]\n res = {}\n for key in keys:\n res[key] = getattr(cpp_info, key)\n res[\"cppflags\"] = cpp_info.cxxflags # Backwards compatibility\n return res\n\n\ndef serialize_user_info(user_info):\n res = {}\n for key, value in user_info.items():\n res[key] = value.vars\n return res\n\n\nclass JsonGenerator(Generator):\n @property\n def filename(self):\n return \"conanbuildinfo.json\"\n\n @property\n def content(self):\n info = {}\n info[\"deps_env_info\"] = self.deps_env_info.vars\n info[\"deps_user_info\"] = serialize_user_info(self.deps_user_info)\n info[\"dependencies\"] = self.get_dependencies_info()\n info[\"settings\"] = self.get_settings()\n info[\"options\"] = self.get_options()\n if self._user_info_build:\n info[\"user_info_build\"] = serialize_user_info(self._user_info_build)\n\n return json.dumps(info, indent=2)\n\n def get_dependencies_info(self):\n res = []\n for depname, cpp_info in self.deps_build_info.dependencies:\n serialized_info = serialize_cpp_info(cpp_info)\n serialized_info[\"name\"] = depname\n for cfg, cfg_cpp_info in cpp_info.configs.items():\n serialized_info.setdefault(\"configs\", {})[cfg] = serialize_cpp_info(cfg_cpp_info)\n res.append(serialized_info)\n return res\n\n def get_settings(self):\n settings = {}\n for key, value in self.settings.items():\n settings[key] = value\n return settings\n\n def get_options(self):\n options = {}\n for req in self.conanfile.requires:\n options[req] = {}\n for key, value in self.conanfile.options[req].items():\n options[req][key] = value\n return options\n", "path": "conans/client/generators/json_generator.py"}]}
3,676
505
gh_patches_debug_63302
rasdani/github-patches
git_diff
scikit-hep__pyhf-915
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- cloudpickle v1.5.0 breaks testing # Description With the release of [`cloudpickle` `v1.5.0`](https://pypi.org/project/cloudpickle/1.5.0/) on 2020-07-01 the CI is broken in testing as the following error is raised ```pytb ImportError while loading conftest '/home/runner/work/pyhf/pyhf/tests/conftest.py'. tests/conftest.py:83: in <module> (pyhf.tensor.tensorflow_backend(), None), src/pyhf/tensor/__init__.py:44: in __getattr__ e, E pyhf.exceptions.ImportBackendError: ('There was a problem importing TensorFlow. The tensorflow backend cannot be used.', ImportError("cannot import name 'CloudPickler' from 'cloudpickle.cloudpickle' (/opt/hostedtoolcache/Python/3.7.7/x64/lib/python3.7/site-packages/cloudpickle/cloudpickle.py)")) ##[error]Process completed with exit code 4. ``` `cloudpickle` is a required dependency of TensorFlow Probability and in TFP `v0.10.0` it is set to [`cloudpickle >= 1.2.2`](https://github.com/tensorflow/probability/blob/f051e03dd3cc847d31061803c2b31c564562a993/setup.py#L34). This has been reported in: - [TensorFlow Probability Issue 991](https://github.com/tensorflow/probability/issues/991) - [`cloudpickle` Issue 390](https://github.com/cloudpipe/cloudpickle/issues/390) # Expected Behavior For no error to be raised # Actual Behavior c.f. above # Steps to Reproduce This was found in CI, but the minimal test case is just to install TensorFlow and TensorFlow Probability and then try to import TFP: ``` $ python -m pip install tensorflow tensorflow-probability $ python -c "import tensorflow_probability" Traceback (most recent call last): File "<string>", line 1, in <module> File "/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/tensorflow_probability/__init__.py", line 76, in <module> from tensorflow_probability.python import * # pylint: disable=wildcard-import File "/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/tensorflow_probability/python/__init__.py", line 23, in <module> from tensorflow_probability.python import distributions File "/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/tensorflow_probability/python/distributions/__init__.py", line 88, in <module> from tensorflow_probability.python.distributions.pixel_cnn import PixelCNN File "/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/tensorflow_probability/python/distributions/pixel_cnn.py", line 37, in <module> from tensorflow_probability.python.layers import weight_norm File "/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/tensorflow_probability/python/layers/__init__.py", line 31, in <module> from tensorflow_probability.python.layers.distribution_layer import CategoricalMixtureOfOneHotCategorical File "/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/tensorflow_probability/python/layers/distribution_layer.py", line 28, in <module> from cloudpickle.cloudpickle import CloudPickler ImportError: cannot import name 'CloudPickler' from 'cloudpickle.cloudpickle' (/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/cloudpickle/cloudpickle.py) $ pip list | grep cloudpickle cloudpickle 1.5.0 ``` # Checklist - [x] Run `git fetch` to get the most up to date version of `master` - [x] Searched through existing Issues to confirm this is not a duplicate issue - [x] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 from setuptools import setup 2 3 extras_require = { 4 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'], 5 'torch': ['torch~=1.2'], 6 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'], 7 'xmlio': ['uproot'], 8 'minuit': ['iminuit'], 9 } 10 extras_require['backends'] = sorted( 11 set( 12 extras_require['tensorflow'] 13 + extras_require['torch'] 14 + extras_require['jax'] 15 + extras_require['minuit'] 16 ) 17 ) 18 extras_require['contrib'] = sorted(set(['matplotlib'])) 19 extras_require['lint'] = sorted(set(['pyflakes', 'black'])) 20 21 extras_require['test'] = sorted( 22 set( 23 extras_require['backends'] 24 + extras_require['xmlio'] 25 + extras_require['contrib'] 26 + [ 27 'pytest~=3.5', 28 'pytest-cov>=2.5.1', 29 'pytest-mock', 30 'pytest-benchmark[histogram]', 31 'pytest-console-scripts', 32 'pytest-mpl', 33 'pydocstyle', 34 'coverage>=4.0', # coveralls 35 'papermill~=2.0', 36 'nteract-scrapbook~=0.2', 37 'jupyter', 38 'uproot~=3.3', 39 'graphviz', 40 'jsonpatch', 41 ] 42 ) 43 ) 44 extras_require['docs'] = sorted( 45 set( 46 [ 47 'sphinx~=3.0.0', # Sphinx v3.1.X regressions break docs 48 'sphinxcontrib-bibtex', 49 'sphinx-click', 50 'sphinx_rtd_theme', 51 'nbsphinx', 52 'ipywidgets', 53 'sphinx-issues', 54 'sphinx-copybutton>0.2.9', 55 ] 56 ) 57 ) 58 extras_require['develop'] = sorted( 59 set( 60 extras_require['docs'] 61 + extras_require['lint'] 62 + extras_require['test'] 63 + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'check-manifest', 'twine'] 64 ) 65 ) 66 extras_require['complete'] = sorted(set(sum(extras_require.values(), []))) 67 68 69 setup( 70 extras_require=extras_require, 71 use_scm_version=lambda: {'local_scheme': lambda version: ''}, 72 ) 73 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -1,7 +1,11 @@ from setuptools import setup extras_require = { - 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'], + 'tensorflow': [ + 'tensorflow~=2.0', + 'tensorflow-probability~=0.8', + 'cloudpickle!=1.5.0', # TODO: Temp patch until tfp v0.11 + ], 'torch': ['torch~=1.2'], 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'], 'xmlio': ['uproot'],
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,7 +1,11 @@\n from setuptools import setup\n \n extras_require = {\n- 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],\n+ 'tensorflow': [\n+ 'tensorflow~=2.0',\n+ 'tensorflow-probability~=0.8',\n+ 'cloudpickle!=1.5.0', # TODO: Temp patch until tfp v0.11\n+ ],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot'],\n", "issue": "cloudpickle v1.5.0 breaks testing\n# Description\r\n\r\nWith the release of [`cloudpickle` `v1.5.0`](https://pypi.org/project/cloudpickle/1.5.0/) on 2020-07-01 the CI is broken in testing as the following error is raised\r\n\r\n```pytb\r\nImportError while loading conftest '/home/runner/work/pyhf/pyhf/tests/conftest.py'.\r\ntests/conftest.py:83: in <module>\r\n (pyhf.tensor.tensorflow_backend(), None),\r\nsrc/pyhf/tensor/__init__.py:44: in __getattr__\r\n e,\r\nE pyhf.exceptions.ImportBackendError: ('There was a problem importing TensorFlow. The tensorflow backend cannot be used.', ImportError(\"cannot import name 'CloudPickler' from 'cloudpickle.cloudpickle' (/opt/hostedtoolcache/Python/3.7.7/x64/lib/python3.7/site-packages/cloudpickle/cloudpickle.py)\"))\r\n##[error]Process completed with exit code 4.\r\n```\r\n\r\n`cloudpickle` is a required dependency of TensorFlow Probability and in TFP `v0.10.0` it is set to [`cloudpickle >= 1.2.2`](https://github.com/tensorflow/probability/blob/f051e03dd3cc847d31061803c2b31c564562a993/setup.py#L34).\r\n\r\nThis has been reported in:\r\n- [TensorFlow Probability Issue 991](https://github.com/tensorflow/probability/issues/991)\r\n- [`cloudpickle` Issue 390](https://github.com/cloudpipe/cloudpickle/issues/390)\r\n\r\n# Expected Behavior\r\n\r\nFor no error to be raised\r\n\r\n# Actual Behavior\r\n\r\nc.f. above\r\n\r\n# Steps to Reproduce\r\n\r\nThis was found in CI, but the minimal test case is just to install TensorFlow and TensorFlow Probability and then try to import TFP:\r\n\r\n```\r\n$ python -m pip install tensorflow tensorflow-probability\r\n$ python -c \"import tensorflow_probability\"\r\nTraceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\n File \"/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/tensorflow_probability/__init__.py\", line 76, in <module>\r\n from tensorflow_probability.python import * # pylint: disable=wildcard-import\r\n File \"/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/tensorflow_probability/python/__init__.py\", line 23, in <module>\r\n from tensorflow_probability.python import distributions\r\n File \"/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/tensorflow_probability/python/distributions/__init__.py\", line 88, in <module>\r\n from tensorflow_probability.python.distributions.pixel_cnn import PixelCNN\r\n File \"/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/tensorflow_probability/python/distributions/pixel_cnn.py\", line 37, in <module>\r\n from tensorflow_probability.python.layers import weight_norm\r\n File \"/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/tensorflow_probability/python/layers/__init__.py\", line 31, in <module>\r\n from tensorflow_probability.python.layers.distribution_layer import CategoricalMixtureOfOneHotCategorical\r\n File \"/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/tensorflow_probability/python/layers/distribution_layer.py\", line 28, in <module>\r\n from cloudpickle.cloudpickle import CloudPickler\r\nImportError: cannot import name 'CloudPickler' from 'cloudpickle.cloudpickle' (/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/cloudpickle/cloudpickle.py)\r\n$ pip list | grep cloudpickle\r\ncloudpickle 1.5.0\r\n```\r\n\r\n# Checklist\r\n\r\n- [x] Run `git fetch` to get the most up to date version of `master`\r\n- [x] Searched through existing Issues to confirm this is not a duplicate issue\r\n- [x] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue\r\n\n", "before_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted(set(['matplotlib']))\nextras_require['lint'] = sorted(set(['pyflakes', 'black']))\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'uproot~=3.3',\n 'graphviz',\n 'jsonpatch',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n [\n 'sphinx~=3.0.0', # Sphinx v3.1.X regressions break docs\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'check-manifest', 'twine']\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'tensorflow': [\n 'tensorflow~=2.0',\n 'tensorflow-probability~=0.8',\n 'cloudpickle!=1.5.0', # TODO: Temp patch until tfp v0.11\n ],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted(set(['matplotlib']))\nextras_require['lint'] = sorted(set(['pyflakes', 'black']))\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'uproot~=3.3',\n 'graphviz',\n 'jsonpatch',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n [\n 'sphinx~=3.0.0', # Sphinx v3.1.X regressions break docs\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'check-manifest', 'twine']\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]}
1,860
174
gh_patches_debug_39182
rasdani/github-patches
git_diff
holoviz__panel-1792
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Location sync broken for dates, ints #### ALL software version info ``` Python 3.9.0 bokeh==2.2.3 notebook==5.7.9 panel==0.10.1 macOS Catalina 10.15.7 Chrome 85.0.4183.121 ``` #### Description of expected behavior and the observed behavior ##### Expected When syncing params with the browser location, refreshing the page or sharing the URL should restore those param values. ##### Observed Refreshing the page or sharing the URL does not restore param values for dates and ints. It does seem to work for strings and floats. Editing the URL by changing `?bday="2000-01-01"` to `?bday=2000-01-01` lets you share the URL (though the sync on page load immediately changes the URL back). #### Complete, minimal, self-contained example code that reproduces the issue ```python import panel as pn import panel.widgets as pnw bday = pnw.DatePicker() pn.state.location.sync(bday, {"value": "bday"}) @pn.depends(bday) def summary(bday): return f"My birthday is {bday}" app = pn.Column(bday, summary) app.servable() ``` #### Stack traceback and/or browser JavaScript console output ``` 2020-11-13 16:12:06,453 Exception in callback functools.partial(<bound method IOLoop._discard_future_result of <tornado.platform.asyncio.AsyncIOMainLoop object at 0x142be9c10>>, <Task finished name='Task-369' coro=<_needs_document_lock.<locals>._needs_document_lock_wrapper() done, defined at /Users/johnzeringue/.pyenv/versions/3.9.0/envs/pydata-panel/lib/python3.9/site-packages/bokeh/server/session.py:51> exception=ValueError("CalendarDate 'value' only takes datetime types.")>) Traceback (most recent call last): File "/Users/johnzeringue/.pyenv/versions/3.9.0/envs/pydata-panel/lib/python3.9/site-packages/tornado/ioloop.py", line 741, in _run_callback ret = callback() File "/Users/johnzeringue/.pyenv/versions/3.9.0/envs/pydata-panel/lib/python3.9/site-packages/tornado/ioloop.py", line 765, in _discard_future_result future.result() File "/Users/johnzeringue/.pyenv/versions/3.9.0/envs/pydata-panel/lib/python3.9/site-packages/bokeh/server/session.py", line 71, in _needs_document_lock_wrapper result = await result File "/Users/johnzeringue/.pyenv/versions/3.9.0/envs/pydata-panel/lib/python3.9/site-packages/tornado/gen.py", line 216, in wrapper result = ctx_run(func, *args, **kwargs) File "/Users/johnzeringue/.pyenv/versions/3.9.0/envs/pydata-panel/lib/python3.9/site-packages/panel/reactive.py", line 194, in _change_coroutine self._change_event(doc) File "/Users/johnzeringue/.pyenv/versions/3.9.0/envs/pydata-panel/lib/python3.9/site-packages/panel/reactive.py", line 204, in _change_event self._process_events(events) File "/Users/johnzeringue/.pyenv/versions/3.9.0/envs/pydata-panel/lib/python3.9/site-packages/panel/reactive.py", line 187, in _process_events self.param.set_param(**self._process_property_change(events)) File "/Users/johnzeringue/.pyenv/versions/3.9.0/envs/pydata-panel/lib/python3.9/site-packages/param/parameterized.py", line 1451, in set_param self_._batch_call_watchers() File "/Users/johnzeringue/.pyenv/versions/3.9.0/envs/pydata-panel/lib/python3.9/site-packages/param/parameterized.py", line 1578, in _batch_call_watchers watcher.fn(*events) File "/Users/johnzeringue/.pyenv/versions/3.9.0/envs/pydata-panel/lib/python3.9/site-packages/panel/io/location.py", line 108, in _update_synced p.param.set_param(**mapped) File "/Users/johnzeringue/.pyenv/versions/3.9.0/envs/pydata-panel/lib/python3.9/site-packages/param/parameterized.py", line 1444, in set_param setattr(self_or_cls, k, v) File "/Users/johnzeringue/.pyenv/versions/3.9.0/envs/pydata-panel/lib/python3.9/site-packages/param/parameterized.py", line 302, in _f instance_param.__set__(obj, val) File "/Users/johnzeringue/.pyenv/versions/3.9.0/envs/pydata-panel/lib/python3.9/site-packages/param/parameterized.py", line 304, in _f return f(self, obj, val) File "/Users/johnzeringue/.pyenv/versions/3.9.0/envs/pydata-panel/lib/python3.9/site-packages/param/__init__.py", line 623, in __set__ super(Dynamic,self).__set__(obj,val) File "/Users/johnzeringue/.pyenv/versions/3.9.0/envs/pydata-panel/lib/python3.9/site-packages/param/parameterized.py", line 304, in _f return f(self, obj, val) File "/Users/johnzeringue/.pyenv/versions/3.9.0/envs/pydata-panel/lib/python3.9/site-packages/param/parameterized.py", line 871, in __set__ self._validate(val) File "/Users/johnzeringue/.pyenv/versions/3.9.0/envs/pydata-panel/lib/python3.9/site-packages/param/__init__.py", line 1891, in _validate raise ValueError("CalendarDate '%s' only takes datetime types."%self.name) ValueError: CalendarDate 'value' only takes datetime types. ``` #### Screenshots or screencasts of the bug in action <img width="478" alt="Screen Shot 2020-11-13 at 4 24 00 PM" src="https://user-images.githubusercontent.com/3858785/99122635-b844ba00-25cc-11eb-83c4-57760c138586.png"> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `panel/io/location.py` Content: ``` 1 """ 2 Defines the Location widget which allows changing the href of the window. 3 """ 4 5 import urllib.parse as urlparse 6 7 import param 8 9 from ..models.location import Location as _BkLocation 10 from ..reactive import Syncable 11 from ..util import parse_query 12 from .state import state 13 14 15 class Location(Syncable): 16 """ 17 The Location component can be made available in a server context 18 to provide read and write access to the URL components in the 19 browser. 20 """ 21 22 href = param.String(readonly=True, doc=""" 23 The full url, e.g. 'https://localhost:80?color=blue#interact'""") 24 25 hostname = param.String(readonly=True, doc=""" 26 hostname in window.location e.g. 'panel.holoviz.org'""") 27 28 pathname = param.String(regex=r"^$|[\/].*$", doc=""" 29 pathname in window.location e.g. '/user_guide/Interact.html'""") 30 31 protocol = param.String(readonly=True, doc=""" 32 protocol in window.location e.g. 'http:' or 'https:'""") 33 34 port = param.String(readonly=True, doc=""" 35 port in window.location e.g. '80'""") 36 37 search = param.String(regex=r"^$|\?", doc=""" 38 search in window.location e.g. '?color=blue'""") 39 40 hash = param.String(regex=r"^$|#", doc=""" 41 hash in window.location e.g. '#interact'""") 42 43 reload = param.Boolean(default=False, doc=""" 44 Reload the page when the location is updated. For multipage 45 apps this should be set to True, For single page apps this 46 should be set to False""") 47 48 # Mapping from parameter name to bokeh model property name 49 _rename = {"name": None} 50 51 def __init__(self, **params): 52 super(Location, self).__init__(**params) 53 self._synced = [] 54 self._syncing = False 55 self.param.watch(self._update_synced, ['search']) 56 57 def _get_model(self, doc, root=None, parent=None, comm=None): 58 model = _BkLocation(**self._process_param_change(self._init_properties())) 59 root = root or model 60 values = dict(self.param.get_param_values()) 61 properties = list(self._process_param_change(values)) 62 self._models[root.ref['id']] = (model, parent) 63 self._link_props(model, properties, doc, root, comm) 64 return model 65 66 def _get_root(self, doc=None, comm=None): 67 root = self._get_model(doc, comm=comm) 68 ref = root.ref['id'] 69 state._views[ref] = (self, root, doc, comm) 70 self._documents[doc] = root 71 return root 72 73 def _cleanup(self, root): 74 if root.document in self._documents: 75 del self._documents[root.document] 76 ref = root.ref['id'] 77 super()._cleanup(root) 78 if ref in state._views: 79 del state._views[ref] 80 81 def _update_synced(self, event=None): 82 if self._syncing: 83 return 84 query_params = self.query_params 85 for p, parameters, _ in self._synced: 86 mapping = {v: k for k, v in parameters.items()} 87 mapped = {} 88 for k, v in query_params.items(): 89 if k not in mapping: 90 continue 91 pname = mapping[k] 92 if isinstance(v, str) and v.startswith('"') and v.endswith('"'): 93 v = v[1:-1] 94 else: 95 try: 96 v = p.param[pname].deserialize(v) 97 except Exception: 98 pass 99 mapped[pname] = v 100 p.param.set_param(**mapped) 101 102 def _update_query(self, *events, query=None): 103 if self._syncing: 104 return 105 query = query or {} 106 for e in events: 107 matches = [ps for o, ps, _ in self._synced if o in (e.cls, e.obj)] 108 if not matches: 109 continue 110 owner = e.cls if e.obj is None else e.obj 111 try: 112 val = owner.param.serialize_value(e.name) 113 except Exception: 114 val = e.new 115 query[matches[0][e.name]] = val 116 self._syncing = True 117 try: 118 self.update_query(**{k: v for k, v in query.items() if v is not None}) 119 finally: 120 self._syncing = False 121 122 @property 123 def query_params(self): 124 return parse_query(self.search) 125 126 def update_query(self, **kwargs): 127 query = self.query_params 128 query.update(kwargs) 129 self.search = '?' + urlparse.urlencode(query) 130 131 def sync(self, parameterized, parameters=None): 132 """ 133 Syncs the parameters of a Parameterized object with the query 134 parameters in the URL. If no parameters are supplied all 135 parameters except the name are synced. 136 137 Arguments 138 --------- 139 parameterized (param.Parameterized): 140 The Parameterized object to sync query parameters with 141 parameters (list or dict): 142 A list or dictionary specifying parameters to sync. 143 If a dictionary is supplied it should define a mapping from 144 the Parameterized's parameteres to the names of the query 145 parameters. 146 """ 147 parameters = parameters or [p for p in parameterized.param if p != 'name'] 148 if not isinstance(parameters, dict): 149 parameters = dict(zip(parameters, parameters)) 150 watcher = parameterized.param.watch(self._update_query, list(parameters)) 151 self._synced.append((parameterized, parameters, watcher)) 152 self._update_synced() 153 self._update_query(query={v: getattr(parameterized, k) 154 for k, v in parameters.items()}) 155 156 def unsync(self, parameterized, parameters=None): 157 """ 158 Unsyncs the parameters of the Parameterized with the query 159 params in the URL. If no parameters are supplied all 160 parameters except the name are unsynced. 161 162 Arguments 163 --------- 164 parameterized (param.Parameterized): 165 The Parameterized object to unsync query parameters with 166 parameters (list or dict): 167 A list of parameters to unsync. 168 """ 169 matches = [s for s in self._synced if s[0] is parameterized] 170 if not matches: 171 ptype = type(parameterized) 172 raise ValueError(f"Cannot unsync {ptype} object since it " 173 "was never synced in the first place.") 174 synced = [] 175 for p, params, watcher in self._synced: 176 if parameterized is p: 177 parameterized.param.unwatch(watcher) 178 if parameters is not None: 179 new_params = {p: q for p, q in params.items() 180 if p not in parameters} 181 new_watcher = parameterized.param.watch(watcher.fn, list(new_params)) 182 synced.append((p, new_params, new_watcher)) 183 else: 184 synced.append((p, params, watcher)) 185 self._synced = synced 186 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/panel/io/location.py b/panel/io/location.py --- a/panel/io/location.py +++ b/panel/io/location.py @@ -2,6 +2,7 @@ Defines the Location widget which allows changing the href of the window. """ +import json import urllib.parse as urlparse import param @@ -89,33 +90,32 @@ if k not in mapping: continue pname = mapping[k] - if isinstance(v, str) and v.startswith('"') and v.endswith('"'): - v = v[1:-1] - else: - try: - v = p.param[pname].deserialize(v) - except Exception: - pass + try: + v = p.param[pname].deserialize(v) + except Exception: + pass mapped[pname] = v p.param.set_param(**mapped) def _update_query(self, *events, query=None): if self._syncing: return - query = query or {} + serialized = query or {} for e in events: matches = [ps for o, ps, _ in self._synced if o in (e.cls, e.obj)] if not matches: continue owner = e.cls if e.obj is None else e.obj try: - val = owner.param.serialize_value(e.name) + val = owner.param[e.name].serialize(e.new) except Exception: val = e.new - query[matches[0][e.name]] = val + if not isinstance(val, str): + val = json.dumps(val) + serialized[matches[0][e.name]] = val self._syncing = True try: - self.update_query(**{k: v for k, v in query.items() if v is not None}) + self.update_query(**{k: v for k, v in serialized.items() if v is not None}) finally: self._syncing = False @@ -150,8 +150,19 @@ watcher = parameterized.param.watch(self._update_query, list(parameters)) self._synced.append((parameterized, parameters, watcher)) self._update_synced() - self._update_query(query={v: getattr(parameterized, k) - for k, v in parameters.items()}) + query = {} + for p, name in parameters.items(): + v = getattr(parameterized, p) + if v is None: + continue + try: + parameterized.param[p].serialize(v) + except Exception: + pass + if not isinstance(v, str): + v = json.dumps(v) + query[name] = v + self._update_query(query=query) def unsync(self, parameterized, parameters=None): """
{"golden_diff": "diff --git a/panel/io/location.py b/panel/io/location.py\n--- a/panel/io/location.py\n+++ b/panel/io/location.py\n@@ -2,6 +2,7 @@\n Defines the Location widget which allows changing the href of the window.\n \"\"\"\n \n+import json\n import urllib.parse as urlparse\n \n import param\n@@ -89,33 +90,32 @@\n if k not in mapping:\n continue\n pname = mapping[k]\n- if isinstance(v, str) and v.startswith('\"') and v.endswith('\"'):\n- v = v[1:-1]\n- else:\n- try:\n- v = p.param[pname].deserialize(v)\n- except Exception:\n- pass\n+ try:\n+ v = p.param[pname].deserialize(v)\n+ except Exception:\n+ pass\n mapped[pname] = v\n p.param.set_param(**mapped)\n \n def _update_query(self, *events, query=None):\n if self._syncing:\n return\n- query = query or {}\n+ serialized = query or {}\n for e in events:\n matches = [ps for o, ps, _ in self._synced if o in (e.cls, e.obj)]\n if not matches:\n continue\n owner = e.cls if e.obj is None else e.obj\n try:\n- val = owner.param.serialize_value(e.name)\n+ val = owner.param[e.name].serialize(e.new)\n except Exception:\n val = e.new\n- query[matches[0][e.name]] = val\n+ if not isinstance(val, str):\n+ val = json.dumps(val)\n+ serialized[matches[0][e.name]] = val\n self._syncing = True\n try:\n- self.update_query(**{k: v for k, v in query.items() if v is not None})\n+ self.update_query(**{k: v for k, v in serialized.items() if v is not None})\n finally:\n self._syncing = False\n \n@@ -150,8 +150,19 @@\n watcher = parameterized.param.watch(self._update_query, list(parameters))\n self._synced.append((parameterized, parameters, watcher))\n self._update_synced()\n- self._update_query(query={v: getattr(parameterized, k)\n- for k, v in parameters.items()})\n+ query = {}\n+ for p, name in parameters.items():\n+ v = getattr(parameterized, p)\n+ if v is None:\n+ continue\n+ try:\n+ parameterized.param[p].serialize(v)\n+ except Exception:\n+ pass\n+ if not isinstance(v, str):\n+ v = json.dumps(v)\n+ query[name] = v\n+ self._update_query(query=query)\n \n def unsync(self, parameterized, parameters=None):\n \"\"\"\n", "issue": "Location sync broken for dates, ints\n#### ALL software version info\r\n\r\n```\r\nPython 3.9.0\r\n\r\nbokeh==2.2.3\r\nnotebook==5.7.9\r\npanel==0.10.1\r\n\r\nmacOS Catalina 10.15.7\r\n\r\nChrome 85.0.4183.121\r\n```\r\n\r\n#### Description of expected behavior and the observed behavior\r\n\r\n##### Expected\r\n\r\nWhen syncing params with the browser location, refreshing the page or sharing the URL should restore those param values.\r\n\r\n##### Observed\r\n\r\nRefreshing the page or sharing the URL does not restore param values for dates and ints. It does seem to work for strings and floats.\r\n\r\nEditing the URL by changing `?bday=\"2000-01-01\"` to `?bday=2000-01-01` lets you share the URL (though the sync on page load immediately changes the URL back).\r\n\r\n#### Complete, minimal, self-contained example code that reproduces the issue\r\n\r\n```python\r\nimport panel as pn\r\nimport panel.widgets as pnw\r\n\r\nbday = pnw.DatePicker()\r\npn.state.location.sync(bday, {\"value\": \"bday\"})\r\n\r\[email protected](bday)\r\ndef summary(bday):\r\n return f\"My birthday is {bday}\"\r\n\r\napp = pn.Column(bday, summary)\r\napp.servable()\r\n```\r\n\r\n#### Stack traceback and/or browser JavaScript console output\r\n\r\n```\r\n2020-11-13 16:12:06,453 Exception in callback functools.partial(<bound method IOLoop._discard_future_result of <tornado.platform.asyncio.AsyncIOMainLoop object at 0x142be9c10>>, <Task finished name='Task-369' coro=<_needs_document_lock.<locals>._needs_document_lock_wrapper() done, defined at /Users/johnzeringue/.pyenv/versions/3.9.0/envs/pydata-panel/lib/python3.9/site-packages/bokeh/server/session.py:51> exception=ValueError(\"CalendarDate 'value' only takes datetime types.\")>)\r\nTraceback (most recent call last):\r\n File \"/Users/johnzeringue/.pyenv/versions/3.9.0/envs/pydata-panel/lib/python3.9/site-packages/tornado/ioloop.py\", line 741, in _run_callback\r\n ret = callback()\r\n File \"/Users/johnzeringue/.pyenv/versions/3.9.0/envs/pydata-panel/lib/python3.9/site-packages/tornado/ioloop.py\", line 765, in _discard_future_result\r\n future.result()\r\n File \"/Users/johnzeringue/.pyenv/versions/3.9.0/envs/pydata-panel/lib/python3.9/site-packages/bokeh/server/session.py\", line 71, in _needs_document_lock_wrapper\r\n result = await result\r\n File \"/Users/johnzeringue/.pyenv/versions/3.9.0/envs/pydata-panel/lib/python3.9/site-packages/tornado/gen.py\", line 216, in wrapper\r\n result = ctx_run(func, *args, **kwargs)\r\n File \"/Users/johnzeringue/.pyenv/versions/3.9.0/envs/pydata-panel/lib/python3.9/site-packages/panel/reactive.py\", line 194, in _change_coroutine\r\n self._change_event(doc)\r\n File \"/Users/johnzeringue/.pyenv/versions/3.9.0/envs/pydata-panel/lib/python3.9/site-packages/panel/reactive.py\", line 204, in _change_event\r\n self._process_events(events)\r\n File \"/Users/johnzeringue/.pyenv/versions/3.9.0/envs/pydata-panel/lib/python3.9/site-packages/panel/reactive.py\", line 187, in _process_events\r\n self.param.set_param(**self._process_property_change(events))\r\n File \"/Users/johnzeringue/.pyenv/versions/3.9.0/envs/pydata-panel/lib/python3.9/site-packages/param/parameterized.py\", line 1451, in set_param\r\n self_._batch_call_watchers()\r\n File \"/Users/johnzeringue/.pyenv/versions/3.9.0/envs/pydata-panel/lib/python3.9/site-packages/param/parameterized.py\", line 1578, in _batch_call_watchers\r\n watcher.fn(*events)\r\n File \"/Users/johnzeringue/.pyenv/versions/3.9.0/envs/pydata-panel/lib/python3.9/site-packages/panel/io/location.py\", line 108, in _update_synced\r\n p.param.set_param(**mapped)\r\n File \"/Users/johnzeringue/.pyenv/versions/3.9.0/envs/pydata-panel/lib/python3.9/site-packages/param/parameterized.py\", line 1444, in set_param\r\n setattr(self_or_cls, k, v)\r\n File \"/Users/johnzeringue/.pyenv/versions/3.9.0/envs/pydata-panel/lib/python3.9/site-packages/param/parameterized.py\", line 302, in _f\r\n instance_param.__set__(obj, val)\r\n File \"/Users/johnzeringue/.pyenv/versions/3.9.0/envs/pydata-panel/lib/python3.9/site-packages/param/parameterized.py\", line 304, in _f\r\n return f(self, obj, val)\r\n File \"/Users/johnzeringue/.pyenv/versions/3.9.0/envs/pydata-panel/lib/python3.9/site-packages/param/__init__.py\", line 623, in __set__\r\n super(Dynamic,self).__set__(obj,val)\r\n File \"/Users/johnzeringue/.pyenv/versions/3.9.0/envs/pydata-panel/lib/python3.9/site-packages/param/parameterized.py\", line 304, in _f\r\n return f(self, obj, val)\r\n File \"/Users/johnzeringue/.pyenv/versions/3.9.0/envs/pydata-panel/lib/python3.9/site-packages/param/parameterized.py\", line 871, in __set__\r\n self._validate(val)\r\n File \"/Users/johnzeringue/.pyenv/versions/3.9.0/envs/pydata-panel/lib/python3.9/site-packages/param/__init__.py\", line 1891, in _validate\r\n raise ValueError(\"CalendarDate '%s' only takes datetime types.\"%self.name)\r\nValueError: CalendarDate 'value' only takes datetime types.\r\n```\r\n\r\n#### Screenshots or screencasts of the bug in action\r\n\r\n<img width=\"478\" alt=\"Screen Shot 2020-11-13 at 4 24 00 PM\" src=\"https://user-images.githubusercontent.com/3858785/99122635-b844ba00-25cc-11eb-83c4-57760c138586.png\">\r\n\n", "before_files": [{"content": "\"\"\"\nDefines the Location widget which allows changing the href of the window.\n\"\"\"\n\nimport urllib.parse as urlparse\n\nimport param\n\nfrom ..models.location import Location as _BkLocation\nfrom ..reactive import Syncable\nfrom ..util import parse_query\nfrom .state import state\n\n\nclass Location(Syncable):\n \"\"\"\n The Location component can be made available in a server context\n to provide read and write access to the URL components in the\n browser.\n \"\"\"\n\n href = param.String(readonly=True, doc=\"\"\"\n The full url, e.g. 'https://localhost:80?color=blue#interact'\"\"\")\n\n hostname = param.String(readonly=True, doc=\"\"\"\n hostname in window.location e.g. 'panel.holoviz.org'\"\"\")\n\n pathname = param.String(regex=r\"^$|[\\/].*$\", doc=\"\"\"\n pathname in window.location e.g. '/user_guide/Interact.html'\"\"\")\n\n protocol = param.String(readonly=True, doc=\"\"\"\n protocol in window.location e.g. 'http:' or 'https:'\"\"\")\n\n port = param.String(readonly=True, doc=\"\"\"\n port in window.location e.g. '80'\"\"\")\n\n search = param.String(regex=r\"^$|\\?\", doc=\"\"\"\n search in window.location e.g. '?color=blue'\"\"\")\n\n hash = param.String(regex=r\"^$|#\", doc=\"\"\"\n hash in window.location e.g. '#interact'\"\"\")\n\n reload = param.Boolean(default=False, doc=\"\"\"\n Reload the page when the location is updated. For multipage\n apps this should be set to True, For single page apps this\n should be set to False\"\"\")\n\n # Mapping from parameter name to bokeh model property name\n _rename = {\"name\": None}\n\n def __init__(self, **params):\n super(Location, self).__init__(**params)\n self._synced = []\n self._syncing = False\n self.param.watch(self._update_synced, ['search'])\n\n def _get_model(self, doc, root=None, parent=None, comm=None):\n model = _BkLocation(**self._process_param_change(self._init_properties()))\n root = root or model\n values = dict(self.param.get_param_values())\n properties = list(self._process_param_change(values))\n self._models[root.ref['id']] = (model, parent)\n self._link_props(model, properties, doc, root, comm)\n return model\n\n def _get_root(self, doc=None, comm=None):\n root = self._get_model(doc, comm=comm)\n ref = root.ref['id']\n state._views[ref] = (self, root, doc, comm)\n self._documents[doc] = root\n return root\n\n def _cleanup(self, root):\n if root.document in self._documents:\n del self._documents[root.document]\n ref = root.ref['id']\n super()._cleanup(root)\n if ref in state._views:\n del state._views[ref]\n\n def _update_synced(self, event=None):\n if self._syncing:\n return\n query_params = self.query_params\n for p, parameters, _ in self._synced:\n mapping = {v: k for k, v in parameters.items()}\n mapped = {}\n for k, v in query_params.items():\n if k not in mapping:\n continue\n pname = mapping[k]\n if isinstance(v, str) and v.startswith('\"') and v.endswith('\"'):\n v = v[1:-1]\n else:\n try:\n v = p.param[pname].deserialize(v)\n except Exception:\n pass\n mapped[pname] = v\n p.param.set_param(**mapped)\n\n def _update_query(self, *events, query=None):\n if self._syncing:\n return\n query = query or {}\n for e in events:\n matches = [ps for o, ps, _ in self._synced if o in (e.cls, e.obj)]\n if not matches:\n continue\n owner = e.cls if e.obj is None else e.obj\n try:\n val = owner.param.serialize_value(e.name)\n except Exception:\n val = e.new\n query[matches[0][e.name]] = val\n self._syncing = True\n try:\n self.update_query(**{k: v for k, v in query.items() if v is not None})\n finally:\n self._syncing = False\n\n @property\n def query_params(self):\n return parse_query(self.search)\n\n def update_query(self, **kwargs):\n query = self.query_params\n query.update(kwargs)\n self.search = '?' + urlparse.urlencode(query)\n\n def sync(self, parameterized, parameters=None):\n \"\"\"\n Syncs the parameters of a Parameterized object with the query\n parameters in the URL. If no parameters are supplied all\n parameters except the name are synced.\n\n Arguments\n ---------\n parameterized (param.Parameterized):\n The Parameterized object to sync query parameters with\n parameters (list or dict):\n A list or dictionary specifying parameters to sync.\n If a dictionary is supplied it should define a mapping from\n the Parameterized's parameteres to the names of the query\n parameters.\n \"\"\"\n parameters = parameters or [p for p in parameterized.param if p != 'name']\n if not isinstance(parameters, dict):\n parameters = dict(zip(parameters, parameters))\n watcher = parameterized.param.watch(self._update_query, list(parameters))\n self._synced.append((parameterized, parameters, watcher))\n self._update_synced()\n self._update_query(query={v: getattr(parameterized, k)\n for k, v in parameters.items()})\n\n def unsync(self, parameterized, parameters=None):\n \"\"\"\n Unsyncs the parameters of the Parameterized with the query\n params in the URL. If no parameters are supplied all\n parameters except the name are unsynced.\n\n Arguments\n ---------\n parameterized (param.Parameterized):\n The Parameterized object to unsync query parameters with\n parameters (list or dict):\n A list of parameters to unsync.\n \"\"\"\n matches = [s for s in self._synced if s[0] is parameterized]\n if not matches:\n ptype = type(parameterized)\n raise ValueError(f\"Cannot unsync {ptype} object since it \"\n \"was never synced in the first place.\")\n synced = []\n for p, params, watcher in self._synced:\n if parameterized is p:\n parameterized.param.unwatch(watcher)\n if parameters is not None:\n new_params = {p: q for p, q in params.items()\n if p not in parameters}\n new_watcher = parameterized.param.watch(watcher.fn, list(new_params))\n synced.append((p, new_params, new_watcher))\n else:\n synced.append((p, params, watcher))\n self._synced = synced\n", "path": "panel/io/location.py"}], "after_files": [{"content": "\"\"\"\nDefines the Location widget which allows changing the href of the window.\n\"\"\"\n\nimport json\nimport urllib.parse as urlparse\n\nimport param\n\nfrom ..models.location import Location as _BkLocation\nfrom ..reactive import Syncable\nfrom ..util import parse_query\nfrom .state import state\n\n\nclass Location(Syncable):\n \"\"\"\n The Location component can be made available in a server context\n to provide read and write access to the URL components in the\n browser.\n \"\"\"\n\n href = param.String(readonly=True, doc=\"\"\"\n The full url, e.g. 'https://localhost:80?color=blue#interact'\"\"\")\n\n hostname = param.String(readonly=True, doc=\"\"\"\n hostname in window.location e.g. 'panel.holoviz.org'\"\"\")\n\n pathname = param.String(regex=r\"^$|[\\/].*$\", doc=\"\"\"\n pathname in window.location e.g. '/user_guide/Interact.html'\"\"\")\n\n protocol = param.String(readonly=True, doc=\"\"\"\n protocol in window.location e.g. 'http:' or 'https:'\"\"\")\n\n port = param.String(readonly=True, doc=\"\"\"\n port in window.location e.g. '80'\"\"\")\n\n search = param.String(regex=r\"^$|\\?\", doc=\"\"\"\n search in window.location e.g. '?color=blue'\"\"\")\n\n hash = param.String(regex=r\"^$|#\", doc=\"\"\"\n hash in window.location e.g. '#interact'\"\"\")\n\n reload = param.Boolean(default=False, doc=\"\"\"\n Reload the page when the location is updated. For multipage\n apps this should be set to True, For single page apps this\n should be set to False\"\"\")\n\n # Mapping from parameter name to bokeh model property name\n _rename = {\"name\": None}\n\n def __init__(self, **params):\n super(Location, self).__init__(**params)\n self._synced = []\n self._syncing = False\n self.param.watch(self._update_synced, ['search'])\n\n def _get_model(self, doc, root=None, parent=None, comm=None):\n model = _BkLocation(**self._process_param_change(self._init_properties()))\n root = root or model\n values = dict(self.param.get_param_values())\n properties = list(self._process_param_change(values))\n self._models[root.ref['id']] = (model, parent)\n self._link_props(model, properties, doc, root, comm)\n return model\n\n def _get_root(self, doc=None, comm=None):\n root = self._get_model(doc, comm=comm)\n ref = root.ref['id']\n state._views[ref] = (self, root, doc, comm)\n self._documents[doc] = root\n return root\n\n def _cleanup(self, root):\n if root.document in self._documents:\n del self._documents[root.document]\n ref = root.ref['id']\n super()._cleanup(root)\n if ref in state._views:\n del state._views[ref]\n\n def _update_synced(self, event=None):\n if self._syncing:\n return\n query_params = self.query_params\n for p, parameters, _ in self._synced:\n mapping = {v: k for k, v in parameters.items()}\n mapped = {}\n for k, v in query_params.items():\n if k not in mapping:\n continue\n pname = mapping[k]\n try:\n v = p.param[pname].deserialize(v)\n except Exception:\n pass\n mapped[pname] = v\n p.param.set_param(**mapped)\n\n def _update_query(self, *events, query=None):\n if self._syncing:\n return\n serialized = query or {}\n for e in events:\n matches = [ps for o, ps, _ in self._synced if o in (e.cls, e.obj)]\n if not matches:\n continue\n owner = e.cls if e.obj is None else e.obj\n try:\n val = owner.param[e.name].serialize(e.new)\n except Exception:\n val = e.new\n if not isinstance(val, str):\n val = json.dumps(val)\n serialized[matches[0][e.name]] = val\n self._syncing = True\n try:\n self.update_query(**{k: v for k, v in serialized.items() if v is not None})\n finally:\n self._syncing = False\n\n @property\n def query_params(self):\n return parse_query(self.search)\n\n def update_query(self, **kwargs):\n query = self.query_params\n query.update(kwargs)\n self.search = '?' + urlparse.urlencode(query)\n\n def sync(self, parameterized, parameters=None):\n \"\"\"\n Syncs the parameters of a Parameterized object with the query\n parameters in the URL. If no parameters are supplied all\n parameters except the name are synced.\n\n Arguments\n ---------\n parameterized (param.Parameterized):\n The Parameterized object to sync query parameters with\n parameters (list or dict):\n A list or dictionary specifying parameters to sync.\n If a dictionary is supplied it should define a mapping from\n the Parameterized's parameteres to the names of the query\n parameters.\n \"\"\"\n parameters = parameters or [p for p in parameterized.param if p != 'name']\n if not isinstance(parameters, dict):\n parameters = dict(zip(parameters, parameters))\n watcher = parameterized.param.watch(self._update_query, list(parameters))\n self._synced.append((parameterized, parameters, watcher))\n self._update_synced()\n query = {}\n for p, name in parameters.items():\n v = getattr(parameterized, p)\n if v is None:\n continue\n try:\n parameterized.param[p].serialize(v)\n except Exception:\n pass\n if not isinstance(v, str):\n v = json.dumps(v)\n query[name] = v\n self._update_query(query=query)\n\n def unsync(self, parameterized, parameters=None):\n \"\"\"\n Unsyncs the parameters of the Parameterized with the query\n params in the URL. If no parameters are supplied all\n parameters except the name are unsynced.\n\n Arguments\n ---------\n parameterized (param.Parameterized):\n The Parameterized object to unsync query parameters with\n parameters (list or dict):\n A list of parameters to unsync.\n \"\"\"\n matches = [s for s in self._synced if s[0] is parameterized]\n if not matches:\n ptype = type(parameterized)\n raise ValueError(f\"Cannot unsync {ptype} object since it \"\n \"was never synced in the first place.\")\n synced = []\n for p, params, watcher in self._synced:\n if parameterized is p:\n parameterized.param.unwatch(watcher)\n if parameters is not None:\n new_params = {p: q for p, q in params.items()\n if p not in parameters}\n new_watcher = parameterized.param.watch(watcher.fn, list(new_params))\n synced.append((p, new_params, new_watcher))\n else:\n synced.append((p, params, watcher))\n self._synced = synced\n", "path": "panel/io/location.py"}]}
3,811
628
gh_patches_debug_29588
rasdani/github-patches
git_diff
saleor__saleor-13989
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Increase timeout of sync shipping method filtering webhooks The following sync webhooks currently use 2s timeout: - `ORDER_FILTER_SHIPPING_METHODS` - `CHECKOUT_FILTER_SHIPPING_METHODS` There are no reasons to make the timout shorter than other webhooks. Other sync webhooks use `settings.WEBHOOK_SYNC_TIMEOUT` which should be used for these two for consistency. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `saleor/plugins/webhook/const.py` Content: ``` 1 CACHE_EXCLUDED_SHIPPING_KEY = "webhook_exclude_shipping_id_" 2 CACHE_EXCLUDED_SHIPPING_TIME = 60 * 3 3 EXCLUDED_SHIPPING_REQUEST_TIMEOUT = 2 4 WEBHOOK_CACHE_DEFAULT_TIMEOUT: int = 5 * 60 # 5 minutes 5 ``` Path: `saleor/plugins/webhook/shipping.py` Content: ``` 1 import base64 2 import json 3 import logging 4 from collections import defaultdict 5 from typing import Any, Callable, Dict, List, Optional, Union 6 7 from django.core.cache import cache 8 from django.db.models import QuerySet 9 from graphql import GraphQLError 10 from prices import Money 11 12 from ...app.models import App 13 from ...checkout.models import Checkout 14 from ...graphql.core.utils import from_global_id_or_error 15 from ...graphql.shipping.types import ShippingMethod 16 from ...order.models import Order 17 from ...shipping.interface import ShippingMethodData 18 from ...webhook.utils import get_webhooks_for_event 19 from ..base_plugin import ExcludedShippingMethod 20 from ..const import APP_ID_PREFIX 21 from .const import CACHE_EXCLUDED_SHIPPING_TIME, EXCLUDED_SHIPPING_REQUEST_TIMEOUT 22 from .tasks import trigger_webhook_sync 23 24 logger = logging.getLogger(__name__) 25 26 27 def to_shipping_app_id(app: "App", shipping_method_id: str) -> "str": 28 app_identifier = app.identifier or app.id 29 return base64.b64encode( 30 str.encode(f"{APP_ID_PREFIX}:{app_identifier}:{shipping_method_id}") 31 ).decode("utf-8") 32 33 34 def convert_to_app_id_with_identifier(shipping_app_id: str): 35 """Prepare the shipping_app_id in format `app:<app-identifier>/method_id>`. 36 37 The format of shipping_app_id has been changes so we need to support both of them. 38 This method is preparing the new shipping_app_id format based on assumptions 39 that right now the old one is used which is `app:<app-pk>:method_id>` 40 """ 41 decoded_id = base64.b64decode(shipping_app_id).decode() 42 splitted_id = decoded_id.split(":") 43 if len(splitted_id) != 3: 44 return 45 try: 46 app_id = int(splitted_id[1]) 47 except (TypeError, ValueError): 48 return None 49 app = App.objects.filter(id=app_id).first() 50 if app is None: 51 return None 52 return to_shipping_app_id(app, splitted_id[2]) 53 54 55 def parse_list_shipping_methods_response( 56 response_data: Any, app: "App" 57 ) -> List["ShippingMethodData"]: 58 shipping_methods = [] 59 for shipping_method_data in response_data: 60 method_id = shipping_method_data.get("id") 61 method_name = shipping_method_data.get("name") 62 method_amount = shipping_method_data.get("amount") 63 method_currency = shipping_method_data.get("currency") 64 method_maximum_delivery_days = shipping_method_data.get("maximum_delivery_days") 65 66 shipping_methods.append( 67 ShippingMethodData( 68 id=to_shipping_app_id(app, method_id), 69 name=method_name, 70 price=Money(method_amount, method_currency), 71 maximum_delivery_days=method_maximum_delivery_days, 72 ) 73 ) 74 return shipping_methods 75 76 77 def _compare_order_payloads(payload: str, cached_payload: str) -> bool: 78 """Compare two strings of order payloads ignoring meta.""" 79 EXCLUDED_KEY = "meta" 80 try: 81 order_payload = json.loads(payload)["order"] 82 cached_order_payload = json.loads(cached_payload)["order"] 83 except: # noqa 84 return False 85 return {k: v for k, v in order_payload.items() if k != EXCLUDED_KEY} == { 86 k: v for k, v in cached_order_payload.items() if k != EXCLUDED_KEY 87 } 88 89 90 def get_excluded_shipping_methods_or_fetch( 91 webhooks: QuerySet, 92 event_type: str, 93 payload: str, 94 cache_key: str, 95 subscribable_object: Optional[Union["Order", "Checkout"]], 96 ) -> Dict[str, List[ExcludedShippingMethod]]: 97 """Return data of all excluded shipping methods. 98 99 The data will be fetched from the cache. If missing it will fetch it from all 100 defined webhooks by calling a request to each of them one by one. 101 """ 102 cached_data = cache.get(cache_key) 103 if cached_data: 104 cached_payload, excluded_shipping_methods = cached_data 105 if (payload == cached_payload) or _compare_order_payloads( 106 payload, cached_payload 107 ): 108 return parse_excluded_shipping_methods(excluded_shipping_methods) 109 110 excluded_methods = [] 111 # Gather responses from webhooks 112 for webhook in webhooks: 113 if not webhook: 114 continue 115 response_data = trigger_webhook_sync( 116 event_type, 117 payload, 118 webhook, 119 subscribable_object=subscribable_object, 120 timeout=EXCLUDED_SHIPPING_REQUEST_TIMEOUT, 121 ) 122 if response_data: 123 excluded_methods.extend( 124 get_excluded_shipping_methods_from_response(response_data) 125 ) 126 cache.set(cache_key, (payload, excluded_methods), CACHE_EXCLUDED_SHIPPING_TIME) 127 return parse_excluded_shipping_methods(excluded_methods) 128 129 130 def get_excluded_shipping_data( 131 event_type: str, 132 previous_value: List[ExcludedShippingMethod], 133 payload_fun: Callable[[], str], 134 cache_key: str, 135 subscribable_object: Optional[Union["Order", "Checkout"]], 136 ) -> List[ExcludedShippingMethod]: 137 """Exclude not allowed shipping methods by sync webhook. 138 139 Fetch excluded shipping methods from sync webhooks and return them as a list of 140 excluded shipping methods. 141 The function uses a cache_key to reduce the number of 142 requests which we call to the external APIs. In case when we have the same payload 143 in a cache as we're going to send now, we will skip an additional request and use 144 the response fetched from cache. 145 The function will fetch the payload only in the case that we have any defined 146 webhook. 147 """ 148 149 excluded_methods_map: Dict[str, List[ExcludedShippingMethod]] = defaultdict(list) 150 webhooks = get_webhooks_for_event(event_type) 151 if webhooks: 152 payload = payload_fun() 153 154 excluded_methods_map = get_excluded_shipping_methods_or_fetch( 155 webhooks, event_type, payload, cache_key, subscribable_object 156 ) 157 158 # Gather responses for previous plugins 159 for method in previous_value: 160 excluded_methods_map[method.id].append(method) 161 162 # Return a list of excluded methods, unique by id 163 excluded_methods = [] 164 for method_id, methods in excluded_methods_map.items(): 165 reason = None 166 if reasons := [m.reason for m in methods if m.reason]: 167 reason = " ".join(reasons) 168 excluded_methods.append(ExcludedShippingMethod(id=method_id, reason=reason)) 169 return excluded_methods 170 171 172 def get_excluded_shipping_methods_from_response( 173 response_data: dict, 174 ) -> List[dict]: 175 excluded_methods = [] 176 for method_data in response_data.get("excluded_methods", []): 177 try: 178 type_name, method_id = from_global_id_or_error(method_data["id"]) 179 if type_name not in (APP_ID_PREFIX, str(ShippingMethod)): 180 logger.warning( 181 "Invalid type received. Expected ShippingMethod, got %s", type_name 182 ) 183 continue 184 185 except (KeyError, ValueError, TypeError, GraphQLError) as e: 186 logger.warning("Malformed ShippingMethod id was provided: %s", e) 187 continue 188 excluded_methods.append( 189 {"id": method_id, "reason": method_data.get("reason", "")} 190 ) 191 return excluded_methods 192 193 194 def parse_excluded_shipping_methods( 195 excluded_methods: List[dict], 196 ) -> Dict[str, List[ExcludedShippingMethod]]: 197 excluded_methods_map = defaultdict(list) 198 for excluded_method in excluded_methods: 199 method_id = excluded_method["id"] 200 excluded_methods_map[method_id].append( 201 ExcludedShippingMethod( 202 id=method_id, reason=excluded_method.get("reason", "") 203 ) 204 ) 205 return excluded_methods_map 206 207 208 def get_cache_data_for_shipping_list_methods_for_checkout(payload: str) -> dict: 209 key_data = json.loads(payload) 210 211 # drop fields that change between requests but are not relevant for cache key 212 key_data[0].pop("last_change") 213 key_data[0]["meta"].pop("issued_at") 214 return key_data 215 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/saleor/plugins/webhook/const.py b/saleor/plugins/webhook/const.py --- a/saleor/plugins/webhook/const.py +++ b/saleor/plugins/webhook/const.py @@ -1,4 +1,3 @@ CACHE_EXCLUDED_SHIPPING_KEY = "webhook_exclude_shipping_id_" CACHE_EXCLUDED_SHIPPING_TIME = 60 * 3 -EXCLUDED_SHIPPING_REQUEST_TIMEOUT = 2 WEBHOOK_CACHE_DEFAULT_TIMEOUT: int = 5 * 60 # 5 minutes diff --git a/saleor/plugins/webhook/shipping.py b/saleor/plugins/webhook/shipping.py --- a/saleor/plugins/webhook/shipping.py +++ b/saleor/plugins/webhook/shipping.py @@ -4,6 +4,7 @@ from collections import defaultdict from typing import Any, Callable, Dict, List, Optional, Union +from django.conf import settings from django.core.cache import cache from django.db.models import QuerySet from graphql import GraphQLError @@ -18,7 +19,7 @@ from ...webhook.utils import get_webhooks_for_event from ..base_plugin import ExcludedShippingMethod from ..const import APP_ID_PREFIX -from .const import CACHE_EXCLUDED_SHIPPING_TIME, EXCLUDED_SHIPPING_REQUEST_TIMEOUT +from .const import CACHE_EXCLUDED_SHIPPING_TIME from .tasks import trigger_webhook_sync logger = logging.getLogger(__name__) @@ -117,7 +118,7 @@ payload, webhook, subscribable_object=subscribable_object, - timeout=EXCLUDED_SHIPPING_REQUEST_TIMEOUT, + timeout=settings.WEBHOOK_SYNC_TIMEOUT, ) if response_data: excluded_methods.extend(
{"golden_diff": "diff --git a/saleor/plugins/webhook/const.py b/saleor/plugins/webhook/const.py\n--- a/saleor/plugins/webhook/const.py\n+++ b/saleor/plugins/webhook/const.py\n@@ -1,4 +1,3 @@\n CACHE_EXCLUDED_SHIPPING_KEY = \"webhook_exclude_shipping_id_\"\n CACHE_EXCLUDED_SHIPPING_TIME = 60 * 3\n-EXCLUDED_SHIPPING_REQUEST_TIMEOUT = 2\n WEBHOOK_CACHE_DEFAULT_TIMEOUT: int = 5 * 60 # 5 minutes\ndiff --git a/saleor/plugins/webhook/shipping.py b/saleor/plugins/webhook/shipping.py\n--- a/saleor/plugins/webhook/shipping.py\n+++ b/saleor/plugins/webhook/shipping.py\n@@ -4,6 +4,7 @@\n from collections import defaultdict\n from typing import Any, Callable, Dict, List, Optional, Union\n \n+from django.conf import settings\n from django.core.cache import cache\n from django.db.models import QuerySet\n from graphql import GraphQLError\n@@ -18,7 +19,7 @@\n from ...webhook.utils import get_webhooks_for_event\n from ..base_plugin import ExcludedShippingMethod\n from ..const import APP_ID_PREFIX\n-from .const import CACHE_EXCLUDED_SHIPPING_TIME, EXCLUDED_SHIPPING_REQUEST_TIMEOUT\n+from .const import CACHE_EXCLUDED_SHIPPING_TIME\n from .tasks import trigger_webhook_sync\n \n logger = logging.getLogger(__name__)\n@@ -117,7 +118,7 @@\n payload,\n webhook,\n subscribable_object=subscribable_object,\n- timeout=EXCLUDED_SHIPPING_REQUEST_TIMEOUT,\n+ timeout=settings.WEBHOOK_SYNC_TIMEOUT,\n )\n if response_data:\n excluded_methods.extend(\n", "issue": "Increase timeout of sync shipping method filtering webhooks\nThe following sync webhooks currently use 2s timeout:\r\n- `ORDER_FILTER_SHIPPING_METHODS` \r\n- `CHECKOUT_FILTER_SHIPPING_METHODS`\r\n\r\nThere are no reasons to make the timout shorter than other webhooks. Other sync webhooks use `settings.WEBHOOK_SYNC_TIMEOUT` which should be used for these two for consistency.\n", "before_files": [{"content": "CACHE_EXCLUDED_SHIPPING_KEY = \"webhook_exclude_shipping_id_\"\nCACHE_EXCLUDED_SHIPPING_TIME = 60 * 3\nEXCLUDED_SHIPPING_REQUEST_TIMEOUT = 2\nWEBHOOK_CACHE_DEFAULT_TIMEOUT: int = 5 * 60 # 5 minutes\n", "path": "saleor/plugins/webhook/const.py"}, {"content": "import base64\nimport json\nimport logging\nfrom collections import defaultdict\nfrom typing import Any, Callable, Dict, List, Optional, Union\n\nfrom django.core.cache import cache\nfrom django.db.models import QuerySet\nfrom graphql import GraphQLError\nfrom prices import Money\n\nfrom ...app.models import App\nfrom ...checkout.models import Checkout\nfrom ...graphql.core.utils import from_global_id_or_error\nfrom ...graphql.shipping.types import ShippingMethod\nfrom ...order.models import Order\nfrom ...shipping.interface import ShippingMethodData\nfrom ...webhook.utils import get_webhooks_for_event\nfrom ..base_plugin import ExcludedShippingMethod\nfrom ..const import APP_ID_PREFIX\nfrom .const import CACHE_EXCLUDED_SHIPPING_TIME, EXCLUDED_SHIPPING_REQUEST_TIMEOUT\nfrom .tasks import trigger_webhook_sync\n\nlogger = logging.getLogger(__name__)\n\n\ndef to_shipping_app_id(app: \"App\", shipping_method_id: str) -> \"str\":\n app_identifier = app.identifier or app.id\n return base64.b64encode(\n str.encode(f\"{APP_ID_PREFIX}:{app_identifier}:{shipping_method_id}\")\n ).decode(\"utf-8\")\n\n\ndef convert_to_app_id_with_identifier(shipping_app_id: str):\n \"\"\"Prepare the shipping_app_id in format `app:<app-identifier>/method_id>`.\n\n The format of shipping_app_id has been changes so we need to support both of them.\n This method is preparing the new shipping_app_id format based on assumptions\n that right now the old one is used which is `app:<app-pk>:method_id>`\n \"\"\"\n decoded_id = base64.b64decode(shipping_app_id).decode()\n splitted_id = decoded_id.split(\":\")\n if len(splitted_id) != 3:\n return\n try:\n app_id = int(splitted_id[1])\n except (TypeError, ValueError):\n return None\n app = App.objects.filter(id=app_id).first()\n if app is None:\n return None\n return to_shipping_app_id(app, splitted_id[2])\n\n\ndef parse_list_shipping_methods_response(\n response_data: Any, app: \"App\"\n) -> List[\"ShippingMethodData\"]:\n shipping_methods = []\n for shipping_method_data in response_data:\n method_id = shipping_method_data.get(\"id\")\n method_name = shipping_method_data.get(\"name\")\n method_amount = shipping_method_data.get(\"amount\")\n method_currency = shipping_method_data.get(\"currency\")\n method_maximum_delivery_days = shipping_method_data.get(\"maximum_delivery_days\")\n\n shipping_methods.append(\n ShippingMethodData(\n id=to_shipping_app_id(app, method_id),\n name=method_name,\n price=Money(method_amount, method_currency),\n maximum_delivery_days=method_maximum_delivery_days,\n )\n )\n return shipping_methods\n\n\ndef _compare_order_payloads(payload: str, cached_payload: str) -> bool:\n \"\"\"Compare two strings of order payloads ignoring meta.\"\"\"\n EXCLUDED_KEY = \"meta\"\n try:\n order_payload = json.loads(payload)[\"order\"]\n cached_order_payload = json.loads(cached_payload)[\"order\"]\n except: # noqa\n return False\n return {k: v for k, v in order_payload.items() if k != EXCLUDED_KEY} == {\n k: v for k, v in cached_order_payload.items() if k != EXCLUDED_KEY\n }\n\n\ndef get_excluded_shipping_methods_or_fetch(\n webhooks: QuerySet,\n event_type: str,\n payload: str,\n cache_key: str,\n subscribable_object: Optional[Union[\"Order\", \"Checkout\"]],\n) -> Dict[str, List[ExcludedShippingMethod]]:\n \"\"\"Return data of all excluded shipping methods.\n\n The data will be fetched from the cache. If missing it will fetch it from all\n defined webhooks by calling a request to each of them one by one.\n \"\"\"\n cached_data = cache.get(cache_key)\n if cached_data:\n cached_payload, excluded_shipping_methods = cached_data\n if (payload == cached_payload) or _compare_order_payloads(\n payload, cached_payload\n ):\n return parse_excluded_shipping_methods(excluded_shipping_methods)\n\n excluded_methods = []\n # Gather responses from webhooks\n for webhook in webhooks:\n if not webhook:\n continue\n response_data = trigger_webhook_sync(\n event_type,\n payload,\n webhook,\n subscribable_object=subscribable_object,\n timeout=EXCLUDED_SHIPPING_REQUEST_TIMEOUT,\n )\n if response_data:\n excluded_methods.extend(\n get_excluded_shipping_methods_from_response(response_data)\n )\n cache.set(cache_key, (payload, excluded_methods), CACHE_EXCLUDED_SHIPPING_TIME)\n return parse_excluded_shipping_methods(excluded_methods)\n\n\ndef get_excluded_shipping_data(\n event_type: str,\n previous_value: List[ExcludedShippingMethod],\n payload_fun: Callable[[], str],\n cache_key: str,\n subscribable_object: Optional[Union[\"Order\", \"Checkout\"]],\n) -> List[ExcludedShippingMethod]:\n \"\"\"Exclude not allowed shipping methods by sync webhook.\n\n Fetch excluded shipping methods from sync webhooks and return them as a list of\n excluded shipping methods.\n The function uses a cache_key to reduce the number of\n requests which we call to the external APIs. In case when we have the same payload\n in a cache as we're going to send now, we will skip an additional request and use\n the response fetched from cache.\n The function will fetch the payload only in the case that we have any defined\n webhook.\n \"\"\"\n\n excluded_methods_map: Dict[str, List[ExcludedShippingMethod]] = defaultdict(list)\n webhooks = get_webhooks_for_event(event_type)\n if webhooks:\n payload = payload_fun()\n\n excluded_methods_map = get_excluded_shipping_methods_or_fetch(\n webhooks, event_type, payload, cache_key, subscribable_object\n )\n\n # Gather responses for previous plugins\n for method in previous_value:\n excluded_methods_map[method.id].append(method)\n\n # Return a list of excluded methods, unique by id\n excluded_methods = []\n for method_id, methods in excluded_methods_map.items():\n reason = None\n if reasons := [m.reason for m in methods if m.reason]:\n reason = \" \".join(reasons)\n excluded_methods.append(ExcludedShippingMethod(id=method_id, reason=reason))\n return excluded_methods\n\n\ndef get_excluded_shipping_methods_from_response(\n response_data: dict,\n) -> List[dict]:\n excluded_methods = []\n for method_data in response_data.get(\"excluded_methods\", []):\n try:\n type_name, method_id = from_global_id_or_error(method_data[\"id\"])\n if type_name not in (APP_ID_PREFIX, str(ShippingMethod)):\n logger.warning(\n \"Invalid type received. Expected ShippingMethod, got %s\", type_name\n )\n continue\n\n except (KeyError, ValueError, TypeError, GraphQLError) as e:\n logger.warning(\"Malformed ShippingMethod id was provided: %s\", e)\n continue\n excluded_methods.append(\n {\"id\": method_id, \"reason\": method_data.get(\"reason\", \"\")}\n )\n return excluded_methods\n\n\ndef parse_excluded_shipping_methods(\n excluded_methods: List[dict],\n) -> Dict[str, List[ExcludedShippingMethod]]:\n excluded_methods_map = defaultdict(list)\n for excluded_method in excluded_methods:\n method_id = excluded_method[\"id\"]\n excluded_methods_map[method_id].append(\n ExcludedShippingMethod(\n id=method_id, reason=excluded_method.get(\"reason\", \"\")\n )\n )\n return excluded_methods_map\n\n\ndef get_cache_data_for_shipping_list_methods_for_checkout(payload: str) -> dict:\n key_data = json.loads(payload)\n\n # drop fields that change between requests but are not relevant for cache key\n key_data[0].pop(\"last_change\")\n key_data[0][\"meta\"].pop(\"issued_at\")\n return key_data\n", "path": "saleor/plugins/webhook/shipping.py"}], "after_files": [{"content": "CACHE_EXCLUDED_SHIPPING_KEY = \"webhook_exclude_shipping_id_\"\nCACHE_EXCLUDED_SHIPPING_TIME = 60 * 3\nWEBHOOK_CACHE_DEFAULT_TIMEOUT: int = 5 * 60 # 5 minutes\n", "path": "saleor/plugins/webhook/const.py"}, {"content": "import base64\nimport json\nimport logging\nfrom collections import defaultdict\nfrom typing import Any, Callable, Dict, List, Optional, Union\n\nfrom django.conf import settings\nfrom django.core.cache import cache\nfrom django.db.models import QuerySet\nfrom graphql import GraphQLError\nfrom prices import Money\n\nfrom ...app.models import App\nfrom ...checkout.models import Checkout\nfrom ...graphql.core.utils import from_global_id_or_error\nfrom ...graphql.shipping.types import ShippingMethod\nfrom ...order.models import Order\nfrom ...shipping.interface import ShippingMethodData\nfrom ...webhook.utils import get_webhooks_for_event\nfrom ..base_plugin import ExcludedShippingMethod\nfrom ..const import APP_ID_PREFIX\nfrom .const import CACHE_EXCLUDED_SHIPPING_TIME\nfrom .tasks import trigger_webhook_sync\n\nlogger = logging.getLogger(__name__)\n\n\ndef to_shipping_app_id(app: \"App\", shipping_method_id: str) -> \"str\":\n app_identifier = app.identifier or app.id\n return base64.b64encode(\n str.encode(f\"{APP_ID_PREFIX}:{app_identifier}:{shipping_method_id}\")\n ).decode(\"utf-8\")\n\n\ndef convert_to_app_id_with_identifier(shipping_app_id: str):\n \"\"\"Prepare the shipping_app_id in format `app:<app-identifier>/method_id>`.\n\n The format of shipping_app_id has been changes so we need to support both of them.\n This method is preparing the new shipping_app_id format based on assumptions\n that right now the old one is used which is `app:<app-pk>:method_id>`\n \"\"\"\n decoded_id = base64.b64decode(shipping_app_id).decode()\n splitted_id = decoded_id.split(\":\")\n if len(splitted_id) != 3:\n return\n try:\n app_id = int(splitted_id[1])\n except (TypeError, ValueError):\n return None\n app = App.objects.filter(id=app_id).first()\n if app is None:\n return None\n return to_shipping_app_id(app, splitted_id[2])\n\n\ndef parse_list_shipping_methods_response(\n response_data: Any, app: \"App\"\n) -> List[\"ShippingMethodData\"]:\n shipping_methods = []\n for shipping_method_data in response_data:\n method_id = shipping_method_data.get(\"id\")\n method_name = shipping_method_data.get(\"name\")\n method_amount = shipping_method_data.get(\"amount\")\n method_currency = shipping_method_data.get(\"currency\")\n method_maximum_delivery_days = shipping_method_data.get(\"maximum_delivery_days\")\n\n shipping_methods.append(\n ShippingMethodData(\n id=to_shipping_app_id(app, method_id),\n name=method_name,\n price=Money(method_amount, method_currency),\n maximum_delivery_days=method_maximum_delivery_days,\n )\n )\n return shipping_methods\n\n\ndef _compare_order_payloads(payload: str, cached_payload: str) -> bool:\n \"\"\"Compare two strings of order payloads ignoring meta.\"\"\"\n EXCLUDED_KEY = \"meta\"\n try:\n order_payload = json.loads(payload)[\"order\"]\n cached_order_payload = json.loads(cached_payload)[\"order\"]\n except: # noqa\n return False\n return {k: v for k, v in order_payload.items() if k != EXCLUDED_KEY} == {\n k: v for k, v in cached_order_payload.items() if k != EXCLUDED_KEY\n }\n\n\ndef get_excluded_shipping_methods_or_fetch(\n webhooks: QuerySet,\n event_type: str,\n payload: str,\n cache_key: str,\n subscribable_object: Optional[Union[\"Order\", \"Checkout\"]],\n) -> Dict[str, List[ExcludedShippingMethod]]:\n \"\"\"Return data of all excluded shipping methods.\n\n The data will be fetched from the cache. If missing it will fetch it from all\n defined webhooks by calling a request to each of them one by one.\n \"\"\"\n cached_data = cache.get(cache_key)\n if cached_data:\n cached_payload, excluded_shipping_methods = cached_data\n if (payload == cached_payload) or _compare_order_payloads(\n payload, cached_payload\n ):\n return parse_excluded_shipping_methods(excluded_shipping_methods)\n\n excluded_methods = []\n # Gather responses from webhooks\n for webhook in webhooks:\n if not webhook:\n continue\n response_data = trigger_webhook_sync(\n event_type,\n payload,\n webhook,\n subscribable_object=subscribable_object,\n timeout=settings.WEBHOOK_SYNC_TIMEOUT,\n )\n if response_data:\n excluded_methods.extend(\n get_excluded_shipping_methods_from_response(response_data)\n )\n cache.set(cache_key, (payload, excluded_methods), CACHE_EXCLUDED_SHIPPING_TIME)\n return parse_excluded_shipping_methods(excluded_methods)\n\n\ndef get_excluded_shipping_data(\n event_type: str,\n previous_value: List[ExcludedShippingMethod],\n payload_fun: Callable[[], str],\n cache_key: str,\n subscribable_object: Optional[Union[\"Order\", \"Checkout\"]],\n) -> List[ExcludedShippingMethod]:\n \"\"\"Exclude not allowed shipping methods by sync webhook.\n\n Fetch excluded shipping methods from sync webhooks and return them as a list of\n excluded shipping methods.\n The function uses a cache_key to reduce the number of\n requests which we call to the external APIs. In case when we have the same payload\n in a cache as we're going to send now, we will skip an additional request and use\n the response fetched from cache.\n The function will fetch the payload only in the case that we have any defined\n webhook.\n \"\"\"\n\n excluded_methods_map: Dict[str, List[ExcludedShippingMethod]] = defaultdict(list)\n webhooks = get_webhooks_for_event(event_type)\n if webhooks:\n payload = payload_fun()\n\n excluded_methods_map = get_excluded_shipping_methods_or_fetch(\n webhooks, event_type, payload, cache_key, subscribable_object\n )\n\n # Gather responses for previous plugins\n for method in previous_value:\n excluded_methods_map[method.id].append(method)\n\n # Return a list of excluded methods, unique by id\n excluded_methods = []\n for method_id, methods in excluded_methods_map.items():\n reason = None\n if reasons := [m.reason for m in methods if m.reason]:\n reason = \" \".join(reasons)\n excluded_methods.append(ExcludedShippingMethod(id=method_id, reason=reason))\n return excluded_methods\n\n\ndef get_excluded_shipping_methods_from_response(\n response_data: dict,\n) -> List[dict]:\n excluded_methods = []\n for method_data in response_data.get(\"excluded_methods\", []):\n try:\n type_name, method_id = from_global_id_or_error(method_data[\"id\"])\n if type_name not in (APP_ID_PREFIX, str(ShippingMethod)):\n logger.warning(\n \"Invalid type received. Expected ShippingMethod, got %s\", type_name\n )\n continue\n\n except (KeyError, ValueError, TypeError, GraphQLError) as e:\n logger.warning(\"Malformed ShippingMethod id was provided: %s\", e)\n continue\n excluded_methods.append(\n {\"id\": method_id, \"reason\": method_data.get(\"reason\", \"\")}\n )\n return excluded_methods\n\n\ndef parse_excluded_shipping_methods(\n excluded_methods: List[dict],\n) -> Dict[str, List[ExcludedShippingMethod]]:\n excluded_methods_map = defaultdict(list)\n for excluded_method in excluded_methods:\n method_id = excluded_method[\"id\"]\n excluded_methods_map[method_id].append(\n ExcludedShippingMethod(\n id=method_id, reason=excluded_method.get(\"reason\", \"\")\n )\n )\n return excluded_methods_map\n\n\ndef get_cache_data_for_shipping_list_methods_for_checkout(payload: str) -> dict:\n key_data = json.loads(payload)\n\n # drop fields that change between requests but are not relevant for cache key\n key_data[0].pop(\"last_change\")\n key_data[0][\"meta\"].pop(\"issued_at\")\n return key_data\n", "path": "saleor/plugins/webhook/shipping.py"}]}
2,682
375
gh_patches_debug_34495
rasdani/github-patches
git_diff
twisted__twisted-850
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add a --add-header flag to twist web |[<img alt="alex's avatar" src="https://avatars.githubusercontent.com/u/772?s=50" width="50" height="50">](https://github.com/alex)| @alex reported| |-|-| |Trac ID|trac#9241| |Type|enhancement| |Created|2017-07-28 12:17:46Z| This would make it easy to add headers to all responses. This is useful for things like "adding an HSTS header to all responses in a static web app, without writing code". <details><summary>Searchable metadata</summary> ``` trac-id__9241 9241 type__enhancement enhancement reporter__alex alex priority__normal normal milestone__None None branch__ branch_author__ status__closed closed resolution__fixed fixed component__core core keywords__None None time__1501244266950695 1501244266950695 changetime__1501595055861628 1501595055861628 version__None None owner__Alex Alex ``` </details> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/twisted/web/tap.py` Content: ``` 1 # -*- test-case-name: twisted.web.test.test_tap -*- 2 # Copyright (c) Twisted Matrix Laboratories. 3 # See LICENSE for details. 4 5 """ 6 Support for creating a service which runs a web server. 7 """ 8 9 from __future__ import absolute_import, division 10 11 import os 12 13 from twisted.application import internet, service, strports 14 from twisted.internet import interfaces, reactor 15 from twisted.python import usage, reflect, threadpool 16 from twisted.spread import pb 17 from twisted.web import distrib 18 from twisted.web import server, static, script, demo, wsgi 19 from twisted.web import twcgi 20 21 22 23 class Options(usage.Options): 24 """ 25 Define the options accepted by the I{twistd web} plugin. 26 """ 27 synopsis = "[web options]" 28 29 optParameters = [["port", "p", None, "strports description of the port to " 30 "start the server on."], 31 ["logfile", "l", None, 32 "Path to web CLF (Combined Log Format) log file."], 33 ["https", None, None, 34 "Port to listen on for Secure HTTP."], 35 ["certificate", "c", "server.pem", 36 "SSL certificate to use for HTTPS. "], 37 ["privkey", "k", "server.pem", 38 "SSL certificate to use for HTTPS."], 39 ] 40 41 optFlags = [ 42 ["notracebacks", "n", ( 43 "Do not display tracebacks in broken web pages. Displaying " 44 "tracebacks to users may be security risk!")], 45 ] 46 47 optFlags.append([ 48 "personal", "", 49 "Instead of generating a webserver, generate a " 50 "ResourcePublisher which listens on the port given by " 51 "--port, or ~/%s " % (distrib.UserDirectory.userSocketName,) + 52 "if --port is not specified."]) 53 54 compData = usage.Completions( 55 optActions={"logfile" : usage.CompleteFiles("*.log"), 56 "certificate" : usage.CompleteFiles("*.pem"), 57 "privkey" : usage.CompleteFiles("*.pem")} 58 ) 59 60 longdesc = """\ 61 This starts a webserver. If you specify no arguments, it will be a 62 demo webserver that has the Test class from twisted.web.demo in it.""" 63 64 def __init__(self): 65 usage.Options.__init__(self) 66 self['indexes'] = [] 67 self['root'] = None 68 69 70 def opt_index(self, indexName): 71 """ 72 Add the name of a file used to check for directory indexes. 73 [default: index, index.html] 74 """ 75 self['indexes'].append(indexName) 76 77 opt_i = opt_index 78 79 80 def opt_user(self): 81 """ 82 Makes a server with ~/public_html and ~/.twistd-web-pb support for 83 users. 84 """ 85 self['root'] = distrib.UserDirectory() 86 87 opt_u = opt_user 88 89 90 def opt_path(self, path): 91 """ 92 <path> is either a specific file or a directory to be set as the root 93 of the web server. Use this if you have a directory full of HTML, cgi, 94 epy, or rpy files or any other files that you want to be served up raw. 95 """ 96 self['root'] = static.File(os.path.abspath(path)) 97 self['root'].processors = { 98 '.epy': script.PythonScript, 99 '.rpy': script.ResourceScript, 100 } 101 self['root'].processors['.cgi'] = twcgi.CGIScript 102 103 104 def opt_processor(self, proc): 105 """ 106 `ext=class' where `class' is added as a Processor for files ending 107 with `ext'. 108 """ 109 if not isinstance(self['root'], static.File): 110 raise usage.UsageError( 111 "You can only use --processor after --path.") 112 ext, klass = proc.split('=', 1) 113 self['root'].processors[ext] = reflect.namedClass(klass) 114 115 116 def opt_class(self, className): 117 """ 118 Create a Resource subclass with a zero-argument constructor. 119 """ 120 classObj = reflect.namedClass(className) 121 self['root'] = classObj() 122 123 124 def opt_resource_script(self, name): 125 """ 126 An .rpy file to be used as the root resource of the webserver. 127 """ 128 self['root'] = script.ResourceScriptWrapper(name) 129 130 131 def opt_wsgi(self, name): 132 """ 133 The FQPN of a WSGI application object to serve as the root resource of 134 the webserver. 135 """ 136 try: 137 application = reflect.namedAny(name) 138 except (AttributeError, ValueError): 139 raise usage.UsageError("No such WSGI application: %r" % (name,)) 140 pool = threadpool.ThreadPool() 141 reactor.callWhenRunning(pool.start) 142 reactor.addSystemEventTrigger('after', 'shutdown', pool.stop) 143 self['root'] = wsgi.WSGIResource(reactor, pool, application) 144 145 146 def opt_mime_type(self, defaultType): 147 """ 148 Specify the default mime-type for static files. 149 """ 150 if not isinstance(self['root'], static.File): 151 raise usage.UsageError( 152 "You can only use --mime_type after --path.") 153 self['root'].defaultType = defaultType 154 opt_m = opt_mime_type 155 156 157 def opt_allow_ignore_ext(self): 158 """ 159 Specify whether or not a request for 'foo' should return 'foo.ext' 160 """ 161 if not isinstance(self['root'], static.File): 162 raise usage.UsageError("You can only use --allow_ignore_ext " 163 "after --path.") 164 self['root'].ignoreExt('*') 165 166 167 def opt_ignore_ext(self, ext): 168 """ 169 Specify an extension to ignore. These will be processed in order. 170 """ 171 if not isinstance(self['root'], static.File): 172 raise usage.UsageError("You can only use --ignore_ext " 173 "after --path.") 174 self['root'].ignoreExt(ext) 175 176 177 def postOptions(self): 178 """ 179 Set up conditional defaults and check for dependencies. 180 181 If SSL is not available but an HTTPS server was configured, raise a 182 L{UsageError} indicating that this is not possible. 183 184 If no server port was supplied, select a default appropriate for the 185 other options supplied. 186 """ 187 if self['https']: 188 try: 189 reflect.namedModule('OpenSSL.SSL') 190 except ImportError: 191 raise usage.UsageError("SSL support not installed") 192 if self['port'] is None: 193 if self['personal']: 194 path = os.path.expanduser( 195 os.path.join('~', distrib.UserDirectory.userSocketName)) 196 self['port'] = 'unix:' + path 197 else: 198 self['port'] = 'tcp:8080' 199 200 201 202 def makePersonalServerFactory(site): 203 """ 204 Create and return a factory which will respond to I{distrib} requests 205 against the given site. 206 207 @type site: L{twisted.web.server.Site} 208 @rtype: L{twisted.internet.protocol.Factory} 209 """ 210 return pb.PBServerFactory(distrib.ResourcePublisher(site)) 211 212 213 214 def makeService(config): 215 s = service.MultiService() 216 if config['root']: 217 root = config['root'] 218 if config['indexes']: 219 config['root'].indexNames = config['indexes'] 220 else: 221 # This really ought to be web.Admin or something 222 root = demo.Test() 223 224 if isinstance(root, static.File): 225 root.registry.setComponent(interfaces.IServiceCollection, s) 226 227 if config['logfile']: 228 site = server.Site(root, logPath=config['logfile']) 229 else: 230 site = server.Site(root) 231 232 site.displayTracebacks = not config["notracebacks"] 233 234 if config['personal']: 235 personal = strports.service( 236 config['port'], makePersonalServerFactory(site)) 237 personal.setServiceParent(s) 238 else: 239 if config['https']: 240 from twisted.internet.ssl import DefaultOpenSSLContextFactory 241 i = internet.SSLServer(int(config['https']), site, 242 DefaultOpenSSLContextFactory(config['privkey'], 243 config['certificate'])) 244 i.setServiceParent(s) 245 strports.service(config['port'], site).setServiceParent(s) 246 247 return s 248 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/twisted/web/tap.py b/src/twisted/web/tap.py --- a/src/twisted/web/tap.py +++ b/src/twisted/web/tap.py @@ -15,7 +15,7 @@ from twisted.python import usage, reflect, threadpool from twisted.spread import pb from twisted.web import distrib -from twisted.web import server, static, script, demo, wsgi +from twisted.web import resource, server, static, script, demo, wsgi from twisted.web import twcgi @@ -65,6 +65,7 @@ usage.Options.__init__(self) self['indexes'] = [] self['root'] = None + self['extraHeaders'] = [] def opt_index(self, indexName): @@ -174,6 +175,15 @@ self['root'].ignoreExt(ext) + def opt_add_header(self, header): + """ + Specify an additional header to be included in all responses. Specified + as "HeaderName: HeaderValue". + """ + name, value = header.split(':', 1) + self['extraHeaders'].append((name.strip(), value.strip())) + + def postOptions(self): """ Set up conditional defaults and check for dependencies. @@ -211,6 +221,19 @@ +class _AddHeadersResource(resource.Resource): + def __init__(self, originalResource, headers): + self._originalResource = originalResource + self._headers = headers + + + def getChildWithDefault(self, name, request): + for k, v in self._headers: + request.responseHeaders.addRawHeader(k, v) + return self._originalResource.getChildWithDefault(name, request) + + + def makeService(config): s = service.MultiService() if config['root']: @@ -224,6 +247,9 @@ if isinstance(root, static.File): root.registry.setComponent(interfaces.IServiceCollection, s) + if config['extraHeaders']: + root = _AddHeadersResource(root, config['extraHeaders']) + if config['logfile']: site = server.Site(root, logPath=config['logfile']) else:
{"golden_diff": "diff --git a/src/twisted/web/tap.py b/src/twisted/web/tap.py\n--- a/src/twisted/web/tap.py\n+++ b/src/twisted/web/tap.py\n@@ -15,7 +15,7 @@\n from twisted.python import usage, reflect, threadpool\n from twisted.spread import pb\n from twisted.web import distrib\n-from twisted.web import server, static, script, demo, wsgi\n+from twisted.web import resource, server, static, script, demo, wsgi\n from twisted.web import twcgi\n \n \n@@ -65,6 +65,7 @@\n usage.Options.__init__(self)\n self['indexes'] = []\n self['root'] = None\n+ self['extraHeaders'] = []\n \n \n def opt_index(self, indexName):\n@@ -174,6 +175,15 @@\n self['root'].ignoreExt(ext)\n \n \n+ def opt_add_header(self, header):\n+ \"\"\"\n+ Specify an additional header to be included in all responses. Specified\n+ as \"HeaderName: HeaderValue\".\n+ \"\"\"\n+ name, value = header.split(':', 1)\n+ self['extraHeaders'].append((name.strip(), value.strip()))\n+\n+\n def postOptions(self):\n \"\"\"\n Set up conditional defaults and check for dependencies.\n@@ -211,6 +221,19 @@\n \n \n \n+class _AddHeadersResource(resource.Resource):\n+ def __init__(self, originalResource, headers):\n+ self._originalResource = originalResource\n+ self._headers = headers\n+\n+\n+ def getChildWithDefault(self, name, request):\n+ for k, v in self._headers:\n+ request.responseHeaders.addRawHeader(k, v)\n+ return self._originalResource.getChildWithDefault(name, request)\n+\n+\n+\n def makeService(config):\n s = service.MultiService()\n if config['root']:\n@@ -224,6 +247,9 @@\n if isinstance(root, static.File):\n root.registry.setComponent(interfaces.IServiceCollection, s)\n \n+ if config['extraHeaders']:\n+ root = _AddHeadersResource(root, config['extraHeaders'])\n+\n if config['logfile']:\n site = server.Site(root, logPath=config['logfile'])\n else:\n", "issue": "Add a --add-header flag to twist web\n|[<img alt=\"alex's avatar\" src=\"https://avatars.githubusercontent.com/u/772?s=50\" width=\"50\" height=\"50\">](https://github.com/alex)| @alex reported|\n|-|-|\n|Trac ID|trac#9241|\n|Type|enhancement|\n|Created|2017-07-28 12:17:46Z|\n\nThis would make it easy to add headers to all responses. This is useful for things like \"adding an HSTS header to all responses in a static web app, without writing code\".\n\n<details><summary>Searchable metadata</summary>\n\n```\ntrac-id__9241 9241\ntype__enhancement enhancement\nreporter__alex alex\npriority__normal normal\nmilestone__None None\nbranch__ \nbranch_author__ \nstatus__closed closed\nresolution__fixed fixed\ncomponent__core core\nkeywords__None None\ntime__1501244266950695 1501244266950695\nchangetime__1501595055861628 1501595055861628\nversion__None None\nowner__Alex Alex\n\n```\n</details>\n\n", "before_files": [{"content": "# -*- test-case-name: twisted.web.test.test_tap -*-\n# Copyright (c) Twisted Matrix Laboratories.\n# See LICENSE for details.\n\n\"\"\"\nSupport for creating a service which runs a web server.\n\"\"\"\n\nfrom __future__ import absolute_import, division\n\nimport os\n\nfrom twisted.application import internet, service, strports\nfrom twisted.internet import interfaces, reactor\nfrom twisted.python import usage, reflect, threadpool\nfrom twisted.spread import pb\nfrom twisted.web import distrib\nfrom twisted.web import server, static, script, demo, wsgi\nfrom twisted.web import twcgi\n\n\n\nclass Options(usage.Options):\n \"\"\"\n Define the options accepted by the I{twistd web} plugin.\n \"\"\"\n synopsis = \"[web options]\"\n\n optParameters = [[\"port\", \"p\", None, \"strports description of the port to \"\n \"start the server on.\"],\n [\"logfile\", \"l\", None,\n \"Path to web CLF (Combined Log Format) log file.\"],\n [\"https\", None, None,\n \"Port to listen on for Secure HTTP.\"],\n [\"certificate\", \"c\", \"server.pem\",\n \"SSL certificate to use for HTTPS. \"],\n [\"privkey\", \"k\", \"server.pem\",\n \"SSL certificate to use for HTTPS.\"],\n ]\n\n optFlags = [\n [\"notracebacks\", \"n\", (\n \"Do not display tracebacks in broken web pages. Displaying \"\n \"tracebacks to users may be security risk!\")],\n ]\n\n optFlags.append([\n \"personal\", \"\",\n \"Instead of generating a webserver, generate a \"\n \"ResourcePublisher which listens on the port given by \"\n \"--port, or ~/%s \" % (distrib.UserDirectory.userSocketName,) +\n \"if --port is not specified.\"])\n\n compData = usage.Completions(\n optActions={\"logfile\" : usage.CompleteFiles(\"*.log\"),\n \"certificate\" : usage.CompleteFiles(\"*.pem\"),\n \"privkey\" : usage.CompleteFiles(\"*.pem\")}\n )\n\n longdesc = \"\"\"\\\nThis starts a webserver. If you specify no arguments, it will be a\ndemo webserver that has the Test class from twisted.web.demo in it.\"\"\"\n\n def __init__(self):\n usage.Options.__init__(self)\n self['indexes'] = []\n self['root'] = None\n\n\n def opt_index(self, indexName):\n \"\"\"\n Add the name of a file used to check for directory indexes.\n [default: index, index.html]\n \"\"\"\n self['indexes'].append(indexName)\n\n opt_i = opt_index\n\n\n def opt_user(self):\n \"\"\"\n Makes a server with ~/public_html and ~/.twistd-web-pb support for\n users.\n \"\"\"\n self['root'] = distrib.UserDirectory()\n\n opt_u = opt_user\n\n\n def opt_path(self, path):\n \"\"\"\n <path> is either a specific file or a directory to be set as the root\n of the web server. Use this if you have a directory full of HTML, cgi,\n epy, or rpy files or any other files that you want to be served up raw.\n \"\"\"\n self['root'] = static.File(os.path.abspath(path))\n self['root'].processors = {\n '.epy': script.PythonScript,\n '.rpy': script.ResourceScript,\n }\n self['root'].processors['.cgi'] = twcgi.CGIScript\n\n\n def opt_processor(self, proc):\n \"\"\"\n `ext=class' where `class' is added as a Processor for files ending\n with `ext'.\n \"\"\"\n if not isinstance(self['root'], static.File):\n raise usage.UsageError(\n \"You can only use --processor after --path.\")\n ext, klass = proc.split('=', 1)\n self['root'].processors[ext] = reflect.namedClass(klass)\n\n\n def opt_class(self, className):\n \"\"\"\n Create a Resource subclass with a zero-argument constructor.\n \"\"\"\n classObj = reflect.namedClass(className)\n self['root'] = classObj()\n\n\n def opt_resource_script(self, name):\n \"\"\"\n An .rpy file to be used as the root resource of the webserver.\n \"\"\"\n self['root'] = script.ResourceScriptWrapper(name)\n\n\n def opt_wsgi(self, name):\n \"\"\"\n The FQPN of a WSGI application object to serve as the root resource of\n the webserver.\n \"\"\"\n try:\n application = reflect.namedAny(name)\n except (AttributeError, ValueError):\n raise usage.UsageError(\"No such WSGI application: %r\" % (name,))\n pool = threadpool.ThreadPool()\n reactor.callWhenRunning(pool.start)\n reactor.addSystemEventTrigger('after', 'shutdown', pool.stop)\n self['root'] = wsgi.WSGIResource(reactor, pool, application)\n\n\n def opt_mime_type(self, defaultType):\n \"\"\"\n Specify the default mime-type for static files.\n \"\"\"\n if not isinstance(self['root'], static.File):\n raise usage.UsageError(\n \"You can only use --mime_type after --path.\")\n self['root'].defaultType = defaultType\n opt_m = opt_mime_type\n\n\n def opt_allow_ignore_ext(self):\n \"\"\"\n Specify whether or not a request for 'foo' should return 'foo.ext'\n \"\"\"\n if not isinstance(self['root'], static.File):\n raise usage.UsageError(\"You can only use --allow_ignore_ext \"\n \"after --path.\")\n self['root'].ignoreExt('*')\n\n\n def opt_ignore_ext(self, ext):\n \"\"\"\n Specify an extension to ignore. These will be processed in order.\n \"\"\"\n if not isinstance(self['root'], static.File):\n raise usage.UsageError(\"You can only use --ignore_ext \"\n \"after --path.\")\n self['root'].ignoreExt(ext)\n\n\n def postOptions(self):\n \"\"\"\n Set up conditional defaults and check for dependencies.\n\n If SSL is not available but an HTTPS server was configured, raise a\n L{UsageError} indicating that this is not possible.\n\n If no server port was supplied, select a default appropriate for the\n other options supplied.\n \"\"\"\n if self['https']:\n try:\n reflect.namedModule('OpenSSL.SSL')\n except ImportError:\n raise usage.UsageError(\"SSL support not installed\")\n if self['port'] is None:\n if self['personal']:\n path = os.path.expanduser(\n os.path.join('~', distrib.UserDirectory.userSocketName))\n self['port'] = 'unix:' + path\n else:\n self['port'] = 'tcp:8080'\n\n\n\ndef makePersonalServerFactory(site):\n \"\"\"\n Create and return a factory which will respond to I{distrib} requests\n against the given site.\n\n @type site: L{twisted.web.server.Site}\n @rtype: L{twisted.internet.protocol.Factory}\n \"\"\"\n return pb.PBServerFactory(distrib.ResourcePublisher(site))\n\n\n\ndef makeService(config):\n s = service.MultiService()\n if config['root']:\n root = config['root']\n if config['indexes']:\n config['root'].indexNames = config['indexes']\n else:\n # This really ought to be web.Admin or something\n root = demo.Test()\n\n if isinstance(root, static.File):\n root.registry.setComponent(interfaces.IServiceCollection, s)\n\n if config['logfile']:\n site = server.Site(root, logPath=config['logfile'])\n else:\n site = server.Site(root)\n\n site.displayTracebacks = not config[\"notracebacks\"]\n\n if config['personal']:\n personal = strports.service(\n config['port'], makePersonalServerFactory(site))\n personal.setServiceParent(s)\n else:\n if config['https']:\n from twisted.internet.ssl import DefaultOpenSSLContextFactory\n i = internet.SSLServer(int(config['https']), site,\n DefaultOpenSSLContextFactory(config['privkey'],\n config['certificate']))\n i.setServiceParent(s)\n strports.service(config['port'], site).setServiceParent(s)\n\n return s\n", "path": "src/twisted/web/tap.py"}], "after_files": [{"content": "# -*- test-case-name: twisted.web.test.test_tap -*-\n# Copyright (c) Twisted Matrix Laboratories.\n# See LICENSE for details.\n\n\"\"\"\nSupport for creating a service which runs a web server.\n\"\"\"\n\nfrom __future__ import absolute_import, division\n\nimport os\n\nfrom twisted.application import internet, service, strports\nfrom twisted.internet import interfaces, reactor\nfrom twisted.python import usage, reflect, threadpool\nfrom twisted.spread import pb\nfrom twisted.web import distrib\nfrom twisted.web import resource, server, static, script, demo, wsgi\nfrom twisted.web import twcgi\n\n\n\nclass Options(usage.Options):\n \"\"\"\n Define the options accepted by the I{twistd web} plugin.\n \"\"\"\n synopsis = \"[web options]\"\n\n optParameters = [[\"port\", \"p\", None, \"strports description of the port to \"\n \"start the server on.\"],\n [\"logfile\", \"l\", None,\n \"Path to web CLF (Combined Log Format) log file.\"],\n [\"https\", None, None,\n \"Port to listen on for Secure HTTP.\"],\n [\"certificate\", \"c\", \"server.pem\",\n \"SSL certificate to use for HTTPS. \"],\n [\"privkey\", \"k\", \"server.pem\",\n \"SSL certificate to use for HTTPS.\"],\n ]\n\n optFlags = [\n [\"notracebacks\", \"n\", (\n \"Do not display tracebacks in broken web pages. Displaying \"\n \"tracebacks to users may be security risk!\")],\n ]\n\n optFlags.append([\n \"personal\", \"\",\n \"Instead of generating a webserver, generate a \"\n \"ResourcePublisher which listens on the port given by \"\n \"--port, or ~/%s \" % (distrib.UserDirectory.userSocketName,) +\n \"if --port is not specified.\"])\n\n compData = usage.Completions(\n optActions={\"logfile\" : usage.CompleteFiles(\"*.log\"),\n \"certificate\" : usage.CompleteFiles(\"*.pem\"),\n \"privkey\" : usage.CompleteFiles(\"*.pem\")}\n )\n\n longdesc = \"\"\"\\\nThis starts a webserver. If you specify no arguments, it will be a\ndemo webserver that has the Test class from twisted.web.demo in it.\"\"\"\n\n def __init__(self):\n usage.Options.__init__(self)\n self['indexes'] = []\n self['root'] = None\n self['extraHeaders'] = []\n\n\n def opt_index(self, indexName):\n \"\"\"\n Add the name of a file used to check for directory indexes.\n [default: index, index.html]\n \"\"\"\n self['indexes'].append(indexName)\n\n opt_i = opt_index\n\n\n def opt_user(self):\n \"\"\"\n Makes a server with ~/public_html and ~/.twistd-web-pb support for\n users.\n \"\"\"\n self['root'] = distrib.UserDirectory()\n\n opt_u = opt_user\n\n\n def opt_path(self, path):\n \"\"\"\n <path> is either a specific file or a directory to be set as the root\n of the web server. Use this if you have a directory full of HTML, cgi,\n epy, or rpy files or any other files that you want to be served up raw.\n \"\"\"\n self['root'] = static.File(os.path.abspath(path))\n self['root'].processors = {\n '.epy': script.PythonScript,\n '.rpy': script.ResourceScript,\n }\n self['root'].processors['.cgi'] = twcgi.CGIScript\n\n\n def opt_processor(self, proc):\n \"\"\"\n `ext=class' where `class' is added as a Processor for files ending\n with `ext'.\n \"\"\"\n if not isinstance(self['root'], static.File):\n raise usage.UsageError(\n \"You can only use --processor after --path.\")\n ext, klass = proc.split('=', 1)\n self['root'].processors[ext] = reflect.namedClass(klass)\n\n\n def opt_class(self, className):\n \"\"\"\n Create a Resource subclass with a zero-argument constructor.\n \"\"\"\n classObj = reflect.namedClass(className)\n self['root'] = classObj()\n\n\n def opt_resource_script(self, name):\n \"\"\"\n An .rpy file to be used as the root resource of the webserver.\n \"\"\"\n self['root'] = script.ResourceScriptWrapper(name)\n\n\n def opt_wsgi(self, name):\n \"\"\"\n The FQPN of a WSGI application object to serve as the root resource of\n the webserver.\n \"\"\"\n try:\n application = reflect.namedAny(name)\n except (AttributeError, ValueError):\n raise usage.UsageError(\"No such WSGI application: %r\" % (name,))\n pool = threadpool.ThreadPool()\n reactor.callWhenRunning(pool.start)\n reactor.addSystemEventTrigger('after', 'shutdown', pool.stop)\n self['root'] = wsgi.WSGIResource(reactor, pool, application)\n\n\n def opt_mime_type(self, defaultType):\n \"\"\"\n Specify the default mime-type for static files.\n \"\"\"\n if not isinstance(self['root'], static.File):\n raise usage.UsageError(\n \"You can only use --mime_type after --path.\")\n self['root'].defaultType = defaultType\n opt_m = opt_mime_type\n\n\n def opt_allow_ignore_ext(self):\n \"\"\"\n Specify whether or not a request for 'foo' should return 'foo.ext'\n \"\"\"\n if not isinstance(self['root'], static.File):\n raise usage.UsageError(\"You can only use --allow_ignore_ext \"\n \"after --path.\")\n self['root'].ignoreExt('*')\n\n\n def opt_ignore_ext(self, ext):\n \"\"\"\n Specify an extension to ignore. These will be processed in order.\n \"\"\"\n if not isinstance(self['root'], static.File):\n raise usage.UsageError(\"You can only use --ignore_ext \"\n \"after --path.\")\n self['root'].ignoreExt(ext)\n\n\n def opt_add_header(self, header):\n \"\"\"\n Specify an additional header to be included in all responses. Specified\n as \"HeaderName: HeaderValue\".\n \"\"\"\n name, value = header.split(':', 1)\n self['extraHeaders'].append((name.strip(), value.strip()))\n\n\n def postOptions(self):\n \"\"\"\n Set up conditional defaults and check for dependencies.\n\n If SSL is not available but an HTTPS server was configured, raise a\n L{UsageError} indicating that this is not possible.\n\n If no server port was supplied, select a default appropriate for the\n other options supplied.\n \"\"\"\n if self['https']:\n try:\n reflect.namedModule('OpenSSL.SSL')\n except ImportError:\n raise usage.UsageError(\"SSL support not installed\")\n if self['port'] is None:\n if self['personal']:\n path = os.path.expanduser(\n os.path.join('~', distrib.UserDirectory.userSocketName))\n self['port'] = 'unix:' + path\n else:\n self['port'] = 'tcp:8080'\n\n\n\ndef makePersonalServerFactory(site):\n \"\"\"\n Create and return a factory which will respond to I{distrib} requests\n against the given site.\n\n @type site: L{twisted.web.server.Site}\n @rtype: L{twisted.internet.protocol.Factory}\n \"\"\"\n return pb.PBServerFactory(distrib.ResourcePublisher(site))\n\n\n\nclass _AddHeadersResource(resource.Resource):\n def __init__(self, originalResource, headers):\n self._originalResource = originalResource\n self._headers = headers\n\n\n def getChildWithDefault(self, name, request):\n for k, v in self._headers:\n request.responseHeaders.addRawHeader(k, v)\n return self._originalResource.getChildWithDefault(name, request)\n\n\n\ndef makeService(config):\n s = service.MultiService()\n if config['root']:\n root = config['root']\n if config['indexes']:\n config['root'].indexNames = config['indexes']\n else:\n # This really ought to be web.Admin or something\n root = demo.Test()\n\n if isinstance(root, static.File):\n root.registry.setComponent(interfaces.IServiceCollection, s)\n\n if config['extraHeaders']:\n root = _AddHeadersResource(root, config['extraHeaders'])\n\n if config['logfile']:\n site = server.Site(root, logPath=config['logfile'])\n else:\n site = server.Site(root)\n\n site.displayTracebacks = not config[\"notracebacks\"]\n\n if config['personal']:\n personal = strports.service(\n config['port'], makePersonalServerFactory(site))\n personal.setServiceParent(s)\n else:\n if config['https']:\n from twisted.internet.ssl import DefaultOpenSSLContextFactory\n i = internet.SSLServer(int(config['https']), site,\n DefaultOpenSSLContextFactory(config['privkey'],\n config['certificate']))\n i.setServiceParent(s)\n strports.service(config['port'], site).setServiceParent(s)\n\n return s\n", "path": "src/twisted/web/tap.py"}]}
2,979
500
gh_patches_debug_11251
rasdani/github-patches
git_diff
yt-project__yt-4694
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- DEP: (yt 4.3) set upper limit on runtime requirement on numpy (<2.0) This issue is a reminder that, if yt 4.3.0 is released, as scheduled (sometime about end of September), *before* numpy 2.0 (currently aimed at December 2023), we should update the runtime requirement on the backport branch (basically replaying https://github.com/yt-project/yt/pull/4573). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `doc/source/conf.py` Content: ``` 1 # 2 # yt documentation build configuration file, created by 3 # sphinx-quickstart on Tue Jan 11 09:46:53 2011. 4 # 5 # This file is execfile()d with the current directory set to its containing dir. 6 # 7 # Note that not all possible configuration values are present in this 8 # autogenerated file. 9 # 10 # All configuration values have a default; values that are commented out 11 # serve to show the default. 12 13 import glob 14 import os 15 import sys 16 17 import sphinx_bootstrap_theme 18 19 on_rtd = os.environ.get("READTHEDOCS", None) == "True" 20 21 # If extensions (or modules to document with autodoc) are in another directory, 22 # add these directories to sys.path here. If the directory is relative to the 23 # documentation root, use os.path.abspath to make it absolute, like shown here. 24 sys.path.insert(0, os.path.abspath("../extensions/")) 25 26 # -- General configuration ----------------------------------------------------- 27 28 # If your documentation needs a minimal Sphinx version, state it here. 29 # needs_sphinx = '1.0' 30 31 # Add any Sphinx extension module names here, as strings. They can be extensions 32 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 33 extensions = [ 34 "sphinx.ext.autodoc", 35 "sphinx.ext.intersphinx", 36 "sphinx.ext.mathjax", 37 "sphinx.ext.viewcode", 38 "sphinx.ext.napoleon", 39 "yt_cookbook", 40 "yt_colormaps", 41 "config_help", 42 "yt_showfields", 43 "nbsphinx", 44 ] 45 46 if not on_rtd: 47 extensions.append("sphinx.ext.autosummary") 48 extensions.append("pythonscript_sphinxext") 49 50 # Add any paths that contain templates here, relative to this directory. 51 templates_path = ["_templates"] 52 53 # The suffix of source filenames. 54 source_suffix = ".rst" 55 56 # The encoding of source files. 57 # source_encoding = 'utf-8-sig' 58 59 # The master toctree document. 60 master_doc = "index" 61 62 # General information about the project. 63 project = "The yt Project" 64 copyright = "2013-2021, the yt Project" 65 66 # The version info for the project you're documenting, acts as replacement for 67 # |version| and |release|, also used in various other places throughout the 68 # built documents. 69 # 70 # The short X.Y version. 71 version = "4.3" 72 # The full version, including alpha/beta/rc tags. 73 release = "4.3-dev" 74 75 # The language for content autogenerated by Sphinx. Refer to documentation 76 # for a list of supported languages. 77 # language = None 78 79 # There are two options for replacing |today|: either, you set today to some 80 # non-false value, then it is used: 81 # today = '' 82 # Else, today_fmt is used as the format for a strftime call. 83 # today_fmt = '%B %d, %Y' 84 85 # List of patterns, relative to source directory, that match files and 86 # directories to ignore when looking for source files. 87 exclude_patterns = [] 88 89 # The reST default role (used for this markup: `text`) to use for all documents. 90 # default_role = None 91 92 # If true, '()' will be appended to :func: etc. cross-reference text. 93 # add_function_parentheses = True 94 95 # If true, the current module name will be prepended to all description 96 # unit titles (such as .. function::). 97 # add_module_names = True 98 99 # If true, sectionauthor and moduleauthor directives will be shown in the 100 # output. They are ignored by default. 101 show_authors = False 102 103 # The name of the Pygments (syntax highlighting) style to use. 104 pygments_style = "sphinx" 105 106 # A list of ignored prefixes for module index sorting. 107 # modindex_common_prefix = [] 108 109 110 # -- Options for HTML output --------------------------------------------------- 111 112 # The theme to use for HTML and HTML Help pages. See the documentation for 113 # a list of builtin themes. 114 html_theme = "bootstrap" 115 html_theme_path = sphinx_bootstrap_theme.get_html_theme_path() 116 117 # Theme options are theme-specific and customize the look and feel of a theme 118 # further. For a list of options available for each theme, see the 119 # documentation. 120 html_theme_options = dict( 121 bootstrap_version="3", 122 bootswatch_theme="readable", 123 navbar_links=[ 124 ("", ""), # see https://github.com/yt-project/yt/pull/3423 125 ("How to get help", "help/index"), 126 ("Quickstart notebooks", "quickstart/index"), 127 ("Cookbook", "cookbook/index"), 128 ], 129 navbar_sidebarrel=False, 130 globaltoc_depth=2, 131 ) 132 133 # Add any paths that contain custom themes here, relative to this directory. 134 # html_theme_path = [] 135 136 # The name for this set of Sphinx documents. If None, it defaults to 137 # "<project> v<release> documentation". 138 # html_title = None 139 140 # A shorter title for the navigation bar. Default is the same as html_title. 141 # html_short_title = None 142 143 # The name of an image file (relative to this directory) to place at the top 144 # of the sidebar. 145 html_logo = "_static/yt_icon.png" 146 147 # The name of an image file (within the static path) to use as favicon of the 148 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 149 # pixels large. 150 # html_favicon = None 151 152 # Add any paths that contain custom static files (such as style sheets) here, 153 # relative to this directory. They are copied after the builtin static files, 154 # so a file named "default.css" will overwrite the builtin "default.css". 155 html_static_path = ["_static", "analyzing/_static"] 156 157 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 158 # using the given strftime format. 159 # html_last_updated_fmt = '%b %d, %Y' 160 161 # If true, SmartyPants will be used to convert quotes and dashes to 162 # typographically correct entities. 163 # html_use_smartypants = True 164 165 # Custom sidebar templates, maps document names to template names. 166 # html_sidebars = {} 167 168 # Additional templates that should be rendered to pages, maps page names to 169 # template names. 170 # html_additional_pages = {} 171 172 # If false, no module index is generated. 173 html_domain_indices = False 174 175 # If false, no index is generated. 176 html_use_index = True 177 178 # If true, the index is split into individual pages for each letter. 179 # html_split_index = False 180 181 # If true, links to the reST sources are added to the pages. 182 html_show_sourcelink = False 183 184 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 185 # html_show_sphinx = True 186 187 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 188 # html_show_copyright = True 189 190 # If true, an OpenSearch description file will be output, and all pages will 191 # contain a <link> tag referring to it. The value of this option must be the 192 # base URL from which the finished HTML is served. 193 # html_use_opensearch = '' 194 195 # This is the file name suffix for HTML files (e.g. ".xhtml"). 196 # html_file_suffix = None 197 198 # Output file base name for HTML help builder. 199 htmlhelp_basename = "ytdoc" 200 201 202 # -- Options for LaTeX output -------------------------------------------------- 203 204 # The paper size ('letter' or 'a4'). 205 # latex_paper_size = 'letter' 206 207 # The font size ('10pt', '11pt' or '12pt'). 208 # latex_font_size = '10pt' 209 210 # Grouping the document tree into LaTeX files. List of tuples 211 # (source start file, target name, title, author, documentclass [howto/manual]). 212 latex_documents = [ 213 ("index", "yt.tex", "yt Documentation", "The yt Project", "manual"), 214 ] 215 216 # The name of an image file (relative to this directory) to place at the top of 217 # the title page. 218 # latex_logo = None 219 220 # For "manual" documents, if this is true, then toplevel headings are parts, 221 # not chapters. 222 # latex_use_parts = False 223 224 # If true, show page references after internal links. 225 # latex_show_pagerefs = False 226 227 # If true, show URL addresses after external links. 228 # latex_show_urls = False 229 230 # Documents to append as an appendix to all manuals. 231 # latex_appendices = [] 232 233 # If false, no module index is generated. 234 # latex_domain_indices = True 235 236 237 # -- Options for manual page output -------------------------------------------- 238 239 # One entry per manual page. List of tuples 240 # (source start file, name, description, authors, manual section). 241 man_pages = [("index", "yt", "yt Documentation", ["The yt Project"], 1)] 242 243 nbsphinx_allow_errors = True 244 245 # Example configuration for intersphinx: refer to the Python standard library. 246 intersphinx_mapping = { 247 "python": ("https://docs.python.org/3/", None), 248 "ipython": ("https://ipython.readthedocs.io/en/stable/", None), 249 "numpy": ("https://numpy.org/doc/stable/", None), 250 "matplotlib": ("https://matplotlib.org/stable/", None), 251 "astropy": ("https://docs.astropy.org/en/stable", None), 252 "pandas": ("https://pandas.pydata.org/pandas-docs/stable", None), 253 "trident": ("https://trident.readthedocs.io/en/latest/", None), 254 "yt_astro_analysis": ("https://yt-astro-analysis.readthedocs.io/en/latest/", None), 255 "yt_attic": ("https://yt-attic.readthedocs.io/en/latest/", None), 256 "pytest": ("https://docs.pytest.org/en/stable", None), 257 } 258 259 if not on_rtd: 260 autosummary_generate = glob.glob("reference/api/api.rst") 261 262 263 # as of Sphinx 3.1.2 this is the supported way to link custom style sheets 264 def setup(app): 265 app.add_css_file("custom.css") 266 ``` Path: `yt/_version.py` Content: ``` 1 from typing import NamedTuple 2 3 from packaging.version import Version 4 5 __all__ = [ 6 "__version__", 7 "version_info", 8 ] 9 10 __version__ = "4.3.dev0" # keep in sync with pyproject.toml 11 12 13 class VersionTuple(NamedTuple): 14 """ 15 A minimal representation of the current version number 16 that can be used downstream to check the runtime version 17 simply by comparing with builtin tuples, as can be done with 18 the runtime Python version using sys.version_info 19 20 https://docs.python.org/3/library/sys.html#sys.version_info 21 """ 22 23 major: int 24 minor: int 25 micro: int 26 releaselevel: str 27 serial: int 28 29 30 def _parse_to_version_info(version_str: str) -> VersionTuple: 31 # adapted from matplotlib 3.5 32 """ 33 Parse a version string to a namedtuple analogous to sys.version_info. 34 See: 35 https://packaging.pypa.io/en/latest/version.html#packaging.version.parse 36 https://docs.python.org/3/library/sys.html#sys.version_info 37 """ 38 v = Version(version_str) 39 if v.pre is None and v.post is None and v.dev is None: 40 return VersionTuple(v.major, v.minor, v.micro, "final", 0) 41 elif v.dev is not None: 42 return VersionTuple(v.major, v.minor, v.micro, "alpha", v.dev) 43 elif v.pre is not None: 44 releaselevel = {"a": "alpha", "b": "beta", "rc": "candidate"}.get( 45 v.pre[0], "alpha" 46 ) 47 return VersionTuple(v.major, v.minor, v.micro, releaselevel, v.pre[1]) 48 elif v.post is not None: 49 # fallback for v.post: guess-next-dev scheme from setuptools_scm 50 return VersionTuple(v.major, v.minor, v.micro + 1, "alpha", v.post) 51 else: 52 return VersionTuple(v.major, v.minor, v.micro + 1, "alpha", 0) 53 54 55 version_info = _parse_to_version_info(__version__) 56 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/doc/source/conf.py b/doc/source/conf.py --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -70,7 +70,7 @@ # The short X.Y version. version = "4.3" # The full version, including alpha/beta/rc tags. -release = "4.3-dev" +release = "4.3.0" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/yt/_version.py b/yt/_version.py --- a/yt/_version.py +++ b/yt/_version.py @@ -7,7 +7,7 @@ "version_info", ] -__version__ = "4.3.dev0" # keep in sync with pyproject.toml +__version__ = "4.3.0" # keep in sync with pyproject.toml class VersionTuple(NamedTuple):
{"golden_diff": "diff --git a/doc/source/conf.py b/doc/source/conf.py\n--- a/doc/source/conf.py\n+++ b/doc/source/conf.py\n@@ -70,7 +70,7 @@\n # The short X.Y version.\n version = \"4.3\"\n # The full version, including alpha/beta/rc tags.\n-release = \"4.3-dev\"\n+release = \"4.3.0\"\n \n # The language for content autogenerated by Sphinx. Refer to documentation\n # for a list of supported languages.\ndiff --git a/yt/_version.py b/yt/_version.py\n--- a/yt/_version.py\n+++ b/yt/_version.py\n@@ -7,7 +7,7 @@\n \"version_info\",\n ]\n \n-__version__ = \"4.3.dev0\" # keep in sync with pyproject.toml\n+__version__ = \"4.3.0\" # keep in sync with pyproject.toml\n \n \n class VersionTuple(NamedTuple):\n", "issue": "DEP: (yt 4.3) set upper limit on runtime requirement on numpy (<2.0)\nThis issue is a reminder that, if yt 4.3.0 is released, as scheduled (sometime about end of September), *before* numpy 2.0 (currently aimed at December 2023), we should update the runtime requirement on the backport branch (basically replaying https://github.com/yt-project/yt/pull/4573).\n", "before_files": [{"content": "#\n# yt documentation build configuration file, created by\n# sphinx-quickstart on Tue Jan 11 09:46:53 2011.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport glob\nimport os\nimport sys\n\nimport sphinx_bootstrap_theme\n\non_rtd = os.environ.get(\"READTHEDOCS\", None) == \"True\"\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath(\"../extensions/\"))\n\n# -- General configuration -----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.napoleon\",\n \"yt_cookbook\",\n \"yt_colormaps\",\n \"config_help\",\n \"yt_showfields\",\n \"nbsphinx\",\n]\n\nif not on_rtd:\n extensions.append(\"sphinx.ext.autosummary\")\n extensions.append(\"pythonscript_sphinxext\")\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix of source filenames.\nsource_suffix = \".rst\"\n\n# The encoding of source files.\n# source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"The yt Project\"\ncopyright = \"2013-2021, the yt Project\"\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = \"4.3\"\n# The full version, including alpha/beta/rc tags.\nrelease = \"4.3-dev\"\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n# language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n# today = ''\n# Else, today_fmt is used as the format for a strftime call.\n# today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = []\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n# default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n# add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n# add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\nshow_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# A list of ignored prefixes for module index sorting.\n# modindex_common_prefix = []\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = \"bootstrap\"\nhtml_theme_path = sphinx_bootstrap_theme.get_html_theme_path()\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = dict(\n bootstrap_version=\"3\",\n bootswatch_theme=\"readable\",\n navbar_links=[\n (\"\", \"\"), # see https://github.com/yt-project/yt/pull/3423\n (\"How to get help\", \"help/index\"),\n (\"Quickstart notebooks\", \"quickstart/index\"),\n (\"Cookbook\", \"cookbook/index\"),\n ],\n navbar_sidebarrel=False,\n globaltoc_depth=2,\n)\n\n# Add any paths that contain custom themes here, relative to this directory.\n# html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n# html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n# html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = \"_static/yt_icon.png\"\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n# html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\", \"analyzing/_static\"]\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n# html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n# html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n# html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n# html_additional_pages = {}\n\n# If false, no module index is generated.\nhtml_domain_indices = False\n\n# If false, no index is generated.\nhtml_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n# html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\nhtml_show_sourcelink = False\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n# html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n# html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n# html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n# html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"ytdoc\"\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\n# The paper size ('letter' or 'a4').\n# latex_paper_size = 'letter'\n\n# The font size ('10pt', '11pt' or '12pt').\n# latex_font_size = '10pt'\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n (\"index\", \"yt.tex\", \"yt Documentation\", \"The yt Project\", \"manual\"),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n# latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n# latex_use_parts = False\n\n# If true, show page references after internal links.\n# latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n# latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n# latex_appendices = []\n\n# If false, no module index is generated.\n# latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(\"index\", \"yt\", \"yt Documentation\", [\"The yt Project\"], 1)]\n\nnbsphinx_allow_errors = True\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3/\", None),\n \"ipython\": (\"https://ipython.readthedocs.io/en/stable/\", None),\n \"numpy\": (\"https://numpy.org/doc/stable/\", None),\n \"matplotlib\": (\"https://matplotlib.org/stable/\", None),\n \"astropy\": (\"https://docs.astropy.org/en/stable\", None),\n \"pandas\": (\"https://pandas.pydata.org/pandas-docs/stable\", None),\n \"trident\": (\"https://trident.readthedocs.io/en/latest/\", None),\n \"yt_astro_analysis\": (\"https://yt-astro-analysis.readthedocs.io/en/latest/\", None),\n \"yt_attic\": (\"https://yt-attic.readthedocs.io/en/latest/\", None),\n \"pytest\": (\"https://docs.pytest.org/en/stable\", None),\n}\n\nif not on_rtd:\n autosummary_generate = glob.glob(\"reference/api/api.rst\")\n\n\n# as of Sphinx 3.1.2 this is the supported way to link custom style sheets\ndef setup(app):\n app.add_css_file(\"custom.css\")\n", "path": "doc/source/conf.py"}, {"content": "from typing import NamedTuple\n\nfrom packaging.version import Version\n\n__all__ = [\n \"__version__\",\n \"version_info\",\n]\n\n__version__ = \"4.3.dev0\" # keep in sync with pyproject.toml\n\n\nclass VersionTuple(NamedTuple):\n \"\"\"\n A minimal representation of the current version number\n that can be used downstream to check the runtime version\n simply by comparing with builtin tuples, as can be done with\n the runtime Python version using sys.version_info\n\n https://docs.python.org/3/library/sys.html#sys.version_info\n \"\"\"\n\n major: int\n minor: int\n micro: int\n releaselevel: str\n serial: int\n\n\ndef _parse_to_version_info(version_str: str) -> VersionTuple:\n # adapted from matplotlib 3.5\n \"\"\"\n Parse a version string to a namedtuple analogous to sys.version_info.\n See:\n https://packaging.pypa.io/en/latest/version.html#packaging.version.parse\n https://docs.python.org/3/library/sys.html#sys.version_info\n \"\"\"\n v = Version(version_str)\n if v.pre is None and v.post is None and v.dev is None:\n return VersionTuple(v.major, v.minor, v.micro, \"final\", 0)\n elif v.dev is not None:\n return VersionTuple(v.major, v.minor, v.micro, \"alpha\", v.dev)\n elif v.pre is not None:\n releaselevel = {\"a\": \"alpha\", \"b\": \"beta\", \"rc\": \"candidate\"}.get(\n v.pre[0], \"alpha\"\n )\n return VersionTuple(v.major, v.minor, v.micro, releaselevel, v.pre[1])\n elif v.post is not None:\n # fallback for v.post: guess-next-dev scheme from setuptools_scm\n return VersionTuple(v.major, v.minor, v.micro + 1, \"alpha\", v.post)\n else:\n return VersionTuple(v.major, v.minor, v.micro + 1, \"alpha\", 0)\n\n\nversion_info = _parse_to_version_info(__version__)\n", "path": "yt/_version.py"}], "after_files": [{"content": "#\n# yt documentation build configuration file, created by\n# sphinx-quickstart on Tue Jan 11 09:46:53 2011.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport glob\nimport os\nimport sys\n\nimport sphinx_bootstrap_theme\n\non_rtd = os.environ.get(\"READTHEDOCS\", None) == \"True\"\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath(\"../extensions/\"))\n\n# -- General configuration -----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.napoleon\",\n \"yt_cookbook\",\n \"yt_colormaps\",\n \"config_help\",\n \"yt_showfields\",\n \"nbsphinx\",\n]\n\nif not on_rtd:\n extensions.append(\"sphinx.ext.autosummary\")\n extensions.append(\"pythonscript_sphinxext\")\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix of source filenames.\nsource_suffix = \".rst\"\n\n# The encoding of source files.\n# source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"The yt Project\"\ncopyright = \"2013-2021, the yt Project\"\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = \"4.3\"\n# The full version, including alpha/beta/rc tags.\nrelease = \"4.3.0\"\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n# language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n# today = ''\n# Else, today_fmt is used as the format for a strftime call.\n# today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = []\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n# default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n# add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n# add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\nshow_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# A list of ignored prefixes for module index sorting.\n# modindex_common_prefix = []\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = \"bootstrap\"\nhtml_theme_path = sphinx_bootstrap_theme.get_html_theme_path()\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = dict(\n bootstrap_version=\"3\",\n bootswatch_theme=\"readable\",\n navbar_links=[\n (\"\", \"\"), # see https://github.com/yt-project/yt/pull/3423\n (\"How to get help\", \"help/index\"),\n (\"Quickstart notebooks\", \"quickstart/index\"),\n (\"Cookbook\", \"cookbook/index\"),\n ],\n navbar_sidebarrel=False,\n globaltoc_depth=2,\n)\n\n# Add any paths that contain custom themes here, relative to this directory.\n# html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n# html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n# html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = \"_static/yt_icon.png\"\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n# html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\", \"analyzing/_static\"]\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n# html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n# html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n# html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n# html_additional_pages = {}\n\n# If false, no module index is generated.\nhtml_domain_indices = False\n\n# If false, no index is generated.\nhtml_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n# html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\nhtml_show_sourcelink = False\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n# html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n# html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n# html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n# html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"ytdoc\"\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\n# The paper size ('letter' or 'a4').\n# latex_paper_size = 'letter'\n\n# The font size ('10pt', '11pt' or '12pt').\n# latex_font_size = '10pt'\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n (\"index\", \"yt.tex\", \"yt Documentation\", \"The yt Project\", \"manual\"),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n# latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n# latex_use_parts = False\n\n# If true, show page references after internal links.\n# latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n# latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n# latex_appendices = []\n\n# If false, no module index is generated.\n# latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(\"index\", \"yt\", \"yt Documentation\", [\"The yt Project\"], 1)]\n\nnbsphinx_allow_errors = True\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3/\", None),\n \"ipython\": (\"https://ipython.readthedocs.io/en/stable/\", None),\n \"numpy\": (\"https://numpy.org/doc/stable/\", None),\n \"matplotlib\": (\"https://matplotlib.org/stable/\", None),\n \"astropy\": (\"https://docs.astropy.org/en/stable\", None),\n \"pandas\": (\"https://pandas.pydata.org/pandas-docs/stable\", None),\n \"trident\": (\"https://trident.readthedocs.io/en/latest/\", None),\n \"yt_astro_analysis\": (\"https://yt-astro-analysis.readthedocs.io/en/latest/\", None),\n \"yt_attic\": (\"https://yt-attic.readthedocs.io/en/latest/\", None),\n \"pytest\": (\"https://docs.pytest.org/en/stable\", None),\n}\n\nif not on_rtd:\n autosummary_generate = glob.glob(\"reference/api/api.rst\")\n\n\n# as of Sphinx 3.1.2 this is the supported way to link custom style sheets\ndef setup(app):\n app.add_css_file(\"custom.css\")\n", "path": "doc/source/conf.py"}, {"content": "from typing import NamedTuple\n\nfrom packaging.version import Version\n\n__all__ = [\n \"__version__\",\n \"version_info\",\n]\n\n__version__ = \"4.3.0\" # keep in sync with pyproject.toml\n\n\nclass VersionTuple(NamedTuple):\n \"\"\"\n A minimal representation of the current version number\n that can be used downstream to check the runtime version\n simply by comparing with builtin tuples, as can be done with\n the runtime Python version using sys.version_info\n\n https://docs.python.org/3/library/sys.html#sys.version_info\n \"\"\"\n\n major: int\n minor: int\n micro: int\n releaselevel: str\n serial: int\n\n\ndef _parse_to_version_info(version_str: str) -> VersionTuple:\n # adapted from matplotlib 3.5\n \"\"\"\n Parse a version string to a namedtuple analogous to sys.version_info.\n See:\n https://packaging.pypa.io/en/latest/version.html#packaging.version.parse\n https://docs.python.org/3/library/sys.html#sys.version_info\n \"\"\"\n v = Version(version_str)\n if v.pre is None and v.post is None and v.dev is None:\n return VersionTuple(v.major, v.minor, v.micro, \"final\", 0)\n elif v.dev is not None:\n return VersionTuple(v.major, v.minor, v.micro, \"alpha\", v.dev)\n elif v.pre is not None:\n releaselevel = {\"a\": \"alpha\", \"b\": \"beta\", \"rc\": \"candidate\"}.get(\n v.pre[0], \"alpha\"\n )\n return VersionTuple(v.major, v.minor, v.micro, releaselevel, v.pre[1])\n elif v.post is not None:\n # fallback for v.post: guess-next-dev scheme from setuptools_scm\n return VersionTuple(v.major, v.minor, v.micro + 1, \"alpha\", v.post)\n else:\n return VersionTuple(v.major, v.minor, v.micro + 1, \"alpha\", 0)\n\n\nversion_info = _parse_to_version_info(__version__)\n", "path": "yt/_version.py"}]}
3,857
209
gh_patches_debug_17889
rasdani/github-patches
git_diff
akvo__akvo-rsr-1763
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Sector vocabulary saved value not updated ## Test plan GIVEN the project editor WHEN the sector vocabulary AND sector code are filled in THEN the 'saved-value' attribute of the vocabulary should be correctly updated --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `akvo/rsr/models/sector.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 # Akvo RSR is covered by the GNU Affero General Public License. 4 # See more details in the license.txt file located at the root folder of the Akvo RSR module. 5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >. 6 7 8 from django.db import models 9 from django.db.models.signals import post_save 10 from django.dispatch import receiver 11 from django.core.validators import MaxValueValidator, MinValueValidator 12 from django.utils.translation import ugettext_lazy as _ 13 14 from ..fields import ValidXMLCharField 15 16 from akvo.codelists import models as codelist_models 17 from akvo.codelists.store.codelists_v201 import SECTOR_VOCABULARY 18 from akvo.utils import codelist_choices, codelist_value 19 20 21 class Sector(models.Model): 22 project = models.ForeignKey('Project', verbose_name=_(u'project'), related_name='sectors') 23 sector_code = ValidXMLCharField( 24 _(u'sector code'), blank=True, max_length=5, 25 help_text=_(u'Enter the sector code of the sectors that the project is working within.<br>' 26 u'See these lists for the DAC-5 and DAC-3 sector codes:<br>' 27 u'- <a href="http://iatistandard.org/201/codelists/Sector/" target="_blank">' 28 u'DAC-5 sector codes</a><br>' 29 u'- <a href="http://iatistandard.org/201/codelists/SectorCategory/" ' 30 u'target="_blank">DAC-3 sector codes</a>') 31 ) 32 text = ValidXMLCharField( 33 _(u'description'), blank=True, max_length=100, help_text=_(u'(max 100 characters)') 34 ) 35 vocabulary = ValidXMLCharField( 36 _(u'vocabulary'), blank=True, max_length=5, choices=codelist_choices(SECTOR_VOCABULARY) 37 ) 38 percentage = models.DecimalField( 39 _(u'sector percentage'), blank=True, null=True, max_digits=4, decimal_places=1, 40 validators=[MaxValueValidator(100), MinValueValidator(0)], 41 help_text=_(u'You can set the percentage of the project that is relevant for ' 42 u'this sector here.') 43 ) 44 45 def __unicode__(self): 46 if self.sector_code: 47 try: 48 sector_unicode = self.iati_sector().name.capitalize() 49 except Exception as e: 50 sector_unicode = u'%s' % _(u'Sector code not found') 51 else: 52 sector_unicode = u'%s' % _(u'No sector code specified') 53 54 if self.percentage: 55 sector_unicode += u' (%s%%)' % str(self.percentage) 56 57 return sector_unicode 58 59 60 def iati_sector_codes(self): 61 if self.sector_code and (self.vocabulary == '1' or self.vocabulary == 'DAC'): 62 return self.sector_code, codelist_value(codelist_models.Sector, self, 'sector_code') 63 elif self.sector_code and (self.vocabulary == '2' or self.vocabulary == 'DAC-3'): 64 return self.sector_code, codelist_value(codelist_models.SectorCategory, 65 self, 66 'sector_code') 67 else: 68 return self.sector_code, self.sector_code 69 70 def iati_sector(self): 71 if self.sector_code and (self.vocabulary == '1' or self.vocabulary == 'DAC'): 72 return codelist_value(codelist_models.Sector, self, 'sector_code') 73 elif self.sector_code and (self.vocabulary == '2' or self.vocabulary == 'DAC-3'): 74 return codelist_value(codelist_models.SectorCategory, self, 'sector_code') 75 else: 76 return self.sector_code 77 78 def iati_vocabulary(self): 79 return codelist_value(codelist_models.SectorVocabulary, self, 'vocabulary') 80 81 class Meta: 82 app_label = 'rsr' 83 verbose_name = _(u'sector') 84 verbose_name_plural = _(u'sectors') 85 86 @receiver(post_save, sender=Sector) 87 def update_vocabulary(sender, **kwargs): 88 "Updates the vocabulary if not specified." 89 sector = kwargs['instance'] 90 if not sector.vocabulary and sector.sector_code: 91 if len(sector.sector_code) == 3: 92 sector.vocabulary = '2' 93 elif len(sector.sector_code) == 5: 94 sector.vocabulary = '1' 95 sector.save() 96 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/akvo/rsr/models/sector.py b/akvo/rsr/models/sector.py --- a/akvo/rsr/models/sector.py +++ b/akvo/rsr/models/sector.py @@ -6,8 +6,6 @@ from django.db import models -from django.db.models.signals import post_save -from django.dispatch import receiver from django.core.validators import MaxValueValidator, MinValueValidator from django.utils.translation import ugettext_lazy as _ @@ -82,14 +80,3 @@ app_label = 'rsr' verbose_name = _(u'sector') verbose_name_plural = _(u'sectors') - -@receiver(post_save, sender=Sector) -def update_vocabulary(sender, **kwargs): - "Updates the vocabulary if not specified." - sector = kwargs['instance'] - if not sector.vocabulary and sector.sector_code: - if len(sector.sector_code) == 3: - sector.vocabulary = '2' - elif len(sector.sector_code) == 5: - sector.vocabulary = '1' - sector.save()
{"golden_diff": "diff --git a/akvo/rsr/models/sector.py b/akvo/rsr/models/sector.py\n--- a/akvo/rsr/models/sector.py\n+++ b/akvo/rsr/models/sector.py\n@@ -6,8 +6,6 @@\n \n \n from django.db import models\n-from django.db.models.signals import post_save\n-from django.dispatch import receiver\n from django.core.validators import MaxValueValidator, MinValueValidator\n from django.utils.translation import ugettext_lazy as _\n \n@@ -82,14 +80,3 @@\n app_label = 'rsr'\n verbose_name = _(u'sector')\n verbose_name_plural = _(u'sectors')\n-\n-@receiver(post_save, sender=Sector)\n-def update_vocabulary(sender, **kwargs):\n- \"Updates the vocabulary if not specified.\"\n- sector = kwargs['instance']\n- if not sector.vocabulary and sector.sector_code:\n- if len(sector.sector_code) == 3:\n- sector.vocabulary = '2'\n- elif len(sector.sector_code) == 5:\n- sector.vocabulary = '1'\n- sector.save()\n", "issue": "Sector vocabulary saved value not updated\n## Test plan\n\nGIVEN the project editor\nWHEN the sector vocabulary AND sector code are filled in\nTHEN the 'saved-value' attribute of the vocabulary should be correctly updated\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom django.db import models\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.core.validators import MaxValueValidator, MinValueValidator\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom ..fields import ValidXMLCharField\n\nfrom akvo.codelists import models as codelist_models\nfrom akvo.codelists.store.codelists_v201 import SECTOR_VOCABULARY\nfrom akvo.utils import codelist_choices, codelist_value\n\n\nclass Sector(models.Model):\n project = models.ForeignKey('Project', verbose_name=_(u'project'), related_name='sectors')\n sector_code = ValidXMLCharField(\n _(u'sector code'), blank=True, max_length=5,\n help_text=_(u'Enter the sector code of the sectors that the project is working within.<br>'\n u'See these lists for the DAC-5 and DAC-3 sector codes:<br>'\n u'- <a href=\"http://iatistandard.org/201/codelists/Sector/\" target=\"_blank\">'\n u'DAC-5 sector codes</a><br>'\n u'- <a href=\"http://iatistandard.org/201/codelists/SectorCategory/\" '\n u'target=\"_blank\">DAC-3 sector codes</a>')\n )\n text = ValidXMLCharField(\n _(u'description'), blank=True, max_length=100, help_text=_(u'(max 100 characters)')\n )\n vocabulary = ValidXMLCharField(\n _(u'vocabulary'), blank=True, max_length=5, choices=codelist_choices(SECTOR_VOCABULARY)\n )\n percentage = models.DecimalField(\n _(u'sector percentage'), blank=True, null=True, max_digits=4, decimal_places=1,\n validators=[MaxValueValidator(100), MinValueValidator(0)],\n help_text=_(u'You can set the percentage of the project that is relevant for '\n u'this sector here.')\n )\n\n def __unicode__(self):\n if self.sector_code:\n try:\n sector_unicode = self.iati_sector().name.capitalize()\n except Exception as e:\n sector_unicode = u'%s' % _(u'Sector code not found')\n else:\n sector_unicode = u'%s' % _(u'No sector code specified')\n\n if self.percentage:\n sector_unicode += u' (%s%%)' % str(self.percentage)\n\n return sector_unicode\n\n\n def iati_sector_codes(self):\n if self.sector_code and (self.vocabulary == '1' or self.vocabulary == 'DAC'):\n return self.sector_code, codelist_value(codelist_models.Sector, self, 'sector_code')\n elif self.sector_code and (self.vocabulary == '2' or self.vocabulary == 'DAC-3'):\n return self.sector_code, codelist_value(codelist_models.SectorCategory,\n self,\n 'sector_code')\n else:\n return self.sector_code, self.sector_code\n\n def iati_sector(self):\n if self.sector_code and (self.vocabulary == '1' or self.vocabulary == 'DAC'):\n return codelist_value(codelist_models.Sector, self, 'sector_code')\n elif self.sector_code and (self.vocabulary == '2' or self.vocabulary == 'DAC-3'):\n return codelist_value(codelist_models.SectorCategory, self, 'sector_code')\n else:\n return self.sector_code\n\n def iati_vocabulary(self):\n return codelist_value(codelist_models.SectorVocabulary, self, 'vocabulary')\n\n class Meta:\n app_label = 'rsr'\n verbose_name = _(u'sector')\n verbose_name_plural = _(u'sectors')\n\n@receiver(post_save, sender=Sector)\ndef update_vocabulary(sender, **kwargs):\n \"Updates the vocabulary if not specified.\"\n sector = kwargs['instance']\n if not sector.vocabulary and sector.sector_code:\n if len(sector.sector_code) == 3:\n sector.vocabulary = '2'\n elif len(sector.sector_code) == 5:\n sector.vocabulary = '1'\n sector.save()\n", "path": "akvo/rsr/models/sector.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom django.db import models\nfrom django.core.validators import MaxValueValidator, MinValueValidator\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom ..fields import ValidXMLCharField\n\nfrom akvo.codelists import models as codelist_models\nfrom akvo.codelists.store.codelists_v201 import SECTOR_VOCABULARY\nfrom akvo.utils import codelist_choices, codelist_value\n\n\nclass Sector(models.Model):\n project = models.ForeignKey('Project', verbose_name=_(u'project'), related_name='sectors')\n sector_code = ValidXMLCharField(\n _(u'sector code'), blank=True, max_length=5,\n help_text=_(u'Enter the sector code of the sectors that the project is working within.<br>'\n u'See these lists for the DAC-5 and DAC-3 sector codes:<br>'\n u'- <a href=\"http://iatistandard.org/201/codelists/Sector/\" target=\"_blank\">'\n u'DAC-5 sector codes</a><br>'\n u'- <a href=\"http://iatistandard.org/201/codelists/SectorCategory/\" '\n u'target=\"_blank\">DAC-3 sector codes</a>')\n )\n text = ValidXMLCharField(\n _(u'description'), blank=True, max_length=100, help_text=_(u'(max 100 characters)')\n )\n vocabulary = ValidXMLCharField(\n _(u'vocabulary'), blank=True, max_length=5, choices=codelist_choices(SECTOR_VOCABULARY)\n )\n percentage = models.DecimalField(\n _(u'sector percentage'), blank=True, null=True, max_digits=4, decimal_places=1,\n validators=[MaxValueValidator(100), MinValueValidator(0)],\n help_text=_(u'You can set the percentage of the project that is relevant for '\n u'this sector here.')\n )\n\n def __unicode__(self):\n if self.sector_code:\n try:\n sector_unicode = self.iati_sector().name.capitalize()\n except Exception as e:\n sector_unicode = u'%s' % _(u'Sector code not found')\n else:\n sector_unicode = u'%s' % _(u'No sector code specified')\n\n if self.percentage:\n sector_unicode += u' (%s%%)' % str(self.percentage)\n\n return sector_unicode\n\n\n def iati_sector_codes(self):\n if self.sector_code and (self.vocabulary == '1' or self.vocabulary == 'DAC'):\n return self.sector_code, codelist_value(codelist_models.Sector, self, 'sector_code')\n elif self.sector_code and (self.vocabulary == '2' or self.vocabulary == 'DAC-3'):\n return self.sector_code, codelist_value(codelist_models.SectorCategory,\n self,\n 'sector_code')\n else:\n return self.sector_code, self.sector_code\n\n def iati_sector(self):\n if self.sector_code and (self.vocabulary == '1' or self.vocabulary == 'DAC'):\n return codelist_value(codelist_models.Sector, self, 'sector_code')\n elif self.sector_code and (self.vocabulary == '2' or self.vocabulary == 'DAC-3'):\n return codelist_value(codelist_models.SectorCategory, self, 'sector_code')\n else:\n return self.sector_code\n\n def iati_vocabulary(self):\n return codelist_value(codelist_models.SectorVocabulary, self, 'vocabulary')\n\n class Meta:\n app_label = 'rsr'\n verbose_name = _(u'sector')\n verbose_name_plural = _(u'sectors')\n", "path": "akvo/rsr/models/sector.py"}]}
1,468
246
gh_patches_debug_34317
rasdani/github-patches
git_diff
mindee__doctr-240
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [document] Check integrity of PDF --> img conversion with default DPI The current PDF reading implies a conversion to an image. As we are using the default args for this conversion, the DPI value might be low. We need to check that the default parameter is not bringing about performance issues due to low resolution rendering. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `doctr/documents/reader.py` Content: ``` 1 # Copyright (C) 2021, Mindee. 2 3 # This program is licensed under the Apache License version 2. 4 # See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details. 5 6 import numpy as np 7 import cv2 8 from pathlib import Path 9 import fitz 10 from weasyprint import HTML 11 from typing import List, Tuple, Optional, Any, Union, Sequence 12 13 __all__ = ['read_pdf', 'read_img', 'read_html', 'DocumentFile'] 14 15 16 AbstractPath = Union[str, Path] 17 AbstractFile = Union[AbstractPath, bytes] 18 Bbox = Tuple[float, float, float, float] 19 20 21 def read_img( 22 file: AbstractFile, 23 output_size: Optional[Tuple[int, int]] = None, 24 rgb_output: bool = True, 25 ) -> np.ndarray: 26 """Read an image file into numpy format 27 28 Example:: 29 >>> from doctr.documents import read_img 30 >>> page = read_img("path/to/your/doc.jpg") 31 32 Args: 33 file: the path to the image file 34 output_size: the expected output size of each page in format H x W 35 rgb_output: whether the output ndarray channel order should be RGB instead of BGR. 36 Returns: 37 the page decoded as numpy ndarray of shape H x W x 3 38 """ 39 40 if isinstance(file, (str, Path)): 41 if not Path(file).is_file(): 42 raise FileNotFoundError(f"unable to access {file}") 43 img = cv2.imread(str(file), cv2.IMREAD_COLOR) 44 elif isinstance(file, bytes): 45 file = np.frombuffer(file, np.uint8) 46 img = cv2.imdecode(file, cv2.IMREAD_COLOR) 47 else: 48 raise TypeError("unsupported object type for argument 'file'") 49 50 # Validity check 51 if img is None: 52 raise ValueError("unable to read file.") 53 # Resizing 54 if isinstance(output_size, tuple): 55 img = cv2.resize(img, output_size[::-1], interpolation=cv2.INTER_LINEAR) 56 # Switch the channel order 57 if rgb_output: 58 img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) 59 return img 60 61 62 def read_pdf(file: AbstractFile, **kwargs: Any) -> fitz.Document: 63 """Read a PDF file and convert it into an image in numpy format 64 65 Example:: 66 >>> from doctr.documents import read_pdf 67 >>> doc = read_pdf("path/to/your/doc.pdf") 68 69 Args: 70 file: the path to the PDF file 71 Returns: 72 the list of pages decoded as numpy ndarray of shape H x W x 3 73 """ 74 75 if isinstance(file, (str, Path)) and not Path(file).is_file(): 76 raise FileNotFoundError(f"unable to access {file}") 77 78 fitz_args = {} 79 80 if isinstance(file, (str, Path)): 81 fitz_args['filename'] = file 82 elif isinstance(file, bytes): 83 fitz_args['stream'] = file 84 else: 85 raise TypeError("unsupported object type for argument 'file'") 86 87 # Read pages with fitz and convert them to numpy ndarrays 88 return fitz.open(**fitz_args, filetype="pdf", **kwargs) 89 90 91 def convert_page_to_numpy( 92 page: fitz.fitz.Page, 93 output_size: Optional[Tuple[int, int]] = None, 94 rgb_output: bool = True, 95 ) -> np.ndarray: 96 """Convert a fitz page to a numpy-formatted image 97 98 Args: 99 page: the page of a file read with PyMuPDF 100 output_size: the expected output size of each page in format H x W. Default goes to 840 x 595 for A4 pdf, 101 if you want to increase the resolution while preserving the original A4 aspect ratio can pass (1024, 726) 102 rgb_output: whether the output ndarray channel order should be RGB instead of BGR. 103 104 Returns: 105 the rendered image in numpy format 106 """ 107 108 transform_matrix = None 109 110 # If no output size is specified, keep the origin one 111 if output_size is not None: 112 scales = (output_size[1] / page.MediaBox[2], output_size[0] / page.MediaBox[3]) 113 transform_matrix = fitz.Matrix(*scales) 114 115 # Generate the pixel map using the transformation matrix 116 stream = page.getPixmap(matrix=transform_matrix).getImageData() 117 # Decode it into a numpy 118 img = cv2.imdecode(np.frombuffer(stream, dtype=np.uint8), cv2.IMREAD_UNCHANGED) 119 120 # Switch the channel order 121 if rgb_output: 122 img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) 123 124 return img 125 126 127 def read_html(url: str, **kwargs: Any) -> bytes: 128 """Read a PDF file and convert it into an image in numpy format 129 130 Example:: 131 >>> from doctr.documents import read_html 132 >>> doc = read_html("https://www.yoursite.com") 133 134 Args: 135 url: URL of the target web page 136 Returns: 137 decoded PDF file as a bytes stream 138 """ 139 140 return HTML(url, **kwargs).write_pdf() 141 142 143 class PDF: 144 """PDF document template 145 146 Args: 147 doc: input PDF document 148 """ 149 def __init__(self, doc: fitz.Document) -> None: 150 self.doc = doc 151 152 def as_images(self, **kwargs) -> List[np.ndarray]: 153 """Convert all document pages to images 154 155 Example:: 156 >>> from doctr.documents import DocumentFile 157 >>> pages = DocumentFile.from_pdf("path/to/your/doc.pdf").as_images() 158 159 Args: 160 kwargs: keyword arguments of `convert_page_to_numpy` 161 Returns: 162 the list of pages decoded as numpy ndarray of shape H x W x 3 163 """ 164 return [convert_page_to_numpy(page, **kwargs) for page in self.doc] 165 166 def get_page_words(self, idx, **kwargs) -> List[Tuple[Bbox, str]]: 167 """Get the annotations for all words of a given page""" 168 169 # xmin, ymin, xmax, ymax, value, block_idx, line_idx, word_idx 170 return [(info[:4], info[4]) for info in self.doc[idx].getTextWords(**kwargs)] 171 172 def get_words(self, **kwargs) -> List[List[Tuple[Bbox, str]]]: 173 """Get the annotations for all words in the document 174 175 Example:: 176 >>> from doctr.documents import DocumentFile 177 >>> words = DocumentFile.from_pdf("path/to/your/doc.pdf").get_words() 178 179 Args: 180 kwargs: keyword arguments of `fitz.Page.getTextWords` 181 Returns: 182 the list of pages annotations, represented as a list of tuple (bounding box, value) 183 """ 184 return [self.get_page_words(idx, **kwargs) for idx in range(len(self.doc))] 185 186 def get_page_artefacts(self, idx) -> List[Tuple[float, float, float, float]]: 187 return [tuple(self.doc[idx].getImageBbox(artefact)) for artefact in self.doc[idx].get_images(full=True)] 188 189 def get_artefacts(self) -> List[List[Tuple[float, float, float, float]]]: 190 """Get the artefacts for the entire document 191 192 Example:: 193 >>> from doctr.documents import DocumentFile 194 >>> artefacts = DocumentFile.from_pdf("path/to/your/doc.pdf").get_artefacts() 195 196 Returns: 197 the list of pages artefacts, represented as a list of bounding boxes 198 """ 199 200 return [self.get_page_artefacts(idx) for idx in range(len(self.doc))] 201 202 203 class DocumentFile: 204 """Read a document from multiple extensions""" 205 206 @classmethod 207 def from_pdf(cls, file: AbstractFile, **kwargs) -> PDF: 208 """Read a PDF file 209 210 Example:: 211 >>> from doctr.documents import DocumentFile 212 >>> doc = DocumentFile.from_pdf("path/to/your/doc.pdf") 213 214 Args: 215 file: the path to the PDF file or a binary stream 216 Returns: 217 a PDF document 218 """ 219 220 doc = read_pdf(file, **kwargs) 221 222 return PDF(doc) 223 224 @classmethod 225 def from_url(cls, url: str, **kwargs) -> PDF: 226 """Interpret a web page as a PDF document 227 228 Example:: 229 >>> from doctr.documents import DocumentFile 230 >>> doc = DocumentFile.from_url("https://www.yoursite.com") 231 232 Args: 233 url: the URL of the target web page 234 Returns: 235 a PDF document 236 """ 237 pdf_stream = read_html(url) 238 return cls.from_pdf(pdf_stream, **kwargs) 239 240 @classmethod 241 def from_images(cls, files: Union[Sequence[AbstractFile], AbstractFile], **kwargs) -> List[np.ndarray]: 242 """Read an image file (or a collection of image files) and convert it into an image in numpy format 243 244 Example:: 245 >>> from doctr.documents import DocumentFile 246 >>> pages = DocumentFile.from_images(["path/to/your/page1.png", "path/to/your/page2.png"]) 247 248 Args: 249 files: the path to the image file or a binary stream, or a collection of those 250 Returns: 251 the list of pages decoded as numpy ndarray of shape H x W x 3 252 """ 253 if isinstance(files, (str, Path, bytes)): 254 files = [files] 255 256 return [read_img(file, **kwargs) for file in files] 257 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/doctr/documents/reader.py b/doctr/documents/reader.py --- a/doctr/documents/reader.py +++ b/doctr/documents/reader.py @@ -10,7 +10,7 @@ from weasyprint import HTML from typing import List, Tuple, Optional, Any, Union, Sequence -__all__ = ['read_pdf', 'read_img', 'read_html', 'DocumentFile'] +__all__ = ['read_pdf', 'read_img', 'read_html', 'DocumentFile', 'PDF'] AbstractPath = Union[str, Path] @@ -92,6 +92,7 @@ page: fitz.fitz.Page, output_size: Optional[Tuple[int, int]] = None, rgb_output: bool = True, + default_scales: Tuple[float, float] = (2, 2), ) -> np.ndarray: """Convert a fitz page to a numpy-formatted image @@ -100,17 +101,21 @@ output_size: the expected output size of each page in format H x W. Default goes to 840 x 595 for A4 pdf, if you want to increase the resolution while preserving the original A4 aspect ratio can pass (1024, 726) rgb_output: whether the output ndarray channel order should be RGB instead of BGR. + default_scales: spatial scaling to be applied when output_size is not specified where (1, 1) + corresponds to 72 dpi rendering. Returns: the rendered image in numpy format """ - transform_matrix = None - # If no output size is specified, keep the origin one if output_size is not None: scales = (output_size[1] / page.MediaBox[2], output_size[0] / page.MediaBox[3]) - transform_matrix = fitz.Matrix(*scales) + else: + # Default 72 DPI (scales of (1, 1)) is unnecessarily low + scales = default_scales + + transform_matrix = fitz.Matrix(*scales) # Generate the pixel map using the transformation matrix stream = page.getPixmap(matrix=transform_matrix).getImageData()
{"golden_diff": "diff --git a/doctr/documents/reader.py b/doctr/documents/reader.py\n--- a/doctr/documents/reader.py\n+++ b/doctr/documents/reader.py\n@@ -10,7 +10,7 @@\n from weasyprint import HTML\n from typing import List, Tuple, Optional, Any, Union, Sequence\n \n-__all__ = ['read_pdf', 'read_img', 'read_html', 'DocumentFile']\n+__all__ = ['read_pdf', 'read_img', 'read_html', 'DocumentFile', 'PDF']\n \n \n AbstractPath = Union[str, Path]\n@@ -92,6 +92,7 @@\n page: fitz.fitz.Page,\n output_size: Optional[Tuple[int, int]] = None,\n rgb_output: bool = True,\n+ default_scales: Tuple[float, float] = (2, 2),\n ) -> np.ndarray:\n \"\"\"Convert a fitz page to a numpy-formatted image\n \n@@ -100,17 +101,21 @@\n output_size: the expected output size of each page in format H x W. Default goes to 840 x 595 for A4 pdf,\n if you want to increase the resolution while preserving the original A4 aspect ratio can pass (1024, 726)\n rgb_output: whether the output ndarray channel order should be RGB instead of BGR.\n+ default_scales: spatial scaling to be applied when output_size is not specified where (1, 1)\n+ corresponds to 72 dpi rendering.\n \n Returns:\n the rendered image in numpy format\n \"\"\"\n \n- transform_matrix = None\n-\n # If no output size is specified, keep the origin one\n if output_size is not None:\n scales = (output_size[1] / page.MediaBox[2], output_size[0] / page.MediaBox[3])\n- transform_matrix = fitz.Matrix(*scales)\n+ else:\n+ # Default 72 DPI (scales of (1, 1)) is unnecessarily low\n+ scales = default_scales\n+\n+ transform_matrix = fitz.Matrix(*scales)\n \n # Generate the pixel map using the transformation matrix\n stream = page.getPixmap(matrix=transform_matrix).getImageData()\n", "issue": "[document] Check integrity of PDF --> img conversion with default DPI\nThe current PDF reading implies a conversion to an image. As we are using the default args for this conversion, the DPI value might be low. We need to check that the default parameter is not bringing about performance issues due to low resolution rendering.\n", "before_files": [{"content": "# Copyright (C) 2021, Mindee.\n\n# This program is licensed under the Apache License version 2.\n# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.\n\nimport numpy as np\nimport cv2\nfrom pathlib import Path\nimport fitz\nfrom weasyprint import HTML\nfrom typing import List, Tuple, Optional, Any, Union, Sequence\n\n__all__ = ['read_pdf', 'read_img', 'read_html', 'DocumentFile']\n\n\nAbstractPath = Union[str, Path]\nAbstractFile = Union[AbstractPath, bytes]\nBbox = Tuple[float, float, float, float]\n\n\ndef read_img(\n file: AbstractFile,\n output_size: Optional[Tuple[int, int]] = None,\n rgb_output: bool = True,\n) -> np.ndarray:\n \"\"\"Read an image file into numpy format\n\n Example::\n >>> from doctr.documents import read_img\n >>> page = read_img(\"path/to/your/doc.jpg\")\n\n Args:\n file: the path to the image file\n output_size: the expected output size of each page in format H x W\n rgb_output: whether the output ndarray channel order should be RGB instead of BGR.\n Returns:\n the page decoded as numpy ndarray of shape H x W x 3\n \"\"\"\n\n if isinstance(file, (str, Path)):\n if not Path(file).is_file():\n raise FileNotFoundError(f\"unable to access {file}\")\n img = cv2.imread(str(file), cv2.IMREAD_COLOR)\n elif isinstance(file, bytes):\n file = np.frombuffer(file, np.uint8)\n img = cv2.imdecode(file, cv2.IMREAD_COLOR)\n else:\n raise TypeError(\"unsupported object type for argument 'file'\")\n\n # Validity check\n if img is None:\n raise ValueError(\"unable to read file.\")\n # Resizing\n if isinstance(output_size, tuple):\n img = cv2.resize(img, output_size[::-1], interpolation=cv2.INTER_LINEAR)\n # Switch the channel order\n if rgb_output:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n return img\n\n\ndef read_pdf(file: AbstractFile, **kwargs: Any) -> fitz.Document:\n \"\"\"Read a PDF file and convert it into an image in numpy format\n\n Example::\n >>> from doctr.documents import read_pdf\n >>> doc = read_pdf(\"path/to/your/doc.pdf\")\n\n Args:\n file: the path to the PDF file\n Returns:\n the list of pages decoded as numpy ndarray of shape H x W x 3\n \"\"\"\n\n if isinstance(file, (str, Path)) and not Path(file).is_file():\n raise FileNotFoundError(f\"unable to access {file}\")\n\n fitz_args = {}\n\n if isinstance(file, (str, Path)):\n fitz_args['filename'] = file\n elif isinstance(file, bytes):\n fitz_args['stream'] = file\n else:\n raise TypeError(\"unsupported object type for argument 'file'\")\n\n # Read pages with fitz and convert them to numpy ndarrays\n return fitz.open(**fitz_args, filetype=\"pdf\", **kwargs)\n\n\ndef convert_page_to_numpy(\n page: fitz.fitz.Page,\n output_size: Optional[Tuple[int, int]] = None,\n rgb_output: bool = True,\n) -> np.ndarray:\n \"\"\"Convert a fitz page to a numpy-formatted image\n\n Args:\n page: the page of a file read with PyMuPDF\n output_size: the expected output size of each page in format H x W. Default goes to 840 x 595 for A4 pdf,\n if you want to increase the resolution while preserving the original A4 aspect ratio can pass (1024, 726)\n rgb_output: whether the output ndarray channel order should be RGB instead of BGR.\n\n Returns:\n the rendered image in numpy format\n \"\"\"\n\n transform_matrix = None\n\n # If no output size is specified, keep the origin one\n if output_size is not None:\n scales = (output_size[1] / page.MediaBox[2], output_size[0] / page.MediaBox[3])\n transform_matrix = fitz.Matrix(*scales)\n\n # Generate the pixel map using the transformation matrix\n stream = page.getPixmap(matrix=transform_matrix).getImageData()\n # Decode it into a numpy\n img = cv2.imdecode(np.frombuffer(stream, dtype=np.uint8), cv2.IMREAD_UNCHANGED)\n\n # Switch the channel order\n if rgb_output:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n return img\n\n\ndef read_html(url: str, **kwargs: Any) -> bytes:\n \"\"\"Read a PDF file and convert it into an image in numpy format\n\n Example::\n >>> from doctr.documents import read_html\n >>> doc = read_html(\"https://www.yoursite.com\")\n\n Args:\n url: URL of the target web page\n Returns:\n decoded PDF file as a bytes stream\n \"\"\"\n\n return HTML(url, **kwargs).write_pdf()\n\n\nclass PDF:\n \"\"\"PDF document template\n\n Args:\n doc: input PDF document\n \"\"\"\n def __init__(self, doc: fitz.Document) -> None:\n self.doc = doc\n\n def as_images(self, **kwargs) -> List[np.ndarray]:\n \"\"\"Convert all document pages to images\n\n Example::\n >>> from doctr.documents import DocumentFile\n >>> pages = DocumentFile.from_pdf(\"path/to/your/doc.pdf\").as_images()\n\n Args:\n kwargs: keyword arguments of `convert_page_to_numpy`\n Returns:\n the list of pages decoded as numpy ndarray of shape H x W x 3\n \"\"\"\n return [convert_page_to_numpy(page, **kwargs) for page in self.doc]\n\n def get_page_words(self, idx, **kwargs) -> List[Tuple[Bbox, str]]:\n \"\"\"Get the annotations for all words of a given page\"\"\"\n\n # xmin, ymin, xmax, ymax, value, block_idx, line_idx, word_idx\n return [(info[:4], info[4]) for info in self.doc[idx].getTextWords(**kwargs)]\n\n def get_words(self, **kwargs) -> List[List[Tuple[Bbox, str]]]:\n \"\"\"Get the annotations for all words in the document\n\n Example::\n >>> from doctr.documents import DocumentFile\n >>> words = DocumentFile.from_pdf(\"path/to/your/doc.pdf\").get_words()\n\n Args:\n kwargs: keyword arguments of `fitz.Page.getTextWords`\n Returns:\n the list of pages annotations, represented as a list of tuple (bounding box, value)\n \"\"\"\n return [self.get_page_words(idx, **kwargs) for idx in range(len(self.doc))]\n\n def get_page_artefacts(self, idx) -> List[Tuple[float, float, float, float]]:\n return [tuple(self.doc[idx].getImageBbox(artefact)) for artefact in self.doc[idx].get_images(full=True)]\n\n def get_artefacts(self) -> List[List[Tuple[float, float, float, float]]]:\n \"\"\"Get the artefacts for the entire document\n\n Example::\n >>> from doctr.documents import DocumentFile\n >>> artefacts = DocumentFile.from_pdf(\"path/to/your/doc.pdf\").get_artefacts()\n\n Returns:\n the list of pages artefacts, represented as a list of bounding boxes\n \"\"\"\n\n return [self.get_page_artefacts(idx) for idx in range(len(self.doc))]\n\n\nclass DocumentFile:\n \"\"\"Read a document from multiple extensions\"\"\"\n\n @classmethod\n def from_pdf(cls, file: AbstractFile, **kwargs) -> PDF:\n \"\"\"Read a PDF file\n\n Example::\n >>> from doctr.documents import DocumentFile\n >>> doc = DocumentFile.from_pdf(\"path/to/your/doc.pdf\")\n\n Args:\n file: the path to the PDF file or a binary stream\n Returns:\n a PDF document\n \"\"\"\n\n doc = read_pdf(file, **kwargs)\n\n return PDF(doc)\n\n @classmethod\n def from_url(cls, url: str, **kwargs) -> PDF:\n \"\"\"Interpret a web page as a PDF document\n\n Example::\n >>> from doctr.documents import DocumentFile\n >>> doc = DocumentFile.from_url(\"https://www.yoursite.com\")\n\n Args:\n url: the URL of the target web page\n Returns:\n a PDF document\n \"\"\"\n pdf_stream = read_html(url)\n return cls.from_pdf(pdf_stream, **kwargs)\n\n @classmethod\n def from_images(cls, files: Union[Sequence[AbstractFile], AbstractFile], **kwargs) -> List[np.ndarray]:\n \"\"\"Read an image file (or a collection of image files) and convert it into an image in numpy format\n\n Example::\n >>> from doctr.documents import DocumentFile\n >>> pages = DocumentFile.from_images([\"path/to/your/page1.png\", \"path/to/your/page2.png\"])\n\n Args:\n files: the path to the image file or a binary stream, or a collection of those\n Returns:\n the list of pages decoded as numpy ndarray of shape H x W x 3\n \"\"\"\n if isinstance(files, (str, Path, bytes)):\n files = [files]\n\n return [read_img(file, **kwargs) for file in files]\n", "path": "doctr/documents/reader.py"}], "after_files": [{"content": "# Copyright (C) 2021, Mindee.\n\n# This program is licensed under the Apache License version 2.\n# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.\n\nimport numpy as np\nimport cv2\nfrom pathlib import Path\nimport fitz\nfrom weasyprint import HTML\nfrom typing import List, Tuple, Optional, Any, Union, Sequence\n\n__all__ = ['read_pdf', 'read_img', 'read_html', 'DocumentFile', 'PDF']\n\n\nAbstractPath = Union[str, Path]\nAbstractFile = Union[AbstractPath, bytes]\nBbox = Tuple[float, float, float, float]\n\n\ndef read_img(\n file: AbstractFile,\n output_size: Optional[Tuple[int, int]] = None,\n rgb_output: bool = True,\n) -> np.ndarray:\n \"\"\"Read an image file into numpy format\n\n Example::\n >>> from doctr.documents import read_img\n >>> page = read_img(\"path/to/your/doc.jpg\")\n\n Args:\n file: the path to the image file\n output_size: the expected output size of each page in format H x W\n rgb_output: whether the output ndarray channel order should be RGB instead of BGR.\n Returns:\n the page decoded as numpy ndarray of shape H x W x 3\n \"\"\"\n\n if isinstance(file, (str, Path)):\n if not Path(file).is_file():\n raise FileNotFoundError(f\"unable to access {file}\")\n img = cv2.imread(str(file), cv2.IMREAD_COLOR)\n elif isinstance(file, bytes):\n file = np.frombuffer(file, np.uint8)\n img = cv2.imdecode(file, cv2.IMREAD_COLOR)\n else:\n raise TypeError(\"unsupported object type for argument 'file'\")\n\n # Validity check\n if img is None:\n raise ValueError(\"unable to read file.\")\n # Resizing\n if isinstance(output_size, tuple):\n img = cv2.resize(img, output_size[::-1], interpolation=cv2.INTER_LINEAR)\n # Switch the channel order\n if rgb_output:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n return img\n\n\ndef read_pdf(file: AbstractFile, **kwargs: Any) -> fitz.Document:\n \"\"\"Read a PDF file and convert it into an image in numpy format\n\n Example::\n >>> from doctr.documents import read_pdf\n >>> doc = read_pdf(\"path/to/your/doc.pdf\")\n\n Args:\n file: the path to the PDF file\n Returns:\n the list of pages decoded as numpy ndarray of shape H x W x 3\n \"\"\"\n\n if isinstance(file, (str, Path)) and not Path(file).is_file():\n raise FileNotFoundError(f\"unable to access {file}\")\n\n fitz_args = {}\n\n if isinstance(file, (str, Path)):\n fitz_args['filename'] = file\n elif isinstance(file, bytes):\n fitz_args['stream'] = file\n else:\n raise TypeError(\"unsupported object type for argument 'file'\")\n\n # Read pages with fitz and convert them to numpy ndarrays\n return fitz.open(**fitz_args, filetype=\"pdf\", **kwargs)\n\n\ndef convert_page_to_numpy(\n page: fitz.fitz.Page,\n output_size: Optional[Tuple[int, int]] = None,\n rgb_output: bool = True,\n default_scales: Tuple[float, float] = (2, 2),\n) -> np.ndarray:\n \"\"\"Convert a fitz page to a numpy-formatted image\n\n Args:\n page: the page of a file read with PyMuPDF\n output_size: the expected output size of each page in format H x W. Default goes to 840 x 595 for A4 pdf,\n if you want to increase the resolution while preserving the original A4 aspect ratio can pass (1024, 726)\n rgb_output: whether the output ndarray channel order should be RGB instead of BGR.\n default_scales: spatial scaling to be applied when output_size is not specified where (1, 1)\n corresponds to 72 dpi rendering.\n\n Returns:\n the rendered image in numpy format\n \"\"\"\n\n # If no output size is specified, keep the origin one\n if output_size is not None:\n scales = (output_size[1] / page.MediaBox[2], output_size[0] / page.MediaBox[3])\n else:\n # Default 72 DPI (scales of (1, 1)) is unnecessarily low\n scales = default_scales\n\n transform_matrix = fitz.Matrix(*scales)\n\n # Generate the pixel map using the transformation matrix\n stream = page.getPixmap(matrix=transform_matrix).getImageData()\n # Decode it into a numpy\n img = cv2.imdecode(np.frombuffer(stream, dtype=np.uint8), cv2.IMREAD_UNCHANGED)\n\n # Switch the channel order\n if rgb_output:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n return img\n\n\ndef read_html(url: str, **kwargs: Any) -> bytes:\n \"\"\"Read a PDF file and convert it into an image in numpy format\n\n Example::\n >>> from doctr.documents import read_html\n >>> doc = read_html(\"https://www.yoursite.com\")\n\n Args:\n url: URL of the target web page\n Returns:\n decoded PDF file as a bytes stream\n \"\"\"\n\n return HTML(url, **kwargs).write_pdf()\n\n\nclass PDF:\n \"\"\"PDF document template\n\n Args:\n doc: input PDF document\n \"\"\"\n def __init__(self, doc: fitz.Document) -> None:\n self.doc = doc\n\n def as_images(self, **kwargs) -> List[np.ndarray]:\n \"\"\"Convert all document pages to images\n\n Example::\n >>> from doctr.documents import DocumentFile\n >>> pages = DocumentFile.from_pdf(\"path/to/your/doc.pdf\").as_images()\n\n Args:\n kwargs: keyword arguments of `convert_page_to_numpy`\n Returns:\n the list of pages decoded as numpy ndarray of shape H x W x 3\n \"\"\"\n return [convert_page_to_numpy(page, **kwargs) for page in self.doc]\n\n def get_page_words(self, idx, **kwargs) -> List[Tuple[Bbox, str]]:\n \"\"\"Get the annotations for all words of a given page\"\"\"\n\n # xmin, ymin, xmax, ymax, value, block_idx, line_idx, word_idx\n return [(info[:4], info[4]) for info in self.doc[idx].getTextWords(**kwargs)]\n\n def get_words(self, **kwargs) -> List[List[Tuple[Bbox, str]]]:\n \"\"\"Get the annotations for all words in the document\n\n Example::\n >>> from doctr.documents import DocumentFile\n >>> words = DocumentFile.from_pdf(\"path/to/your/doc.pdf\").get_words()\n\n Args:\n kwargs: keyword arguments of `fitz.Page.getTextWords`\n Returns:\n the list of pages annotations, represented as a list of tuple (bounding box, value)\n \"\"\"\n return [self.get_page_words(idx, **kwargs) for idx in range(len(self.doc))]\n\n def get_page_artefacts(self, idx) -> List[Tuple[float, float, float, float]]:\n return [tuple(self.doc[idx].getImageBbox(artefact)) for artefact in self.doc[idx].get_images(full=True)]\n\n def get_artefacts(self) -> List[List[Tuple[float, float, float, float]]]:\n \"\"\"Get the artefacts for the entire document\n\n Example::\n >>> from doctr.documents import DocumentFile\n >>> artefacts = DocumentFile.from_pdf(\"path/to/your/doc.pdf\").get_artefacts()\n\n Returns:\n the list of pages artefacts, represented as a list of bounding boxes\n \"\"\"\n\n return [self.get_page_artefacts(idx) for idx in range(len(self.doc))]\n\n\nclass DocumentFile:\n \"\"\"Read a document from multiple extensions\"\"\"\n\n @classmethod\n def from_pdf(cls, file: AbstractFile, **kwargs) -> PDF:\n \"\"\"Read a PDF file\n\n Example::\n >>> from doctr.documents import DocumentFile\n >>> doc = DocumentFile.from_pdf(\"path/to/your/doc.pdf\")\n\n Args:\n file: the path to the PDF file or a binary stream\n Returns:\n a PDF document\n \"\"\"\n\n doc = read_pdf(file, **kwargs)\n\n return PDF(doc)\n\n @classmethod\n def from_url(cls, url: str, **kwargs) -> PDF:\n \"\"\"Interpret a web page as a PDF document\n\n Example::\n >>> from doctr.documents import DocumentFile\n >>> doc = DocumentFile.from_url(\"https://www.yoursite.com\")\n\n Args:\n url: the URL of the target web page\n Returns:\n a PDF document\n \"\"\"\n pdf_stream = read_html(url)\n return cls.from_pdf(pdf_stream, **kwargs)\n\n @classmethod\n def from_images(cls, files: Union[Sequence[AbstractFile], AbstractFile], **kwargs) -> List[np.ndarray]:\n \"\"\"Read an image file (or a collection of image files) and convert it into an image in numpy format\n\n Example::\n >>> from doctr.documents import DocumentFile\n >>> pages = DocumentFile.from_images([\"path/to/your/page1.png\", \"path/to/your/page2.png\"])\n\n Args:\n files: the path to the image file or a binary stream, or a collection of those\n Returns:\n the list of pages decoded as numpy ndarray of shape H x W x 3\n \"\"\"\n if isinstance(files, (str, Path, bytes)):\n files = [files]\n\n return [read_img(file, **kwargs) for file in files]\n", "path": "doctr/documents/reader.py"}]}
3,044
493
gh_patches_debug_22825
rasdani/github-patches
git_diff
TheAlgorithms__Python-10397
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Improve our test coverage ### Feature description Many of our existing algorithm files have little to no unit testing. This is problematic because this can easily let bugs slip through. We want some assurance that the code we currently have is correct and functional. We welcome all contributors to open PRs to help us add tests to our codebase. ### How to find low-coverage files Go to the Actions tab in this repository and find the most recent **build** workflow run. Open the logs under "Run Tests" and scroll down until you find the section on code coverage: ``` ---------- coverage: platform linux, python 3.12.0-final-0 ----------- Name Stmts Miss Cover Missing ----------------------------------------------------------------------------------------------------------- quantum/q_fourier_transform.py 30 30 0% 14-93 scripts/validate_solutions.py 54 54 0% 2-94 strings/min_cost_string_conversion.py 78 75 4% 20-57, 61-75, 79-129 ... ``` The "Cover" column tells you what percentage of the lines in that file are covered by tests. We want to increase this percentage for existing files. Find a file with low coverage percentage that you wish to write tests for, add doctests for each function, and open a PR with your changes. You do not need to have a perfect coverage percentage, but all functions should have doctests. Some files will naturally be hard to write tests for. For example, the file may be poorly written because they lack any functions. Other files might be how-tos, meaning they simply demonstrate how to use an existing library's functions rather than implementing the algorithm themselves. Ignore these kinds of files, as they will need to be rewritten eventually. Furthermore, ignore files in the `web_programming` and `project_euler` directories. Web programming files are inherently hard to test and Project Euler files have their own validation workflow, so don't worry about their test coverage. _**When you open your PR, put "Contributes to #9943" in the PR description.**_ Do not use the word "fixes", "resolves", or "closes". This issue is an ongoing one, and your PR will not single-handedly resolve this issue. ### How to add doctests A doctest is a unit test that is contained within the documentation comment (docstring) for a function. Here is an example of what doctests look like within a docstring: ```py def add(a: int, b: int) -> int: """ Adds two non-negative numbers. >>> add(1, 1) 2 >>> add(2, 5) 7 >>> add(1, 0) 1 >>> add(-1, -1) Traceback (most recent last): ... ValueError: Numbers must be non-negative """ ``` For every function in the file you choose, you should write doctests like the ones shown above in its docstring. If a function doesn't have a docstring, add one. Your doctests should be comprehensive but not excessive: you should write just enough tests to cover all basic cases as well as all edge cases (e.g., negative numbers, empty lists, etc). Do not simply run a function on some example inputs and put its output as the expected output for a doctest. This assumes that the function is implemented correctly when it might not be. Verify independently that your doctests and their expected outputs are correct. **Your PR will not be merged if it has failing tests.** If you happen to discover a bug while writing doctests, please fix it. _**Please read our [contributing guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) before you contribute.**_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `graphs/a_star.py` Content: ``` 1 from __future__ import annotations 2 3 DIRECTIONS = [ 4 [-1, 0], # left 5 [0, -1], # down 6 [1, 0], # right 7 [0, 1], # up 8 ] 9 10 11 # function to search the path 12 def search( 13 grid: list[list[int]], 14 init: list[int], 15 goal: list[int], 16 cost: int, 17 heuristic: list[list[int]], 18 ) -> tuple[list[list[int]], list[list[int]]]: 19 closed = [ 20 [0 for col in range(len(grid[0]))] for row in range(len(grid)) 21 ] # the reference grid 22 closed[init[0]][init[1]] = 1 23 action = [ 24 [0 for col in range(len(grid[0]))] for row in range(len(grid)) 25 ] # the action grid 26 27 x = init[0] 28 y = init[1] 29 g = 0 30 f = g + heuristic[x][y] # cost from starting cell to destination cell 31 cell = [[f, g, x, y]] 32 33 found = False # flag that is set when search is complete 34 resign = False # flag set if we can't find expand 35 36 while not found and not resign: 37 if len(cell) == 0: 38 raise ValueError("Algorithm is unable to find solution") 39 else: # to choose the least costliest action so as to move closer to the goal 40 cell.sort() 41 cell.reverse() 42 next_cell = cell.pop() 43 x = next_cell[2] 44 y = next_cell[3] 45 g = next_cell[1] 46 47 if x == goal[0] and y == goal[1]: 48 found = True 49 else: 50 for i in range(len(DIRECTIONS)): # to try out different valid actions 51 x2 = x + DIRECTIONS[i][0] 52 y2 = y + DIRECTIONS[i][1] 53 if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]): 54 if closed[x2][y2] == 0 and grid[x2][y2] == 0: 55 g2 = g + cost 56 f2 = g2 + heuristic[x2][y2] 57 cell.append([f2, g2, x2, y2]) 58 closed[x2][y2] = 1 59 action[x2][y2] = i 60 invpath = [] 61 x = goal[0] 62 y = goal[1] 63 invpath.append([x, y]) # we get the reverse path from here 64 while x != init[0] or y != init[1]: 65 x2 = x - DIRECTIONS[action[x][y]][0] 66 y2 = y - DIRECTIONS[action[x][y]][1] 67 x = x2 68 y = y2 69 invpath.append([x, y]) 70 71 path = [] 72 for i in range(len(invpath)): 73 path.append(invpath[len(invpath) - 1 - i]) 74 return path, action 75 76 77 if __name__ == "__main__": 78 grid = [ 79 [0, 1, 0, 0, 0, 0], 80 [0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles 81 [0, 1, 0, 0, 0, 0], 82 [0, 1, 0, 0, 1, 0], 83 [0, 0, 0, 0, 1, 0], 84 ] 85 86 init = [0, 0] 87 # all coordinates are given in format [y,x] 88 goal = [len(grid) - 1, len(grid[0]) - 1] 89 cost = 1 90 91 # the cost map which pushes the path closer to the goal 92 heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))] 93 for i in range(len(grid)): 94 for j in range(len(grid[0])): 95 heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1]) 96 if grid[i][j] == 1: 97 # added extra penalty in the heuristic map 98 heuristic[i][j] = 99 99 100 path, action = search(grid, init, goal, cost, heuristic) 101 102 print("ACTION MAP") 103 for i in range(len(action)): 104 print(action[i]) 105 106 for i in range(len(path)): 107 print(path[i]) 108 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/graphs/a_star.py b/graphs/a_star.py --- a/graphs/a_star.py +++ b/graphs/a_star.py @@ -16,6 +16,31 @@ cost: int, heuristic: list[list[int]], ) -> tuple[list[list[int]], list[list[int]]]: + """ + Search for a path on a grid avoiding obstacles. + >>> grid = [[0, 1, 0, 0, 0, 0], + ... [0, 1, 0, 0, 0, 0], + ... [0, 1, 0, 0, 0, 0], + ... [0, 1, 0, 0, 1, 0], + ... [0, 0, 0, 0, 1, 0]] + >>> init = [0, 0] + >>> goal = [len(grid) - 1, len(grid[0]) - 1] + >>> cost = 1 + >>> heuristic = [[0] * len(grid[0]) for _ in range(len(grid))] + >>> heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))] + >>> for i in range(len(grid)): + ... for j in range(len(grid[0])): + ... heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1]) + ... if grid[i][j] == 1: + ... heuristic[i][j] = 99 + >>> path, action = search(grid, init, goal, cost, heuristic) + >>> path # doctest: +NORMALIZE_WHITESPACE + [[0, 0], [1, 0], [2, 0], [3, 0], [4, 0], [4, 1], [4, 2], [4, 3], [3, 3], + [2, 3], [2, 4], [2, 5], [3, 5], [4, 5]] + >>> action # doctest: +NORMALIZE_WHITESPACE + [[0, 0, 0, 0, 0, 0], [2, 0, 0, 0, 0, 0], [2, 0, 0, 0, 3, 3], + [2, 0, 0, 0, 0, 2], [2, 3, 3, 3, 0, 2]] + """ closed = [ [0 for col in range(len(grid[0]))] for row in range(len(grid)) ] # the reference grid
{"golden_diff": "diff --git a/graphs/a_star.py b/graphs/a_star.py\n--- a/graphs/a_star.py\n+++ b/graphs/a_star.py\n@@ -16,6 +16,31 @@\n cost: int,\n heuristic: list[list[int]],\n ) -> tuple[list[list[int]], list[list[int]]]:\n+ \"\"\"\n+ Search for a path on a grid avoiding obstacles.\n+ >>> grid = [[0, 1, 0, 0, 0, 0],\n+ ... [0, 1, 0, 0, 0, 0],\n+ ... [0, 1, 0, 0, 0, 0],\n+ ... [0, 1, 0, 0, 1, 0],\n+ ... [0, 0, 0, 0, 1, 0]]\n+ >>> init = [0, 0]\n+ >>> goal = [len(grid) - 1, len(grid[0]) - 1]\n+ >>> cost = 1\n+ >>> heuristic = [[0] * len(grid[0]) for _ in range(len(grid))]\n+ >>> heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]\n+ >>> for i in range(len(grid)):\n+ ... for j in range(len(grid[0])):\n+ ... heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])\n+ ... if grid[i][j] == 1:\n+ ... heuristic[i][j] = 99\n+ >>> path, action = search(grid, init, goal, cost, heuristic)\n+ >>> path # doctest: +NORMALIZE_WHITESPACE\n+ [[0, 0], [1, 0], [2, 0], [3, 0], [4, 0], [4, 1], [4, 2], [4, 3], [3, 3],\n+ [2, 3], [2, 4], [2, 5], [3, 5], [4, 5]]\n+ >>> action # doctest: +NORMALIZE_WHITESPACE\n+ [[0, 0, 0, 0, 0, 0], [2, 0, 0, 0, 0, 0], [2, 0, 0, 0, 3, 3],\n+ [2, 0, 0, 0, 0, 2], [2, 3, 3, 3, 0, 2]]\n+ \"\"\"\n closed = [\n [0 for col in range(len(grid[0]))] for row in range(len(grid))\n ] # the reference grid\n", "issue": "Improve our test coverage\n### Feature description\r\n\r\nMany of our existing algorithm files have little to no unit testing. This is problematic because this can easily let bugs slip through. We want some assurance that the code we currently have is correct and functional. We welcome all contributors to open PRs to help us add tests to our codebase.\r\n\r\n### How to find low-coverage files\r\n\r\nGo to the Actions tab in this repository and find the most recent **build** workflow run. Open the logs under \"Run Tests\" and scroll down until you find the section on code coverage:\r\n```\r\n---------- coverage: platform linux, python 3.12.0-final-0 -----------\r\nName Stmts Miss Cover Missing\r\n-----------------------------------------------------------------------------------------------------------\r\nquantum/q_fourier_transform.py 30 30 0% 14-93\r\nscripts/validate_solutions.py 54 54 0% 2-94\r\nstrings/min_cost_string_conversion.py 78 75 4% 20-57, 61-75, 79-129\r\n...\r\n```\r\nThe \"Cover\" column tells you what percentage of the lines in that file are covered by tests. We want to increase this percentage for existing files. Find a file with low coverage percentage that you wish to write tests for, add doctests for each function, and open a PR with your changes. You do not need to have a perfect coverage percentage, but all functions should have doctests.\r\n\r\nSome files will naturally be hard to write tests for. For example, the file may be poorly written because they lack any functions. Other files might be how-tos, meaning they simply demonstrate how to use an existing library's functions rather than implementing the algorithm themselves. Ignore these kinds of files, as they will need to be rewritten eventually. Furthermore, ignore files in the `web_programming` and `project_euler` directories. Web programming files are inherently hard to test and Project Euler files have their own validation workflow, so don't worry about their test coverage.\r\n\r\n_**When you open your PR, put \"Contributes to #9943\" in the PR description.**_ Do not use the word \"fixes\", \"resolves\", or \"closes\". This issue is an ongoing one, and your PR will not single-handedly resolve this issue.\r\n\r\n### How to add doctests\r\n\r\nA doctest is a unit test that is contained within the documentation comment (docstring) for a function. Here is an example of what doctests look like within a docstring:\r\n```py\r\ndef add(a: int, b: int) -> int:\r\n \"\"\"\r\n Adds two non-negative numbers.\r\n >>> add(1, 1)\r\n 2\r\n >>> add(2, 5)\r\n 7\r\n >>> add(1, 0)\r\n 1\r\n >>> add(-1, -1)\r\n Traceback (most recent last):\r\n ...\r\n ValueError: Numbers must be non-negative\r\n \"\"\"\r\n```\r\nFor every function in the file you choose, you should write doctests like the ones shown above in its docstring. If a function doesn't have a docstring, add one. Your doctests should be comprehensive but not excessive: you should write just enough tests to cover all basic cases as well as all edge cases (e.g., negative numbers, empty lists, etc).\r\n\r\nDo not simply run a function on some example inputs and put its output as the expected output for a doctest. This assumes that the function is implemented correctly when it might not be. Verify independently that your doctests and their expected outputs are correct. **Your PR will not be merged if it has failing tests.** If you happen to discover a bug while writing doctests, please fix it.\r\n\r\n_**Please read our [contributing guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) before you contribute.**_\n", "before_files": [{"content": "from __future__ import annotations\n\nDIRECTIONS = [\n [-1, 0], # left\n [0, -1], # down\n [1, 0], # right\n [0, 1], # up\n]\n\n\n# function to search the path\ndef search(\n grid: list[list[int]],\n init: list[int],\n goal: list[int],\n cost: int,\n heuristic: list[list[int]],\n) -> tuple[list[list[int]], list[list[int]]]:\n closed = [\n [0 for col in range(len(grid[0]))] for row in range(len(grid))\n ] # the reference grid\n closed[init[0]][init[1]] = 1\n action = [\n [0 for col in range(len(grid[0]))] for row in range(len(grid))\n ] # the action grid\n\n x = init[0]\n y = init[1]\n g = 0\n f = g + heuristic[x][y] # cost from starting cell to destination cell\n cell = [[f, g, x, y]]\n\n found = False # flag that is set when search is complete\n resign = False # flag set if we can't find expand\n\n while not found and not resign:\n if len(cell) == 0:\n raise ValueError(\"Algorithm is unable to find solution\")\n else: # to choose the least costliest action so as to move closer to the goal\n cell.sort()\n cell.reverse()\n next_cell = cell.pop()\n x = next_cell[2]\n y = next_cell[3]\n g = next_cell[1]\n\n if x == goal[0] and y == goal[1]:\n found = True\n else:\n for i in range(len(DIRECTIONS)): # to try out different valid actions\n x2 = x + DIRECTIONS[i][0]\n y2 = y + DIRECTIONS[i][1]\n if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):\n if closed[x2][y2] == 0 and grid[x2][y2] == 0:\n g2 = g + cost\n f2 = g2 + heuristic[x2][y2]\n cell.append([f2, g2, x2, y2])\n closed[x2][y2] = 1\n action[x2][y2] = i\n invpath = []\n x = goal[0]\n y = goal[1]\n invpath.append([x, y]) # we get the reverse path from here\n while x != init[0] or y != init[1]:\n x2 = x - DIRECTIONS[action[x][y]][0]\n y2 = y - DIRECTIONS[action[x][y]][1]\n x = x2\n y = y2\n invpath.append([x, y])\n\n path = []\n for i in range(len(invpath)):\n path.append(invpath[len(invpath) - 1 - i])\n return path, action\n\n\nif __name__ == \"__main__\":\n grid = [\n [0, 1, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles\n [0, 1, 0, 0, 0, 0],\n [0, 1, 0, 0, 1, 0],\n [0, 0, 0, 0, 1, 0],\n ]\n\n init = [0, 0]\n # all coordinates are given in format [y,x]\n goal = [len(grid) - 1, len(grid[0]) - 1]\n cost = 1\n\n # the cost map which pushes the path closer to the goal\n heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])\n if grid[i][j] == 1:\n # added extra penalty in the heuristic map\n heuristic[i][j] = 99\n\n path, action = search(grid, init, goal, cost, heuristic)\n\n print(\"ACTION MAP\")\n for i in range(len(action)):\n print(action[i])\n\n for i in range(len(path)):\n print(path[i])\n", "path": "graphs/a_star.py"}], "after_files": [{"content": "from __future__ import annotations\n\nDIRECTIONS = [\n [-1, 0], # left\n [0, -1], # down\n [1, 0], # right\n [0, 1], # up\n]\n\n\n# function to search the path\ndef search(\n grid: list[list[int]],\n init: list[int],\n goal: list[int],\n cost: int,\n heuristic: list[list[int]],\n) -> tuple[list[list[int]], list[list[int]]]:\n \"\"\"\n Search for a path on a grid avoiding obstacles.\n >>> grid = [[0, 1, 0, 0, 0, 0],\n ... [0, 1, 0, 0, 0, 0],\n ... [0, 1, 0, 0, 0, 0],\n ... [0, 1, 0, 0, 1, 0],\n ... [0, 0, 0, 0, 1, 0]]\n >>> init = [0, 0]\n >>> goal = [len(grid) - 1, len(grid[0]) - 1]\n >>> cost = 1\n >>> heuristic = [[0] * len(grid[0]) for _ in range(len(grid))]\n >>> heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]\n >>> for i in range(len(grid)):\n ... for j in range(len(grid[0])):\n ... heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])\n ... if grid[i][j] == 1:\n ... heuristic[i][j] = 99\n >>> path, action = search(grid, init, goal, cost, heuristic)\n >>> path # doctest: +NORMALIZE_WHITESPACE\n [[0, 0], [1, 0], [2, 0], [3, 0], [4, 0], [4, 1], [4, 2], [4, 3], [3, 3],\n [2, 3], [2, 4], [2, 5], [3, 5], [4, 5]]\n >>> action # doctest: +NORMALIZE_WHITESPACE\n [[0, 0, 0, 0, 0, 0], [2, 0, 0, 0, 0, 0], [2, 0, 0, 0, 3, 3],\n [2, 0, 0, 0, 0, 2], [2, 3, 3, 3, 0, 2]]\n \"\"\"\n closed = [\n [0 for col in range(len(grid[0]))] for row in range(len(grid))\n ] # the reference grid\n closed[init[0]][init[1]] = 1\n action = [\n [0 for col in range(len(grid[0]))] for row in range(len(grid))\n ] # the action grid\n\n x = init[0]\n y = init[1]\n g = 0\n f = g + heuristic[x][y] # cost from starting cell to destination cell\n cell = [[f, g, x, y]]\n\n found = False # flag that is set when search is complete\n resign = False # flag set if we can't find expand\n\n while not found and not resign:\n if len(cell) == 0:\n raise ValueError(\"Algorithm is unable to find solution\")\n else: # to choose the least costliest action so as to move closer to the goal\n cell.sort()\n cell.reverse()\n next_cell = cell.pop()\n x = next_cell[2]\n y = next_cell[3]\n g = next_cell[1]\n\n if x == goal[0] and y == goal[1]:\n found = True\n else:\n for i in range(len(DIRECTIONS)): # to try out different valid actions\n x2 = x + DIRECTIONS[i][0]\n y2 = y + DIRECTIONS[i][1]\n if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):\n if closed[x2][y2] == 0 and grid[x2][y2] == 0:\n g2 = g + cost\n f2 = g2 + heuristic[x2][y2]\n cell.append([f2, g2, x2, y2])\n closed[x2][y2] = 1\n action[x2][y2] = i\n invpath = []\n x = goal[0]\n y = goal[1]\n invpath.append([x, y]) # we get the reverse path from here\n while x != init[0] or y != init[1]:\n x2 = x - DIRECTIONS[action[x][y]][0]\n y2 = y - DIRECTIONS[action[x][y]][1]\n x = x2\n y = y2\n invpath.append([x, y])\n\n path = []\n for i in range(len(invpath)):\n path.append(invpath[len(invpath) - 1 - i])\n return path, action\n\n\nif __name__ == \"__main__\":\n grid = [\n [0, 1, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles\n [0, 1, 0, 0, 0, 0],\n [0, 1, 0, 0, 1, 0],\n [0, 0, 0, 0, 1, 0],\n ]\n\n init = [0, 0]\n # all coordinates are given in format [y,x]\n goal = [len(grid) - 1, len(grid[0]) - 1]\n cost = 1\n\n # the cost map which pushes the path closer to the goal\n heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])\n if grid[i][j] == 1:\n # added extra penalty in the heuristic map\n heuristic[i][j] = 99\n\n path, action = search(grid, init, goal, cost, heuristic)\n\n print(\"ACTION MAP\")\n for i in range(len(action)):\n print(action[i])\n\n for i in range(len(path)):\n print(path[i])\n", "path": "graphs/a_star.py"}]}
2,337
624
gh_patches_debug_13378
rasdani/github-patches
git_diff
TheAlgorithms__Python-6467
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Enter the logic for hash table ### Describe your change: * [ ] Add an algorithm? * [ ] Fix a bug or typo in an existing algorithm? * [x] Documentation change? ### Checklist: * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `data_structures/hashing/double_hash.py` Content: ``` 1 #!/usr/bin/env python3 2 from .hash_table import HashTable 3 from .number_theory.prime_numbers import is_prime, next_prime 4 5 6 class DoubleHash(HashTable): 7 """ 8 Hash Table example with open addressing and Double Hash 9 """ 10 11 def __init__(self, *args, **kwargs): 12 super().__init__(*args, **kwargs) 13 14 def __hash_function_2(self, value, data): 15 16 next_prime_gt = ( 17 next_prime(value % self.size_table) 18 if not is_prime(value % self.size_table) 19 else value % self.size_table 20 ) # gt = bigger than 21 return next_prime_gt - (data % next_prime_gt) 22 23 def __hash_double_function(self, key, data, increment): 24 return (increment * self.__hash_function_2(key, data)) % self.size_table 25 26 def _collision_resolution(self, key, data=None): 27 i = 1 28 new_key = self.hash_function(data) 29 30 while self.values[new_key] is not None and self.values[new_key] != key: 31 new_key = ( 32 self.__hash_double_function(key, data, i) 33 if self.balanced_factor() >= self.lim_charge 34 else None 35 ) 36 if new_key is None: 37 break 38 else: 39 i += 1 40 41 return new_key 42 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/data_structures/hashing/double_hash.py b/data_structures/hashing/double_hash.py --- a/data_structures/hashing/double_hash.py +++ b/data_structures/hashing/double_hash.py @@ -1,4 +1,16 @@ #!/usr/bin/env python3 +""" +Double hashing is a collision resolving technique in Open Addressed Hash tables. +Double hashing uses the idea of applying a second hash function to key when a collision +occurs. The advantage of Double hashing is that it is one of the best form of probing, +producing a uniform distribution of records throughout a hash table. This technique +does not yield any clusters. It is one of effective method for resolving collisions. + +Double hashing can be done using: (hash1(key) + i * hash2(key)) % TABLE_SIZE +Where hash1() and hash2() are hash functions and TABLE_SIZE is size of hash table. + +Reference: https://en.wikipedia.org/wiki/Double_hashing +""" from .hash_table import HashTable from .number_theory.prime_numbers import is_prime, next_prime
{"golden_diff": "diff --git a/data_structures/hashing/double_hash.py b/data_structures/hashing/double_hash.py\n--- a/data_structures/hashing/double_hash.py\n+++ b/data_structures/hashing/double_hash.py\n@@ -1,4 +1,16 @@\n #!/usr/bin/env python3\n+\"\"\"\n+Double hashing is a collision resolving technique in Open Addressed Hash tables.\n+Double hashing uses the idea of applying a second hash function to key when a collision\n+occurs. The advantage of Double hashing is that it is one of the best form of probing,\n+producing a uniform distribution of records throughout a hash table. This technique\n+does not yield any clusters. It is one of effective method for resolving collisions.\n+\n+Double hashing can be done using: (hash1(key) + i * hash2(key)) % TABLE_SIZE\n+Where hash1() and hash2() are hash functions and TABLE_SIZE is size of hash table.\n+\n+Reference: https://en.wikipedia.org/wiki/Double_hashing\n+\"\"\"\n from .hash_table import HashTable\n from .number_theory.prime_numbers import is_prime, next_prime\n", "issue": "Enter the logic for hash table\n### Describe your change:\r\n\r\n\r\n\r\n* [ ] Add an algorithm?\r\n* [ ] Fix a bug or typo in an existing algorithm?\r\n* [x] Documentation change?\r\n\r\n### Checklist:\r\n* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).\r\n* [x] This pull request is all my own work -- I have not plagiarized.\r\n* [x] I know that pull requests will not be merged if they fail the automated tests.\r\n* [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.\r\n* [ ] All new Python files are placed inside an existing directory.\r\n* [x] All filenames are in all lowercase characters with no spaces or dashes.\r\n* [x] All functions and variable names follow Python naming conventions.\r\n* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).\r\n* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.\r\n* [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.\r\n* [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\nfrom .hash_table import HashTable\nfrom .number_theory.prime_numbers import is_prime, next_prime\n\n\nclass DoubleHash(HashTable):\n \"\"\"\n Hash Table example with open addressing and Double Hash\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def __hash_function_2(self, value, data):\n\n next_prime_gt = (\n next_prime(value % self.size_table)\n if not is_prime(value % self.size_table)\n else value % self.size_table\n ) # gt = bigger than\n return next_prime_gt - (data % next_prime_gt)\n\n def __hash_double_function(self, key, data, increment):\n return (increment * self.__hash_function_2(key, data)) % self.size_table\n\n def _collision_resolution(self, key, data=None):\n i = 1\n new_key = self.hash_function(data)\n\n while self.values[new_key] is not None and self.values[new_key] != key:\n new_key = (\n self.__hash_double_function(key, data, i)\n if self.balanced_factor() >= self.lim_charge\n else None\n )\n if new_key is None:\n break\n else:\n i += 1\n\n return new_key\n", "path": "data_structures/hashing/double_hash.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\"\"\"\nDouble hashing is a collision resolving technique in Open Addressed Hash tables.\nDouble hashing uses the idea of applying a second hash function to key when a collision\noccurs. The advantage of Double hashing is that it is one of the best form of probing,\nproducing a uniform distribution of records throughout a hash table. This technique\ndoes not yield any clusters. It is one of effective method for resolving collisions.\n\nDouble hashing can be done using: (hash1(key) + i * hash2(key)) % TABLE_SIZE\nWhere hash1() and hash2() are hash functions and TABLE_SIZE is size of hash table.\n\nReference: https://en.wikipedia.org/wiki/Double_hashing\n\"\"\"\nfrom .hash_table import HashTable\nfrom .number_theory.prime_numbers import is_prime, next_prime\n\n\nclass DoubleHash(HashTable):\n \"\"\"\n Hash Table example with open addressing and Double Hash\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def __hash_function_2(self, value, data):\n\n next_prime_gt = (\n next_prime(value % self.size_table)\n if not is_prime(value % self.size_table)\n else value % self.size_table\n ) # gt = bigger than\n return next_prime_gt - (data % next_prime_gt)\n\n def __hash_double_function(self, key, data, increment):\n return (increment * self.__hash_function_2(key, data)) % self.size_table\n\n def _collision_resolution(self, key, data=None):\n i = 1\n new_key = self.hash_function(data)\n\n while self.values[new_key] is not None and self.values[new_key] != key:\n new_key = (\n self.__hash_double_function(key, data, i)\n if self.balanced_factor() >= self.lim_charge\n else None\n )\n if new_key is None:\n break\n else:\n i += 1\n\n return new_key\n", "path": "data_structures/hashing/double_hash.py"}]}
929
243
gh_patches_debug_29279
rasdani/github-patches
git_diff
mathesar-foundation__mathesar-121
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Type Inference 1: Check column against a type **Problem** <!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.--> Different types in Mathesar will enable different operations; for example, strings could be aggregated by concatenating, but numeric types could be aggregated by summing or multiplying. So far, while we can reflect different types, we have no way to determine the type most appropriate for a column. **Proposed solution** <!-- A clear and concise description of your proposed solution or feature. --> Given a `schema`, `table_name`, `column_name`, and `type`, we need to be able to return a boolean giving whether the column can be cast to that type. **Additional context** <!-- Add any other context or screenshots about the feature request here.--> We may need to take an optional sample size parameter to do this for large data. Performance testing will be necessary. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `db/types/base.py` Content: ``` 1 from sqlalchemy import create_engine 2 from db import constants 3 4 SCHEMA = f"{constants.MATHESAR_PREFIX}types" 5 # Since we want to have our identifiers quoted appropriately for use in 6 # PostgreSQL, we want to use the postgres dialect preparer to set this up. 7 preparer = create_engine("postgresql://").dialect.identifier_preparer 8 9 10 def get_qualified_name(name): 11 return ".".join([preparer.quote_schema(SCHEMA), name]) 12 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/db/types/base.py b/db/types/base.py --- a/db/types/base.py +++ b/db/types/base.py @@ -1,5 +1,6 @@ -from sqlalchemy import create_engine +from sqlalchemy import create_engine, MetaData, Table, DDL from db import constants +from db.types import email SCHEMA = f"{constants.MATHESAR_PREFIX}types" # Since we want to have our identifiers quoted appropriately for use in @@ -9,3 +10,41 @@ def get_qualified_name(name): return ".".join([preparer.quote_schema(SCHEMA), name]) + + +def get_supported_alter_column_types(engine): + dialect_types = engine.dialect.ischema_names + type_map = { + # Default Postgres types + "boolean": dialect_types.get("boolean"), + "interval": dialect_types.get("interval"), + "numeric": dialect_types.get("numeric"), + "string": dialect_types.get("name"), + # Custom Mathesar types + "email": dialect_types.get(email.QUALIFIED_EMAIL) + } + return {k: v for k, v in type_map.items() if v is not None} + + +def alter_column_type( + schema, table_name, column_name, target_type_str, engine +): + _preparer = engine.dialect.identifier_preparer + supported_types = get_supported_alter_column_types(engine) + target_type = supported_types.get(target_type_str.lower()) + with engine.begin() as conn: + metadata = MetaData(bind=engine, schema=schema) + table = Table( + table_name, metadata, schema=schema, autoload_with=engine + ) + column = table.columns[column_name] + prepared_table_name = _preparer.format_table(table) + prepared_column_name = _preparer.format_column(column) + prepared_type_name = target_type().compile(dialect=engine.dialect) + alter_stmt = f""" + ALTER TABLE {prepared_table_name} + ALTER COLUMN {prepared_column_name} + TYPE {prepared_type_name} + USING {prepared_column_name}::{prepared_type_name}; + """ + conn.execute(DDL(alter_stmt))
{"golden_diff": "diff --git a/db/types/base.py b/db/types/base.py\n--- a/db/types/base.py\n+++ b/db/types/base.py\n@@ -1,5 +1,6 @@\n-from sqlalchemy import create_engine\n+from sqlalchemy import create_engine, MetaData, Table, DDL\n from db import constants\n+from db.types import email\n \n SCHEMA = f\"{constants.MATHESAR_PREFIX}types\"\n # Since we want to have our identifiers quoted appropriately for use in\n@@ -9,3 +10,41 @@\n \n def get_qualified_name(name):\n return \".\".join([preparer.quote_schema(SCHEMA), name])\n+\n+\n+def get_supported_alter_column_types(engine):\n+ dialect_types = engine.dialect.ischema_names\n+ type_map = {\n+ # Default Postgres types\n+ \"boolean\": dialect_types.get(\"boolean\"),\n+ \"interval\": dialect_types.get(\"interval\"),\n+ \"numeric\": dialect_types.get(\"numeric\"),\n+ \"string\": dialect_types.get(\"name\"),\n+ # Custom Mathesar types\n+ \"email\": dialect_types.get(email.QUALIFIED_EMAIL)\n+ }\n+ return {k: v for k, v in type_map.items() if v is not None}\n+\n+\n+def alter_column_type(\n+ schema, table_name, column_name, target_type_str, engine\n+):\n+ _preparer = engine.dialect.identifier_preparer\n+ supported_types = get_supported_alter_column_types(engine)\n+ target_type = supported_types.get(target_type_str.lower())\n+ with engine.begin() as conn:\n+ metadata = MetaData(bind=engine, schema=schema)\n+ table = Table(\n+ table_name, metadata, schema=schema, autoload_with=engine\n+ )\n+ column = table.columns[column_name]\n+ prepared_table_name = _preparer.format_table(table)\n+ prepared_column_name = _preparer.format_column(column)\n+ prepared_type_name = target_type().compile(dialect=engine.dialect)\n+ alter_stmt = f\"\"\"\n+ ALTER TABLE {prepared_table_name}\n+ ALTER COLUMN {prepared_column_name}\n+ TYPE {prepared_type_name}\n+ USING {prepared_column_name}::{prepared_type_name};\n+ \"\"\"\n+ conn.execute(DDL(alter_stmt))\n", "issue": "Type Inference 1: Check column against a type\n**Problem**\r\n<!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.-->\r\n\r\nDifferent types in Mathesar will enable different operations; for example, strings could be aggregated by concatenating, but numeric types could be aggregated by summing or multiplying. So far, while we can reflect different types, we have no way to determine the type most appropriate for a column.\r\n\r\n**Proposed solution**\r\n<!-- A clear and concise description of your proposed solution or feature. -->\r\n\r\nGiven a `schema`, `table_name`, `column_name`, and `type`, we need to be able to return a boolean giving whether the column can be cast to that type.\r\n\r\n**Additional context**\r\n<!-- Add any other context or screenshots about the feature request here.-->\r\n\r\nWe may need to take an optional sample size parameter to do this for large data. Performance testing will be necessary.\r\n\n", "before_files": [{"content": "from sqlalchemy import create_engine\nfrom db import constants\n\nSCHEMA = f\"{constants.MATHESAR_PREFIX}types\"\n# Since we want to have our identifiers quoted appropriately for use in\n# PostgreSQL, we want to use the postgres dialect preparer to set this up.\npreparer = create_engine(\"postgresql://\").dialect.identifier_preparer\n\n\ndef get_qualified_name(name):\n return \".\".join([preparer.quote_schema(SCHEMA), name])\n", "path": "db/types/base.py"}], "after_files": [{"content": "from sqlalchemy import create_engine, MetaData, Table, DDL\nfrom db import constants\nfrom db.types import email\n\nSCHEMA = f\"{constants.MATHESAR_PREFIX}types\"\n# Since we want to have our identifiers quoted appropriately for use in\n# PostgreSQL, we want to use the postgres dialect preparer to set this up.\npreparer = create_engine(\"postgresql://\").dialect.identifier_preparer\n\n\ndef get_qualified_name(name):\n return \".\".join([preparer.quote_schema(SCHEMA), name])\n\n\ndef get_supported_alter_column_types(engine):\n dialect_types = engine.dialect.ischema_names\n type_map = {\n # Default Postgres types\n \"boolean\": dialect_types.get(\"boolean\"),\n \"interval\": dialect_types.get(\"interval\"),\n \"numeric\": dialect_types.get(\"numeric\"),\n \"string\": dialect_types.get(\"name\"),\n # Custom Mathesar types\n \"email\": dialect_types.get(email.QUALIFIED_EMAIL)\n }\n return {k: v for k, v in type_map.items() if v is not None}\n\n\ndef alter_column_type(\n schema, table_name, column_name, target_type_str, engine\n):\n _preparer = engine.dialect.identifier_preparer\n supported_types = get_supported_alter_column_types(engine)\n target_type = supported_types.get(target_type_str.lower())\n with engine.begin() as conn:\n metadata = MetaData(bind=engine, schema=schema)\n table = Table(\n table_name, metadata, schema=schema, autoload_with=engine\n )\n column = table.columns[column_name]\n prepared_table_name = _preparer.format_table(table)\n prepared_column_name = _preparer.format_column(column)\n prepared_type_name = target_type().compile(dialect=engine.dialect)\n alter_stmt = f\"\"\"\n ALTER TABLE {prepared_table_name}\n ALTER COLUMN {prepared_column_name}\n TYPE {prepared_type_name}\n USING {prepared_column_name}::{prepared_type_name};\n \"\"\"\n conn.execute(DDL(alter_stmt))\n", "path": "db/types/base.py"}]}
564
487
gh_patches_debug_3896
rasdani/github-patches
git_diff
Flexget__Flexget-1157
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- t411 : BUG: Unhandled error in plugin discover: maximum recursion depth exceeded while getting the str of an object I've got an error when executing my flexget script flexget execute --task tv-t411 --discover-now This is the config file http://pastie.org/10809799 And this is the crash log [crash_report.2016.04.23.150112065543.zip](https://github.com/Flexget/Flexget/files/233121/crash_report.2016.04.23.150112065543.zip) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `flexget/plugins/plugin_torrent411.py` Content: ``` 1 from __future__ import unicode_literals, division, absolute_import 2 from builtins import * # pylint: disable=unused-import, redefined-builtin 3 4 import logging 5 import re 6 7 from flexget.config_schema import one_or_more 8 from flexget.manager import Session 9 from flexget.plugins.api_t411 import T411Proxy, FriendlySearchQuery, ApiError 10 from flexget import plugin 11 from flexget.event import event 12 13 log = logging.getLogger('t411_plugin') 14 15 16 def escape_query(search_strings): 17 """ 18 Escaping some expression Grey's -> Grey's + Greys + Grey, Marvel's ->Marvel's + Marvels + Marvel etc 19 :param query str[]: 20 :return: 21 """ 22 result = [] 23 for search_string in search_strings: 24 result.append(search_string) 25 short_query = re.sub("'", "", search_string) 26 if search_string != short_query: 27 result.append(short_query) 28 very_short_query = re.sub("'[a-z]", "", search_string) 29 if short_query != very_short_query: 30 result.append(very_short_query) 31 return result 32 33 34 class T411InputPlugin(object): 35 """T411 search/Input plugin. 36 Before any usage, please add your credential with 37 "flexget t411 add-auth <username> <password>" 38 39 t411: 40 category: <see available categories on "flexget t411 list-cats"> 41 terms: <see available terms on "flexget t411 list-terms --category <category name>" 42 max_resutls: XXX 43 """ 44 45 def __init__(self): 46 self.schema = { 47 'type': 'object', 48 'properties': { 49 'category': {'type': 'string'}, 50 'terms': one_or_more({'type': 'string'}), 51 'max_results': {'type': 'number', 'default': 100} 52 }, 53 'additionalProperties': False 54 } 55 56 @staticmethod 57 def build_request_from(config): 58 """ 59 Build a query from plugin config dict 60 :param config: dict 61 :return: 62 """ 63 query = FriendlySearchQuery() 64 query.category_name = config.get('category') 65 query.term_names = config.get('terms', []) 66 query.max_results = config.get('max_results') 67 return query 68 69 @plugin.internet(log) 70 def on_task_input(self, task, config): 71 proxy = T411Proxy() 72 proxy.set_credential() 73 query = T411InputPlugin.build_request_from(config) 74 try: 75 return proxy.search(query) 76 except ApiError as e: 77 log.warning("Server send an error message : %d - %s", e.code, e.message) 78 return [] 79 80 @classmethod 81 @plugin.internet(log) 82 def search(cls, entry=None, config=None, task=None): 83 proxy = T411Proxy() 84 proxy.set_credential() 85 86 query = T411InputPlugin.build_request_from(config) 87 if entry.get('series_season'): 88 query.add_season_term(entry['series_season']) 89 query.add_episode_term(entry['series_episode']) 90 search_strings = escape_query([entry['series_name']]) 91 else: 92 search_strings = entry.get('search_strings', [entry['title']]) 93 search_strings = escape_query(search_strings) 94 95 produced_entries = set() 96 for search_string in search_strings: 97 query.expression = search_string 98 try: 99 search_result = proxy.search(query) 100 produced_entries.update(search_result) 101 except ApiError as e: 102 log.warning("Server send an error message : %d - %s", e.code, e.message) 103 104 return produced_entries 105 106 107 class T411LookupPlugin(object): 108 schema = {'type': 'string', 'enum': ['fill', 'override']} 109 110 @staticmethod 111 def lazy_lookup(entry): 112 string_torrent_id = entry.get('t411_torrent_id') 113 if string_torrent_id is None: 114 log.warning('Looking up T411 for entry pass, no t411_torrent_id found.') 115 pass 116 117 torrent_id = int(string_torrent_id) 118 proxy = T411Proxy() 119 proxy.set_credential() 120 with Session() as session: 121 try: 122 log.info("Lookup torrent details for %d", torrent_id) 123 bind_details = proxy.details(torrent_id, session=session) 124 unbind_details = [dict([ 125 ('term_type_name', term.type.name), 126 ('term_type_id', term.type.id), 127 ('term_id', term.id), 128 ('term_name', term.name)]) for term in bind_details.terms] 129 entry['t411_terms'] = unbind_details 130 except ApiError as e: 131 log.warning("Server send an error message : %d - %s", e.code, e.message) 132 133 # Run after series and metainfo series 134 @plugin.priority(110) 135 def on_task_metainfo(self, task, config): 136 proxy = T411Proxy() 137 proxy.set_credential() 138 for entry in task.entries: 139 if entry.get('t411_torrent_id') is None: 140 continue 141 142 # entry.register_lazy_func(T411LookupPlugin.lazy_lookup, T411LookupPlugin.torrent_details_map) 143 T411LookupPlugin.lazy_lookup(entry) 144 if entry.get('t411_terms', eval_lazy=True) is not None: 145 video_quality = proxy.parse_terms_to_quality(entry.get('t411_terms')) 146 entry_quality = entry.get('quality') 147 if video_quality is None: 148 log.info('Torrent %i hasn\'t video quality description, pass.', entry.get('t411_torrent_id')) 149 continue 150 if entry_quality.source.name == 'unknown' or config == 'override': 151 entry_quality.source = video_quality.source 152 if entry_quality.resolution.name == 'unknown' or config == 'override': 153 entry_quality.resolution = video_quality.resolution 154 155 156 @event('plugin.register') 157 def register_plugin(): 158 plugin.register(T411InputPlugin, 't411', groups=['search', 'input'], api_ver=2) 159 plugin.register(T411LookupPlugin, 't411_lookup', api_ver=2) 160 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/flexget/plugins/plugin_torrent411.py b/flexget/plugins/plugin_torrent411.py old mode 100644 new mode 100755 --- a/flexget/plugins/plugin_torrent411.py +++ b/flexget/plugins/plugin_torrent411.py @@ -61,7 +61,7 @@ """ query = FriendlySearchQuery() query.category_name = config.get('category') - query.term_names = config.get('terms', []) + query.term_names = list(config.get('terms', [])) query.max_results = config.get('max_results') return query
{"golden_diff": "diff --git a/flexget/plugins/plugin_torrent411.py b/flexget/plugins/plugin_torrent411.py\nold mode 100644\nnew mode 100755\n--- a/flexget/plugins/plugin_torrent411.py\n+++ b/flexget/plugins/plugin_torrent411.py\n@@ -61,7 +61,7 @@\n \"\"\"\n query = FriendlySearchQuery()\n query.category_name = config.get('category')\n- query.term_names = config.get('terms', [])\n+ query.term_names = list(config.get('terms', []))\n query.max_results = config.get('max_results')\n return query\n", "issue": "t411 : BUG: Unhandled error in plugin discover: maximum recursion depth exceeded while getting the str of an object\nI've got an error when executing my flexget script\n\nflexget execute --task tv-t411 --discover-now\n\nThis is the config file\nhttp://pastie.org/10809799\n\nAnd this is the crash log\n\n[crash_report.2016.04.23.150112065543.zip](https://github.com/Flexget/Flexget/files/233121/crash_report.2016.04.23.150112065543.zip)\n\n", "before_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # pylint: disable=unused-import, redefined-builtin\n\nimport logging\nimport re\n\nfrom flexget.config_schema import one_or_more\nfrom flexget.manager import Session\nfrom flexget.plugins.api_t411 import T411Proxy, FriendlySearchQuery, ApiError\nfrom flexget import plugin\nfrom flexget.event import event\n\nlog = logging.getLogger('t411_plugin')\n\n\ndef escape_query(search_strings):\n \"\"\"\n Escaping some expression Grey's -> Grey's + Greys + Grey, Marvel's ->Marvel's + Marvels + Marvel etc\n :param query str[]:\n :return:\n \"\"\"\n result = []\n for search_string in search_strings:\n result.append(search_string)\n short_query = re.sub(\"'\", \"\", search_string)\n if search_string != short_query:\n result.append(short_query)\n very_short_query = re.sub(\"'[a-z]\", \"\", search_string)\n if short_query != very_short_query:\n result.append(very_short_query)\n return result\n\n\nclass T411InputPlugin(object):\n \"\"\"T411 search/Input plugin.\n Before any usage, please add your credential with\n \"flexget t411 add-auth <username> <password>\"\n\n t411:\n category: <see available categories on \"flexget t411 list-cats\">\n terms: <see available terms on \"flexget t411 list-terms --category <category name>\"\n max_resutls: XXX\n \"\"\"\n\n def __init__(self):\n self.schema = {\n 'type': 'object',\n 'properties': {\n 'category': {'type': 'string'},\n 'terms': one_or_more({'type': 'string'}),\n 'max_results': {'type': 'number', 'default': 100}\n },\n 'additionalProperties': False\n }\n\n @staticmethod\n def build_request_from(config):\n \"\"\"\n Build a query from plugin config dict\n :param config: dict\n :return:\n \"\"\"\n query = FriendlySearchQuery()\n query.category_name = config.get('category')\n query.term_names = config.get('terms', [])\n query.max_results = config.get('max_results')\n return query\n\n @plugin.internet(log)\n def on_task_input(self, task, config):\n proxy = T411Proxy()\n proxy.set_credential()\n query = T411InputPlugin.build_request_from(config)\n try:\n return proxy.search(query)\n except ApiError as e:\n log.warning(\"Server send an error message : %d - %s\", e.code, e.message)\n return []\n\n @classmethod\n @plugin.internet(log)\n def search(cls, entry=None, config=None, task=None):\n proxy = T411Proxy()\n proxy.set_credential()\n\n query = T411InputPlugin.build_request_from(config)\n if entry.get('series_season'):\n query.add_season_term(entry['series_season'])\n query.add_episode_term(entry['series_episode'])\n search_strings = escape_query([entry['series_name']])\n else:\n search_strings = entry.get('search_strings', [entry['title']])\n search_strings = escape_query(search_strings)\n\n produced_entries = set()\n for search_string in search_strings:\n query.expression = search_string\n try:\n search_result = proxy.search(query)\n produced_entries.update(search_result)\n except ApiError as e:\n log.warning(\"Server send an error message : %d - %s\", e.code, e.message)\n\n return produced_entries\n\n\nclass T411LookupPlugin(object):\n schema = {'type': 'string', 'enum': ['fill', 'override']}\n\n @staticmethod\n def lazy_lookup(entry):\n string_torrent_id = entry.get('t411_torrent_id')\n if string_torrent_id is None:\n log.warning('Looking up T411 for entry pass, no t411_torrent_id found.')\n pass\n\n torrent_id = int(string_torrent_id)\n proxy = T411Proxy()\n proxy.set_credential()\n with Session() as session:\n try:\n log.info(\"Lookup torrent details for %d\", torrent_id)\n bind_details = proxy.details(torrent_id, session=session)\n unbind_details = [dict([\n ('term_type_name', term.type.name),\n ('term_type_id', term.type.id),\n ('term_id', term.id),\n ('term_name', term.name)]) for term in bind_details.terms]\n entry['t411_terms'] = unbind_details\n except ApiError as e:\n log.warning(\"Server send an error message : %d - %s\", e.code, e.message)\n\n # Run after series and metainfo series\n @plugin.priority(110)\n def on_task_metainfo(self, task, config):\n proxy = T411Proxy()\n proxy.set_credential()\n for entry in task.entries:\n if entry.get('t411_torrent_id') is None:\n continue\n\n # entry.register_lazy_func(T411LookupPlugin.lazy_lookup, T411LookupPlugin.torrent_details_map)\n T411LookupPlugin.lazy_lookup(entry)\n if entry.get('t411_terms', eval_lazy=True) is not None:\n video_quality = proxy.parse_terms_to_quality(entry.get('t411_terms'))\n entry_quality = entry.get('quality')\n if video_quality is None:\n log.info('Torrent %i hasn\\'t video quality description, pass.', entry.get('t411_torrent_id'))\n continue\n if entry_quality.source.name == 'unknown' or config == 'override':\n entry_quality.source = video_quality.source\n if entry_quality.resolution.name == 'unknown' or config == 'override':\n entry_quality.resolution = video_quality.resolution\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(T411InputPlugin, 't411', groups=['search', 'input'], api_ver=2)\n plugin.register(T411LookupPlugin, 't411_lookup', api_ver=2)\n", "path": "flexget/plugins/plugin_torrent411.py"}], "after_files": [{"content": "# coding=utf-8\nfrom __future__ import unicode_literals, division, absolute_import\nimport logging\nimport re\nfrom flexget.config_schema import one_or_more\nfrom flexget.manager import Session\nfrom flexget.plugins.api_t411 import T411Proxy, FriendlySearchQuery, ApiError\n\nfrom flexget import plugin\nfrom flexget.event import event\n\nlog = logging.getLogger('t411_plugin')\n\n\ndef escape_query(search_strings):\n \"\"\"\n Escaping some expression Grey's -> Grey's + Greys + Grey, Marvel's ->Marvel's + Marvels + Marvel etc\n :param query str[]:\n :return:\n \"\"\"\n result = []\n for search_string in search_strings:\n result.append(search_string)\n short_query = re.sub(\"'\", \"\", search_string)\n if search_string != short_query:\n result.append(short_query)\n very_short_query = re.sub(\"'[a-z]\", \"\", search_string)\n if short_query != very_short_query:\n result.append(very_short_query)\n return result\n\n\nclass T411InputPlugin(object):\n \"\"\"T411 search/Input plugin.\n Before any usage, please add your credential with\n \"flexget t411 add-auth <username> <password>\"\n\n t411:\n category: <see available categories on \"flexget t411 list-cats\">\n terms: <see available terms on \"flexget t411 list-terms --category <category name>\"\n max_resutls: XXX\n \"\"\"\n\n def __init__(self):\n self.schema = {\n 'type': 'object',\n 'properties': {\n 'category': {'type': 'string'},\n 'terms': one_or_more({'type': 'string'}),\n 'max_results': {'type': 'number', 'default': 100}\n },\n 'additionalProperties': False\n }\n\n @staticmethod\n def build_request_from(config):\n \"\"\"\n Build a query from plugin config dict\n :param config: dict\n :return:\n \"\"\"\n query = FriendlySearchQuery()\n query.category_name = config.get('category')\n query.term_names = list(config.get('terms', []))\n query.max_results = config.get('max_results')\n return query\n\n @plugin.internet(log)\n def on_task_input(self, task, config):\n proxy = T411Proxy()\n proxy.set_credential()\n query = T411InputPlugin.build_request_from(config)\n try:\n return proxy.search(query)\n except ApiError as e:\n log.warning(\"Server send an error message : %d - %s\", e.code, e.message)\n return []\n\n @classmethod\n @plugin.internet(log)\n def search(cls, entry=None, config=None, task=None):\n proxy = T411Proxy()\n proxy.set_credential()\n\n query = T411InputPlugin.build_request_from(config)\n if entry.get('series_season'):\n query.add_season_term(entry['series_season'])\n query.add_episode_term(entry['series_episode'])\n search_strings = escape_query([entry['series_name']])\n else:\n search_strings = entry.get('search_strings', [entry['title']])\n search_strings = escape_query(search_strings)\n\n produced_entries = set()\n for search_string in search_strings:\n query.expression = search_string\n try:\n search_result = proxy.search(query)\n produced_entries.update(search_result)\n except ApiError as e:\n log.warning(\"Server send an error message : %d - %s\", e.code, e.message)\n\n return produced_entries\n\n\nclass T411LookupPlugin(object):\n schema = {'type': 'string', 'enum': ['fill', 'override']}\n\n @staticmethod\n def lazy_lookup(entry):\n string_torrent_id = entry.get('t411_torrent_id')\n if string_torrent_id is None:\n log.warning('Looking up T411 for entry pass, no t411_torrent_id found.')\n pass\n\n torrent_id = int(string_torrent_id)\n proxy = T411Proxy()\n proxy.set_credential()\n with Session() as session:\n try:\n log.info(\"Lookup torrent details for %d\", torrent_id)\n bind_details = proxy.details(torrent_id, session=session)\n unbind_details = [dict([\n ('term_type_name', term.type.name),\n ('term_type_id', term.type.id),\n ('term_id', term.id),\n ('term_name', term.name)]) for term in bind_details.terms]\n entry['t411_terms'] = unbind_details\n except ApiError as e:\n log.warning(\"Server send an error message : %d - %s\", e.code, e.message)\n\n # Run after series and metainfo series\n @plugin.priority(110)\n def on_task_metainfo(self, task, config):\n proxy = T411Proxy()\n proxy.set_credential()\n for entry in task.entries:\n if entry.get('t411_torrent_id') is None:\n continue\n\n # entry.register_lazy_func(T411LookupPlugin.lazy_lookup, T411LookupPlugin.torrent_details_map)\n T411LookupPlugin.lazy_lookup(entry)\n if entry.get('t411_terms', eval_lazy=True) is not None:\n video_quality = proxy.parse_terms_to_quality(entry.get('t411_terms'))\n entry_quality = entry.get('quality')\n if video_quality is None:\n log.info('Torrent %i hasn\\'t video quality description, pass.', entry.get('t411_torrent_id'))\n continue\n if entry_quality.source.name == 'unknown' or config == 'override':\n entry_quality.source = video_quality.source\n if entry_quality.resolution.name == 'unknown' or config == 'override':\n entry_quality.resolution = video_quality.resolution\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(T411InputPlugin, 't411', groups=['search', 'input'], api_ver=2)\n plugin.register(T411LookupPlugin, 't411_lookup', api_ver=2)\n", "path": "flexget/plugins/plugin_torrent411.py"}]}
2,139
148
gh_patches_debug_40048
rasdani/github-patches
git_diff
sotetsuk__pgx-1171
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [Kuhn Poker] Simplify the implementation Hey, multiple implementations of Kuhn Poker out there ([open_spiel ](https://github.com/google-deepmind/open_spiel/blob/b8c2ff8e9a4f5dad9b179217f740ddb0df967f7c/open_spiel/games/kuhn_poker.cc)for instance) use only two actions (pass, bet) instead of the four considered in pgx (call, bet, check, fold). In fact we can group bet/call and check/fold without ambiguity for this game. Would you be interested by this simplification? I would be happy to open a PR if you are! --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pgx/kuhn_poker.py` Content: ``` 1 # Copyright 2023 The Pgx Authors. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import jax 16 import jax.numpy as jnp 17 18 import pgx.core as core 19 from pgx._src.struct import dataclass 20 from pgx._src.types import Array, PRNGKey 21 22 FALSE = jnp.bool_(False) 23 TRUE = jnp.bool_(True) 24 CALL = jnp.int32(0) 25 BET = jnp.int32(1) 26 FOLD = jnp.int32(2) 27 CHECK = jnp.int32(3) 28 29 30 @dataclass 31 class State(core.State): 32 current_player: Array = jnp.int32(0) 33 observation: Array = jnp.zeros((8, 8, 2), dtype=jnp.bool_) 34 rewards: Array = jnp.float32([0.0, 0.0]) 35 terminated: Array = FALSE 36 truncated: Array = FALSE 37 legal_action_mask: Array = jnp.ones(4, dtype=jnp.bool_) 38 _step_count: Array = jnp.int32(0) 39 # --- Kuhn poker specific --- 40 _cards: Array = jnp.int32([-1, -1]) 41 # [(player 0),(player 1)] 42 _last_action: Array = jnp.int32(-1) 43 # 0(Call) 1(Bet) 2(Fold) 3(Check) 44 _pot: Array = jnp.int32([0, 0]) 45 46 @property 47 def env_id(self) -> core.EnvId: 48 return "kuhn_poker" 49 50 51 class KuhnPoker(core.Env): 52 def __init__(self): 53 super().__init__() 54 55 def _init(self, key: PRNGKey) -> State: 56 return _init(key) 57 58 def _step(self, state: core.State, action: Array, key) -> State: 59 del key 60 assert isinstance(state, State) 61 return _step(state, action) 62 63 def _observe(self, state: core.State, player_id: Array) -> Array: 64 assert isinstance(state, State) 65 return _observe(state, player_id) 66 67 @property 68 def id(self) -> core.EnvId: 69 return "kuhn_poker" 70 71 @property 72 def version(self) -> str: 73 return "v0" 74 75 @property 76 def num_players(self) -> int: 77 return 2 78 79 80 def _init(rng: PRNGKey) -> State: 81 rng1, rng2 = jax.random.split(rng) 82 current_player = jnp.int32(jax.random.bernoulli(rng1)) 83 init_card = jax.random.choice(rng2, jnp.int32([[0, 1], [0, 2], [1, 0], [1, 2], [2, 0], [2, 1]])) 84 return State( # type:ignore 85 current_player=current_player, 86 _cards=init_card, 87 legal_action_mask=jnp.bool_([0, 1, 0, 1]), 88 ) 89 90 91 def _step(state: State, action): 92 action = jnp.int32(action) 93 pot = jax.lax.cond( 94 (action == BET) | (action == CALL), 95 lambda: state._pot.at[state.current_player].add(1), 96 lambda: state._pot, 97 ) 98 99 terminated, reward = jax.lax.cond( 100 action == FOLD, 101 lambda: ( 102 TRUE, 103 jnp.float32([-1, -1]).at[1 - state.current_player].set(1), 104 ), 105 lambda: (FALSE, jnp.float32([0, 0])), 106 ) 107 terminated, reward = jax.lax.cond( 108 (state._last_action == BET) & (action == CALL), 109 lambda: (TRUE, _get_unit_reward(state) * 2), 110 lambda: (terminated, reward), 111 ) 112 terminated, reward = jax.lax.cond( 113 (state._last_action == CHECK) & (action == CHECK), 114 lambda: (TRUE, _get_unit_reward(state)), 115 lambda: (terminated, reward), 116 ) 117 118 legal_action = jax.lax.switch( 119 action, 120 [ 121 lambda: jnp.bool_([0, 0, 0, 0]), # CALL 122 lambda: jnp.bool_([1, 0, 1, 0]), # BET 123 lambda: jnp.bool_([0, 0, 0, 0]), # FOLD 124 lambda: jnp.bool_([0, 1, 0, 1]), # CHECK 125 ], 126 ) 127 128 return state.replace( # type:ignore 129 current_player=1 - state.current_player, 130 _last_action=action, 131 legal_action_mask=legal_action, 132 terminated=terminated, 133 rewards=reward, 134 _pot=pot, 135 ) 136 137 138 def _get_unit_reward(state: State): 139 return jax.lax.cond( 140 state._cards[state.current_player] > state._cards[1 - state.current_player], 141 lambda: jnp.float32([-1, -1]).at[state.current_player].set(1), 142 lambda: jnp.float32([-1, -1]).at[1 - state.current_player].set(1), 143 ) 144 145 146 def _observe(state: State, player_id) -> Array: 147 """ 148 Index Meaning 149 0~2 J ~ K in hand 150 3~4 0~1 chips for the current player 151 5~6 0~1 chips for the opponent 152 """ 153 obs = jnp.zeros(7, dtype=jnp.bool_) 154 obs = obs.at[state._cards[player_id]].set(TRUE) 155 obs = obs.at[3 + state._pot[player_id]].set(TRUE) 156 obs = obs.at[5 + state._pot[1 - player_id]].set(TRUE) 157 158 return obs 159 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pgx/kuhn_poker.py b/pgx/kuhn_poker.py --- a/pgx/kuhn_poker.py +++ b/pgx/kuhn_poker.py @@ -21,10 +21,8 @@ FALSE = jnp.bool_(False) TRUE = jnp.bool_(True) -CALL = jnp.int32(0) -BET = jnp.int32(1) -FOLD = jnp.int32(2) -CHECK = jnp.int32(3) +BET = jnp.int32(0) +PASS = jnp.int32(1) @dataclass @@ -34,13 +32,13 @@ rewards: Array = jnp.float32([0.0, 0.0]) terminated: Array = FALSE truncated: Array = FALSE - legal_action_mask: Array = jnp.ones(4, dtype=jnp.bool_) + legal_action_mask: Array = jnp.ones(2, dtype=jnp.bool_) _step_count: Array = jnp.int32(0) # --- Kuhn poker specific --- _cards: Array = jnp.int32([-1, -1]) # [(player 0),(player 1)] _last_action: Array = jnp.int32(-1) - # 0(Call) 1(Bet) 2(Fold) 3(Check) + # 0(Bet) 1(Pass) _pot: Array = jnp.int32([0, 0]) @property @@ -84,20 +82,20 @@ return State( # type:ignore current_player=current_player, _cards=init_card, - legal_action_mask=jnp.bool_([0, 1, 0, 1]), + legal_action_mask=jnp.bool_([1, 1]), ) def _step(state: State, action): action = jnp.int32(action) pot = jax.lax.cond( - (action == BET) | (action == CALL), + (action == BET), lambda: state._pot.at[state.current_player].add(1), lambda: state._pot, ) terminated, reward = jax.lax.cond( - action == FOLD, + (state._last_action == BET) & (action == PASS), lambda: ( TRUE, jnp.float32([-1, -1]).at[1 - state.current_player].set(1), @@ -105,25 +103,17 @@ lambda: (FALSE, jnp.float32([0, 0])), ) terminated, reward = jax.lax.cond( - (state._last_action == BET) & (action == CALL), + (state._last_action == BET) & (action == BET), lambda: (TRUE, _get_unit_reward(state) * 2), lambda: (terminated, reward), ) terminated, reward = jax.lax.cond( - (state._last_action == CHECK) & (action == CHECK), + (state._last_action == PASS) & (action == PASS), lambda: (TRUE, _get_unit_reward(state)), lambda: (terminated, reward), ) - legal_action = jax.lax.switch( - action, - [ - lambda: jnp.bool_([0, 0, 0, 0]), # CALL - lambda: jnp.bool_([1, 0, 1, 0]), # BET - lambda: jnp.bool_([0, 0, 0, 0]), # FOLD - lambda: jnp.bool_([0, 1, 0, 1]), # CHECK - ], - ) + legal_action = jax.lax.select(terminated, jnp.bool_([0, 0]), jnp.bool_([1, 1])) return state.replace( # type:ignore current_player=1 - state.current_player,
{"golden_diff": "diff --git a/pgx/kuhn_poker.py b/pgx/kuhn_poker.py\n--- a/pgx/kuhn_poker.py\n+++ b/pgx/kuhn_poker.py\n@@ -21,10 +21,8 @@\n \n FALSE = jnp.bool_(False)\n TRUE = jnp.bool_(True)\n-CALL = jnp.int32(0)\n-BET = jnp.int32(1)\n-FOLD = jnp.int32(2)\n-CHECK = jnp.int32(3)\n+BET = jnp.int32(0)\n+PASS = jnp.int32(1)\n \n \n @dataclass\n@@ -34,13 +32,13 @@\n rewards: Array = jnp.float32([0.0, 0.0])\n terminated: Array = FALSE\n truncated: Array = FALSE\n- legal_action_mask: Array = jnp.ones(4, dtype=jnp.bool_)\n+ legal_action_mask: Array = jnp.ones(2, dtype=jnp.bool_)\n _step_count: Array = jnp.int32(0)\n # --- Kuhn poker specific ---\n _cards: Array = jnp.int32([-1, -1])\n # [(player 0),(player 1)]\n _last_action: Array = jnp.int32(-1)\n- # 0(Call) 1(Bet) 2(Fold) 3(Check)\n+ # 0(Bet) 1(Pass)\n _pot: Array = jnp.int32([0, 0])\n \n @property\n@@ -84,20 +82,20 @@\n return State( # type:ignore\n current_player=current_player,\n _cards=init_card,\n- legal_action_mask=jnp.bool_([0, 1, 0, 1]),\n+ legal_action_mask=jnp.bool_([1, 1]),\n )\n \n \n def _step(state: State, action):\n action = jnp.int32(action)\n pot = jax.lax.cond(\n- (action == BET) | (action == CALL),\n+ (action == BET),\n lambda: state._pot.at[state.current_player].add(1),\n lambda: state._pot,\n )\n \n terminated, reward = jax.lax.cond(\n- action == FOLD,\n+ (state._last_action == BET) & (action == PASS),\n lambda: (\n TRUE,\n jnp.float32([-1, -1]).at[1 - state.current_player].set(1),\n@@ -105,25 +103,17 @@\n lambda: (FALSE, jnp.float32([0, 0])),\n )\n terminated, reward = jax.lax.cond(\n- (state._last_action == BET) & (action == CALL),\n+ (state._last_action == BET) & (action == BET),\n lambda: (TRUE, _get_unit_reward(state) * 2),\n lambda: (terminated, reward),\n )\n terminated, reward = jax.lax.cond(\n- (state._last_action == CHECK) & (action == CHECK),\n+ (state._last_action == PASS) & (action == PASS),\n lambda: (TRUE, _get_unit_reward(state)),\n lambda: (terminated, reward),\n )\n \n- legal_action = jax.lax.switch(\n- action,\n- [\n- lambda: jnp.bool_([0, 0, 0, 0]), # CALL\n- lambda: jnp.bool_([1, 0, 1, 0]), # BET\n- lambda: jnp.bool_([0, 0, 0, 0]), # FOLD\n- lambda: jnp.bool_([0, 1, 0, 1]), # CHECK\n- ],\n- )\n+ legal_action = jax.lax.select(terminated, jnp.bool_([0, 0]), jnp.bool_([1, 1]))\n \n return state.replace( # type:ignore\n current_player=1 - state.current_player,\n", "issue": "[Kuhn Poker] Simplify the implementation\nHey, multiple implementations of Kuhn Poker out there ([open_spiel ](https://github.com/google-deepmind/open_spiel/blob/b8c2ff8e9a4f5dad9b179217f740ddb0df967f7c/open_spiel/games/kuhn_poker.cc)for instance) use only two actions (pass, bet) instead of the four considered in pgx (call, bet, check, fold). In fact we can group bet/call and check/fold without ambiguity for this game. \r\n\r\nWould you be interested by this simplification? I would be happy to open a PR if you are!\n", "before_files": [{"content": "# Copyright 2023 The Pgx Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport jax\nimport jax.numpy as jnp\n\nimport pgx.core as core\nfrom pgx._src.struct import dataclass\nfrom pgx._src.types import Array, PRNGKey\n\nFALSE = jnp.bool_(False)\nTRUE = jnp.bool_(True)\nCALL = jnp.int32(0)\nBET = jnp.int32(1)\nFOLD = jnp.int32(2)\nCHECK = jnp.int32(3)\n\n\n@dataclass\nclass State(core.State):\n current_player: Array = jnp.int32(0)\n observation: Array = jnp.zeros((8, 8, 2), dtype=jnp.bool_)\n rewards: Array = jnp.float32([0.0, 0.0])\n terminated: Array = FALSE\n truncated: Array = FALSE\n legal_action_mask: Array = jnp.ones(4, dtype=jnp.bool_)\n _step_count: Array = jnp.int32(0)\n # --- Kuhn poker specific ---\n _cards: Array = jnp.int32([-1, -1])\n # [(player 0),(player 1)]\n _last_action: Array = jnp.int32(-1)\n # 0(Call) 1(Bet) 2(Fold) 3(Check)\n _pot: Array = jnp.int32([0, 0])\n\n @property\n def env_id(self) -> core.EnvId:\n return \"kuhn_poker\"\n\n\nclass KuhnPoker(core.Env):\n def __init__(self):\n super().__init__()\n\n def _init(self, key: PRNGKey) -> State:\n return _init(key)\n\n def _step(self, state: core.State, action: Array, key) -> State:\n del key\n assert isinstance(state, State)\n return _step(state, action)\n\n def _observe(self, state: core.State, player_id: Array) -> Array:\n assert isinstance(state, State)\n return _observe(state, player_id)\n\n @property\n def id(self) -> core.EnvId:\n return \"kuhn_poker\"\n\n @property\n def version(self) -> str:\n return \"v0\"\n\n @property\n def num_players(self) -> int:\n return 2\n\n\ndef _init(rng: PRNGKey) -> State:\n rng1, rng2 = jax.random.split(rng)\n current_player = jnp.int32(jax.random.bernoulli(rng1))\n init_card = jax.random.choice(rng2, jnp.int32([[0, 1], [0, 2], [1, 0], [1, 2], [2, 0], [2, 1]]))\n return State( # type:ignore\n current_player=current_player,\n _cards=init_card,\n legal_action_mask=jnp.bool_([0, 1, 0, 1]),\n )\n\n\ndef _step(state: State, action):\n action = jnp.int32(action)\n pot = jax.lax.cond(\n (action == BET) | (action == CALL),\n lambda: state._pot.at[state.current_player].add(1),\n lambda: state._pot,\n )\n\n terminated, reward = jax.lax.cond(\n action == FOLD,\n lambda: (\n TRUE,\n jnp.float32([-1, -1]).at[1 - state.current_player].set(1),\n ),\n lambda: (FALSE, jnp.float32([0, 0])),\n )\n terminated, reward = jax.lax.cond(\n (state._last_action == BET) & (action == CALL),\n lambda: (TRUE, _get_unit_reward(state) * 2),\n lambda: (terminated, reward),\n )\n terminated, reward = jax.lax.cond(\n (state._last_action == CHECK) & (action == CHECK),\n lambda: (TRUE, _get_unit_reward(state)),\n lambda: (terminated, reward),\n )\n\n legal_action = jax.lax.switch(\n action,\n [\n lambda: jnp.bool_([0, 0, 0, 0]), # CALL\n lambda: jnp.bool_([1, 0, 1, 0]), # BET\n lambda: jnp.bool_([0, 0, 0, 0]), # FOLD\n lambda: jnp.bool_([0, 1, 0, 1]), # CHECK\n ],\n )\n\n return state.replace( # type:ignore\n current_player=1 - state.current_player,\n _last_action=action,\n legal_action_mask=legal_action,\n terminated=terminated,\n rewards=reward,\n _pot=pot,\n )\n\n\ndef _get_unit_reward(state: State):\n return jax.lax.cond(\n state._cards[state.current_player] > state._cards[1 - state.current_player],\n lambda: jnp.float32([-1, -1]).at[state.current_player].set(1),\n lambda: jnp.float32([-1, -1]).at[1 - state.current_player].set(1),\n )\n\n\ndef _observe(state: State, player_id) -> Array:\n \"\"\"\n Index Meaning\n 0~2 J ~ K in hand\n 3~4 0~1 chips for the current player\n 5~6 0~1 chips for the opponent\n \"\"\"\n obs = jnp.zeros(7, dtype=jnp.bool_)\n obs = obs.at[state._cards[player_id]].set(TRUE)\n obs = obs.at[3 + state._pot[player_id]].set(TRUE)\n obs = obs.at[5 + state._pot[1 - player_id]].set(TRUE)\n\n return obs\n", "path": "pgx/kuhn_poker.py"}], "after_files": [{"content": "# Copyright 2023 The Pgx Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport jax\nimport jax.numpy as jnp\n\nimport pgx.core as core\nfrom pgx._src.struct import dataclass\nfrom pgx._src.types import Array, PRNGKey\n\nFALSE = jnp.bool_(False)\nTRUE = jnp.bool_(True)\nBET = jnp.int32(0)\nPASS = jnp.int32(1)\n\n\n@dataclass\nclass State(core.State):\n current_player: Array = jnp.int32(0)\n observation: Array = jnp.zeros((8, 8, 2), dtype=jnp.bool_)\n rewards: Array = jnp.float32([0.0, 0.0])\n terminated: Array = FALSE\n truncated: Array = FALSE\n legal_action_mask: Array = jnp.ones(2, dtype=jnp.bool_)\n _step_count: Array = jnp.int32(0)\n # --- Kuhn poker specific ---\n _cards: Array = jnp.int32([-1, -1])\n # [(player 0),(player 1)]\n _last_action: Array = jnp.int32(-1)\n # 0(Bet) 1(Pass)\n _pot: Array = jnp.int32([0, 0])\n\n @property\n def env_id(self) -> core.EnvId:\n return \"kuhn_poker\"\n\n\nclass KuhnPoker(core.Env):\n def __init__(self):\n super().__init__()\n\n def _init(self, key: PRNGKey) -> State:\n return _init(key)\n\n def _step(self, state: core.State, action: Array, key) -> State:\n del key\n assert isinstance(state, State)\n return _step(state, action)\n\n def _observe(self, state: core.State, player_id: Array) -> Array:\n assert isinstance(state, State)\n return _observe(state, player_id)\n\n @property\n def id(self) -> core.EnvId:\n return \"kuhn_poker\"\n\n @property\n def version(self) -> str:\n return \"v0\"\n\n @property\n def num_players(self) -> int:\n return 2\n\n\ndef _init(rng: PRNGKey) -> State:\n rng1, rng2 = jax.random.split(rng)\n current_player = jnp.int32(jax.random.bernoulli(rng1))\n init_card = jax.random.choice(rng2, jnp.int32([[0, 1], [0, 2], [1, 0], [1, 2], [2, 0], [2, 1]]))\n return State( # type:ignore\n current_player=current_player,\n _cards=init_card,\n legal_action_mask=jnp.bool_([1, 1]),\n )\n\n\ndef _step(state: State, action):\n action = jnp.int32(action)\n pot = jax.lax.cond(\n (action == BET),\n lambda: state._pot.at[state.current_player].add(1),\n lambda: state._pot,\n )\n\n terminated, reward = jax.lax.cond(\n (state._last_action == BET) & (action == PASS),\n lambda: (\n TRUE,\n jnp.float32([-1, -1]).at[1 - state.current_player].set(1),\n ),\n lambda: (FALSE, jnp.float32([0, 0])),\n )\n terminated, reward = jax.lax.cond(\n (state._last_action == BET) & (action == BET),\n lambda: (TRUE, _get_unit_reward(state) * 2),\n lambda: (terminated, reward),\n )\n terminated, reward = jax.lax.cond(\n (state._last_action == PASS) & (action == PASS),\n lambda: (TRUE, _get_unit_reward(state)),\n lambda: (terminated, reward),\n )\n\n legal_action = jax.lax.select(terminated, jnp.bool_([0, 0]), jnp.bool_([1, 1]))\n\n return state.replace( # type:ignore\n current_player=1 - state.current_player,\n _last_action=action,\n legal_action_mask=legal_action,\n terminated=terminated,\n rewards=reward,\n _pot=pot,\n )\n\n\ndef _get_unit_reward(state: State):\n return jax.lax.cond(\n state._cards[state.current_player] > state._cards[1 - state.current_player],\n lambda: jnp.float32([-1, -1]).at[state.current_player].set(1),\n lambda: jnp.float32([-1, -1]).at[1 - state.current_player].set(1),\n )\n\n\ndef _observe(state: State, player_id) -> Array:\n \"\"\"\n Index Meaning\n 0~2 J ~ K in hand\n 3~4 0~1 chips for the current player\n 5~6 0~1 chips for the opponent\n \"\"\"\n obs = jnp.zeros(7, dtype=jnp.bool_)\n obs = obs.at[state._cards[player_id]].set(TRUE)\n obs = obs.at[3 + state._pot[player_id]].set(TRUE)\n obs = obs.at[5 + state._pot[1 - player_id]].set(TRUE)\n\n return obs\n", "path": "pgx/kuhn_poker.py"}]}
2,205
917
gh_patches_debug_19467
rasdani/github-patches
git_diff
mlcommons__GaNDLF-675
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Perform penalty calculation after all sanity checks are completed **Is your feature request related to a problem? Please describe.** The penalty calculation takes a long time, and there are sanity checks that happen after this, which can be a pain. **Describe the solution you'd like** It would be great to have these checks before the penalty calculation for quality-of-life improvements. **Describe alternatives you've considered** N.A. **Additional context** From Evan C. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `GANDLF/compute/generic.py` Content: ``` 1 from GANDLF.models import get_model 2 from GANDLF.schedulers import get_scheduler 3 from GANDLF.optimizers import get_optimizer 4 from GANDLF.data import ( 5 get_train_loader, 6 get_validation_loader, 7 ) 8 from GANDLF.utils import ( 9 populate_header_in_parameters, 10 parseTrainingCSV, 11 send_model_to_device, 12 get_class_imbalance_weights, 13 ) 14 15 16 def create_pytorch_objects(parameters, train_csv=None, val_csv=None, device="cpu"): 17 """ 18 This function creates all the PyTorch objects needed for training. 19 20 Args: 21 parameters (dict): The parameters dictionary. 22 train_csv (str): The path to the training CSV file. 23 val_csv (str): The path to the validation CSV file. 24 device (str): The device to perform computations on. 25 26 Returns: 27 model (torch.nn.Module): The model to use for training. 28 optimizer (Optimizer): The optimizer to use for training. 29 train_loader (torch.utils.data.DataLoader): The training data loader. 30 val_loader (torch.utils.data.DataLoader): The validation data loader. 31 scheduler (object): The scheduler to use for training. 32 parameters (dict): The updated parameters dictionary. 33 """ 34 # initialize train and val loaders 35 train_loader, val_loader = None, None 36 headers_to_populate_train, headers_to_populate_val = None, None 37 38 if train_csv is not None: 39 # populate the data frames 40 parameters["training_data"], headers_to_populate_train = parseTrainingCSV( 41 train_csv, train=True 42 ) 43 parameters = populate_header_in_parameters( 44 parameters, headers_to_populate_train 45 ) 46 # get the train loader 47 train_loader = get_train_loader(parameters) 48 parameters["training_samples_size"] = len(train_loader) 49 50 # Calculate the weights here 51 ( 52 parameters["weights"], 53 parameters["class_weights"], 54 ) = get_class_imbalance_weights(parameters["training_data"], parameters) 55 56 if val_csv is not None: 57 parameters["validation_data"], headers_to_populate_val = parseTrainingCSV( 58 val_csv, train=False 59 ) 60 if headers_to_populate_train is None: 61 parameters = populate_header_in_parameters( 62 parameters, headers_to_populate_val 63 ) 64 # get the validation loader 65 val_loader = get_validation_loader(parameters) 66 67 # get the model 68 model = get_model(parameters) 69 parameters["model_parameters"] = model.parameters() 70 71 # get the optimizer 72 optimizer = get_optimizer(parameters) 73 parameters["optimizer_object"] = optimizer 74 75 # send model to correct device 76 ( 77 model, 78 parameters["model"]["amp"], 79 parameters["device"], 80 parameters["device_id"], 81 ) = send_model_to_device( 82 model, amp=parameters["model"]["amp"], device=device, optimizer=optimizer 83 ) 84 85 # only need to create scheduler if training 86 if train_csv is not None: 87 if not ("step_size" in parameters["scheduler"]): 88 parameters["scheduler"]["step_size"] = ( 89 parameters["training_samples_size"] / parameters["learning_rate"] 90 ) 91 92 scheduler = get_scheduler(parameters) 93 else: 94 scheduler = None 95 96 # these keys contain generators, and are not needed beyond this point in params 97 generator_keys_to_remove = ["optimizer_object", "model_parameters"] 98 for key in generator_keys_to_remove: 99 parameters.pop(key, None) 100 101 return model, optimizer, train_loader, val_loader, scheduler, parameters 102 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/GANDLF/compute/generic.py b/GANDLF/compute/generic.py --- a/GANDLF/compute/generic.py +++ b/GANDLF/compute/generic.py @@ -47,12 +47,6 @@ train_loader = get_train_loader(parameters) parameters["training_samples_size"] = len(train_loader) - # Calculate the weights here - ( - parameters["weights"], - parameters["class_weights"], - ) = get_class_imbalance_weights(parameters["training_data"], parameters) - if val_csv is not None: parameters["validation_data"], headers_to_populate_val = parseTrainingCSV( val_csv, train=False @@ -90,6 +84,13 @@ ) scheduler = get_scheduler(parameters) + + # Calculate the weights here + ( + parameters["weights"], + parameters["class_weights"], + ) = get_class_imbalance_weights(parameters["training_data"], parameters) + else: scheduler = None
{"golden_diff": "diff --git a/GANDLF/compute/generic.py b/GANDLF/compute/generic.py\n--- a/GANDLF/compute/generic.py\n+++ b/GANDLF/compute/generic.py\n@@ -47,12 +47,6 @@\n train_loader = get_train_loader(parameters)\n parameters[\"training_samples_size\"] = len(train_loader)\n \n- # Calculate the weights here\n- (\n- parameters[\"weights\"],\n- parameters[\"class_weights\"],\n- ) = get_class_imbalance_weights(parameters[\"training_data\"], parameters)\n-\n if val_csv is not None:\n parameters[\"validation_data\"], headers_to_populate_val = parseTrainingCSV(\n val_csv, train=False\n@@ -90,6 +84,13 @@\n )\n \n scheduler = get_scheduler(parameters)\n+\n+ # Calculate the weights here\n+ (\n+ parameters[\"weights\"],\n+ parameters[\"class_weights\"],\n+ ) = get_class_imbalance_weights(parameters[\"training_data\"], parameters)\n+\n else:\n scheduler = None\n", "issue": "Perform penalty calculation after all sanity checks are completed\n**Is your feature request related to a problem? Please describe.**\r\nThe penalty calculation takes a long time, and there are sanity checks that happen after this, which can be a pain.\r\n\r\n**Describe the solution you'd like**\r\nIt would be great to have these checks before the penalty calculation for quality-of-life improvements.\r\n\r\n**Describe alternatives you've considered**\r\nN.A.\r\n\r\n**Additional context**\r\nFrom Evan C.\n", "before_files": [{"content": "from GANDLF.models import get_model\nfrom GANDLF.schedulers import get_scheduler\nfrom GANDLF.optimizers import get_optimizer\nfrom GANDLF.data import (\n get_train_loader,\n get_validation_loader,\n)\nfrom GANDLF.utils import (\n populate_header_in_parameters,\n parseTrainingCSV,\n send_model_to_device,\n get_class_imbalance_weights,\n)\n\n\ndef create_pytorch_objects(parameters, train_csv=None, val_csv=None, device=\"cpu\"):\n \"\"\"\n This function creates all the PyTorch objects needed for training.\n\n Args:\n parameters (dict): The parameters dictionary.\n train_csv (str): The path to the training CSV file.\n val_csv (str): The path to the validation CSV file.\n device (str): The device to perform computations on.\n\n Returns:\n model (torch.nn.Module): The model to use for training.\n optimizer (Optimizer): The optimizer to use for training.\n train_loader (torch.utils.data.DataLoader): The training data loader.\n val_loader (torch.utils.data.DataLoader): The validation data loader.\n scheduler (object): The scheduler to use for training.\n parameters (dict): The updated parameters dictionary.\n \"\"\"\n # initialize train and val loaders\n train_loader, val_loader = None, None\n headers_to_populate_train, headers_to_populate_val = None, None\n\n if train_csv is not None:\n # populate the data frames\n parameters[\"training_data\"], headers_to_populate_train = parseTrainingCSV(\n train_csv, train=True\n )\n parameters = populate_header_in_parameters(\n parameters, headers_to_populate_train\n )\n # get the train loader\n train_loader = get_train_loader(parameters)\n parameters[\"training_samples_size\"] = len(train_loader)\n\n # Calculate the weights here\n (\n parameters[\"weights\"],\n parameters[\"class_weights\"],\n ) = get_class_imbalance_weights(parameters[\"training_data\"], parameters)\n\n if val_csv is not None:\n parameters[\"validation_data\"], headers_to_populate_val = parseTrainingCSV(\n val_csv, train=False\n )\n if headers_to_populate_train is None:\n parameters = populate_header_in_parameters(\n parameters, headers_to_populate_val\n )\n # get the validation loader\n val_loader = get_validation_loader(parameters)\n\n # get the model\n model = get_model(parameters)\n parameters[\"model_parameters\"] = model.parameters()\n\n # get the optimizer\n optimizer = get_optimizer(parameters)\n parameters[\"optimizer_object\"] = optimizer\n\n # send model to correct device\n (\n model,\n parameters[\"model\"][\"amp\"],\n parameters[\"device\"],\n parameters[\"device_id\"],\n ) = send_model_to_device(\n model, amp=parameters[\"model\"][\"amp\"], device=device, optimizer=optimizer\n )\n\n # only need to create scheduler if training\n if train_csv is not None:\n if not (\"step_size\" in parameters[\"scheduler\"]):\n parameters[\"scheduler\"][\"step_size\"] = (\n parameters[\"training_samples_size\"] / parameters[\"learning_rate\"]\n )\n\n scheduler = get_scheduler(parameters)\n else:\n scheduler = None\n\n # these keys contain generators, and are not needed beyond this point in params\n generator_keys_to_remove = [\"optimizer_object\", \"model_parameters\"]\n for key in generator_keys_to_remove:\n parameters.pop(key, None)\n\n return model, optimizer, train_loader, val_loader, scheduler, parameters\n", "path": "GANDLF/compute/generic.py"}], "after_files": [{"content": "from GANDLF.models import get_model\nfrom GANDLF.schedulers import get_scheduler\nfrom GANDLF.optimizers import get_optimizer\nfrom GANDLF.data import (\n get_train_loader,\n get_validation_loader,\n)\nfrom GANDLF.utils import (\n populate_header_in_parameters,\n parseTrainingCSV,\n send_model_to_device,\n get_class_imbalance_weights,\n)\n\n\ndef create_pytorch_objects(parameters, train_csv=None, val_csv=None, device=\"cpu\"):\n \"\"\"\n This function creates all the PyTorch objects needed for training.\n\n Args:\n parameters (dict): The parameters dictionary.\n train_csv (str): The path to the training CSV file.\n val_csv (str): The path to the validation CSV file.\n device (str): The device to perform computations on.\n\n Returns:\n model (torch.nn.Module): The model to use for training.\n optimizer (Optimizer): The optimizer to use for training.\n train_loader (torch.utils.data.DataLoader): The training data loader.\n val_loader (torch.utils.data.DataLoader): The validation data loader.\n scheduler (object): The scheduler to use for training.\n parameters (dict): The updated parameters dictionary.\n \"\"\"\n # initialize train and val loaders\n train_loader, val_loader = None, None\n headers_to_populate_train, headers_to_populate_val = None, None\n\n if train_csv is not None:\n # populate the data frames\n parameters[\"training_data\"], headers_to_populate_train = parseTrainingCSV(\n train_csv, train=True\n )\n parameters = populate_header_in_parameters(\n parameters, headers_to_populate_train\n )\n # get the train loader\n train_loader = get_train_loader(parameters)\n parameters[\"training_samples_size\"] = len(train_loader)\n\n if val_csv is not None:\n parameters[\"validation_data\"], headers_to_populate_val = parseTrainingCSV(\n val_csv, train=False\n )\n if headers_to_populate_train is None:\n parameters = populate_header_in_parameters(\n parameters, headers_to_populate_val\n )\n # get the validation loader\n val_loader = get_validation_loader(parameters)\n\n # get the model\n model = get_model(parameters)\n parameters[\"model_parameters\"] = model.parameters()\n\n # get the optimizer\n optimizer = get_optimizer(parameters)\n parameters[\"optimizer_object\"] = optimizer\n\n # send model to correct device\n (\n model,\n parameters[\"model\"][\"amp\"],\n parameters[\"device\"],\n parameters[\"device_id\"],\n ) = send_model_to_device(\n model, amp=parameters[\"model\"][\"amp\"], device=device, optimizer=optimizer\n )\n\n # only need to create scheduler if training\n if train_csv is not None:\n if not (\"step_size\" in parameters[\"scheduler\"]):\n parameters[\"scheduler\"][\"step_size\"] = (\n parameters[\"training_samples_size\"] / parameters[\"learning_rate\"]\n )\n\n scheduler = get_scheduler(parameters)\n\n # Calculate the weights here\n (\n parameters[\"weights\"],\n parameters[\"class_weights\"],\n ) = get_class_imbalance_weights(parameters[\"training_data\"], parameters)\n\n else:\n scheduler = None\n\n # these keys contain generators, and are not needed beyond this point in params\n generator_keys_to_remove = [\"optimizer_object\", \"model_parameters\"]\n for key in generator_keys_to_remove:\n parameters.pop(key, None)\n\n return model, optimizer, train_loader, val_loader, scheduler, parameters\n", "path": "GANDLF/compute/generic.py"}]}
1,288
224
gh_patches_debug_32962
rasdani/github-patches
git_diff
microsoft__torchgeo-250
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- torchgeo.models.RFC should have a seed argument The parameters of this model are randomly initialized, but it is not trainable. To have repeatable results with this we need a seed parameter so we can guarantee that parameter init happens the same. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `torchgeo/models/rcf.py` Content: ``` 1 # Copyright (c) Microsoft Corporation. All rights reserved. 2 # Licensed under the MIT License. 3 4 """Implementation of a random convolutional feature projection model.""" 5 6 from typing import cast 7 8 import torch 9 import torch.nn.functional as F 10 from torch import Tensor 11 from torch.nn.modules import Conv2d, Module 12 13 Module.__module__ = "torch.nn" 14 Conv2d.__module__ = "torch.nn" 15 16 17 class RCF(Module): 18 """This model extracts random convolutional features (RCFs) from its input. 19 20 RCFs are used in Multi-task Observation using Satellite Imagery & Kitchen Sinks 21 (MOSAIKS) method proposed in https://www.nature.com/articles/s41467-021-24638-z. 22 23 .. note:: 24 25 This Module is *not* trainable. It is only used as a feature extractor. 26 """ 27 28 def __init__( 29 self, 30 in_channels: int = 4, 31 features: int = 16, 32 kernel_size: int = 3, 33 bias: float = -1.0, 34 ) -> None: 35 """Initializes the RCF model. 36 37 This is a static model that serves to extract fixed length feature vectors from 38 input patches. 39 40 Args: 41 in_channels: number of input channels 42 features: number of features to compute, must be divisible by 2 43 kernel_size: size of the kernel used to compute the RCFs 44 bias: bias of the convolutional layer 45 """ 46 super().__init__() 47 48 assert features % 2 == 0 49 50 # We register the weight and bias tensors as "buffers". This does two things: 51 # makes them behave correctly when we call .to(...) on the module, and makes 52 # them explicitely _not_ Parameters of the model (which might get updated) if 53 # a user tries to train with this model. 54 self.register_buffer( 55 "weights", 56 torch.randn( 57 features // 2, 58 in_channels, 59 kernel_size, 60 kernel_size, 61 requires_grad=False, 62 ), 63 ) 64 self.register_buffer( 65 "biases", 66 torch.zeros( # type: ignore[attr-defined] 67 features // 2, requires_grad=False 68 ) 69 + bias, 70 ) 71 72 def forward(self, x: Tensor) -> Tensor: 73 """Forward pass of the RCF model. 74 75 Args: 76 x: a tensor with shape (B, C, H, W) 77 78 Returns: 79 a tensor of size (B, ``self.num_features``) 80 """ 81 x1a = F.relu( 82 F.conv2d(x, self.weights, bias=self.biases, stride=1, padding=0), 83 inplace=True, 84 ) 85 x1b = F.relu( 86 -F.conv2d(x, self.weights, bias=self.biases, stride=1, padding=0), 87 inplace=False, 88 ) 89 90 x1a = F.adaptive_avg_pool2d(x1a, (1, 1)).squeeze() 91 x1b = F.adaptive_avg_pool2d(x1b, (1, 1)).squeeze() 92 93 if len(x1a.shape) == 1: # case where we passed a single input 94 output = torch.cat((x1a, x1b), dim=0) # type: ignore[attr-defined] 95 return cast(Tensor, output) 96 else: # case where we passed a batch of > 1 inputs 97 assert len(x1a.shape) == 2 98 output = torch.cat((x1a, x1b), dim=1) # type: ignore[attr-defined] 99 return cast(Tensor, output) 100 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/torchgeo/models/rcf.py b/torchgeo/models/rcf.py --- a/torchgeo/models/rcf.py +++ b/torchgeo/models/rcf.py @@ -3,7 +3,7 @@ """Implementation of a random convolutional feature projection model.""" -from typing import cast +from typing import Optional, cast import torch import torch.nn.functional as F @@ -31,6 +31,7 @@ features: int = 16, kernel_size: int = 3, bias: float = -1.0, + seed: Optional[int] = None, ) -> None: """Initializes the RCF model. @@ -42,11 +43,19 @@ features: number of features to compute, must be divisible by 2 kernel_size: size of the kernel used to compute the RCFs bias: bias of the convolutional layer + seed: random seed used to initialize the convolutional layer """ super().__init__() assert features % 2 == 0 + if seed is None: + generator = None + else: + generator = torch.Generator().manual_seed( # type: ignore[attr-defined] + seed + ) + # We register the weight and bias tensors as "buffers". This does two things: # makes them behave correctly when we call .to(...) on the module, and makes # them explicitely _not_ Parameters of the model (which might get updated) if @@ -59,6 +68,7 @@ kernel_size, kernel_size, requires_grad=False, + generator=generator, ), ) self.register_buffer(
{"golden_diff": "diff --git a/torchgeo/models/rcf.py b/torchgeo/models/rcf.py\n--- a/torchgeo/models/rcf.py\n+++ b/torchgeo/models/rcf.py\n@@ -3,7 +3,7 @@\n \n \"\"\"Implementation of a random convolutional feature projection model.\"\"\"\n \n-from typing import cast\n+from typing import Optional, cast\n \n import torch\n import torch.nn.functional as F\n@@ -31,6 +31,7 @@\n features: int = 16,\n kernel_size: int = 3,\n bias: float = -1.0,\n+ seed: Optional[int] = None,\n ) -> None:\n \"\"\"Initializes the RCF model.\n \n@@ -42,11 +43,19 @@\n features: number of features to compute, must be divisible by 2\n kernel_size: size of the kernel used to compute the RCFs\n bias: bias of the convolutional layer\n+ seed: random seed used to initialize the convolutional layer\n \"\"\"\n super().__init__()\n \n assert features % 2 == 0\n \n+ if seed is None:\n+ generator = None\n+ else:\n+ generator = torch.Generator().manual_seed( # type: ignore[attr-defined]\n+ seed\n+ )\n+\n # We register the weight and bias tensors as \"buffers\". This does two things:\n # makes them behave correctly when we call .to(...) on the module, and makes\n # them explicitely _not_ Parameters of the model (which might get updated) if\n@@ -59,6 +68,7 @@\n kernel_size,\n kernel_size,\n requires_grad=False,\n+ generator=generator,\n ),\n )\n self.register_buffer(\n", "issue": "torchgeo.models.RFC should have a seed argument\nThe parameters of this model are randomly initialized, but it is not trainable. To have repeatable results with this we need a seed parameter so we can guarantee that parameter init happens the same.\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"Implementation of a random convolutional feature projection model.\"\"\"\n\nfrom typing import cast\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import Tensor\nfrom torch.nn.modules import Conv2d, Module\n\nModule.__module__ = \"torch.nn\"\nConv2d.__module__ = \"torch.nn\"\n\n\nclass RCF(Module):\n \"\"\"This model extracts random convolutional features (RCFs) from its input.\n\n RCFs are used in Multi-task Observation using Satellite Imagery & Kitchen Sinks\n (MOSAIKS) method proposed in https://www.nature.com/articles/s41467-021-24638-z.\n\n .. note::\n\n This Module is *not* trainable. It is only used as a feature extractor.\n \"\"\"\n\n def __init__(\n self,\n in_channels: int = 4,\n features: int = 16,\n kernel_size: int = 3,\n bias: float = -1.0,\n ) -> None:\n \"\"\"Initializes the RCF model.\n\n This is a static model that serves to extract fixed length feature vectors from\n input patches.\n\n Args:\n in_channels: number of input channels\n features: number of features to compute, must be divisible by 2\n kernel_size: size of the kernel used to compute the RCFs\n bias: bias of the convolutional layer\n \"\"\"\n super().__init__()\n\n assert features % 2 == 0\n\n # We register the weight and bias tensors as \"buffers\". This does two things:\n # makes them behave correctly when we call .to(...) on the module, and makes\n # them explicitely _not_ Parameters of the model (which might get updated) if\n # a user tries to train with this model.\n self.register_buffer(\n \"weights\",\n torch.randn(\n features // 2,\n in_channels,\n kernel_size,\n kernel_size,\n requires_grad=False,\n ),\n )\n self.register_buffer(\n \"biases\",\n torch.zeros( # type: ignore[attr-defined]\n features // 2, requires_grad=False\n )\n + bias,\n )\n\n def forward(self, x: Tensor) -> Tensor:\n \"\"\"Forward pass of the RCF model.\n\n Args:\n x: a tensor with shape (B, C, H, W)\n\n Returns:\n a tensor of size (B, ``self.num_features``)\n \"\"\"\n x1a = F.relu(\n F.conv2d(x, self.weights, bias=self.biases, stride=1, padding=0),\n inplace=True,\n )\n x1b = F.relu(\n -F.conv2d(x, self.weights, bias=self.biases, stride=1, padding=0),\n inplace=False,\n )\n\n x1a = F.adaptive_avg_pool2d(x1a, (1, 1)).squeeze()\n x1b = F.adaptive_avg_pool2d(x1b, (1, 1)).squeeze()\n\n if len(x1a.shape) == 1: # case where we passed a single input\n output = torch.cat((x1a, x1b), dim=0) # type: ignore[attr-defined]\n return cast(Tensor, output)\n else: # case where we passed a batch of > 1 inputs\n assert len(x1a.shape) == 2\n output = torch.cat((x1a, x1b), dim=1) # type: ignore[attr-defined]\n return cast(Tensor, output)\n", "path": "torchgeo/models/rcf.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"Implementation of a random convolutional feature projection model.\"\"\"\n\nfrom typing import Optional, cast\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import Tensor\nfrom torch.nn.modules import Conv2d, Module\n\nModule.__module__ = \"torch.nn\"\nConv2d.__module__ = \"torch.nn\"\n\n\nclass RCF(Module):\n \"\"\"This model extracts random convolutional features (RCFs) from its input.\n\n RCFs are used in Multi-task Observation using Satellite Imagery & Kitchen Sinks\n (MOSAIKS) method proposed in https://www.nature.com/articles/s41467-021-24638-z.\n\n .. note::\n\n This Module is *not* trainable. It is only used as a feature extractor.\n \"\"\"\n\n def __init__(\n self,\n in_channels: int = 4,\n features: int = 16,\n kernel_size: int = 3,\n bias: float = -1.0,\n seed: Optional[int] = None,\n ) -> None:\n \"\"\"Initializes the RCF model.\n\n This is a static model that serves to extract fixed length feature vectors from\n input patches.\n\n Args:\n in_channels: number of input channels\n features: number of features to compute, must be divisible by 2\n kernel_size: size of the kernel used to compute the RCFs\n bias: bias of the convolutional layer\n seed: random seed used to initialize the convolutional layer\n \"\"\"\n super().__init__()\n\n assert features % 2 == 0\n\n if seed is None:\n generator = None\n else:\n generator = torch.Generator().manual_seed( # type: ignore[attr-defined]\n seed\n )\n\n # We register the weight and bias tensors as \"buffers\". This does two things:\n # makes them behave correctly when we call .to(...) on the module, and makes\n # them explicitely _not_ Parameters of the model (which might get updated) if\n # a user tries to train with this model.\n self.register_buffer(\n \"weights\",\n torch.randn(\n features // 2,\n in_channels,\n kernel_size,\n kernel_size,\n requires_grad=False,\n generator=generator,\n ),\n )\n self.register_buffer(\n \"biases\",\n torch.zeros( # type: ignore[attr-defined]\n features // 2, requires_grad=False\n )\n + bias,\n )\n\n def forward(self, x: Tensor) -> Tensor:\n \"\"\"Forward pass of the RCF model.\n\n Args:\n x: a tensor with shape (B, C, H, W)\n\n Returns:\n a tensor of size (B, ``self.num_features``)\n \"\"\"\n x1a = F.relu(\n F.conv2d(x, self.weights, bias=self.biases, stride=1, padding=0),\n inplace=True,\n )\n x1b = F.relu(\n -F.conv2d(x, self.weights, bias=self.biases, stride=1, padding=0),\n inplace=False,\n )\n\n x1a = F.adaptive_avg_pool2d(x1a, (1, 1)).squeeze()\n x1b = F.adaptive_avg_pool2d(x1b, (1, 1)).squeeze()\n\n if len(x1a.shape) == 1: # case where we passed a single input\n output = torch.cat((x1a, x1b), dim=0) # type: ignore[attr-defined]\n return cast(Tensor, output)\n else: # case where we passed a batch of > 1 inputs\n assert len(x1a.shape) == 2\n output = torch.cat((x1a, x1b), dim=1) # type: ignore[attr-defined]\n return cast(Tensor, output)\n", "path": "torchgeo/models/rcf.py"}]}
1,301
380
gh_patches_debug_3790
rasdani/github-patches
git_diff
opsdroid__opsdroid-233
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [Docker] DEFAULT_ROOT_PATH should be created if it does not exist I have tried to run opsdroid in a Docker container, in the following environment: ``` OS: Ubuntu 16.04.3 LTS Docker version: 17.06.2-ce Docker API version: 1.30 ``` The process I followed is the following: 1. `docker pull opsdroid/opsdroid:latest` 2. Created an initial configuration in the host: `/var/tmp/configuration.yaml` 3. Ran the following command: ` docker run --rm -v /var/tmp/configuration.yaml:/etc/opsdroid/configuration.yaml:ro opsdroid/opsdroid:latest` The configuration file contents are: ``` connectors: - name: shell skills: - name: hello ``` But I got the following error: ``` ubuntu@ubuntu:~$ docker run --rm -v /var/tmp/configuration.yaml:/etc/opsdroid/configuration.yaml:ro opsdroid/opsdroid:latest Traceback (most recent call last): File "/usr/local/lib/python3.5/runpy.py", line 193, in _run_module_as_main "__main__", mod_spec) File "/usr/local/lib/python3.5/runpy.py", line 85, in _run_code exec(code, run_globals) File "/usr/src/app/opsdroid/__main__.py", line 112, in <module> main() File "/usr/src/app/opsdroid/__main__.py", line 105, in main configure_logging(opsdroid.config) File "/usr/src/app/opsdroid/__main__.py", line 51, in configure_logging file_handler = logging.FileHandler(logfile_path) File "/usr/local/lib/python3.5/logging/__init__.py", line 1014, in __init__ StreamHandler.__init__(self, self._open()) File "/usr/local/lib/python3.5/logging/__init__.py", line 1043, in _open return open(self.baseFilename, self.mode, encoding=self.encoding) FileNotFoundError: [Errno 2] No such file or directory: '/root/.opsdroid/output.log' ubuntu@ubuntu:~$ ``` When running the container in interactive mode to debug the issue, by issuing `docker run -it -v /var/tmp/configuration.yaml:/etc/opsdroid/configuration.yaml:ro opsdroid/opsdroid:latest /bin/sh` and executing the default command (`python -m opsdroid`), I reproduced the issue: ``` /usr/src/app # python -m opsdroid Traceback (most recent call last): File "/usr/local/lib/python3.5/runpy.py", line 193, in _run_module_as_main "__main__", mod_spec) File "/usr/local/lib/python3.5/runpy.py", line 85, in _run_code exec(code, run_globals) File "/usr/src/app/opsdroid/__main__.py", line 112, in <module> main() File "/usr/src/app/opsdroid/__main__.py", line 105, in main configure_logging(opsdroid.config) File "/usr/src/app/opsdroid/__main__.py", line 51, in configure_logging file_handler = logging.FileHandler(logfile_path) File "/usr/local/lib/python3.5/logging/__init__.py", line 1014, in __init__ StreamHandler.__init__(self, self._open()) File "/usr/local/lib/python3.5/logging/__init__.py", line 1043, in _open return open(self.baseFilename, self.mode, encoding=self.encoding) FileNotFoundError: [Errno 2] No such file or directory: '/root/.opsdroid/output.log' /usr/src/app # ``` When checking if the `/root/.opsdroid/` directory existed, I got the following: ``` /usr/src/app # ls /root/.opsdroid ls: /root/.opsdroid: No such file or directory ``` Concluding, opsdroid should check if that directory exists and create it if not. [Docker] DEFAULT_ROOT_PATH should be created if it does not exist I have tried to run opsdroid in a Docker container, in the following environment: ``` OS: Ubuntu 16.04.3 LTS Docker version: 17.06.2-ce Docker API version: 1.30 ``` The process I followed is the following: 1. `docker pull opsdroid/opsdroid:latest` 2. Created an initial configuration in the host: `/var/tmp/configuration.yaml` 3. Ran the following command: ` docker run --rm -v /var/tmp/configuration.yaml:/etc/opsdroid/configuration.yaml:ro opsdroid/opsdroid:latest` The configuration file contents are: ``` connectors: - name: shell skills: - name: hello ``` But I got the following error: ``` ubuntu@ubuntu:~$ docker run --rm -v /var/tmp/configuration.yaml:/etc/opsdroid/configuration.yaml:ro opsdroid/opsdroid:latest Traceback (most recent call last): File "/usr/local/lib/python3.5/runpy.py", line 193, in _run_module_as_main "__main__", mod_spec) File "/usr/local/lib/python3.5/runpy.py", line 85, in _run_code exec(code, run_globals) File "/usr/src/app/opsdroid/__main__.py", line 112, in <module> main() File "/usr/src/app/opsdroid/__main__.py", line 105, in main configure_logging(opsdroid.config) File "/usr/src/app/opsdroid/__main__.py", line 51, in configure_logging file_handler = logging.FileHandler(logfile_path) File "/usr/local/lib/python3.5/logging/__init__.py", line 1014, in __init__ StreamHandler.__init__(self, self._open()) File "/usr/local/lib/python3.5/logging/__init__.py", line 1043, in _open return open(self.baseFilename, self.mode, encoding=self.encoding) FileNotFoundError: [Errno 2] No such file or directory: '/root/.opsdroid/output.log' ubuntu@ubuntu:~$ ``` When running the container in interactive mode to debug the issue, by issuing `docker run -it -v /var/tmp/configuration.yaml:/etc/opsdroid/configuration.yaml:ro opsdroid/opsdroid:latest /bin/sh` and executing the default command (`python -m opsdroid`), I reproduced the issue: ``` /usr/src/app # python -m opsdroid Traceback (most recent call last): File "/usr/local/lib/python3.5/runpy.py", line 193, in _run_module_as_main "__main__", mod_spec) File "/usr/local/lib/python3.5/runpy.py", line 85, in _run_code exec(code, run_globals) File "/usr/src/app/opsdroid/__main__.py", line 112, in <module> main() File "/usr/src/app/opsdroid/__main__.py", line 105, in main configure_logging(opsdroid.config) File "/usr/src/app/opsdroid/__main__.py", line 51, in configure_logging file_handler = logging.FileHandler(logfile_path) File "/usr/local/lib/python3.5/logging/__init__.py", line 1014, in __init__ StreamHandler.__init__(self, self._open()) File "/usr/local/lib/python3.5/logging/__init__.py", line 1043, in _open return open(self.baseFilename, self.mode, encoding=self.encoding) FileNotFoundError: [Errno 2] No such file or directory: '/root/.opsdroid/output.log' /usr/src/app # ``` When checking if the `/root/.opsdroid/` directory existed, I got the following: ``` /usr/src/app # ls /root/.opsdroid ls: /root/.opsdroid: No such file or directory ``` Concluding, opsdroid should check if that directory exists and create it if not. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `opsdroid/__main__.py` Content: ``` 1 """Starts opsdroid.""" 2 3 import os 4 import sys 5 import logging 6 import argparse 7 8 from opsdroid.core import OpsDroid 9 from opsdroid.const import DEFAULT_LOG_FILENAME, EXAMPLE_CONFIG_FILE 10 from opsdroid.web import Web 11 12 13 _LOGGER = logging.getLogger("opsdroid") 14 15 16 def configure_logging(config): 17 """Configure the root logger based on user config.""" 18 rootlogger = logging.getLogger() 19 while rootlogger.handlers: 20 rootlogger.handlers.pop() 21 22 try: 23 if config["logging"]["path"]: 24 logfile_path = os.path.expanduser(config["logging"]["path"]) 25 else: 26 logfile_path = config["logging"]["path"] 27 except KeyError: 28 logfile_path = DEFAULT_LOG_FILENAME 29 30 try: 31 log_level = get_logging_level( 32 config["logging"]["level"]) 33 except KeyError: 34 log_level = logging.INFO 35 36 rootlogger.setLevel(log_level) 37 formatter = logging.Formatter('%(levelname)s %(name)s: %(message)s') 38 39 console_handler = logging.StreamHandler() 40 console_handler.setLevel(log_level) 41 console_handler.setFormatter(formatter) 42 rootlogger.addHandler(console_handler) 43 44 try: 45 if not config["logging"]["console"]: 46 console_handler.setLevel(logging.CRITICAL) 47 except KeyError: 48 pass 49 50 if logfile_path: 51 file_handler = logging.FileHandler(logfile_path) 52 file_handler.setLevel(log_level) 53 file_handler.setFormatter(formatter) 54 rootlogger.addHandler(file_handler) 55 56 _LOGGER.info("="*40) 57 _LOGGER.info("Stated application") 58 59 60 def get_logging_level(logging_level): 61 """Get the logger level based on the user configuration.""" 62 if logging_level == 'critical': 63 return logging.CRITICAL 64 elif logging_level == 'error': 65 return logging.ERROR 66 elif logging_level == 'warning': 67 return logging.WARNING 68 elif logging_level == 'debug': 69 return logging.DEBUG 70 71 return logging.INFO 72 73 74 def parse_args(args): 75 """Parse command line arguments.""" 76 parser = argparse.ArgumentParser(description='Run opsdroid.') 77 parser.add_argument('--gen-config', action="store_true", 78 help='prints out an example configuration file') 79 return parser.parse_args(args) 80 81 82 def check_dependencies(): 83 """Check for system dependencies required by opsdroid.""" 84 if sys.version_info[0] < 3 or sys.version_info[1] < 5: 85 logging.critical("Whoops! opsdroid requires python 3.5 or above.") 86 sys.exit(1) 87 88 89 def main(): 90 """Enter the application here.""" 91 args = parse_args(sys.argv[1:]) 92 93 if args.gen_config: 94 with open(EXAMPLE_CONFIG_FILE, 'r') as conf: 95 print(conf.read()) 96 sys.exit(0) 97 98 check_dependencies() 99 100 restart = True 101 102 while restart: 103 with OpsDroid() as opsdroid: 104 opsdroid.load() 105 configure_logging(opsdroid.config) 106 opsdroid.web_server = Web(opsdroid) 107 opsdroid.start_loop() 108 restart = opsdroid.should_restart 109 110 111 if __name__ == "__main__": 112 main() 113 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/opsdroid/__main__.py b/opsdroid/__main__.py --- a/opsdroid/__main__.py +++ b/opsdroid/__main__.py @@ -48,6 +48,9 @@ pass if logfile_path: + logdir = os.path.dirname(os.path.realpath(logfile_path)) + if not os.path.isdir(logdir): + os.makedirs(logdir) file_handler = logging.FileHandler(logfile_path) file_handler.setLevel(log_level) file_handler.setFormatter(formatter)
{"golden_diff": "diff --git a/opsdroid/__main__.py b/opsdroid/__main__.py\n--- a/opsdroid/__main__.py\n+++ b/opsdroid/__main__.py\n@@ -48,6 +48,9 @@\n pass\n \n if logfile_path:\n+ logdir = os.path.dirname(os.path.realpath(logfile_path))\n+ if not os.path.isdir(logdir):\n+ os.makedirs(logdir)\n file_handler = logging.FileHandler(logfile_path)\n file_handler.setLevel(log_level)\n file_handler.setFormatter(formatter)\n", "issue": "[Docker] DEFAULT_ROOT_PATH should be created if it does not exist\nI have tried to run opsdroid in a Docker container, in the following environment:\r\n\r\n```\r\nOS: Ubuntu 16.04.3 LTS\r\nDocker version: 17.06.2-ce\r\nDocker API version: 1.30\r\n```\r\n\r\nThe process I followed is the following:\r\n\r\n1. `docker pull opsdroid/opsdroid:latest`\r\n2. Created an initial configuration in the host: `/var/tmp/configuration.yaml`\r\n3. Ran the following command: ` docker run --rm -v /var/tmp/configuration.yaml:/etc/opsdroid/configuration.yaml:ro opsdroid/opsdroid:latest`\r\n\r\nThe configuration file contents are:\r\n```\r\nconnectors:\r\n - name: shell\r\n\r\nskills:\r\n - name: hello\r\n```\r\n\r\nBut I got the following error:\r\n\r\n```\r\nubuntu@ubuntu:~$ docker run --rm -v /var/tmp/configuration.yaml:/etc/opsdroid/configuration.yaml:ro opsdroid/opsdroid:latest\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.5/runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"/usr/local/lib/python3.5/runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"/usr/src/app/opsdroid/__main__.py\", line 112, in <module>\r\n main()\r\n File \"/usr/src/app/opsdroid/__main__.py\", line 105, in main\r\n configure_logging(opsdroid.config)\r\n File \"/usr/src/app/opsdroid/__main__.py\", line 51, in configure_logging\r\n file_handler = logging.FileHandler(logfile_path)\r\n File \"/usr/local/lib/python3.5/logging/__init__.py\", line 1014, in __init__\r\n StreamHandler.__init__(self, self._open())\r\n File \"/usr/local/lib/python3.5/logging/__init__.py\", line 1043, in _open\r\n return open(self.baseFilename, self.mode, encoding=self.encoding)\r\nFileNotFoundError: [Errno 2] No such file or directory: '/root/.opsdroid/output.log'\r\nubuntu@ubuntu:~$\r\n```\r\n\r\nWhen running the container in interactive mode to debug the issue, by issuing `docker run -it -v /var/tmp/configuration.yaml:/etc/opsdroid/configuration.yaml:ro opsdroid/opsdroid:latest /bin/sh` and executing the default command (`python -m opsdroid`), I reproduced the issue:\r\n\r\n```\r\n/usr/src/app # python -m opsdroid\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.5/runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"/usr/local/lib/python3.5/runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"/usr/src/app/opsdroid/__main__.py\", line 112, in <module>\r\n main()\r\n File \"/usr/src/app/opsdroid/__main__.py\", line 105, in main\r\n configure_logging(opsdroid.config)\r\n File \"/usr/src/app/opsdroid/__main__.py\", line 51, in configure_logging\r\n file_handler = logging.FileHandler(logfile_path)\r\n File \"/usr/local/lib/python3.5/logging/__init__.py\", line 1014, in __init__\r\n StreamHandler.__init__(self, self._open())\r\n File \"/usr/local/lib/python3.5/logging/__init__.py\", line 1043, in _open\r\n return open(self.baseFilename, self.mode, encoding=self.encoding)\r\nFileNotFoundError: [Errno 2] No such file or directory: '/root/.opsdroid/output.log'\r\n/usr/src/app #\r\n```\r\n\r\nWhen checking if the `/root/.opsdroid/` directory existed, I got the following:\r\n```\r\n/usr/src/app # ls /root/.opsdroid\r\nls: /root/.opsdroid: No such file or directory\r\n```\r\n\r\nConcluding, opsdroid should check if that directory exists and create it if not. \n[Docker] DEFAULT_ROOT_PATH should be created if it does not exist\nI have tried to run opsdroid in a Docker container, in the following environment:\r\n\r\n```\r\nOS: Ubuntu 16.04.3 LTS\r\nDocker version: 17.06.2-ce\r\nDocker API version: 1.30\r\n```\r\n\r\nThe process I followed is the following:\r\n\r\n1. `docker pull opsdroid/opsdroid:latest`\r\n2. Created an initial configuration in the host: `/var/tmp/configuration.yaml`\r\n3. Ran the following command: ` docker run --rm -v /var/tmp/configuration.yaml:/etc/opsdroid/configuration.yaml:ro opsdroid/opsdroid:latest`\r\n\r\nThe configuration file contents are:\r\n```\r\nconnectors:\r\n - name: shell\r\n\r\nskills:\r\n - name: hello\r\n```\r\n\r\nBut I got the following error:\r\n\r\n```\r\nubuntu@ubuntu:~$ docker run --rm -v /var/tmp/configuration.yaml:/etc/opsdroid/configuration.yaml:ro opsdroid/opsdroid:latest\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.5/runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"/usr/local/lib/python3.5/runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"/usr/src/app/opsdroid/__main__.py\", line 112, in <module>\r\n main()\r\n File \"/usr/src/app/opsdroid/__main__.py\", line 105, in main\r\n configure_logging(opsdroid.config)\r\n File \"/usr/src/app/opsdroid/__main__.py\", line 51, in configure_logging\r\n file_handler = logging.FileHandler(logfile_path)\r\n File \"/usr/local/lib/python3.5/logging/__init__.py\", line 1014, in __init__\r\n StreamHandler.__init__(self, self._open())\r\n File \"/usr/local/lib/python3.5/logging/__init__.py\", line 1043, in _open\r\n return open(self.baseFilename, self.mode, encoding=self.encoding)\r\nFileNotFoundError: [Errno 2] No such file or directory: '/root/.opsdroid/output.log'\r\nubuntu@ubuntu:~$\r\n```\r\n\r\nWhen running the container in interactive mode to debug the issue, by issuing `docker run -it -v /var/tmp/configuration.yaml:/etc/opsdroid/configuration.yaml:ro opsdroid/opsdroid:latest /bin/sh` and executing the default command (`python -m opsdroid`), I reproduced the issue:\r\n\r\n```\r\n/usr/src/app # python -m opsdroid\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.5/runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"/usr/local/lib/python3.5/runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"/usr/src/app/opsdroid/__main__.py\", line 112, in <module>\r\n main()\r\n File \"/usr/src/app/opsdroid/__main__.py\", line 105, in main\r\n configure_logging(opsdroid.config)\r\n File \"/usr/src/app/opsdroid/__main__.py\", line 51, in configure_logging\r\n file_handler = logging.FileHandler(logfile_path)\r\n File \"/usr/local/lib/python3.5/logging/__init__.py\", line 1014, in __init__\r\n StreamHandler.__init__(self, self._open())\r\n File \"/usr/local/lib/python3.5/logging/__init__.py\", line 1043, in _open\r\n return open(self.baseFilename, self.mode, encoding=self.encoding)\r\nFileNotFoundError: [Errno 2] No such file or directory: '/root/.opsdroid/output.log'\r\n/usr/src/app #\r\n```\r\n\r\nWhen checking if the `/root/.opsdroid/` directory existed, I got the following:\r\n```\r\n/usr/src/app # ls /root/.opsdroid\r\nls: /root/.opsdroid: No such file or directory\r\n```\r\n\r\nConcluding, opsdroid should check if that directory exists and create it if not. \n", "before_files": [{"content": "\"\"\"Starts opsdroid.\"\"\"\n\nimport os\nimport sys\nimport logging\nimport argparse\n\nfrom opsdroid.core import OpsDroid\nfrom opsdroid.const import DEFAULT_LOG_FILENAME, EXAMPLE_CONFIG_FILE\nfrom opsdroid.web import Web\n\n\n_LOGGER = logging.getLogger(\"opsdroid\")\n\n\ndef configure_logging(config):\n \"\"\"Configure the root logger based on user config.\"\"\"\n rootlogger = logging.getLogger()\n while rootlogger.handlers:\n rootlogger.handlers.pop()\n\n try:\n if config[\"logging\"][\"path\"]:\n logfile_path = os.path.expanduser(config[\"logging\"][\"path\"])\n else:\n logfile_path = config[\"logging\"][\"path\"]\n except KeyError:\n logfile_path = DEFAULT_LOG_FILENAME\n\n try:\n log_level = get_logging_level(\n config[\"logging\"][\"level\"])\n except KeyError:\n log_level = logging.INFO\n\n rootlogger.setLevel(log_level)\n formatter = logging.Formatter('%(levelname)s %(name)s: %(message)s')\n\n console_handler = logging.StreamHandler()\n console_handler.setLevel(log_level)\n console_handler.setFormatter(formatter)\n rootlogger.addHandler(console_handler)\n\n try:\n if not config[\"logging\"][\"console\"]:\n console_handler.setLevel(logging.CRITICAL)\n except KeyError:\n pass\n\n if logfile_path:\n file_handler = logging.FileHandler(logfile_path)\n file_handler.setLevel(log_level)\n file_handler.setFormatter(formatter)\n rootlogger.addHandler(file_handler)\n\n _LOGGER.info(\"=\"*40)\n _LOGGER.info(\"Stated application\")\n\n\ndef get_logging_level(logging_level):\n \"\"\"Get the logger level based on the user configuration.\"\"\"\n if logging_level == 'critical':\n return logging.CRITICAL\n elif logging_level == 'error':\n return logging.ERROR\n elif logging_level == 'warning':\n return logging.WARNING\n elif logging_level == 'debug':\n return logging.DEBUG\n\n return logging.INFO\n\n\ndef parse_args(args):\n \"\"\"Parse command line arguments.\"\"\"\n parser = argparse.ArgumentParser(description='Run opsdroid.')\n parser.add_argument('--gen-config', action=\"store_true\",\n help='prints out an example configuration file')\n return parser.parse_args(args)\n\n\ndef check_dependencies():\n \"\"\"Check for system dependencies required by opsdroid.\"\"\"\n if sys.version_info[0] < 3 or sys.version_info[1] < 5:\n logging.critical(\"Whoops! opsdroid requires python 3.5 or above.\")\n sys.exit(1)\n\n\ndef main():\n \"\"\"Enter the application here.\"\"\"\n args = parse_args(sys.argv[1:])\n\n if args.gen_config:\n with open(EXAMPLE_CONFIG_FILE, 'r') as conf:\n print(conf.read())\n sys.exit(0)\n\n check_dependencies()\n\n restart = True\n\n while restart:\n with OpsDroid() as opsdroid:\n opsdroid.load()\n configure_logging(opsdroid.config)\n opsdroid.web_server = Web(opsdroid)\n opsdroid.start_loop()\n restart = opsdroid.should_restart\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "opsdroid/__main__.py"}], "after_files": [{"content": "\"\"\"Starts opsdroid.\"\"\"\n\nimport os\nimport sys\nimport logging\nimport argparse\n\nfrom opsdroid.core import OpsDroid\nfrom opsdroid.const import DEFAULT_LOG_FILENAME, EXAMPLE_CONFIG_FILE\nfrom opsdroid.web import Web\n\n\n_LOGGER = logging.getLogger(\"opsdroid\")\n\n\ndef configure_logging(config):\n \"\"\"Configure the root logger based on user config.\"\"\"\n rootlogger = logging.getLogger()\n while rootlogger.handlers:\n rootlogger.handlers.pop()\n\n try:\n if config[\"logging\"][\"path\"]:\n logfile_path = os.path.expanduser(config[\"logging\"][\"path\"])\n else:\n logfile_path = config[\"logging\"][\"path\"]\n except KeyError:\n logfile_path = DEFAULT_LOG_FILENAME\n\n try:\n log_level = get_logging_level(\n config[\"logging\"][\"level\"])\n except KeyError:\n log_level = logging.INFO\n\n rootlogger.setLevel(log_level)\n formatter = logging.Formatter('%(levelname)s %(name)s: %(message)s')\n\n console_handler = logging.StreamHandler()\n console_handler.setLevel(log_level)\n console_handler.setFormatter(formatter)\n rootlogger.addHandler(console_handler)\n\n try:\n if not config[\"logging\"][\"console\"]:\n console_handler.setLevel(logging.CRITICAL)\n except KeyError:\n pass\n\n if logfile_path:\n logdir = os.path.dirname(os.path.realpath(logfile_path))\n if not os.path.isdir(logdir):\n os.makedirs(logdir)\n file_handler = logging.FileHandler(logfile_path)\n file_handler.setLevel(log_level)\n file_handler.setFormatter(formatter)\n rootlogger.addHandler(file_handler)\n\n _LOGGER.info(\"=\"*40)\n _LOGGER.info(\"Stated application\")\n\n\ndef get_logging_level(logging_level):\n \"\"\"Get the logger level based on the user configuration.\"\"\"\n if logging_level == 'critical':\n return logging.CRITICAL\n elif logging_level == 'error':\n return logging.ERROR\n elif logging_level == 'warning':\n return logging.WARNING\n elif logging_level == 'debug':\n return logging.DEBUG\n\n return logging.INFO\n\n\ndef parse_args(args):\n \"\"\"Parse command line arguments.\"\"\"\n parser = argparse.ArgumentParser(description='Run opsdroid.')\n parser.add_argument('--gen-config', action=\"store_true\",\n help='prints out an example configuration file')\n return parser.parse_args(args)\n\n\ndef check_dependencies():\n \"\"\"Check for system dependencies required by opsdroid.\"\"\"\n if sys.version_info[0] < 3 or sys.version_info[1] < 5:\n logging.critical(\"Whoops! opsdroid requires python 3.5 or above.\")\n sys.exit(1)\n\n\ndef main():\n \"\"\"Enter the application here.\"\"\"\n args = parse_args(sys.argv[1:])\n\n if args.gen_config:\n with open(EXAMPLE_CONFIG_FILE, 'r') as conf:\n print(conf.read())\n sys.exit(0)\n\n check_dependencies()\n\n restart = True\n\n while restart:\n with OpsDroid() as opsdroid:\n opsdroid.load()\n configure_logging(opsdroid.config)\n opsdroid.web_server = Web(opsdroid)\n opsdroid.start_loop()\n restart = opsdroid.should_restart\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "opsdroid/__main__.py"}]}
3,035
121
gh_patches_debug_12550
rasdani/github-patches
git_diff
psf__black-2852
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Make all documentation files .md For consistency and ease of contributing. Or at least, figure out why we can't use .md for everything. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docs/conf.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # 3 # Configuration file for the Sphinx documentation builder. 4 # 5 # This file does only contain a selection of the most common options. For a 6 # full list see the documentation: 7 # http://www.sphinx-doc.org/en/stable/config 8 9 # -- Path setup -------------------------------------------------------------- 10 11 # If extensions (or modules to document with autodoc) are in another directory, 12 # add these directories to sys.path here. If the directory is relative to the 13 # documentation root, use os.path.abspath to make it absolute, like shown here. 14 # 15 16 import os 17 import string 18 from pathlib import Path 19 20 from pkg_resources import get_distribution 21 22 CURRENT_DIR = Path(__file__).parent 23 24 25 def make_pypi_svg(version: str) -> None: 26 template: Path = CURRENT_DIR / "_static" / "pypi_template.svg" 27 target: Path = CURRENT_DIR / "_static" / "pypi.svg" 28 with open(str(template), "r", encoding="utf8") as f: 29 svg: str = string.Template(f.read()).substitute(version=version) 30 with open(str(target), "w", encoding="utf8") as f: 31 f.write(svg) 32 33 34 # Necessary so Click doesn't hit an encode error when called by 35 # sphinxcontrib-programoutput on Windows. 36 os.putenv("pythonioencoding", "utf-8") 37 38 # -- Project information ----------------------------------------------------- 39 40 project = "Black" 41 copyright = "2018-Present, Łukasz Langa and contributors to Black" 42 author = "Łukasz Langa and contributors to Black" 43 44 # Autopopulate version 45 # The version, including alpha/beta/rc tags, but not commit hash and datestamps 46 release = get_distribution("black").version.split("+")[0] 47 # The short X.Y version. 48 version = release 49 for sp in "abcfr": 50 version = version.split(sp)[0] 51 52 make_pypi_svg(release) 53 54 55 # -- General configuration --------------------------------------------------- 56 57 # If your documentation needs a minimal Sphinx version, state it here. 58 needs_sphinx = "3.0" 59 60 # Add any Sphinx extension module names here, as strings. They can be 61 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 62 # ones. 63 extensions = [ 64 "sphinx.ext.autodoc", 65 "sphinx.ext.intersphinx", 66 "sphinx.ext.napoleon", 67 "myst_parser", 68 "sphinxcontrib.programoutput", 69 "sphinx_copybutton", 70 ] 71 72 # If you need extensions of a certain version or higher, list them here. 73 needs_extensions = {"myst_parser": "0.13.7"} 74 75 # Add any paths that contain templates here, relative to this directory. 76 templates_path = ["_templates"] 77 78 # The suffix(es) of source filenames. 79 # You can specify multiple suffix as a list of string: 80 source_suffix = [".rst", ".md"] 81 82 # The master toctree document. 83 master_doc = "index" 84 85 # The language for content autogenerated by Sphinx. Refer to documentation 86 # for a list of supported languages. 87 # 88 # This is also used if you do content translation via gettext catalogs. 89 # Usually you set "language" from the command line for these cases. 90 language = None 91 92 # List of patterns, relative to source directory, that match files and 93 # directories to ignore when looking for source files. 94 # This pattern also affects html_static_path and html_extra_path . 95 96 exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] 97 98 # The name of the Pygments (syntax highlighting) style to use. 99 pygments_style = "sphinx" 100 101 # We need headers to be linkable to so ask MyST-Parser to autogenerate anchor IDs for 102 # headers up to and including level 3. 103 myst_heading_anchors = 3 104 105 # Prettier support formatting some MyST syntax but not all, so let's disable the 106 # unsupported yet still enabled by default ones. 107 myst_disable_syntax = [ 108 "myst_block_break", 109 "myst_line_comment", 110 "math_block", 111 ] 112 113 # -- Options for HTML output ------------------------------------------------- 114 115 # The theme to use for HTML and HTML Help pages. See the documentation for 116 # a list of builtin themes. 117 # 118 html_theme = "furo" 119 html_logo = "_static/logo2-readme.png" 120 121 # Add any paths that contain custom static files (such as style sheets) here, 122 # relative to this directory. They are copied after the builtin static files, 123 # so a file named "default.css" will overwrite the builtin "default.css". 124 html_static_path = ["_static"] 125 126 # Custom sidebar templates, must be a dictionary that maps document names 127 # to template names. 128 # 129 # The default sidebars (for documents that don't match any pattern) are 130 # defined by theme itself. Builtin themes are using these templates by 131 # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', 132 # 'searchbox.html']``. 133 # 134 # html_sidebars = {} 135 136 137 # -- Options for HTMLHelp output --------------------------------------------- 138 139 # Output file base name for HTML help builder. 140 htmlhelp_basename = "blackdoc" 141 142 143 # -- Options for LaTeX output ------------------------------------------------ 144 145 # Grouping the document tree into LaTeX files. List of tuples 146 # (source start file, target name, title, 147 # author, documentclass [howto, manual, or own class]). 148 latex_documents = [ 149 ( 150 master_doc, 151 "black.tex", 152 "Documentation for Black", 153 "Łukasz Langa and contributors to Black", 154 "manual", 155 ) 156 ] 157 158 159 # -- Options for manual page output ------------------------------------------ 160 161 # One entry per manual page. List of tuples 162 # (source start file, name, description, authors, manual section). 163 man_pages = [(master_doc, "black", "Documentation for Black", [author], 1)] 164 165 166 # -- Options for Texinfo output ---------------------------------------------- 167 168 # Grouping the document tree into Texinfo files. List of tuples 169 # (source start file, target name, title, author, 170 # dir menu entry, description, category) 171 texinfo_documents = [ 172 ( 173 master_doc, 174 "Black", 175 "Documentation for Black", 176 author, 177 "Black", 178 "The uncompromising Python code formatter", 179 "Miscellaneous", 180 ) 181 ] 182 183 184 # -- Options for Epub output ------------------------------------------------- 185 186 # Bibliographic Dublin Core info. 187 epub_title = project 188 epub_author = author 189 epub_publisher = author 190 epub_copyright = copyright 191 192 # The unique identifier of the text. This can be a ISBN number 193 # or the project homepage. 194 # 195 # epub_identifier = '' 196 197 # A unique identification for the text. 198 # 199 # epub_uid = '' 200 201 # A list of files that should not be packed into the epub file. 202 epub_exclude_files = ["search.html"] 203 204 205 # -- Extension configuration ------------------------------------------------- 206 207 autodoc_member_order = "bysource" 208 209 # -- Options for intersphinx extension --------------------------------------- 210 211 # Example configuration for intersphinx: refer to the Python standard library. 212 intersphinx_mapping = {"https://docs.python.org/3/": None} 213 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -105,11 +105,15 @@ # Prettier support formatting some MyST syntax but not all, so let's disable the # unsupported yet still enabled by default ones. myst_disable_syntax = [ + "colon_fence", "myst_block_break", "myst_line_comment", "math_block", ] +# Optional MyST Syntaxes +myst_enable_extensions = [] + # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -105,11 +105,15 @@\n # Prettier support formatting some MyST syntax but not all, so let's disable the\n # unsupported yet still enabled by default ones.\n myst_disable_syntax = [\n+ \"colon_fence\",\n \"myst_block_break\",\n \"myst_line_comment\",\n \"math_block\",\n ]\n \n+# Optional MyST Syntaxes\n+myst_enable_extensions = []\n+\n # -- Options for HTML output -------------------------------------------------\n \n # The theme to use for HTML and HTML Help pages. See the documentation for\n", "issue": "Make all documentation files .md\nFor consistency and ease of contributing. Or at least, figure out why we can't use .md for everything.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/stable/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n\nimport os\nimport string\nfrom pathlib import Path\n\nfrom pkg_resources import get_distribution\n\nCURRENT_DIR = Path(__file__).parent\n\n\ndef make_pypi_svg(version: str) -> None:\n template: Path = CURRENT_DIR / \"_static\" / \"pypi_template.svg\"\n target: Path = CURRENT_DIR / \"_static\" / \"pypi.svg\"\n with open(str(template), \"r\", encoding=\"utf8\") as f:\n svg: str = string.Template(f.read()).substitute(version=version)\n with open(str(target), \"w\", encoding=\"utf8\") as f:\n f.write(svg)\n\n\n# Necessary so Click doesn't hit an encode error when called by\n# sphinxcontrib-programoutput on Windows.\nos.putenv(\"pythonioencoding\", \"utf-8\")\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Black\"\ncopyright = \"2018-Present, \u0141ukasz Langa and contributors to Black\"\nauthor = \"\u0141ukasz Langa and contributors to Black\"\n\n# Autopopulate version\n# The version, including alpha/beta/rc tags, but not commit hash and datestamps\nrelease = get_distribution(\"black\").version.split(\"+\")[0]\n# The short X.Y version.\nversion = release\nfor sp in \"abcfr\":\n version = version.split(sp)[0]\n\nmake_pypi_svg(release)\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\nneeds_sphinx = \"3.0\"\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.napoleon\",\n \"myst_parser\",\n \"sphinxcontrib.programoutput\",\n \"sphinx_copybutton\",\n]\n\n# If you need extensions of a certain version or higher, list them here.\nneeds_extensions = {\"myst_parser\": \"0.13.7\"}\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\nsource_suffix = [\".rst\", \".md\"]\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\n\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# We need headers to be linkable to so ask MyST-Parser to autogenerate anchor IDs for\n# headers up to and including level 3.\nmyst_heading_anchors = 3\n\n# Prettier support formatting some MyST syntax but not all, so let's disable the\n# unsupported yet still enabled by default ones.\nmyst_disable_syntax = [\n \"myst_block_break\",\n \"myst_line_comment\",\n \"math_block\",\n]\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"furo\"\nhtml_logo = \"_static/logo2-readme.png\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\n# html_sidebars = {}\n\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"blackdoc\"\n\n\n# -- Options for LaTeX output ------------------------------------------------\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (\n master_doc,\n \"black.tex\",\n \"Documentation for Black\",\n \"\u0141ukasz Langa and contributors to Black\",\n \"manual\",\n )\n]\n\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"black\", \"Documentation for Black\", [author], 1)]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"Black\",\n \"Documentation for Black\",\n author,\n \"Black\",\n \"The uncompromising Python code formatter\",\n \"Miscellaneous\",\n )\n]\n\n\n# -- Options for Epub output -------------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = project\nepub_author = author\nepub_publisher = author\nepub_copyright = copyright\n\n# The unique identifier of the text. This can be a ISBN number\n# or the project homepage.\n#\n# epub_identifier = ''\n\n# A unique identification for the text.\n#\n# epub_uid = ''\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = [\"search.html\"]\n\n\n# -- Extension configuration -------------------------------------------------\n\nautodoc_member_order = \"bysource\"\n\n# -- Options for intersphinx extension ---------------------------------------\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\"https://docs.python.org/3/\": None}\n", "path": "docs/conf.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/stable/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n\nimport os\nimport string\nfrom pathlib import Path\n\nfrom pkg_resources import get_distribution\n\nCURRENT_DIR = Path(__file__).parent\n\n\ndef make_pypi_svg(version: str) -> None:\n template: Path = CURRENT_DIR / \"_static\" / \"pypi_template.svg\"\n target: Path = CURRENT_DIR / \"_static\" / \"pypi.svg\"\n with open(str(template), \"r\", encoding=\"utf8\") as f:\n svg: str = string.Template(f.read()).substitute(version=version)\n with open(str(target), \"w\", encoding=\"utf8\") as f:\n f.write(svg)\n\n\n# Necessary so Click doesn't hit an encode error when called by\n# sphinxcontrib-programoutput on Windows.\nos.putenv(\"pythonioencoding\", \"utf-8\")\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Black\"\ncopyright = \"2018-Present, \u0141ukasz Langa and contributors to Black\"\nauthor = \"\u0141ukasz Langa and contributors to Black\"\n\n# Autopopulate version\n# The version, including alpha/beta/rc tags, but not commit hash and datestamps\nrelease = get_distribution(\"black\").version.split(\"+\")[0]\n# The short X.Y version.\nversion = release\nfor sp in \"abcfr\":\n version = version.split(sp)[0]\n\nmake_pypi_svg(release)\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\nneeds_sphinx = \"3.0\"\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.napoleon\",\n \"myst_parser\",\n \"sphinxcontrib.programoutput\",\n \"sphinx_copybutton\",\n]\n\n# If you need extensions of a certain version or higher, list them here.\nneeds_extensions = {\"myst_parser\": \"0.13.7\"}\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\nsource_suffix = [\".rst\", \".md\"]\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\n\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# We need headers to be linkable to so ask MyST-Parser to autogenerate anchor IDs for\n# headers up to and including level 3.\nmyst_heading_anchors = 3\n\n# Prettier support formatting some MyST syntax but not all, so let's disable the\n# unsupported yet still enabled by default ones.\nmyst_disable_syntax = [\n \"colon_fence\",\n \"myst_block_break\",\n \"myst_line_comment\",\n \"math_block\",\n]\n\n# Optional MyST Syntaxes\nmyst_enable_extensions = []\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"furo\"\nhtml_logo = \"_static/logo2-readme.png\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\n# html_sidebars = {}\n\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"blackdoc\"\n\n\n# -- Options for LaTeX output ------------------------------------------------\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (\n master_doc,\n \"black.tex\",\n \"Documentation for Black\",\n \"\u0141ukasz Langa and contributors to Black\",\n \"manual\",\n )\n]\n\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"black\", \"Documentation for Black\", [author], 1)]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"Black\",\n \"Documentation for Black\",\n author,\n \"Black\",\n \"The uncompromising Python code formatter\",\n \"Miscellaneous\",\n )\n]\n\n\n# -- Options for Epub output -------------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = project\nepub_author = author\nepub_publisher = author\nepub_copyright = copyright\n\n# The unique identifier of the text. This can be a ISBN number\n# or the project homepage.\n#\n# epub_identifier = ''\n\n# A unique identification for the text.\n#\n# epub_uid = ''\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = [\"search.html\"]\n\n\n# -- Extension configuration -------------------------------------------------\n\nautodoc_member_order = \"bysource\"\n\n# -- Options for intersphinx extension ---------------------------------------\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\"https://docs.python.org/3/\": None}\n", "path": "docs/conf.py"}]}
2,331
142
gh_patches_debug_2491
rasdani/github-patches
git_diff
PyGithub__PyGithub-2460
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- 401 unauthorized after upgrading to 1.58.0 We use `GithubIntegration.get_access_token` to authenticate and after upgrading we now get this response: ``` github.GithubException.GithubException: 401 {"message": "'Expiration time' claim ('exp') must be a numeric value representing the future time at which the assertion expires", "documentation_url": "https://docs.github.com/rest"} ``` Reverting to 1.57 solves the issue. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `github/GithubIntegration.py` Content: ``` 1 import time 2 3 import deprecated 4 import jwt 5 6 from github import Consts 7 from github.GithubException import GithubException 8 from github.Installation import Installation 9 from github.InstallationAuthorization import InstallationAuthorization 10 from github.PaginatedList import PaginatedList 11 from github.Requester import Requester 12 13 14 class GithubIntegration: 15 """ 16 Main class to obtain tokens for a GitHub integration. 17 """ 18 19 def __init__( 20 self, 21 integration_id, 22 private_key, 23 base_url=Consts.DEFAULT_BASE_URL, 24 jwt_expiry=Consts.DEFAULT_JWT_EXPIRY, 25 jwt_issued_at=Consts.DEFAULT_JWT_ISSUED_AT, 26 ): 27 """ 28 :param integration_id: int 29 :param private_key: string 30 :param base_url: string 31 :param jwt_expiry: int. Expiry of the JWT used to get the information about this integration. 32 The default expiration is in 5 minutes and is capped at 10 minutes according to GitHub documentation 33 https://docs.github.com/en/developers/apps/building-github-apps/authenticating-with-github-apps#generating-a-json-web-token-jwt 34 :param jwt_issued_at: int. Number of seconds, relative to now, to set for the "iat" (issued at) parameter. 35 The default value is -60 to protect against clock drift 36 """ 37 assert isinstance(integration_id, (int, str)), integration_id 38 assert isinstance(private_key, str), "supplied private key should be a string" 39 assert isinstance(base_url, str), base_url 40 assert isinstance(jwt_expiry, int), jwt_expiry 41 assert Consts.MIN_JWT_EXPIRY <= jwt_expiry <= Consts.MAX_JWT_EXPIRY, jwt_expiry 42 assert isinstance(jwt_issued_at, int) 43 44 self.base_url = base_url 45 self.integration_id = integration_id 46 self.private_key = private_key 47 self.jwt_expiry = jwt_expiry 48 self.jwt_issued_at = jwt_issued_at 49 self.__requester = Requester( 50 login_or_token=None, 51 password=None, 52 jwt=self.create_jwt(), 53 app_auth=None, 54 base_url=self.base_url, 55 timeout=Consts.DEFAULT_TIMEOUT, 56 user_agent="PyGithub/Python", 57 per_page=Consts.DEFAULT_PER_PAGE, 58 verify=True, 59 retry=None, 60 pool_size=None, 61 ) 62 63 def _get_headers(self): 64 """ 65 Get headers for the requests. 66 67 :return: dict 68 """ 69 return { 70 "Authorization": f"Bearer {self.create_jwt()}", 71 "Accept": Consts.mediaTypeIntegrationPreview, 72 "User-Agent": "PyGithub/Python", 73 } 74 75 def _get_installed_app(self, url): 76 """ 77 Get installation for the given URL. 78 79 :param url: str 80 :rtype: :class:`github.Installation.Installation` 81 """ 82 headers, response = self.__requester.requestJsonAndCheck( 83 "GET", url, headers=self._get_headers() 84 ) 85 86 return Installation( 87 requester=self.__requester, 88 headers=headers, 89 attributes=response, 90 completed=True, 91 ) 92 93 def create_jwt(self, expiration=None): 94 """ 95 Create a signed JWT 96 https://docs.github.com/en/developers/apps/building-github-apps/authenticating-with-github-apps#authenticating-as-a-github-app 97 98 :return string: 99 """ 100 if expiration is not None: 101 assert isinstance(expiration, int), expiration 102 assert ( 103 Consts.MIN_JWT_EXPIRY <= expiration <= Consts.MAX_JWT_EXPIRY 104 ), expiration 105 106 now = int(time.time()) 107 payload = { 108 "iat": now + self.jwt_issued_at, 109 "exp": now + (expiration if expiration is not None else self.jwt_expiry), 110 "iss": self.integration_id, 111 } 112 encrypted = jwt.encode(payload, key=self.private_key, algorithm="RS256") 113 114 if isinstance(encrypted, bytes): 115 encrypted = encrypted.decode("utf-8") 116 117 return encrypted 118 119 def get_access_token(self, installation_id, permissions=None): 120 """ 121 :calls: `POST /app/installations/{installation_id}/access_tokens <https://docs.github.com/en/rest/apps/apps#create-an-installation-access-token-for-an-app>` 122 :param installation_id: int 123 :param permissions: dict 124 :return: :class:`github.InstallationAuthorization.InstallationAuthorization` 125 """ 126 if permissions is None: 127 permissions = {} 128 129 if not isinstance(permissions, dict): 130 raise GithubException( 131 status=400, data={"message": "Invalid permissions"}, headers=None 132 ) 133 134 body = {"permissions": permissions} 135 headers, response = self.__requester.requestJsonAndCheck( 136 "POST", 137 f"/app/installations/{installation_id}/access_tokens", 138 input=body, 139 ) 140 141 return InstallationAuthorization( 142 requester=self.__requester, 143 headers=headers, 144 attributes=response, 145 completed=True, 146 ) 147 148 @deprecated.deprecated("Use get_repo_installation") 149 def get_installation(self, owner, repo): 150 """ 151 Deprecated by get_repo_installation 152 153 :calls: `GET /repos/{owner}/{repo}/installation <https://docs.github.com/en/rest/reference/apps#get-a-repository-installation-for-the-authenticated-app>` 154 :param owner: str 155 :param repo: str 156 :rtype: :class:`github.Installation.Installation` 157 """ 158 return self._get_installed_app(url=f"/repos/{owner}/{repo}/installation") 159 160 def get_installations(self): 161 """ 162 :calls: GET /app/installations <https://docs.github.com/en/rest/reference/apps#list-installations-for-the-authenticated-app> 163 :rtype: :class:`github.PaginatedList.PaginatedList[github.Installation.Installation]` 164 """ 165 return PaginatedList( 166 contentClass=Installation, 167 requester=self.__requester, 168 firstUrl="/app/installations", 169 firstParams=None, 170 headers=self._get_headers(), 171 list_item="installations", 172 ) 173 174 def get_org_installation(self, org): 175 """ 176 :calls: `GET /orgs/{org}/installation <https://docs.github.com/en/rest/apps/apps#get-an-organization-installation-for-the-authenticated-app>` 177 :param org: str 178 :rtype: :class:`github.Installation.Installation` 179 """ 180 return self._get_installed_app(url=f"/orgs/{org}/installation") 181 182 def get_repo_installation(self, owner, repo): 183 """ 184 :calls: `GET /repos/{owner}/{repo}/installation <https://docs.github.com/en/rest/reference/apps#get-a-repository-installation-for-the-authenticated-app>` 185 :param owner: str 186 :param repo: str 187 :rtype: :class:`github.Installation.Installation` 188 """ 189 return self._get_installed_app(url=f"/repos/{owner}/{repo}/installation") 190 191 def get_user_installation(self, username): 192 """ 193 :calls: `GET /users/{username}/installation <https://docs.github.com/en/rest/apps/apps#get-a-user-installation-for-the-authenticated-app>` 194 :param username: str 195 :rtype: :class:`github.Installation.Installation` 196 """ 197 return self._get_installed_app(url=f"/users/{username}/installation") 198 199 def get_app_installation(self, installation_id): 200 """ 201 :calls: `GET /app/installations/{installation_id} <https://docs.github.com/en/rest/apps/apps#get-an-installation-for-the-authenticated-app>` 202 :param installation_id: int 203 :rtype: :class:`github.Installation.Installation` 204 """ 205 return self._get_installed_app(url=f"/app/installations/{installation_id}") 206 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/github/GithubIntegration.py b/github/GithubIntegration.py --- a/github/GithubIntegration.py +++ b/github/GithubIntegration.py @@ -135,6 +135,7 @@ headers, response = self.__requester.requestJsonAndCheck( "POST", f"/app/installations/{installation_id}/access_tokens", + headers=self._get_headers(), input=body, )
{"golden_diff": "diff --git a/github/GithubIntegration.py b/github/GithubIntegration.py\n--- a/github/GithubIntegration.py\n+++ b/github/GithubIntegration.py\n@@ -135,6 +135,7 @@\n headers, response = self.__requester.requestJsonAndCheck(\n \"POST\",\n f\"/app/installations/{installation_id}/access_tokens\",\n+ headers=self._get_headers(),\n input=body,\n )\n", "issue": "401 unauthorized after upgrading to 1.58.0\nWe use `GithubIntegration.get_access_token` to authenticate and after upgrading we now get this response:\r\n\r\n```\r\ngithub.GithubException.GithubException: 401 {\"message\": \"'Expiration time' claim ('exp') must be a numeric value representing the future time at which the assertion expires\", \"documentation_url\": \"https://docs.github.com/rest\"}\r\n```\r\n\r\nReverting to 1.57 solves the issue.\n", "before_files": [{"content": "import time\n\nimport deprecated\nimport jwt\n\nfrom github import Consts\nfrom github.GithubException import GithubException\nfrom github.Installation import Installation\nfrom github.InstallationAuthorization import InstallationAuthorization\nfrom github.PaginatedList import PaginatedList\nfrom github.Requester import Requester\n\n\nclass GithubIntegration:\n \"\"\"\n Main class to obtain tokens for a GitHub integration.\n \"\"\"\n\n def __init__(\n self,\n integration_id,\n private_key,\n base_url=Consts.DEFAULT_BASE_URL,\n jwt_expiry=Consts.DEFAULT_JWT_EXPIRY,\n jwt_issued_at=Consts.DEFAULT_JWT_ISSUED_AT,\n ):\n \"\"\"\n :param integration_id: int\n :param private_key: string\n :param base_url: string\n :param jwt_expiry: int. Expiry of the JWT used to get the information about this integration.\n The default expiration is in 5 minutes and is capped at 10 minutes according to GitHub documentation\n https://docs.github.com/en/developers/apps/building-github-apps/authenticating-with-github-apps#generating-a-json-web-token-jwt\n :param jwt_issued_at: int. Number of seconds, relative to now, to set for the \"iat\" (issued at) parameter.\n The default value is -60 to protect against clock drift\n \"\"\"\n assert isinstance(integration_id, (int, str)), integration_id\n assert isinstance(private_key, str), \"supplied private key should be a string\"\n assert isinstance(base_url, str), base_url\n assert isinstance(jwt_expiry, int), jwt_expiry\n assert Consts.MIN_JWT_EXPIRY <= jwt_expiry <= Consts.MAX_JWT_EXPIRY, jwt_expiry\n assert isinstance(jwt_issued_at, int)\n\n self.base_url = base_url\n self.integration_id = integration_id\n self.private_key = private_key\n self.jwt_expiry = jwt_expiry\n self.jwt_issued_at = jwt_issued_at\n self.__requester = Requester(\n login_or_token=None,\n password=None,\n jwt=self.create_jwt(),\n app_auth=None,\n base_url=self.base_url,\n timeout=Consts.DEFAULT_TIMEOUT,\n user_agent=\"PyGithub/Python\",\n per_page=Consts.DEFAULT_PER_PAGE,\n verify=True,\n retry=None,\n pool_size=None,\n )\n\n def _get_headers(self):\n \"\"\"\n Get headers for the requests.\n\n :return: dict\n \"\"\"\n return {\n \"Authorization\": f\"Bearer {self.create_jwt()}\",\n \"Accept\": Consts.mediaTypeIntegrationPreview,\n \"User-Agent\": \"PyGithub/Python\",\n }\n\n def _get_installed_app(self, url):\n \"\"\"\n Get installation for the given URL.\n\n :param url: str\n :rtype: :class:`github.Installation.Installation`\n \"\"\"\n headers, response = self.__requester.requestJsonAndCheck(\n \"GET\", url, headers=self._get_headers()\n )\n\n return Installation(\n requester=self.__requester,\n headers=headers,\n attributes=response,\n completed=True,\n )\n\n def create_jwt(self, expiration=None):\n \"\"\"\n Create a signed JWT\n https://docs.github.com/en/developers/apps/building-github-apps/authenticating-with-github-apps#authenticating-as-a-github-app\n\n :return string:\n \"\"\"\n if expiration is not None:\n assert isinstance(expiration, int), expiration\n assert (\n Consts.MIN_JWT_EXPIRY <= expiration <= Consts.MAX_JWT_EXPIRY\n ), expiration\n\n now = int(time.time())\n payload = {\n \"iat\": now + self.jwt_issued_at,\n \"exp\": now + (expiration if expiration is not None else self.jwt_expiry),\n \"iss\": self.integration_id,\n }\n encrypted = jwt.encode(payload, key=self.private_key, algorithm=\"RS256\")\n\n if isinstance(encrypted, bytes):\n encrypted = encrypted.decode(\"utf-8\")\n\n return encrypted\n\n def get_access_token(self, installation_id, permissions=None):\n \"\"\"\n :calls: `POST /app/installations/{installation_id}/access_tokens <https://docs.github.com/en/rest/apps/apps#create-an-installation-access-token-for-an-app>`\n :param installation_id: int\n :param permissions: dict\n :return: :class:`github.InstallationAuthorization.InstallationAuthorization`\n \"\"\"\n if permissions is None:\n permissions = {}\n\n if not isinstance(permissions, dict):\n raise GithubException(\n status=400, data={\"message\": \"Invalid permissions\"}, headers=None\n )\n\n body = {\"permissions\": permissions}\n headers, response = self.__requester.requestJsonAndCheck(\n \"POST\",\n f\"/app/installations/{installation_id}/access_tokens\",\n input=body,\n )\n\n return InstallationAuthorization(\n requester=self.__requester,\n headers=headers,\n attributes=response,\n completed=True,\n )\n\n @deprecated.deprecated(\"Use get_repo_installation\")\n def get_installation(self, owner, repo):\n \"\"\"\n Deprecated by get_repo_installation\n\n :calls: `GET /repos/{owner}/{repo}/installation <https://docs.github.com/en/rest/reference/apps#get-a-repository-installation-for-the-authenticated-app>`\n :param owner: str\n :param repo: str\n :rtype: :class:`github.Installation.Installation`\n \"\"\"\n return self._get_installed_app(url=f\"/repos/{owner}/{repo}/installation\")\n\n def get_installations(self):\n \"\"\"\n :calls: GET /app/installations <https://docs.github.com/en/rest/reference/apps#list-installations-for-the-authenticated-app>\n :rtype: :class:`github.PaginatedList.PaginatedList[github.Installation.Installation]`\n \"\"\"\n return PaginatedList(\n contentClass=Installation,\n requester=self.__requester,\n firstUrl=\"/app/installations\",\n firstParams=None,\n headers=self._get_headers(),\n list_item=\"installations\",\n )\n\n def get_org_installation(self, org):\n \"\"\"\n :calls: `GET /orgs/{org}/installation <https://docs.github.com/en/rest/apps/apps#get-an-organization-installation-for-the-authenticated-app>`\n :param org: str\n :rtype: :class:`github.Installation.Installation`\n \"\"\"\n return self._get_installed_app(url=f\"/orgs/{org}/installation\")\n\n def get_repo_installation(self, owner, repo):\n \"\"\"\n :calls: `GET /repos/{owner}/{repo}/installation <https://docs.github.com/en/rest/reference/apps#get-a-repository-installation-for-the-authenticated-app>`\n :param owner: str\n :param repo: str\n :rtype: :class:`github.Installation.Installation`\n \"\"\"\n return self._get_installed_app(url=f\"/repos/{owner}/{repo}/installation\")\n\n def get_user_installation(self, username):\n \"\"\"\n :calls: `GET /users/{username}/installation <https://docs.github.com/en/rest/apps/apps#get-a-user-installation-for-the-authenticated-app>`\n :param username: str\n :rtype: :class:`github.Installation.Installation`\n \"\"\"\n return self._get_installed_app(url=f\"/users/{username}/installation\")\n\n def get_app_installation(self, installation_id):\n \"\"\"\n :calls: `GET /app/installations/{installation_id} <https://docs.github.com/en/rest/apps/apps#get-an-installation-for-the-authenticated-app>`\n :param installation_id: int\n :rtype: :class:`github.Installation.Installation`\n \"\"\"\n return self._get_installed_app(url=f\"/app/installations/{installation_id}\")\n", "path": "github/GithubIntegration.py"}], "after_files": [{"content": "import time\n\nimport deprecated\nimport jwt\n\nfrom github import Consts\nfrom github.GithubException import GithubException\nfrom github.Installation import Installation\nfrom github.InstallationAuthorization import InstallationAuthorization\nfrom github.PaginatedList import PaginatedList\nfrom github.Requester import Requester\n\n\nclass GithubIntegration:\n \"\"\"\n Main class to obtain tokens for a GitHub integration.\n \"\"\"\n\n def __init__(\n self,\n integration_id,\n private_key,\n base_url=Consts.DEFAULT_BASE_URL,\n jwt_expiry=Consts.DEFAULT_JWT_EXPIRY,\n jwt_issued_at=Consts.DEFAULT_JWT_ISSUED_AT,\n ):\n \"\"\"\n :param integration_id: int\n :param private_key: string\n :param base_url: string\n :param jwt_expiry: int. Expiry of the JWT used to get the information about this integration.\n The default expiration is in 5 minutes and is capped at 10 minutes according to GitHub documentation\n https://docs.github.com/en/developers/apps/building-github-apps/authenticating-with-github-apps#generating-a-json-web-token-jwt\n :param jwt_issued_at: int. Number of seconds, relative to now, to set for the \"iat\" (issued at) parameter.\n The default value is -60 to protect against clock drift\n \"\"\"\n assert isinstance(integration_id, (int, str)), integration_id\n assert isinstance(private_key, str), \"supplied private key should be a string\"\n assert isinstance(base_url, str), base_url\n assert isinstance(jwt_expiry, int), jwt_expiry\n assert Consts.MIN_JWT_EXPIRY <= jwt_expiry <= Consts.MAX_JWT_EXPIRY, jwt_expiry\n assert isinstance(jwt_issued_at, int)\n\n self.base_url = base_url\n self.integration_id = integration_id\n self.private_key = private_key\n self.jwt_expiry = jwt_expiry\n self.jwt_issued_at = jwt_issued_at\n self.__requester = Requester(\n login_or_token=None,\n password=None,\n jwt=self.create_jwt(),\n app_auth=None,\n base_url=self.base_url,\n timeout=Consts.DEFAULT_TIMEOUT,\n user_agent=\"PyGithub/Python\",\n per_page=Consts.DEFAULT_PER_PAGE,\n verify=True,\n retry=None,\n pool_size=None,\n )\n\n def _get_headers(self):\n \"\"\"\n Get headers for the requests.\n\n :return: dict\n \"\"\"\n return {\n \"Authorization\": f\"Bearer {self.create_jwt()}\",\n \"Accept\": Consts.mediaTypeIntegrationPreview,\n \"User-Agent\": \"PyGithub/Python\",\n }\n\n def _get_installed_app(self, url):\n \"\"\"\n Get installation for the given URL.\n\n :param url: str\n :rtype: :class:`github.Installation.Installation`\n \"\"\"\n headers, response = self.__requester.requestJsonAndCheck(\n \"GET\", url, headers=self._get_headers()\n )\n\n return Installation(\n requester=self.__requester,\n headers=headers,\n attributes=response,\n completed=True,\n )\n\n def create_jwt(self, expiration=None):\n \"\"\"\n Create a signed JWT\n https://docs.github.com/en/developers/apps/building-github-apps/authenticating-with-github-apps#authenticating-as-a-github-app\n\n :return string:\n \"\"\"\n if expiration is not None:\n assert isinstance(expiration, int), expiration\n assert (\n Consts.MIN_JWT_EXPIRY <= expiration <= Consts.MAX_JWT_EXPIRY\n ), expiration\n\n now = int(time.time())\n payload = {\n \"iat\": now + self.jwt_issued_at,\n \"exp\": now + (expiration if expiration is not None else self.jwt_expiry),\n \"iss\": self.integration_id,\n }\n encrypted = jwt.encode(payload, key=self.private_key, algorithm=\"RS256\")\n\n if isinstance(encrypted, bytes):\n encrypted = encrypted.decode(\"utf-8\")\n\n return encrypted\n\n def get_access_token(self, installation_id, permissions=None):\n \"\"\"\n :calls: `POST /app/installations/{installation_id}/access_tokens <https://docs.github.com/en/rest/apps/apps#create-an-installation-access-token-for-an-app>`\n :param installation_id: int\n :param permissions: dict\n :return: :class:`github.InstallationAuthorization.InstallationAuthorization`\n \"\"\"\n if permissions is None:\n permissions = {}\n\n if not isinstance(permissions, dict):\n raise GithubException(\n status=400, data={\"message\": \"Invalid permissions\"}, headers=None\n )\n\n body = {\"permissions\": permissions}\n headers, response = self.__requester.requestJsonAndCheck(\n \"POST\",\n f\"/app/installations/{installation_id}/access_tokens\",\n headers=self._get_headers(),\n input=body,\n )\n\n return InstallationAuthorization(\n requester=self.__requester,\n headers=headers,\n attributes=response,\n completed=True,\n )\n\n @deprecated.deprecated(\"Use get_repo_installation\")\n def get_installation(self, owner, repo):\n \"\"\"\n Deprecated by get_repo_installation\n\n :calls: `GET /repos/{owner}/{repo}/installation <https://docs.github.com/en/rest/reference/apps#get-a-repository-installation-for-the-authenticated-app>`\n :param owner: str\n :param repo: str\n :rtype: :class:`github.Installation.Installation`\n \"\"\"\n return self._get_installed_app(url=f\"/repos/{owner}/{repo}/installation\")\n\n def get_installations(self):\n \"\"\"\n :calls: GET /app/installations <https://docs.github.com/en/rest/reference/apps#list-installations-for-the-authenticated-app>\n :rtype: :class:`github.PaginatedList.PaginatedList[github.Installation.Installation]`\n \"\"\"\n return PaginatedList(\n contentClass=Installation,\n requester=self.__requester,\n firstUrl=\"/app/installations\",\n firstParams=None,\n headers=self._get_headers(),\n list_item=\"installations\",\n )\n\n def get_org_installation(self, org):\n \"\"\"\n :calls: `GET /orgs/{org}/installation <https://docs.github.com/en/rest/apps/apps#get-an-organization-installation-for-the-authenticated-app>`\n :param org: str\n :rtype: :class:`github.Installation.Installation`\n \"\"\"\n return self._get_installed_app(url=f\"/orgs/{org}/installation\")\n\n def get_repo_installation(self, owner, repo):\n \"\"\"\n :calls: `GET /repos/{owner}/{repo}/installation <https://docs.github.com/en/rest/reference/apps#get-a-repository-installation-for-the-authenticated-app>`\n :param owner: str\n :param repo: str\n :rtype: :class:`github.Installation.Installation`\n \"\"\"\n return self._get_installed_app(url=f\"/repos/{owner}/{repo}/installation\")\n\n def get_user_installation(self, username):\n \"\"\"\n :calls: `GET /users/{username}/installation <https://docs.github.com/en/rest/apps/apps#get-a-user-installation-for-the-authenticated-app>`\n :param username: str\n :rtype: :class:`github.Installation.Installation`\n \"\"\"\n return self._get_installed_app(url=f\"/users/{username}/installation\")\n\n def get_app_installation(self, installation_id):\n \"\"\"\n :calls: `GET /app/installations/{installation_id} <https://docs.github.com/en/rest/apps/apps#get-an-installation-for-the-authenticated-app>`\n :param installation_id: int\n :rtype: :class:`github.Installation.Installation`\n \"\"\"\n return self._get_installed_app(url=f\"/app/installations/{installation_id}\")\n", "path": "github/GithubIntegration.py"}]}
2,546
92
gh_patches_debug_7732
rasdani/github-patches
git_diff
pytorch__vision-3325
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- a question about segmentation model loading ## ❓ Questions and Help Why they are different? ### Please note that this issue tracker is not a help form and this issue will be closed. ![image](https://user-images.githubusercontent.com/32593161/106227085-8d5d2000-6223-11eb-9c66-fcb037faac92.png) We have a set of [listed resources available on the website](https://pytorch.org/resources). Our primary means of support is our discussion forum: - [Discussion Forum](https://discuss.pytorch.org/) cc @vfdev-5 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `torchvision/models/segmentation/segmentation.py` Content: ``` 1 from .._utils import IntermediateLayerGetter 2 from ..utils import load_state_dict_from_url 3 from .. import mobilenetv3 4 from .. import resnet 5 from .deeplabv3 import DeepLabHead, DeepLabV3 6 from .fcn import FCN, FCNHead 7 from .lraspp import LRASPP 8 9 10 __all__ = ['fcn_resnet50', 'fcn_resnet101', 'deeplabv3_resnet50', 'deeplabv3_resnet101', 11 'deeplabv3_mobilenet_v3_large', 'lraspp_mobilenet_v3_large'] 12 13 14 model_urls = { 15 'fcn_resnet50_coco': 'https://download.pytorch.org/models/fcn_resnet50_coco-1167a1af.pth', 16 'fcn_resnet101_coco': 'https://download.pytorch.org/models/fcn_resnet101_coco-7ecb50ca.pth', 17 'deeplabv3_resnet50_coco': 'https://download.pytorch.org/models/deeplabv3_resnet50_coco-cd0a2569.pth', 18 'deeplabv3_resnet101_coco': 'https://download.pytorch.org/models/deeplabv3_resnet101_coco-586e9e4e.pth', 19 'deeplabv3_mobilenet_v3_large_coco': 20 'https://download.pytorch.org/models/deeplabv3_mobilenet_v3_large-fc3c493d.pth', 21 'lraspp_mobilenet_v3_large_coco': 'https://download.pytorch.org/models/lraspp_mobilenet_v3_large-d234d4ea.pth', 22 } 23 24 25 def _segm_model(name, backbone_name, num_classes, aux, pretrained_backbone=True): 26 if 'resnet' in backbone_name: 27 backbone = resnet.__dict__[backbone_name]( 28 pretrained=pretrained_backbone, 29 replace_stride_with_dilation=[False, True, True]) 30 out_layer = 'layer4' 31 out_inplanes = 2048 32 aux_layer = 'layer3' 33 aux_inplanes = 1024 34 elif 'mobilenet_v3' in backbone_name: 35 backbone = mobilenetv3.__dict__[backbone_name](pretrained=pretrained_backbone, _dilated=True).features 36 37 # Gather the indices of blocks which are strided. These are the locations of C1, ..., Cn-1 blocks. 38 # The first and last blocks are always included because they are the C0 (conv1) and Cn. 39 stage_indices = [0] + [i for i, b in enumerate(backbone) if getattr(b, "_is_cn", False)] + [len(backbone) - 1] 40 out_pos = stage_indices[-1] # use C5 which has output_stride = 16 41 out_layer = str(out_pos) 42 out_inplanes = backbone[out_pos].out_channels 43 aux_pos = stage_indices[-4] # use C2 here which has output_stride = 8 44 aux_layer = str(aux_pos) 45 aux_inplanes = backbone[aux_pos].out_channels 46 else: 47 raise NotImplementedError('backbone {} is not supported as of now'.format(backbone_name)) 48 49 return_layers = {out_layer: 'out'} 50 if aux: 51 return_layers[aux_layer] = 'aux' 52 backbone = IntermediateLayerGetter(backbone, return_layers=return_layers) 53 54 aux_classifier = None 55 if aux: 56 aux_classifier = FCNHead(aux_inplanes, num_classes) 57 58 model_map = { 59 'deeplabv3': (DeepLabHead, DeepLabV3), 60 'fcn': (FCNHead, FCN), 61 } 62 classifier = model_map[name][0](out_inplanes, num_classes) 63 base_model = model_map[name][1] 64 65 model = base_model(backbone, classifier, aux_classifier) 66 return model 67 68 69 def _load_model(arch_type, backbone, pretrained, progress, num_classes, aux_loss, **kwargs): 70 if pretrained: 71 aux_loss = True 72 model = _segm_model(arch_type, backbone, num_classes, aux_loss, **kwargs) 73 if pretrained: 74 _load_weights(model, arch_type, backbone, progress) 75 return model 76 77 78 def _load_weights(model, arch_type, backbone, progress): 79 arch = arch_type + '_' + backbone + '_coco' 80 model_url = model_urls.get(arch, None) 81 if model_url is None: 82 raise NotImplementedError('pretrained {} is not supported as of now'.format(arch)) 83 else: 84 state_dict = load_state_dict_from_url(model_url, progress=progress) 85 model.load_state_dict(state_dict) 86 87 88 def _segm_lraspp_mobilenetv3(backbone_name, num_classes, pretrained_backbone=True): 89 backbone = mobilenetv3.__dict__[backbone_name](pretrained=pretrained_backbone, _dilated=True).features 90 91 # Gather the indices of blocks which are strided. These are the locations of C1, ..., Cn-1 blocks. 92 # The first and last blocks are always included because they are the C0 (conv1) and Cn. 93 stage_indices = [0] + [i for i, b in enumerate(backbone) if getattr(b, "_is_cn", False)] + [len(backbone) - 1] 94 low_pos = stage_indices[-4] # use C2 here which has output_stride = 8 95 high_pos = stage_indices[-1] # use C5 which has output_stride = 16 96 low_channels = backbone[low_pos].out_channels 97 high_channels = backbone[high_pos].out_channels 98 99 backbone = IntermediateLayerGetter(backbone, return_layers={str(low_pos): 'low', str(high_pos): 'high'}) 100 101 model = LRASPP(backbone, low_channels, high_channels, num_classes) 102 return model 103 104 105 def fcn_resnet50(pretrained=False, progress=True, 106 num_classes=21, aux_loss=None, **kwargs): 107 """Constructs a Fully-Convolutional Network model with a ResNet-50 backbone. 108 109 Args: 110 pretrained (bool): If True, returns a model pre-trained on COCO train2017 which 111 contains the same classes as Pascal VOC 112 progress (bool): If True, displays a progress bar of the download to stderr 113 num_classes (int): number of output classes of the model (including the background) 114 aux_loss (bool): If True, it uses an auxiliary loss 115 """ 116 return _load_model('fcn', 'resnet50', pretrained, progress, num_classes, aux_loss, **kwargs) 117 118 119 def fcn_resnet101(pretrained=False, progress=True, 120 num_classes=21, aux_loss=None, **kwargs): 121 """Constructs a Fully-Convolutional Network model with a ResNet-101 backbone. 122 123 Args: 124 pretrained (bool): If True, returns a model pre-trained on COCO train2017 which 125 contains the same classes as Pascal VOC 126 progress (bool): If True, displays a progress bar of the download to stderr 127 num_classes (int): number of output classes of the model (including the background) 128 aux_loss (bool): If True, it uses an auxiliary loss 129 """ 130 return _load_model('fcn', 'resnet101', pretrained, progress, num_classes, aux_loss, **kwargs) 131 132 133 def deeplabv3_resnet50(pretrained=False, progress=True, 134 num_classes=21, aux_loss=None, **kwargs): 135 """Constructs a DeepLabV3 model with a ResNet-50 backbone. 136 137 Args: 138 pretrained (bool): If True, returns a model pre-trained on COCO train2017 which 139 contains the same classes as Pascal VOC 140 progress (bool): If True, displays a progress bar of the download to stderr 141 num_classes (int): number of output classes of the model (including the background) 142 aux_loss (bool): If True, it uses an auxiliary loss 143 """ 144 return _load_model('deeplabv3', 'resnet50', pretrained, progress, num_classes, aux_loss, **kwargs) 145 146 147 def deeplabv3_resnet101(pretrained=False, progress=True, 148 num_classes=21, aux_loss=None, **kwargs): 149 """Constructs a DeepLabV3 model with a ResNet-101 backbone. 150 151 Args: 152 pretrained (bool): If True, returns a model pre-trained on COCO train2017 which 153 contains the same classes as Pascal VOC 154 progress (bool): If True, displays a progress bar of the download to stderr 155 num_classes (int): The number of classes 156 aux_loss (bool): If True, include an auxiliary classifier 157 """ 158 return _load_model('deeplabv3', 'resnet101', pretrained, progress, num_classes, aux_loss, **kwargs) 159 160 161 def deeplabv3_mobilenet_v3_large(pretrained=False, progress=True, 162 num_classes=21, aux_loss=None, **kwargs): 163 """Constructs a DeepLabV3 model with a MobileNetV3-Large backbone. 164 165 Args: 166 pretrained (bool): If True, returns a model pre-trained on COCO train2017 which 167 contains the same classes as Pascal VOC 168 progress (bool): If True, displays a progress bar of the download to stderr 169 num_classes (int): number of output classes of the model (including the background) 170 aux_loss (bool): If True, it uses an auxiliary loss 171 """ 172 return _load_model('deeplabv3', 'mobilenet_v3_large', pretrained, progress, num_classes, aux_loss, **kwargs) 173 174 175 def lraspp_mobilenet_v3_large(pretrained=False, progress=True, num_classes=21, **kwargs): 176 """Constructs a Lite R-ASPP Network model with a MobileNetV3-Large backbone. 177 178 Args: 179 pretrained (bool): If True, returns a model pre-trained on COCO train2017 which 180 contains the same classes as Pascal VOC 181 progress (bool): If True, displays a progress bar of the download to stderr 182 num_classes (int): number of output classes of the model (including the background) 183 """ 184 if kwargs.pop("aux_loss", False): 185 raise NotImplementedError('This model does not use auxiliary loss') 186 187 backbone_name = 'mobilenet_v3_large' 188 model = _segm_lraspp_mobilenetv3(backbone_name, num_classes, **kwargs) 189 190 if pretrained: 191 _load_weights(model, 'lraspp', backbone_name, progress) 192 193 return model 194 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/torchvision/models/segmentation/segmentation.py b/torchvision/models/segmentation/segmentation.py --- a/torchvision/models/segmentation/segmentation.py +++ b/torchvision/models/segmentation/segmentation.py @@ -69,6 +69,7 @@ def _load_model(arch_type, backbone, pretrained, progress, num_classes, aux_loss, **kwargs): if pretrained: aux_loss = True + kwargs["pretrained_backbone"] = False model = _segm_model(arch_type, backbone, num_classes, aux_loss, **kwargs) if pretrained: _load_weights(model, arch_type, backbone, progress)
{"golden_diff": "diff --git a/torchvision/models/segmentation/segmentation.py b/torchvision/models/segmentation/segmentation.py\n--- a/torchvision/models/segmentation/segmentation.py\n+++ b/torchvision/models/segmentation/segmentation.py\n@@ -69,6 +69,7 @@\n def _load_model(arch_type, backbone, pretrained, progress, num_classes, aux_loss, **kwargs):\n if pretrained:\n aux_loss = True\n+ kwargs[\"pretrained_backbone\"] = False\n model = _segm_model(arch_type, backbone, num_classes, aux_loss, **kwargs)\n if pretrained:\n _load_weights(model, arch_type, backbone, progress)\n", "issue": "a question about segmentation model loading\n## \u2753 Questions and Help\r\nWhy they are different\uff1f\r\n### Please note that this issue tracker is not a help form and this issue will be closed.\r\n![image](https://user-images.githubusercontent.com/32593161/106227085-8d5d2000-6223-11eb-9c66-fcb037faac92.png)\r\n\r\nWe have a set of [listed resources available on the website](https://pytorch.org/resources). Our primary means of support is our discussion forum:\r\n\r\n- [Discussion Forum](https://discuss.pytorch.org/)\r\n\n\ncc @vfdev-5\n", "before_files": [{"content": "from .._utils import IntermediateLayerGetter\nfrom ..utils import load_state_dict_from_url\nfrom .. import mobilenetv3\nfrom .. import resnet\nfrom .deeplabv3 import DeepLabHead, DeepLabV3\nfrom .fcn import FCN, FCNHead\nfrom .lraspp import LRASPP\n\n\n__all__ = ['fcn_resnet50', 'fcn_resnet101', 'deeplabv3_resnet50', 'deeplabv3_resnet101',\n 'deeplabv3_mobilenet_v3_large', 'lraspp_mobilenet_v3_large']\n\n\nmodel_urls = {\n 'fcn_resnet50_coco': 'https://download.pytorch.org/models/fcn_resnet50_coco-1167a1af.pth',\n 'fcn_resnet101_coco': 'https://download.pytorch.org/models/fcn_resnet101_coco-7ecb50ca.pth',\n 'deeplabv3_resnet50_coco': 'https://download.pytorch.org/models/deeplabv3_resnet50_coco-cd0a2569.pth',\n 'deeplabv3_resnet101_coco': 'https://download.pytorch.org/models/deeplabv3_resnet101_coco-586e9e4e.pth',\n 'deeplabv3_mobilenet_v3_large_coco':\n 'https://download.pytorch.org/models/deeplabv3_mobilenet_v3_large-fc3c493d.pth',\n 'lraspp_mobilenet_v3_large_coco': 'https://download.pytorch.org/models/lraspp_mobilenet_v3_large-d234d4ea.pth',\n}\n\n\ndef _segm_model(name, backbone_name, num_classes, aux, pretrained_backbone=True):\n if 'resnet' in backbone_name:\n backbone = resnet.__dict__[backbone_name](\n pretrained=pretrained_backbone,\n replace_stride_with_dilation=[False, True, True])\n out_layer = 'layer4'\n out_inplanes = 2048\n aux_layer = 'layer3'\n aux_inplanes = 1024\n elif 'mobilenet_v3' in backbone_name:\n backbone = mobilenetv3.__dict__[backbone_name](pretrained=pretrained_backbone, _dilated=True).features\n\n # Gather the indices of blocks which are strided. These are the locations of C1, ..., Cn-1 blocks.\n # The first and last blocks are always included because they are the C0 (conv1) and Cn.\n stage_indices = [0] + [i for i, b in enumerate(backbone) if getattr(b, \"_is_cn\", False)] + [len(backbone) - 1]\n out_pos = stage_indices[-1] # use C5 which has output_stride = 16\n out_layer = str(out_pos)\n out_inplanes = backbone[out_pos].out_channels\n aux_pos = stage_indices[-4] # use C2 here which has output_stride = 8\n aux_layer = str(aux_pos)\n aux_inplanes = backbone[aux_pos].out_channels\n else:\n raise NotImplementedError('backbone {} is not supported as of now'.format(backbone_name))\n\n return_layers = {out_layer: 'out'}\n if aux:\n return_layers[aux_layer] = 'aux'\n backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)\n\n aux_classifier = None\n if aux:\n aux_classifier = FCNHead(aux_inplanes, num_classes)\n\n model_map = {\n 'deeplabv3': (DeepLabHead, DeepLabV3),\n 'fcn': (FCNHead, FCN),\n }\n classifier = model_map[name][0](out_inplanes, num_classes)\n base_model = model_map[name][1]\n\n model = base_model(backbone, classifier, aux_classifier)\n return model\n\n\ndef _load_model(arch_type, backbone, pretrained, progress, num_classes, aux_loss, **kwargs):\n if pretrained:\n aux_loss = True\n model = _segm_model(arch_type, backbone, num_classes, aux_loss, **kwargs)\n if pretrained:\n _load_weights(model, arch_type, backbone, progress)\n return model\n\n\ndef _load_weights(model, arch_type, backbone, progress):\n arch = arch_type + '_' + backbone + '_coco'\n model_url = model_urls.get(arch, None)\n if model_url is None:\n raise NotImplementedError('pretrained {} is not supported as of now'.format(arch))\n else:\n state_dict = load_state_dict_from_url(model_url, progress=progress)\n model.load_state_dict(state_dict)\n\n\ndef _segm_lraspp_mobilenetv3(backbone_name, num_classes, pretrained_backbone=True):\n backbone = mobilenetv3.__dict__[backbone_name](pretrained=pretrained_backbone, _dilated=True).features\n\n # Gather the indices of blocks which are strided. These are the locations of C1, ..., Cn-1 blocks.\n # The first and last blocks are always included because they are the C0 (conv1) and Cn.\n stage_indices = [0] + [i for i, b in enumerate(backbone) if getattr(b, \"_is_cn\", False)] + [len(backbone) - 1]\n low_pos = stage_indices[-4] # use C2 here which has output_stride = 8\n high_pos = stage_indices[-1] # use C5 which has output_stride = 16\n low_channels = backbone[low_pos].out_channels\n high_channels = backbone[high_pos].out_channels\n\n backbone = IntermediateLayerGetter(backbone, return_layers={str(low_pos): 'low', str(high_pos): 'high'})\n\n model = LRASPP(backbone, low_channels, high_channels, num_classes)\n return model\n\n\ndef fcn_resnet50(pretrained=False, progress=True,\n num_classes=21, aux_loss=None, **kwargs):\n \"\"\"Constructs a Fully-Convolutional Network model with a ResNet-50 backbone.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017 which\n contains the same classes as Pascal VOC\n progress (bool): If True, displays a progress bar of the download to stderr\n num_classes (int): number of output classes of the model (including the background)\n aux_loss (bool): If True, it uses an auxiliary loss\n \"\"\"\n return _load_model('fcn', 'resnet50', pretrained, progress, num_classes, aux_loss, **kwargs)\n\n\ndef fcn_resnet101(pretrained=False, progress=True,\n num_classes=21, aux_loss=None, **kwargs):\n \"\"\"Constructs a Fully-Convolutional Network model with a ResNet-101 backbone.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017 which\n contains the same classes as Pascal VOC\n progress (bool): If True, displays a progress bar of the download to stderr\n num_classes (int): number of output classes of the model (including the background)\n aux_loss (bool): If True, it uses an auxiliary loss\n \"\"\"\n return _load_model('fcn', 'resnet101', pretrained, progress, num_classes, aux_loss, **kwargs)\n\n\ndef deeplabv3_resnet50(pretrained=False, progress=True,\n num_classes=21, aux_loss=None, **kwargs):\n \"\"\"Constructs a DeepLabV3 model with a ResNet-50 backbone.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017 which\n contains the same classes as Pascal VOC\n progress (bool): If True, displays a progress bar of the download to stderr\n num_classes (int): number of output classes of the model (including the background)\n aux_loss (bool): If True, it uses an auxiliary loss\n \"\"\"\n return _load_model('deeplabv3', 'resnet50', pretrained, progress, num_classes, aux_loss, **kwargs)\n\n\ndef deeplabv3_resnet101(pretrained=False, progress=True,\n num_classes=21, aux_loss=None, **kwargs):\n \"\"\"Constructs a DeepLabV3 model with a ResNet-101 backbone.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017 which\n contains the same classes as Pascal VOC\n progress (bool): If True, displays a progress bar of the download to stderr\n num_classes (int): The number of classes\n aux_loss (bool): If True, include an auxiliary classifier\n \"\"\"\n return _load_model('deeplabv3', 'resnet101', pretrained, progress, num_classes, aux_loss, **kwargs)\n\n\ndef deeplabv3_mobilenet_v3_large(pretrained=False, progress=True,\n num_classes=21, aux_loss=None, **kwargs):\n \"\"\"Constructs a DeepLabV3 model with a MobileNetV3-Large backbone.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017 which\n contains the same classes as Pascal VOC\n progress (bool): If True, displays a progress bar of the download to stderr\n num_classes (int): number of output classes of the model (including the background)\n aux_loss (bool): If True, it uses an auxiliary loss\n \"\"\"\n return _load_model('deeplabv3', 'mobilenet_v3_large', pretrained, progress, num_classes, aux_loss, **kwargs)\n\n\ndef lraspp_mobilenet_v3_large(pretrained=False, progress=True, num_classes=21, **kwargs):\n \"\"\"Constructs a Lite R-ASPP Network model with a MobileNetV3-Large backbone.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017 which\n contains the same classes as Pascal VOC\n progress (bool): If True, displays a progress bar of the download to stderr\n num_classes (int): number of output classes of the model (including the background)\n \"\"\"\n if kwargs.pop(\"aux_loss\", False):\n raise NotImplementedError('This model does not use auxiliary loss')\n\n backbone_name = 'mobilenet_v3_large'\n model = _segm_lraspp_mobilenetv3(backbone_name, num_classes, **kwargs)\n\n if pretrained:\n _load_weights(model, 'lraspp', backbone_name, progress)\n\n return model\n", "path": "torchvision/models/segmentation/segmentation.py"}], "after_files": [{"content": "from .._utils import IntermediateLayerGetter\nfrom ..utils import load_state_dict_from_url\nfrom .. import mobilenetv3\nfrom .. import resnet\nfrom .deeplabv3 import DeepLabHead, DeepLabV3\nfrom .fcn import FCN, FCNHead\nfrom .lraspp import LRASPP\n\n\n__all__ = ['fcn_resnet50', 'fcn_resnet101', 'deeplabv3_resnet50', 'deeplabv3_resnet101',\n 'deeplabv3_mobilenet_v3_large', 'lraspp_mobilenet_v3_large']\n\n\nmodel_urls = {\n 'fcn_resnet50_coco': 'https://download.pytorch.org/models/fcn_resnet50_coco-1167a1af.pth',\n 'fcn_resnet101_coco': 'https://download.pytorch.org/models/fcn_resnet101_coco-7ecb50ca.pth',\n 'deeplabv3_resnet50_coco': 'https://download.pytorch.org/models/deeplabv3_resnet50_coco-cd0a2569.pth',\n 'deeplabv3_resnet101_coco': 'https://download.pytorch.org/models/deeplabv3_resnet101_coco-586e9e4e.pth',\n 'deeplabv3_mobilenet_v3_large_coco':\n 'https://download.pytorch.org/models/deeplabv3_mobilenet_v3_large-fc3c493d.pth',\n 'lraspp_mobilenet_v3_large_coco': 'https://download.pytorch.org/models/lraspp_mobilenet_v3_large-d234d4ea.pth',\n}\n\n\ndef _segm_model(name, backbone_name, num_classes, aux, pretrained_backbone=True):\n if 'resnet' in backbone_name:\n backbone = resnet.__dict__[backbone_name](\n pretrained=pretrained_backbone,\n replace_stride_with_dilation=[False, True, True])\n out_layer = 'layer4'\n out_inplanes = 2048\n aux_layer = 'layer3'\n aux_inplanes = 1024\n elif 'mobilenet_v3' in backbone_name:\n backbone = mobilenetv3.__dict__[backbone_name](pretrained=pretrained_backbone, _dilated=True).features\n\n # Gather the indices of blocks which are strided. These are the locations of C1, ..., Cn-1 blocks.\n # The first and last blocks are always included because they are the C0 (conv1) and Cn.\n stage_indices = [0] + [i for i, b in enumerate(backbone) if getattr(b, \"_is_cn\", False)] + [len(backbone) - 1]\n out_pos = stage_indices[-1] # use C5 which has output_stride = 16\n out_layer = str(out_pos)\n out_inplanes = backbone[out_pos].out_channels\n aux_pos = stage_indices[-4] # use C2 here which has output_stride = 8\n aux_layer = str(aux_pos)\n aux_inplanes = backbone[aux_pos].out_channels\n else:\n raise NotImplementedError('backbone {} is not supported as of now'.format(backbone_name))\n\n return_layers = {out_layer: 'out'}\n if aux:\n return_layers[aux_layer] = 'aux'\n backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)\n\n aux_classifier = None\n if aux:\n aux_classifier = FCNHead(aux_inplanes, num_classes)\n\n model_map = {\n 'deeplabv3': (DeepLabHead, DeepLabV3),\n 'fcn': (FCNHead, FCN),\n }\n classifier = model_map[name][0](out_inplanes, num_classes)\n base_model = model_map[name][1]\n\n model = base_model(backbone, classifier, aux_classifier)\n return model\n\n\ndef _load_model(arch_type, backbone, pretrained, progress, num_classes, aux_loss, **kwargs):\n if pretrained:\n aux_loss = True\n kwargs[\"pretrained_backbone\"] = False\n model = _segm_model(arch_type, backbone, num_classes, aux_loss, **kwargs)\n if pretrained:\n _load_weights(model, arch_type, backbone, progress)\n return model\n\n\ndef _load_weights(model, arch_type, backbone, progress):\n arch = arch_type + '_' + backbone + '_coco'\n model_url = model_urls.get(arch, None)\n if model_url is None:\n raise NotImplementedError('pretrained {} is not supported as of now'.format(arch))\n else:\n state_dict = load_state_dict_from_url(model_url, progress=progress)\n model.load_state_dict(state_dict)\n\n\ndef _segm_lraspp_mobilenetv3(backbone_name, num_classes, pretrained_backbone=True):\n backbone = mobilenetv3.__dict__[backbone_name](pretrained=pretrained_backbone, _dilated=True).features\n\n # Gather the indices of blocks which are strided. These are the locations of C1, ..., Cn-1 blocks.\n # The first and last blocks are always included because they are the C0 (conv1) and Cn.\n stage_indices = [0] + [i for i, b in enumerate(backbone) if getattr(b, \"_is_cn\", False)] + [len(backbone) - 1]\n low_pos = stage_indices[-4] # use C2 here which has output_stride = 8\n high_pos = stage_indices[-1] # use C5 which has output_stride = 16\n low_channels = backbone[low_pos].out_channels\n high_channels = backbone[high_pos].out_channels\n\n backbone = IntermediateLayerGetter(backbone, return_layers={str(low_pos): 'low', str(high_pos): 'high'})\n\n model = LRASPP(backbone, low_channels, high_channels, num_classes)\n return model\n\n\ndef fcn_resnet50(pretrained=False, progress=True,\n num_classes=21, aux_loss=None, **kwargs):\n \"\"\"Constructs a Fully-Convolutional Network model with a ResNet-50 backbone.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017 which\n contains the same classes as Pascal VOC\n progress (bool): If True, displays a progress bar of the download to stderr\n num_classes (int): number of output classes of the model (including the background)\n aux_loss (bool): If True, it uses an auxiliary loss\n \"\"\"\n return _load_model('fcn', 'resnet50', pretrained, progress, num_classes, aux_loss, **kwargs)\n\n\ndef fcn_resnet101(pretrained=False, progress=True,\n num_classes=21, aux_loss=None, **kwargs):\n \"\"\"Constructs a Fully-Convolutional Network model with a ResNet-101 backbone.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017 which\n contains the same classes as Pascal VOC\n progress (bool): If True, displays a progress bar of the download to stderr\n num_classes (int): number of output classes of the model (including the background)\n aux_loss (bool): If True, it uses an auxiliary loss\n \"\"\"\n return _load_model('fcn', 'resnet101', pretrained, progress, num_classes, aux_loss, **kwargs)\n\n\ndef deeplabv3_resnet50(pretrained=False, progress=True,\n num_classes=21, aux_loss=None, **kwargs):\n \"\"\"Constructs a DeepLabV3 model with a ResNet-50 backbone.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017 which\n contains the same classes as Pascal VOC\n progress (bool): If True, displays a progress bar of the download to stderr\n num_classes (int): number of output classes of the model (including the background)\n aux_loss (bool): If True, it uses an auxiliary loss\n \"\"\"\n return _load_model('deeplabv3', 'resnet50', pretrained, progress, num_classes, aux_loss, **kwargs)\n\n\ndef deeplabv3_resnet101(pretrained=False, progress=True,\n num_classes=21, aux_loss=None, **kwargs):\n \"\"\"Constructs a DeepLabV3 model with a ResNet-101 backbone.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017 which\n contains the same classes as Pascal VOC\n progress (bool): If True, displays a progress bar of the download to stderr\n num_classes (int): The number of classes\n aux_loss (bool): If True, include an auxiliary classifier\n \"\"\"\n return _load_model('deeplabv3', 'resnet101', pretrained, progress, num_classes, aux_loss, **kwargs)\n\n\ndef deeplabv3_mobilenet_v3_large(pretrained=False, progress=True,\n num_classes=21, aux_loss=None, **kwargs):\n \"\"\"Constructs a DeepLabV3 model with a MobileNetV3-Large backbone.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017 which\n contains the same classes as Pascal VOC\n progress (bool): If True, displays a progress bar of the download to stderr\n num_classes (int): number of output classes of the model (including the background)\n aux_loss (bool): If True, it uses an auxiliary loss\n \"\"\"\n return _load_model('deeplabv3', 'mobilenet_v3_large', pretrained, progress, num_classes, aux_loss, **kwargs)\n\n\ndef lraspp_mobilenet_v3_large(pretrained=False, progress=True, num_classes=21, **kwargs):\n \"\"\"Constructs a Lite R-ASPP Network model with a MobileNetV3-Large backbone.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017 which\n contains the same classes as Pascal VOC\n progress (bool): If True, displays a progress bar of the download to stderr\n num_classes (int): number of output classes of the model (including the background)\n \"\"\"\n if kwargs.pop(\"aux_loss\", False):\n raise NotImplementedError('This model does not use auxiliary loss')\n\n backbone_name = 'mobilenet_v3_large'\n model = _segm_lraspp_mobilenetv3(backbone_name, num_classes, **kwargs)\n\n if pretrained:\n _load_weights(model, 'lraspp', backbone_name, progress)\n\n return model\n", "path": "torchvision/models/segmentation/segmentation.py"}]}
3,286
152
gh_patches_debug_1893
rasdani/github-patches
git_diff
rasterio__rasterio-778
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Copy colormap when rasters are merged I'm running `rio merge` over a few single band images that contain a colormap. During the merge, the colormap is not copied to the new raster. Can we modify `rio merge` to preserve the colormap? I have an initial pass of this change at: https://github.com/kapadia/rasterio/tree/rio-merge-colormap --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `rasterio/rio/merge.py` Content: ``` 1 """Merge command.""" 2 3 import logging 4 5 import click 6 from cligj import files_inout_arg, format_opt 7 8 from .helpers import resolve_inout 9 from . import options 10 import rasterio 11 12 13 @click.command(short_help="Merge a stack of raster datasets.") 14 @files_inout_arg 15 @options.output_opt 16 @format_opt 17 @options.bounds_opt 18 @options.resolution_opt 19 @options.nodata_opt 20 @options.force_overwrite_opt 21 @click.option('--precision', type=int, default=7, 22 help="Number of decimal places of precision in alignment of " 23 "pixels") 24 @options.creation_options 25 @click.pass_context 26 def merge(ctx, files, output, driver, bounds, res, nodata, force_overwrite, 27 precision, creation_options): 28 """Copy valid pixels from input files to an output file. 29 30 All files must have the same number of bands, data type, and 31 coordinate reference system. 32 33 Input files are merged in their listed order using the reverse 34 painter's algorithm. If the output file exists, its values will be 35 overwritten by input values. 36 37 Geospatial bounds and resolution of a new output file in the 38 units of the input file coordinate reference system may be provided 39 and are otherwise taken from the first input file. 40 41 Note: --res changed from 2 parameters in 0.25. 42 43 \b 44 --res 0.1 0.1 => --res 0.1 (square) 45 --res 0.1 0.2 => --res 0.1 --res 0.2 (rectangular) 46 """ 47 from rasterio.merge import merge as merge_tool 48 49 verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 1 50 51 output, files = resolve_inout( 52 files=files, output=output, force_overwrite=force_overwrite) 53 54 with rasterio.Env(CPL_DEBUG=verbosity > 2): 55 sources = [rasterio.open(f) for f in files] 56 dest, output_transform = merge_tool(sources, bounds=bounds, res=res, 57 nodata=nodata, precision=precision) 58 59 profile = sources[0].profile 60 profile.pop('affine') 61 profile['transform'] = output_transform 62 profile['height'] = dest.shape[1] 63 profile['width'] = dest.shape[2] 64 profile['driver'] = driver 65 66 profile.update(**creation_options) 67 68 with rasterio.open(output, 'w', **profile) as dst: 69 dst.write(dest) 70 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/rasterio/rio/merge.py b/rasterio/rio/merge.py --- a/rasterio/rio/merge.py +++ b/rasterio/rio/merge.py @@ -67,3 +67,10 @@ with rasterio.open(output, 'w', **profile) as dst: dst.write(dest) + + # uses the colormap in the first input raster. + try: + colormap = sources[0].colormap(1) + dst.write_colormap(1, colormap) + except ValueError: + pass
{"golden_diff": "diff --git a/rasterio/rio/merge.py b/rasterio/rio/merge.py\n--- a/rasterio/rio/merge.py\n+++ b/rasterio/rio/merge.py\n@@ -67,3 +67,10 @@\n \n with rasterio.open(output, 'w', **profile) as dst:\n dst.write(dest)\n+\n+ # uses the colormap in the first input raster.\n+ try:\n+ colormap = sources[0].colormap(1)\n+ dst.write_colormap(1, colormap)\n+ except ValueError:\n+ pass\n", "issue": "Copy colormap when rasters are merged\nI'm running `rio merge` over a few single band images that contain a colormap. During the merge, the colormap is not copied to the new raster. Can we modify `rio merge` to preserve the colormap?\n\nI have an initial pass of this change at:\n\nhttps://github.com/kapadia/rasterio/tree/rio-merge-colormap\n\n", "before_files": [{"content": "\"\"\"Merge command.\"\"\"\n\nimport logging\n\nimport click\nfrom cligj import files_inout_arg, format_opt\n\nfrom .helpers import resolve_inout\nfrom . import options\nimport rasterio\n\n\[email protected](short_help=\"Merge a stack of raster datasets.\")\n@files_inout_arg\[email protected]_opt\n@format_opt\[email protected]_opt\[email protected]_opt\[email protected]_opt\[email protected]_overwrite_opt\[email protected]('--precision', type=int, default=7,\n help=\"Number of decimal places of precision in alignment of \"\n \"pixels\")\[email protected]_options\[email protected]_context\ndef merge(ctx, files, output, driver, bounds, res, nodata, force_overwrite,\n precision, creation_options):\n \"\"\"Copy valid pixels from input files to an output file.\n\n All files must have the same number of bands, data type, and\n coordinate reference system.\n\n Input files are merged in their listed order using the reverse\n painter's algorithm. If the output file exists, its values will be\n overwritten by input values.\n\n Geospatial bounds and resolution of a new output file in the\n units of the input file coordinate reference system may be provided\n and are otherwise taken from the first input file.\n\n Note: --res changed from 2 parameters in 0.25.\n\n \\b\n --res 0.1 0.1 => --res 0.1 (square)\n --res 0.1 0.2 => --res 0.1 --res 0.2 (rectangular)\n \"\"\"\n from rasterio.merge import merge as merge_tool\n\n verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 1\n\n output, files = resolve_inout(\n files=files, output=output, force_overwrite=force_overwrite)\n\n with rasterio.Env(CPL_DEBUG=verbosity > 2):\n sources = [rasterio.open(f) for f in files]\n dest, output_transform = merge_tool(sources, bounds=bounds, res=res,\n nodata=nodata, precision=precision)\n\n profile = sources[0].profile\n profile.pop('affine')\n profile['transform'] = output_transform\n profile['height'] = dest.shape[1]\n profile['width'] = dest.shape[2]\n profile['driver'] = driver\n\n profile.update(**creation_options)\n\n with rasterio.open(output, 'w', **profile) as dst:\n dst.write(dest)\n", "path": "rasterio/rio/merge.py"}], "after_files": [{"content": "\"\"\"Merge command.\"\"\"\n\nimport logging\n\nimport click\nfrom cligj import files_inout_arg, format_opt\n\nfrom .helpers import resolve_inout\nfrom . import options\nimport rasterio\n\n\[email protected](short_help=\"Merge a stack of raster datasets.\")\n@files_inout_arg\[email protected]_opt\n@format_opt\[email protected]_opt\[email protected]_opt\[email protected]_opt\[email protected]_overwrite_opt\[email protected]('--precision', type=int, default=7,\n help=\"Number of decimal places of precision in alignment of \"\n \"pixels\")\[email protected]_options\[email protected]_context\ndef merge(ctx, files, output, driver, bounds, res, nodata, force_overwrite,\n precision, creation_options):\n \"\"\"Copy valid pixels from input files to an output file.\n\n All files must have the same number of bands, data type, and\n coordinate reference system.\n\n Input files are merged in their listed order using the reverse\n painter's algorithm. If the output file exists, its values will be\n overwritten by input values.\n\n Geospatial bounds and resolution of a new output file in the\n units of the input file coordinate reference system may be provided\n and are otherwise taken from the first input file.\n\n Note: --res changed from 2 parameters in 0.25.\n\n \\b\n --res 0.1 0.1 => --res 0.1 (square)\n --res 0.1 0.2 => --res 0.1 --res 0.2 (rectangular)\n \"\"\"\n from rasterio.merge import merge as merge_tool\n\n verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 1\n\n output, files = resolve_inout(\n files=files, output=output, force_overwrite=force_overwrite)\n\n with rasterio.Env(CPL_DEBUG=verbosity > 2):\n sources = [rasterio.open(f) for f in files]\n dest, output_transform = merge_tool(sources, bounds=bounds, res=res,\n nodata=nodata, precision=precision)\n\n profile = sources[0].profile\n profile.pop('affine')\n profile['transform'] = output_transform\n profile['height'] = dest.shape[1]\n profile['width'] = dest.shape[2]\n profile['driver'] = driver\n\n profile.update(**creation_options)\n\n with rasterio.open(output, 'w', **profile) as dst:\n dst.write(dest)\n\n # uses the colormap in the first input raster.\n try:\n colormap = sources[0].colormap(1)\n dst.write_colormap(1, colormap)\n except ValueError:\n pass\n", "path": "rasterio/rio/merge.py"}]}
1,023
129
gh_patches_debug_49630
rasdani/github-patches
git_diff
great-expectations__great_expectations-8512
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Not able to set persist to False using Spark Execution Engine Hey Guys, I was trying to migrate GX from 0.16.5 and am not being able to, cause apparently since GX 0.16.12 supposedly there was a fix for the persist parameter to work. The thing is that I wanted it to be False, but it seems the parameter is not taking effect. Anyone facing similar problems? I was following this guide [How to connect to in-memory data in a Spark dataframe | Great Expectations](https://docs.greatexpectations.io/docs/0.15.50/guides/connecting_to_your_data/in_memory/spark/), which seems to have been removed. Also tried with fluent approach and still failing. Is the property persist being considered/passed? Shouldn’t it be one of the parameters in add_or_update_spark ? More information: https://discourse.greatexpectations.io/t/not-able-to-set-persist-to-false-using-spark-execution-engine/1320?u=jose.correia --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `great_expectations/datasource/fluent/spark_datasource.py` Content: ``` 1 from __future__ import annotations 2 3 import logging 4 from pprint import pformat as pf 5 from typing import ( 6 TYPE_CHECKING, 7 ClassVar, 8 Dict, 9 Generic, 10 List, 11 Literal, 12 Optional, 13 Type, 14 TypeVar, 15 Union, 16 ) 17 18 import pydantic 19 from pydantic import StrictBool, StrictFloat, StrictInt, StrictStr 20 21 import great_expectations.exceptions as gx_exceptions 22 from great_expectations.compatibility.pyspark import DataFrame, pyspark 23 from great_expectations.core._docs_decorators import ( 24 deprecated_argument, 25 new_argument, 26 public_api, 27 ) 28 from great_expectations.core.batch_spec import RuntimeDataBatchSpec 29 from great_expectations.datasource.fluent import BatchRequest 30 from great_expectations.datasource.fluent.constants import ( 31 _DATA_CONNECTOR_NAME, 32 ) 33 from great_expectations.datasource.fluent.interfaces import ( 34 Batch, 35 DataAsset, 36 Datasource, 37 _DataAssetT, 38 ) 39 40 if TYPE_CHECKING: 41 from typing_extensions import TypeAlias 42 43 from great_expectations.datasource.fluent.interfaces import BatchMetadata 44 from great_expectations.execution_engine import SparkDFExecutionEngine 45 46 47 logger = logging.getLogger(__name__) 48 49 50 # this enables us to include dataframe in the json schema 51 _SparkDataFrameT = TypeVar("_SparkDataFrameT") 52 53 SparkConfig: TypeAlias = Dict[ 54 StrictStr, Union[StrictStr, StrictInt, StrictFloat, StrictBool] 55 ] 56 57 58 class SparkDatasourceError(Exception): 59 pass 60 61 62 class _SparkDatasource(Datasource): 63 # instance attributes 64 spark_config: Union[SparkConfig, None] = None 65 force_reuse_spark_context: bool = True 66 67 @staticmethod 68 def _update_asset_forward_refs(asset_type: Type[_DataAssetT]) -> None: 69 # Only update forward refs if pyspark types are available. 70 if pyspark: 71 asset_type.update_forward_refs() 72 73 # Abstract Methods 74 @property 75 def execution_engine_type(self) -> Type[SparkDFExecutionEngine]: 76 """Return the SparkDFExecutionEngine unless the override is set""" 77 from great_expectations.execution_engine.sparkdf_execution_engine import ( 78 SparkDFExecutionEngine, 79 ) 80 81 return SparkDFExecutionEngine 82 83 def test_connection(self, test_assets: bool = True) -> None: 84 """Test the connection for the _SparkDatasource. 85 86 Args: 87 test_assets: If assets have been passed to the _SparkDatasource, 88 an attempt can be made to test them as well. 89 90 Raises: 91 TestConnectionError: If the connection test fails. 92 """ 93 raise NotImplementedError( 94 """One needs to implement "test_connection" on a _SparkDatasource subclass.""" 95 ) 96 97 # End Abstract Methods 98 99 100 class DataFrameAsset(DataAsset, Generic[_SparkDataFrameT]): 101 # instance attributes 102 type: Literal["dataframe"] = "dataframe" 103 # TODO: <Alex>05/31/2023: Upon removal of deprecated "dataframe" argument to "PandasDatasource.add_dataframe_asset()", default can be deleted.</Alex> 104 dataframe: Optional[_SparkDataFrameT] = pydantic.Field( 105 default=None, exclude=True, repr=False 106 ) 107 108 class Config: 109 extra = pydantic.Extra.forbid 110 111 @pydantic.validator("dataframe") 112 def _validate_dataframe(cls, dataframe: DataFrame) -> DataFrame: 113 if not (DataFrame and isinstance(dataframe, DataFrame)): # type: ignore[truthy-function] 114 raise ValueError("dataframe must be of type pyspark.sql.DataFrame") 115 116 return dataframe 117 118 def test_connection(self) -> None: 119 ... 120 121 @property 122 def batch_request_options(self) -> tuple[str, ...]: 123 return tuple() 124 125 def _get_reader_method(self) -> str: 126 raise NotImplementedError( 127 """Spark DataFrameAsset does not implement "_get_reader_method()" method, because DataFrame is already available.""" 128 ) 129 130 def _get_reader_options_include(self) -> set[str]: 131 raise NotImplementedError( 132 """Spark DataFrameAsset does not implement "_get_reader_options_include()" method, because DataFrame is already available.""" 133 ) 134 135 @public_api 136 # TODO: <Alex>05/31/2023: Upon removal of deprecated "dataframe" argument to "PandasDatasource.add_dataframe_asset()", its validation code must be deleted.</Alex> 137 @new_argument( 138 argument_name="dataframe", 139 message='The "dataframe" argument is no longer part of "PandasDatasource.add_dataframe_asset()" method call; instead, "dataframe" is the required argument to "DataFrameAsset.build_batch_request()" method.', 140 version="0.16.15", 141 ) 142 def build_batch_request( 143 self, dataframe: Optional[_SparkDataFrameT] = None 144 ) -> BatchRequest: 145 """A batch request that can be used to obtain batches for this DataAsset. 146 147 Args: 148 dataframe: The Spark Dataframe containing the data for this DataFrame data asset. 149 150 Returns: 151 A BatchRequest object that can be used to obtain a batch list from a Datasource by calling the 152 get_batch_list_from_batch_request method. 153 """ 154 if dataframe is None: 155 df = self.dataframe 156 else: 157 df = dataframe 158 159 if df is None: 160 raise ValueError( 161 "Cannot build batch request for dataframe asset without a dataframe" 162 ) 163 164 self.dataframe = df 165 166 return BatchRequest( 167 datasource_name=self.datasource.name, 168 data_asset_name=self.name, 169 options={}, 170 ) 171 172 def _validate_batch_request(self, batch_request: BatchRequest) -> None: 173 """Validates the batch_request has the correct form. 174 175 Args: 176 batch_request: A batch request object to be validated. 177 """ 178 if not ( 179 batch_request.datasource_name == self.datasource.name 180 and batch_request.data_asset_name == self.name 181 and not batch_request.options 182 ): 183 expect_batch_request_form = BatchRequest( 184 datasource_name=self.datasource.name, 185 data_asset_name=self.name, 186 options={}, 187 batch_slice=batch_request._batch_slice_input, # type: ignore[attr-defined] 188 ) 189 raise gx_exceptions.InvalidBatchRequestError( 190 "BatchRequest should have form:\n" 191 f"{pf(expect_batch_request_form.dict())}\n" 192 f"but actually has form:\n{pf(batch_request.dict())}\n" 193 ) 194 195 def get_batch_list_from_batch_request( 196 self, batch_request: BatchRequest 197 ) -> list[Batch]: 198 self._validate_batch_request(batch_request) 199 200 batch_spec = RuntimeDataBatchSpec(batch_data=self.dataframe) 201 execution_engine: SparkDFExecutionEngine = ( 202 self.datasource.get_execution_engine() 203 ) 204 data, markers = execution_engine.get_batch_data_and_markers( 205 batch_spec=batch_spec 206 ) 207 208 # batch_definition (along with batch_spec and markers) is only here to satisfy a 209 # legacy constraint when computing usage statistics in a validator. We hope to remove 210 # it in the future. 211 # imports are done inline to prevent a circular dependency with core/batch.py 212 from great_expectations.core import IDDict 213 from great_expectations.core.batch import BatchDefinition 214 215 batch_definition = BatchDefinition( 216 datasource_name=self.datasource.name, 217 data_connector_name=_DATA_CONNECTOR_NAME, 218 data_asset_name=self.name, 219 batch_identifiers=IDDict(batch_request.options), 220 batch_spec_passthrough=None, 221 ) 222 223 batch_metadata: BatchMetadata = self._get_batch_metadata_from_batch_request( 224 batch_request=batch_request 225 ) 226 227 # Some pydantic annotations are postponed due to circular imports. 228 # Batch.update_forward_refs() will set the annotations before we 229 # instantiate the Batch class since we can import them in this scope. 230 Batch.update_forward_refs() 231 232 return [ 233 Batch( 234 datasource=self.datasource, 235 data_asset=self, 236 batch_request=batch_request, 237 data=data, 238 metadata=batch_metadata, 239 legacy_batch_markers=markers, 240 legacy_batch_spec=batch_spec, 241 legacy_batch_definition=batch_definition, 242 ) 243 ] 244 245 246 @public_api 247 class SparkDatasource(_SparkDatasource): 248 # class attributes 249 asset_types: ClassVar[List[Type[DataAsset]]] = [DataFrameAsset] 250 251 # instance attributes 252 type: Literal["spark"] = "spark" 253 254 assets: List[DataFrameAsset] = [] # type: ignore[assignment] 255 256 def test_connection(self, test_assets: bool = True) -> None: 257 ... 258 259 @public_api 260 @deprecated_argument( 261 argument_name="dataframe", 262 message='The "dataframe" argument is no longer part of "PandasDatasource.add_dataframe_asset()" method call; instead, "dataframe" is the required argument to "DataFrameAsset.build_batch_request()" method.', 263 version="0.16.15", 264 ) 265 def add_dataframe_asset( 266 self, 267 name: str, 268 dataframe: Optional[_SparkDataFrameT] = None, 269 batch_metadata: Optional[BatchMetadata] = None, 270 ) -> DataFrameAsset: 271 """Adds a Dataframe DataAsset to this SparkDatasource object. 272 273 Args: 274 name: The name of the DataFrame asset. This can be any arbitrary string. 275 dataframe: The Spark Dataframe containing the data for this DataFrame data asset. 276 batch_metadata: An arbitrary user defined dictionary with string keys which will get inherited by any 277 batches created from the asset. 278 279 Returns: 280 The DataFameAsset that has been added to this datasource. 281 """ 282 asset: DataFrameAsset = DataFrameAsset( 283 name=name, 284 batch_metadata=batch_metadata or {}, 285 ) 286 asset.dataframe = dataframe 287 return self._add_asset(asset=asset) 288 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/great_expectations/datasource/fluent/spark_datasource.py b/great_expectations/datasource/fluent/spark_datasource.py --- a/great_expectations/datasource/fluent/spark_datasource.py +++ b/great_expectations/datasource/fluent/spark_datasource.py @@ -63,6 +63,7 @@ # instance attributes spark_config: Union[SparkConfig, None] = None force_reuse_spark_context: bool = True + persist: bool = True @staticmethod def _update_asset_forward_refs(asset_type: Type[_DataAssetT]) -> None:
{"golden_diff": "diff --git a/great_expectations/datasource/fluent/spark_datasource.py b/great_expectations/datasource/fluent/spark_datasource.py\n--- a/great_expectations/datasource/fluent/spark_datasource.py\n+++ b/great_expectations/datasource/fluent/spark_datasource.py\n@@ -63,6 +63,7 @@\n # instance attributes\n spark_config: Union[SparkConfig, None] = None\n force_reuse_spark_context: bool = True\n+ persist: bool = True\n \n @staticmethod\n def _update_asset_forward_refs(asset_type: Type[_DataAssetT]) -> None:\n", "issue": "Not able to set persist to False using Spark Execution Engine \nHey Guys, I was trying to migrate GX from 0.16.5 and am not being able to, cause apparently since GX 0.16.12 supposedly there was a fix for the persist parameter to work. The thing is that I wanted it to be False, but it seems the parameter is not taking effect.\r\nAnyone facing similar problems?\r\nI was following this guide [How to connect to in-memory data in a Spark dataframe | Great Expectations](https://docs.greatexpectations.io/docs/0.15.50/guides/connecting_to_your_data/in_memory/spark/), which seems to have been removed. Also tried with fluent approach and still failing.\r\nIs the property persist being considered/passed? Shouldn\u2019t it be one of the parameters in add_or_update_spark ?\r\n\r\nMore information: https://discourse.greatexpectations.io/t/not-able-to-set-persist-to-false-using-spark-execution-engine/1320?u=jose.correia\n", "before_files": [{"content": "from __future__ import annotations\n\nimport logging\nfrom pprint import pformat as pf\nfrom typing import (\n TYPE_CHECKING,\n ClassVar,\n Dict,\n Generic,\n List,\n Literal,\n Optional,\n Type,\n TypeVar,\n Union,\n)\n\nimport pydantic\nfrom pydantic import StrictBool, StrictFloat, StrictInt, StrictStr\n\nimport great_expectations.exceptions as gx_exceptions\nfrom great_expectations.compatibility.pyspark import DataFrame, pyspark\nfrom great_expectations.core._docs_decorators import (\n deprecated_argument,\n new_argument,\n public_api,\n)\nfrom great_expectations.core.batch_spec import RuntimeDataBatchSpec\nfrom great_expectations.datasource.fluent import BatchRequest\nfrom great_expectations.datasource.fluent.constants import (\n _DATA_CONNECTOR_NAME,\n)\nfrom great_expectations.datasource.fluent.interfaces import (\n Batch,\n DataAsset,\n Datasource,\n _DataAssetT,\n)\n\nif TYPE_CHECKING:\n from typing_extensions import TypeAlias\n\n from great_expectations.datasource.fluent.interfaces import BatchMetadata\n from great_expectations.execution_engine import SparkDFExecutionEngine\n\n\nlogger = logging.getLogger(__name__)\n\n\n# this enables us to include dataframe in the json schema\n_SparkDataFrameT = TypeVar(\"_SparkDataFrameT\")\n\nSparkConfig: TypeAlias = Dict[\n StrictStr, Union[StrictStr, StrictInt, StrictFloat, StrictBool]\n]\n\n\nclass SparkDatasourceError(Exception):\n pass\n\n\nclass _SparkDatasource(Datasource):\n # instance attributes\n spark_config: Union[SparkConfig, None] = None\n force_reuse_spark_context: bool = True\n\n @staticmethod\n def _update_asset_forward_refs(asset_type: Type[_DataAssetT]) -> None:\n # Only update forward refs if pyspark types are available.\n if pyspark:\n asset_type.update_forward_refs()\n\n # Abstract Methods\n @property\n def execution_engine_type(self) -> Type[SparkDFExecutionEngine]:\n \"\"\"Return the SparkDFExecutionEngine unless the override is set\"\"\"\n from great_expectations.execution_engine.sparkdf_execution_engine import (\n SparkDFExecutionEngine,\n )\n\n return SparkDFExecutionEngine\n\n def test_connection(self, test_assets: bool = True) -> None:\n \"\"\"Test the connection for the _SparkDatasource.\n\n Args:\n test_assets: If assets have been passed to the _SparkDatasource,\n an attempt can be made to test them as well.\n\n Raises:\n TestConnectionError: If the connection test fails.\n \"\"\"\n raise NotImplementedError(\n \"\"\"One needs to implement \"test_connection\" on a _SparkDatasource subclass.\"\"\"\n )\n\n # End Abstract Methods\n\n\nclass DataFrameAsset(DataAsset, Generic[_SparkDataFrameT]):\n # instance attributes\n type: Literal[\"dataframe\"] = \"dataframe\"\n # TODO: <Alex>05/31/2023: Upon removal of deprecated \"dataframe\" argument to \"PandasDatasource.add_dataframe_asset()\", default can be deleted.</Alex>\n dataframe: Optional[_SparkDataFrameT] = pydantic.Field(\n default=None, exclude=True, repr=False\n )\n\n class Config:\n extra = pydantic.Extra.forbid\n\n @pydantic.validator(\"dataframe\")\n def _validate_dataframe(cls, dataframe: DataFrame) -> DataFrame:\n if not (DataFrame and isinstance(dataframe, DataFrame)): # type: ignore[truthy-function]\n raise ValueError(\"dataframe must be of type pyspark.sql.DataFrame\")\n\n return dataframe\n\n def test_connection(self) -> None:\n ...\n\n @property\n def batch_request_options(self) -> tuple[str, ...]:\n return tuple()\n\n def _get_reader_method(self) -> str:\n raise NotImplementedError(\n \"\"\"Spark DataFrameAsset does not implement \"_get_reader_method()\" method, because DataFrame is already available.\"\"\"\n )\n\n def _get_reader_options_include(self) -> set[str]:\n raise NotImplementedError(\n \"\"\"Spark DataFrameAsset does not implement \"_get_reader_options_include()\" method, because DataFrame is already available.\"\"\"\n )\n\n @public_api\n # TODO: <Alex>05/31/2023: Upon removal of deprecated \"dataframe\" argument to \"PandasDatasource.add_dataframe_asset()\", its validation code must be deleted.</Alex>\n @new_argument(\n argument_name=\"dataframe\",\n message='The \"dataframe\" argument is no longer part of \"PandasDatasource.add_dataframe_asset()\" method call; instead, \"dataframe\" is the required argument to \"DataFrameAsset.build_batch_request()\" method.',\n version=\"0.16.15\",\n )\n def build_batch_request(\n self, dataframe: Optional[_SparkDataFrameT] = None\n ) -> BatchRequest:\n \"\"\"A batch request that can be used to obtain batches for this DataAsset.\n\n Args:\n dataframe: The Spark Dataframe containing the data for this DataFrame data asset.\n\n Returns:\n A BatchRequest object that can be used to obtain a batch list from a Datasource by calling the\n get_batch_list_from_batch_request method.\n \"\"\"\n if dataframe is None:\n df = self.dataframe\n else:\n df = dataframe\n\n if df is None:\n raise ValueError(\n \"Cannot build batch request for dataframe asset without a dataframe\"\n )\n\n self.dataframe = df\n\n return BatchRequest(\n datasource_name=self.datasource.name,\n data_asset_name=self.name,\n options={},\n )\n\n def _validate_batch_request(self, batch_request: BatchRequest) -> None:\n \"\"\"Validates the batch_request has the correct form.\n\n Args:\n batch_request: A batch request object to be validated.\n \"\"\"\n if not (\n batch_request.datasource_name == self.datasource.name\n and batch_request.data_asset_name == self.name\n and not batch_request.options\n ):\n expect_batch_request_form = BatchRequest(\n datasource_name=self.datasource.name,\n data_asset_name=self.name,\n options={},\n batch_slice=batch_request._batch_slice_input, # type: ignore[attr-defined]\n )\n raise gx_exceptions.InvalidBatchRequestError(\n \"BatchRequest should have form:\\n\"\n f\"{pf(expect_batch_request_form.dict())}\\n\"\n f\"but actually has form:\\n{pf(batch_request.dict())}\\n\"\n )\n\n def get_batch_list_from_batch_request(\n self, batch_request: BatchRequest\n ) -> list[Batch]:\n self._validate_batch_request(batch_request)\n\n batch_spec = RuntimeDataBatchSpec(batch_data=self.dataframe)\n execution_engine: SparkDFExecutionEngine = (\n self.datasource.get_execution_engine()\n )\n data, markers = execution_engine.get_batch_data_and_markers(\n batch_spec=batch_spec\n )\n\n # batch_definition (along with batch_spec and markers) is only here to satisfy a\n # legacy constraint when computing usage statistics in a validator. We hope to remove\n # it in the future.\n # imports are done inline to prevent a circular dependency with core/batch.py\n from great_expectations.core import IDDict\n from great_expectations.core.batch import BatchDefinition\n\n batch_definition = BatchDefinition(\n datasource_name=self.datasource.name,\n data_connector_name=_DATA_CONNECTOR_NAME,\n data_asset_name=self.name,\n batch_identifiers=IDDict(batch_request.options),\n batch_spec_passthrough=None,\n )\n\n batch_metadata: BatchMetadata = self._get_batch_metadata_from_batch_request(\n batch_request=batch_request\n )\n\n # Some pydantic annotations are postponed due to circular imports.\n # Batch.update_forward_refs() will set the annotations before we\n # instantiate the Batch class since we can import them in this scope.\n Batch.update_forward_refs()\n\n return [\n Batch(\n datasource=self.datasource,\n data_asset=self,\n batch_request=batch_request,\n data=data,\n metadata=batch_metadata,\n legacy_batch_markers=markers,\n legacy_batch_spec=batch_spec,\n legacy_batch_definition=batch_definition,\n )\n ]\n\n\n@public_api\nclass SparkDatasource(_SparkDatasource):\n # class attributes\n asset_types: ClassVar[List[Type[DataAsset]]] = [DataFrameAsset]\n\n # instance attributes\n type: Literal[\"spark\"] = \"spark\"\n\n assets: List[DataFrameAsset] = [] # type: ignore[assignment]\n\n def test_connection(self, test_assets: bool = True) -> None:\n ...\n\n @public_api\n @deprecated_argument(\n argument_name=\"dataframe\",\n message='The \"dataframe\" argument is no longer part of \"PandasDatasource.add_dataframe_asset()\" method call; instead, \"dataframe\" is the required argument to \"DataFrameAsset.build_batch_request()\" method.',\n version=\"0.16.15\",\n )\n def add_dataframe_asset(\n self,\n name: str,\n dataframe: Optional[_SparkDataFrameT] = None,\n batch_metadata: Optional[BatchMetadata] = None,\n ) -> DataFrameAsset:\n \"\"\"Adds a Dataframe DataAsset to this SparkDatasource object.\n\n Args:\n name: The name of the DataFrame asset. This can be any arbitrary string.\n dataframe: The Spark Dataframe containing the data for this DataFrame data asset.\n batch_metadata: An arbitrary user defined dictionary with string keys which will get inherited by any\n batches created from the asset.\n\n Returns:\n The DataFameAsset that has been added to this datasource.\n \"\"\"\n asset: DataFrameAsset = DataFrameAsset(\n name=name,\n batch_metadata=batch_metadata or {},\n )\n asset.dataframe = dataframe\n return self._add_asset(asset=asset)\n", "path": "great_expectations/datasource/fluent/spark_datasource.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport logging\nfrom pprint import pformat as pf\nfrom typing import (\n TYPE_CHECKING,\n ClassVar,\n Dict,\n Generic,\n List,\n Literal,\n Optional,\n Type,\n TypeVar,\n Union,\n)\n\nimport pydantic\nfrom pydantic import StrictBool, StrictFloat, StrictInt, StrictStr\n\nimport great_expectations.exceptions as gx_exceptions\nfrom great_expectations.compatibility.pyspark import DataFrame, pyspark\nfrom great_expectations.core._docs_decorators import (\n deprecated_argument,\n new_argument,\n public_api,\n)\nfrom great_expectations.core.batch_spec import RuntimeDataBatchSpec\nfrom great_expectations.datasource.fluent import BatchRequest\nfrom great_expectations.datasource.fluent.constants import (\n _DATA_CONNECTOR_NAME,\n)\nfrom great_expectations.datasource.fluent.interfaces import (\n Batch,\n DataAsset,\n Datasource,\n _DataAssetT,\n)\n\nif TYPE_CHECKING:\n from typing_extensions import TypeAlias\n\n from great_expectations.datasource.fluent.interfaces import BatchMetadata\n from great_expectations.execution_engine import SparkDFExecutionEngine\n\n\nlogger = logging.getLogger(__name__)\n\n\n# this enables us to include dataframe in the json schema\n_SparkDataFrameT = TypeVar(\"_SparkDataFrameT\")\n\nSparkConfig: TypeAlias = Dict[\n StrictStr, Union[StrictStr, StrictInt, StrictFloat, StrictBool]\n]\n\n\nclass SparkDatasourceError(Exception):\n pass\n\n\nclass _SparkDatasource(Datasource):\n # instance attributes\n spark_config: Union[SparkConfig, None] = None\n force_reuse_spark_context: bool = True\n persist: bool = True\n\n @staticmethod\n def _update_asset_forward_refs(asset_type: Type[_DataAssetT]) -> None:\n # Only update forward refs if pyspark types are available.\n if pyspark:\n asset_type.update_forward_refs()\n\n # Abstract Methods\n @property\n def execution_engine_type(self) -> Type[SparkDFExecutionEngine]:\n \"\"\"Return the SparkDFExecutionEngine unless the override is set\"\"\"\n from great_expectations.execution_engine.sparkdf_execution_engine import (\n SparkDFExecutionEngine,\n )\n\n return SparkDFExecutionEngine\n\n def test_connection(self, test_assets: bool = True) -> None:\n \"\"\"Test the connection for the _SparkDatasource.\n\n Args:\n test_assets: If assets have been passed to the _SparkDatasource,\n an attempt can be made to test them as well.\n\n Raises:\n TestConnectionError: If the connection test fails.\n \"\"\"\n raise NotImplementedError(\n \"\"\"One needs to implement \"test_connection\" on a _SparkDatasource subclass.\"\"\"\n )\n\n # End Abstract Methods\n\n\nclass DataFrameAsset(DataAsset, Generic[_SparkDataFrameT]):\n # instance attributes\n type: Literal[\"dataframe\"] = \"dataframe\"\n # TODO: <Alex>05/31/2023: Upon removal of deprecated \"dataframe\" argument to \"PandasDatasource.add_dataframe_asset()\", default can be deleted.</Alex>\n dataframe: Optional[_SparkDataFrameT] = pydantic.Field(\n default=None, exclude=True, repr=False\n )\n\n class Config:\n extra = pydantic.Extra.forbid\n\n @pydantic.validator(\"dataframe\")\n def _validate_dataframe(cls, dataframe: DataFrame) -> DataFrame:\n if not (DataFrame and isinstance(dataframe, DataFrame)): # type: ignore[truthy-function]\n raise ValueError(\"dataframe must be of type pyspark.sql.DataFrame\")\n\n return dataframe\n\n def test_connection(self) -> None:\n ...\n\n @property\n def batch_request_options(self) -> tuple[str, ...]:\n return tuple()\n\n def _get_reader_method(self) -> str:\n raise NotImplementedError(\n \"\"\"Spark DataFrameAsset does not implement \"_get_reader_method()\" method, because DataFrame is already available.\"\"\"\n )\n\n def _get_reader_options_include(self) -> set[str]:\n raise NotImplementedError(\n \"\"\"Spark DataFrameAsset does not implement \"_get_reader_options_include()\" method, because DataFrame is already available.\"\"\"\n )\n\n @public_api\n # TODO: <Alex>05/31/2023: Upon removal of deprecated \"dataframe\" argument to \"PandasDatasource.add_dataframe_asset()\", its validation code must be deleted.</Alex>\n @new_argument(\n argument_name=\"dataframe\",\n message='The \"dataframe\" argument is no longer part of \"PandasDatasource.add_dataframe_asset()\" method call; instead, \"dataframe\" is the required argument to \"DataFrameAsset.build_batch_request()\" method.',\n version=\"0.16.15\",\n )\n def build_batch_request(\n self, dataframe: Optional[_SparkDataFrameT] = None\n ) -> BatchRequest:\n \"\"\"A batch request that can be used to obtain batches for this DataAsset.\n\n Args:\n dataframe: The Spark Dataframe containing the data for this DataFrame data asset.\n\n Returns:\n A BatchRequest object that can be used to obtain a batch list from a Datasource by calling the\n get_batch_list_from_batch_request method.\n \"\"\"\n if dataframe is None:\n df = self.dataframe\n else:\n df = dataframe\n\n if df is None:\n raise ValueError(\n \"Cannot build batch request for dataframe asset without a dataframe\"\n )\n\n self.dataframe = df\n\n return BatchRequest(\n datasource_name=self.datasource.name,\n data_asset_name=self.name,\n options={},\n )\n\n def _validate_batch_request(self, batch_request: BatchRequest) -> None:\n \"\"\"Validates the batch_request has the correct form.\n\n Args:\n batch_request: A batch request object to be validated.\n \"\"\"\n if not (\n batch_request.datasource_name == self.datasource.name\n and batch_request.data_asset_name == self.name\n and not batch_request.options\n ):\n expect_batch_request_form = BatchRequest(\n datasource_name=self.datasource.name,\n data_asset_name=self.name,\n options={},\n batch_slice=batch_request._batch_slice_input, # type: ignore[attr-defined]\n )\n raise gx_exceptions.InvalidBatchRequestError(\n \"BatchRequest should have form:\\n\"\n f\"{pf(expect_batch_request_form.dict())}\\n\"\n f\"but actually has form:\\n{pf(batch_request.dict())}\\n\"\n )\n\n def get_batch_list_from_batch_request(\n self, batch_request: BatchRequest\n ) -> list[Batch]:\n self._validate_batch_request(batch_request)\n\n batch_spec = RuntimeDataBatchSpec(batch_data=self.dataframe)\n execution_engine: SparkDFExecutionEngine = (\n self.datasource.get_execution_engine()\n )\n data, markers = execution_engine.get_batch_data_and_markers(\n batch_spec=batch_spec\n )\n\n # batch_definition (along with batch_spec and markers) is only here to satisfy a\n # legacy constraint when computing usage statistics in a validator. We hope to remove\n # it in the future.\n # imports are done inline to prevent a circular dependency with core/batch.py\n from great_expectations.core import IDDict\n from great_expectations.core.batch import BatchDefinition\n\n batch_definition = BatchDefinition(\n datasource_name=self.datasource.name,\n data_connector_name=_DATA_CONNECTOR_NAME,\n data_asset_name=self.name,\n batch_identifiers=IDDict(batch_request.options),\n batch_spec_passthrough=None,\n )\n\n batch_metadata: BatchMetadata = self._get_batch_metadata_from_batch_request(\n batch_request=batch_request\n )\n\n # Some pydantic annotations are postponed due to circular imports.\n # Batch.update_forward_refs() will set the annotations before we\n # instantiate the Batch class since we can import them in this scope.\n Batch.update_forward_refs()\n\n return [\n Batch(\n datasource=self.datasource,\n data_asset=self,\n batch_request=batch_request,\n data=data,\n metadata=batch_metadata,\n legacy_batch_markers=markers,\n legacy_batch_spec=batch_spec,\n legacy_batch_definition=batch_definition,\n )\n ]\n\n\n@public_api\nclass SparkDatasource(_SparkDatasource):\n # class attributes\n asset_types: ClassVar[List[Type[DataAsset]]] = [DataFrameAsset]\n\n # instance attributes\n type: Literal[\"spark\"] = \"spark\"\n\n assets: List[DataFrameAsset] = [] # type: ignore[assignment]\n\n def test_connection(self, test_assets: bool = True) -> None:\n ...\n\n @public_api\n @deprecated_argument(\n argument_name=\"dataframe\",\n message='The \"dataframe\" argument is no longer part of \"PandasDatasource.add_dataframe_asset()\" method call; instead, \"dataframe\" is the required argument to \"DataFrameAsset.build_batch_request()\" method.',\n version=\"0.16.15\",\n )\n def add_dataframe_asset(\n self,\n name: str,\n dataframe: Optional[_SparkDataFrameT] = None,\n batch_metadata: Optional[BatchMetadata] = None,\n ) -> DataFrameAsset:\n \"\"\"Adds a Dataframe DataAsset to this SparkDatasource object.\n\n Args:\n name: The name of the DataFrame asset. This can be any arbitrary string.\n dataframe: The Spark Dataframe containing the data for this DataFrame data asset.\n batch_metadata: An arbitrary user defined dictionary with string keys which will get inherited by any\n batches created from the asset.\n\n Returns:\n The DataFameAsset that has been added to this datasource.\n \"\"\"\n asset: DataFrameAsset = DataFrameAsset(\n name=name,\n batch_metadata=batch_metadata or {},\n )\n asset.dataframe = dataframe\n return self._add_asset(asset=asset)\n", "path": "great_expectations/datasource/fluent/spark_datasource.py"}]}
3,327
142
gh_patches_debug_36937
rasdani/github-patches
git_diff
comic__grand-challenge.org-1923
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `get_follow_object_pk` errors out if `obj.follow_object` is `None` Occurs when the follow object has been deleted and the follow is not cleaned up. See https://sentry.io/organizations/grand-challenge/issues/2511041483/?project=303639&query=is%3Aunresolved --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `app/grandchallenge/notifications/signals.py` Content: ``` 1 from actstream import action 2 from actstream.actions import follow 3 from actstream.models import Action, Follow, followers 4 from django.db.models.signals import post_save 5 from django.dispatch import receiver 6 from guardian.shortcuts import assign_perm 7 from machina.apps.forum_conversation.models import Post, Topic 8 9 from grandchallenge.notifications.models import Notification 10 11 12 @receiver(post_save, sender=Topic) 13 def create_topic_action(sender, *, instance, created, **_): 14 if created: 15 follow( 16 user=instance.poster, 17 obj=instance, 18 actor_only=False, 19 send_action=False, 20 ) 21 22 if int(instance.type) == int(Topic.TOPIC_ANNOUNCE): 23 action.send( 24 sender=instance.poster, 25 verb="announced", 26 action_object=instance, 27 target=instance.forum, 28 context_class="info", 29 ) 30 else: 31 action.send( 32 sender=instance.poster, 33 verb="posted", 34 action_object=instance, 35 target=instance.forum, 36 ) 37 38 39 @receiver(post_save, sender=Post) 40 def create_post_action(sender, *, instance, created, **_): 41 if ( 42 created 43 and instance.topic.posts_count != 0 44 and not instance.is_topic_head 45 ): 46 follow( 47 user=instance.poster, 48 obj=instance.topic, 49 actor_only=False, 50 send_action=False, 51 ) 52 53 action.send( 54 sender=instance.poster, verb="replied to", target=instance.topic, 55 ) 56 57 58 @receiver(post_save, sender=Action) 59 def create_notification(*, instance, **_): 60 if instance.target: 61 follower_group = followers(instance.target) 62 for follower in follower_group: 63 # only send notifications to followers other than the poster 64 if follower != instance.actor: 65 Notification(user=follower, action=instance).save() 66 else: 67 follower_group = followers(instance.actor) 68 for follower in follower_group: 69 # only send notifications to followers other than the poster 70 if follower != instance.actor: 71 Notification(user=follower, action=instance).save() 72 73 74 @receiver(post_save, sender=Follow) 75 def add_permissions(*, instance, created, **_): 76 if created: 77 assign_perm("change_follow", instance.user, instance) 78 assign_perm("delete_follow", instance.user, instance) 79 assign_perm("view_follow", instance.user, instance) 80 ``` Path: `app/grandchallenge/forum_conversation/templatetags/forum_extras.py` Content: ``` 1 from actstream.models import Follow 2 from django import template 3 from django.contrib.contenttypes.models import ContentType 4 5 from grandchallenge.notifications.forms import FollowForm 6 7 register = template.Library() 8 9 10 @register.simple_tag 11 def get_follow_object_pk(user, follow_object): 12 object_follows_for_user = Follow.objects.filter( 13 user=user, 14 content_type=ContentType.objects.get( 15 app_label=follow_object._meta.app_label, 16 model=follow_object._meta.model_name, 17 ), 18 ).all() 19 current_follow_object = [] 20 for obj in object_follows_for_user: 21 if obj.follow_object.id == follow_object.id: 22 current_follow_object = obj.pk 23 return current_follow_object 24 25 26 @register.simple_tag 27 def follow_form(*, user, object_id, content_type): 28 return FollowForm( 29 user=user, 30 initial={ 31 "object_id": object_id, 32 "content_type": content_type, 33 "actor_only": False, 34 }, 35 ) 36 37 38 @register.simple_tag() 39 def get_content_type(follow_object): 40 ct = ContentType.objects.get( 41 app_label=follow_object._meta.app_label, 42 model=follow_object._meta.model_name, 43 ) 44 return ct 45 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/app/grandchallenge/forum_conversation/templatetags/forum_extras.py b/app/grandchallenge/forum_conversation/templatetags/forum_extras.py --- a/app/grandchallenge/forum_conversation/templatetags/forum_extras.py +++ b/app/grandchallenge/forum_conversation/templatetags/forum_extras.py @@ -16,10 +16,16 @@ model=follow_object._meta.model_name, ), ).all() - current_follow_object = [] - for obj in object_follows_for_user: - if obj.follow_object.id == follow_object.id: - current_follow_object = obj.pk + + if not object_follows_for_user: + current_follow_object = [] + else: + current_follow_object = [] + for obj in object_follows_for_user: + if not obj.follow_object: + continue + elif obj.follow_object.id == follow_object.id: + current_follow_object = obj.pk return current_follow_object @@ -37,8 +43,11 @@ @register.simple_tag() def get_content_type(follow_object): - ct = ContentType.objects.get( - app_label=follow_object._meta.app_label, - model=follow_object._meta.model_name, - ) + try: + ct = ContentType.objects.get( + app_label=follow_object._meta.app_label, + model=follow_object._meta.model_name, + ) + except AttributeError: + ct = None return ct diff --git a/app/grandchallenge/notifications/signals.py b/app/grandchallenge/notifications/signals.py --- a/app/grandchallenge/notifications/signals.py +++ b/app/grandchallenge/notifications/signals.py @@ -1,9 +1,11 @@ from actstream import action from actstream.actions import follow from actstream.models import Action, Follow, followers -from django.db.models.signals import post_save +from django.contrib.contenttypes.models import ContentType +from django.db.models.signals import post_save, pre_delete from django.dispatch import receiver from guardian.shortcuts import assign_perm +from machina.apps.forum.models import Forum from machina.apps.forum_conversation.models import Post, Topic from grandchallenge.notifications.models import Notification @@ -77,3 +79,13 @@ assign_perm("change_follow", instance.user, instance) assign_perm("delete_follow", instance.user, instance) assign_perm("view_follow", instance.user, instance) + + +@receiver(pre_delete, sender=Topic) +@receiver(pre_delete, sender=Forum) +@receiver(pre_delete, sender=Post) +def clean_up_follows(*, instance, **_): + ct = ContentType.objects.filter( + app_label=instance._meta.app_label, model=instance._meta.model_name + ).get() + Follow.objects.filter(content_type=ct, object_id=instance.pk).delete()
{"golden_diff": "diff --git a/app/grandchallenge/forum_conversation/templatetags/forum_extras.py b/app/grandchallenge/forum_conversation/templatetags/forum_extras.py\n--- a/app/grandchallenge/forum_conversation/templatetags/forum_extras.py\n+++ b/app/grandchallenge/forum_conversation/templatetags/forum_extras.py\n@@ -16,10 +16,16 @@\n model=follow_object._meta.model_name,\r\n ),\r\n ).all()\r\n- current_follow_object = []\r\n- for obj in object_follows_for_user:\r\n- if obj.follow_object.id == follow_object.id:\r\n- current_follow_object = obj.pk\r\n+\r\n+ if not object_follows_for_user:\r\n+ current_follow_object = []\r\n+ else:\r\n+ current_follow_object = []\r\n+ for obj in object_follows_for_user:\r\n+ if not obj.follow_object:\r\n+ continue\r\n+ elif obj.follow_object.id == follow_object.id:\r\n+ current_follow_object = obj.pk\r\n return current_follow_object\r\n \r\n \r\n@@ -37,8 +43,11 @@\n \r\n @register.simple_tag()\r\n def get_content_type(follow_object):\r\n- ct = ContentType.objects.get(\r\n- app_label=follow_object._meta.app_label,\r\n- model=follow_object._meta.model_name,\r\n- )\r\n+ try:\r\n+ ct = ContentType.objects.get(\r\n+ app_label=follow_object._meta.app_label,\r\n+ model=follow_object._meta.model_name,\r\n+ )\r\n+ except AttributeError:\r\n+ ct = None\r\n return ct\r\ndiff --git a/app/grandchallenge/notifications/signals.py b/app/grandchallenge/notifications/signals.py\n--- a/app/grandchallenge/notifications/signals.py\n+++ b/app/grandchallenge/notifications/signals.py\n@@ -1,9 +1,11 @@\n from actstream import action\n from actstream.actions import follow\n from actstream.models import Action, Follow, followers\n-from django.db.models.signals import post_save\n+from django.contrib.contenttypes.models import ContentType\n+from django.db.models.signals import post_save, pre_delete\n from django.dispatch import receiver\n from guardian.shortcuts import assign_perm\n+from machina.apps.forum.models import Forum\n from machina.apps.forum_conversation.models import Post, Topic\n \n from grandchallenge.notifications.models import Notification\n@@ -77,3 +79,13 @@\n assign_perm(\"change_follow\", instance.user, instance)\n assign_perm(\"delete_follow\", instance.user, instance)\n assign_perm(\"view_follow\", instance.user, instance)\n+\n+\n+@receiver(pre_delete, sender=Topic)\n+@receiver(pre_delete, sender=Forum)\n+@receiver(pre_delete, sender=Post)\n+def clean_up_follows(*, instance, **_):\n+ ct = ContentType.objects.filter(\n+ app_label=instance._meta.app_label, model=instance._meta.model_name\n+ ).get()\n+ Follow.objects.filter(content_type=ct, object_id=instance.pk).delete()\n", "issue": "`get_follow_object_pk` errors out if `obj.follow_object` is `None`\nOccurs when the follow object has been deleted and the follow is not cleaned up. See https://sentry.io/organizations/grand-challenge/issues/2511041483/?project=303639&query=is%3Aunresolved\n", "before_files": [{"content": "from actstream import action\nfrom actstream.actions import follow\nfrom actstream.models import Action, Follow, followers\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom guardian.shortcuts import assign_perm\nfrom machina.apps.forum_conversation.models import Post, Topic\n\nfrom grandchallenge.notifications.models import Notification\n\n\n@receiver(post_save, sender=Topic)\ndef create_topic_action(sender, *, instance, created, **_):\n if created:\n follow(\n user=instance.poster,\n obj=instance,\n actor_only=False,\n send_action=False,\n )\n\n if int(instance.type) == int(Topic.TOPIC_ANNOUNCE):\n action.send(\n sender=instance.poster,\n verb=\"announced\",\n action_object=instance,\n target=instance.forum,\n context_class=\"info\",\n )\n else:\n action.send(\n sender=instance.poster,\n verb=\"posted\",\n action_object=instance,\n target=instance.forum,\n )\n\n\n@receiver(post_save, sender=Post)\ndef create_post_action(sender, *, instance, created, **_):\n if (\n created\n and instance.topic.posts_count != 0\n and not instance.is_topic_head\n ):\n follow(\n user=instance.poster,\n obj=instance.topic,\n actor_only=False,\n send_action=False,\n )\n\n action.send(\n sender=instance.poster, verb=\"replied to\", target=instance.topic,\n )\n\n\n@receiver(post_save, sender=Action)\ndef create_notification(*, instance, **_):\n if instance.target:\n follower_group = followers(instance.target)\n for follower in follower_group:\n # only send notifications to followers other than the poster\n if follower != instance.actor:\n Notification(user=follower, action=instance).save()\n else:\n follower_group = followers(instance.actor)\n for follower in follower_group:\n # only send notifications to followers other than the poster\n if follower != instance.actor:\n Notification(user=follower, action=instance).save()\n\n\n@receiver(post_save, sender=Follow)\ndef add_permissions(*, instance, created, **_):\n if created:\n assign_perm(\"change_follow\", instance.user, instance)\n assign_perm(\"delete_follow\", instance.user, instance)\n assign_perm(\"view_follow\", instance.user, instance)\n", "path": "app/grandchallenge/notifications/signals.py"}, {"content": "from actstream.models import Follow\r\nfrom django import template\r\nfrom django.contrib.contenttypes.models import ContentType\r\n\r\nfrom grandchallenge.notifications.forms import FollowForm\r\n\r\nregister = template.Library()\r\n\r\n\r\[email protected]_tag\r\ndef get_follow_object_pk(user, follow_object):\r\n object_follows_for_user = Follow.objects.filter(\r\n user=user,\r\n content_type=ContentType.objects.get(\r\n app_label=follow_object._meta.app_label,\r\n model=follow_object._meta.model_name,\r\n ),\r\n ).all()\r\n current_follow_object = []\r\n for obj in object_follows_for_user:\r\n if obj.follow_object.id == follow_object.id:\r\n current_follow_object = obj.pk\r\n return current_follow_object\r\n\r\n\r\[email protected]_tag\r\ndef follow_form(*, user, object_id, content_type):\r\n return FollowForm(\r\n user=user,\r\n initial={\r\n \"object_id\": object_id,\r\n \"content_type\": content_type,\r\n \"actor_only\": False,\r\n },\r\n )\r\n\r\n\r\[email protected]_tag()\r\ndef get_content_type(follow_object):\r\n ct = ContentType.objects.get(\r\n app_label=follow_object._meta.app_label,\r\n model=follow_object._meta.model_name,\r\n )\r\n return ct\r\n", "path": "app/grandchallenge/forum_conversation/templatetags/forum_extras.py"}], "after_files": [{"content": "from actstream import action\nfrom actstream.actions import follow\nfrom actstream.models import Action, Follow, followers\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.db.models.signals import post_save, pre_delete\nfrom django.dispatch import receiver\nfrom guardian.shortcuts import assign_perm\nfrom machina.apps.forum.models import Forum\nfrom machina.apps.forum_conversation.models import Post, Topic\n\nfrom grandchallenge.notifications.models import Notification\n\n\n@receiver(post_save, sender=Topic)\ndef create_topic_action(sender, *, instance, created, **_):\n if created:\n follow(\n user=instance.poster,\n obj=instance,\n actor_only=False,\n send_action=False,\n )\n\n if int(instance.type) == int(Topic.TOPIC_ANNOUNCE):\n action.send(\n sender=instance.poster,\n verb=\"announced\",\n action_object=instance,\n target=instance.forum,\n context_class=\"info\",\n )\n else:\n action.send(\n sender=instance.poster,\n verb=\"posted\",\n action_object=instance,\n target=instance.forum,\n )\n\n\n@receiver(post_save, sender=Post)\ndef create_post_action(sender, *, instance, created, **_):\n if (\n created\n and instance.topic.posts_count != 0\n and not instance.is_topic_head\n ):\n follow(\n user=instance.poster,\n obj=instance.topic,\n actor_only=False,\n send_action=False,\n )\n\n action.send(\n sender=instance.poster, verb=\"replied to\", target=instance.topic,\n )\n\n\n@receiver(post_save, sender=Action)\ndef create_notification(*, instance, **_):\n if instance.target:\n follower_group = followers(instance.target)\n for follower in follower_group:\n # only send notifications to followers other than the poster\n if follower != instance.actor:\n Notification(user=follower, action=instance).save()\n else:\n follower_group = followers(instance.actor)\n for follower in follower_group:\n # only send notifications to followers other than the poster\n if follower != instance.actor:\n Notification(user=follower, action=instance).save()\n\n\n@receiver(post_save, sender=Follow)\ndef add_permissions(*, instance, created, **_):\n if created:\n assign_perm(\"change_follow\", instance.user, instance)\n assign_perm(\"delete_follow\", instance.user, instance)\n assign_perm(\"view_follow\", instance.user, instance)\n\n\n@receiver(pre_delete, sender=Topic)\n@receiver(pre_delete, sender=Forum)\n@receiver(pre_delete, sender=Post)\ndef clean_up_follows(*, instance, **_):\n ct = ContentType.objects.filter(\n app_label=instance._meta.app_label, model=instance._meta.model_name\n ).get()\n Follow.objects.filter(content_type=ct, object_id=instance.pk).delete()\n", "path": "app/grandchallenge/notifications/signals.py"}, {"content": "from actstream.models import Follow\r\nfrom django import template\r\nfrom django.contrib.contenttypes.models import ContentType\r\n\r\nfrom grandchallenge.notifications.forms import FollowForm\r\n\r\nregister = template.Library()\r\n\r\n\r\[email protected]_tag\r\ndef get_follow_object_pk(user, follow_object):\r\n object_follows_for_user = Follow.objects.filter(\r\n user=user,\r\n content_type=ContentType.objects.get(\r\n app_label=follow_object._meta.app_label,\r\n model=follow_object._meta.model_name,\r\n ),\r\n ).all()\r\n\r\n if not object_follows_for_user:\r\n current_follow_object = []\r\n else:\r\n current_follow_object = []\r\n for obj in object_follows_for_user:\r\n if not obj.follow_object:\r\n continue\r\n elif obj.follow_object.id == follow_object.id:\r\n current_follow_object = obj.pk\r\n return current_follow_object\r\n\r\n\r\[email protected]_tag\r\ndef follow_form(*, user, object_id, content_type):\r\n return FollowForm(\r\n user=user,\r\n initial={\r\n \"object_id\": object_id,\r\n \"content_type\": content_type,\r\n \"actor_only\": False,\r\n },\r\n )\r\n\r\n\r\[email protected]_tag()\r\ndef get_content_type(follow_object):\r\n try:\r\n ct = ContentType.objects.get(\r\n app_label=follow_object._meta.app_label,\r\n model=follow_object._meta.model_name,\r\n )\r\n except AttributeError:\r\n ct = None\r\n return ct\r\n", "path": "app/grandchallenge/forum_conversation/templatetags/forum_extras.py"}]}
1,349
645
gh_patches_debug_23374
rasdani/github-patches
git_diff
gratipay__gratipay.com-4390
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Localhost not loading in Firefox Just found this problem in Firefox while setting up Gratipay locally on @dmk246 laptop. For some reason the page never loads when you `make run` and try to open localhost:8537 in Firefox it hangs. We believe it is because `gratipay.report_uri.io` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `gratipay/security/__init__.py` Content: ``` 1 from aspen import Response 2 3 4 _requesting_asset = lambda r: r.path.raw.startswith('/assets/') 5 6 7 def only_allow_certain_methods(request): 8 method = request.method.upper() 9 whitelist = ('GET', 'HEAD') if _requesting_asset(request) else ('GET', 'HEAD', 'POST') 10 # POSTing to /assets/ interferes with the csrf.* functions if we're not careful 11 if method not in whitelist: 12 raise Response(405) 13 14 15 def add_headers_to_response(response): 16 """Add security headers. 17 """ 18 19 # http://en.wikipedia.org/wiki/Clickjacking#X-Frame-Options 20 if 'X-Frame-Options' not in response.headers: 21 response.headers['X-Frame-Options'] = 'SAMEORIGIN' 22 elif response.headers['X-Frame-Options'] == 'ALLOWALL': 23 24 # ALLOWALL is non-standard. It's useful as a signal from a simplate 25 # that it doesn't want X-Frame-Options set at all, but because it's 26 # non-standard we don't send it. Instead we unset the header entirely, 27 # which has the desired effect of allowing framing indiscriminately. 28 # 29 # Refs.: 30 # 31 # http://en.wikipedia.org/wiki/Clickjacking#X-Frame-Options 32 # http://ipsec.pl/node/1094 33 34 del response.headers['X-Frame-Options'] 35 36 # https://www.owasp.org/index.php/List_of_useful_HTTP_headers 37 if 'X-Content-Type-Options' not in response.headers: 38 response.headers['X-Content-Type-Options'] = 'nosniff' 39 40 # https://www.owasp.org/index.php/List_of_useful_HTTP_headers 41 if 'X-XSS-Protection' not in response.headers: 42 response.headers['X-XSS-Protection'] = '1; mode=block' 43 44 # https://www.w3.org/TR/referrer-policy/ 45 if 'Referrer-Policy' not in response.headers: 46 response.headers['Referrer-Policy'] = 'strict-origin-when-cross-origin' 47 48 # https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP 49 if 'content-security-policy-report-only' not in response.headers: 50 response.headers['content-security-policy-report-only'] = ( 51 "default-src 'self';" 52 "script-src 'self' assets.gratipay.com 'unsafe-inline';" 53 "style-src 'self' assets.gratipay.com downloads.gratipay.com cloud.typography.com;" 54 "img-src *;" 55 "font-src 'self' assets.gratipay.com cloud.typography.com data:;" 56 "upgrade-insecure-requests;" 57 "block-all-mixed-content;" 58 "reflected-xss block;" 59 "report-uri https://gratipay.report-uri.io/r/default/csp/reportOnly;" 60 ) 61 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/gratipay/security/__init__.py b/gratipay/security/__init__.py --- a/gratipay/security/__init__.py +++ b/gratipay/security/__init__.py @@ -43,7 +43,8 @@ # https://www.w3.org/TR/referrer-policy/ if 'Referrer-Policy' not in response.headers: - response.headers['Referrer-Policy'] = 'strict-origin-when-cross-origin' + response.headers['Referrer-Policy'] = \ + 'no-referrer-when-downgrade, strict-origin-when-cross-origin' # https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP if 'content-security-policy-report-only' not in response.headers: @@ -53,8 +54,6 @@ "style-src 'self' assets.gratipay.com downloads.gratipay.com cloud.typography.com;" "img-src *;" "font-src 'self' assets.gratipay.com cloud.typography.com data:;" - "upgrade-insecure-requests;" "block-all-mixed-content;" - "reflected-xss block;" "report-uri https://gratipay.report-uri.io/r/default/csp/reportOnly;" )
{"golden_diff": "diff --git a/gratipay/security/__init__.py b/gratipay/security/__init__.py\n--- a/gratipay/security/__init__.py\n+++ b/gratipay/security/__init__.py\n@@ -43,7 +43,8 @@\n \n # https://www.w3.org/TR/referrer-policy/\n if 'Referrer-Policy' not in response.headers:\n- response.headers['Referrer-Policy'] = 'strict-origin-when-cross-origin'\n+ response.headers['Referrer-Policy'] = \\\n+ 'no-referrer-when-downgrade, strict-origin-when-cross-origin'\n \n # https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP\n if 'content-security-policy-report-only' not in response.headers:\n@@ -53,8 +54,6 @@\n \"style-src 'self' assets.gratipay.com downloads.gratipay.com cloud.typography.com;\"\n \"img-src *;\"\n \"font-src 'self' assets.gratipay.com cloud.typography.com data:;\"\n- \"upgrade-insecure-requests;\"\n \"block-all-mixed-content;\"\n- \"reflected-xss block;\"\n \"report-uri https://gratipay.report-uri.io/r/default/csp/reportOnly;\"\n )\n", "issue": "Localhost not loading in Firefox\nJust found this problem in Firefox while setting up Gratipay locally on @dmk246 laptop. For some reason the page never loads when you `make run` and try to open localhost:8537 in Firefox it hangs. We believe it is because `gratipay.report_uri.io` \n", "before_files": [{"content": "from aspen import Response\n\n\n_requesting_asset = lambda r: r.path.raw.startswith('/assets/')\n\n\ndef only_allow_certain_methods(request):\n method = request.method.upper()\n whitelist = ('GET', 'HEAD') if _requesting_asset(request) else ('GET', 'HEAD', 'POST')\n # POSTing to /assets/ interferes with the csrf.* functions if we're not careful\n if method not in whitelist:\n raise Response(405)\n\n\ndef add_headers_to_response(response):\n \"\"\"Add security headers.\n \"\"\"\n\n # http://en.wikipedia.org/wiki/Clickjacking#X-Frame-Options\n if 'X-Frame-Options' not in response.headers:\n response.headers['X-Frame-Options'] = 'SAMEORIGIN'\n elif response.headers['X-Frame-Options'] == 'ALLOWALL':\n\n # ALLOWALL is non-standard. It's useful as a signal from a simplate\n # that it doesn't want X-Frame-Options set at all, but because it's\n # non-standard we don't send it. Instead we unset the header entirely,\n # which has the desired effect of allowing framing indiscriminately.\n #\n # Refs.:\n #\n # http://en.wikipedia.org/wiki/Clickjacking#X-Frame-Options\n # http://ipsec.pl/node/1094\n\n del response.headers['X-Frame-Options']\n\n # https://www.owasp.org/index.php/List_of_useful_HTTP_headers\n if 'X-Content-Type-Options' not in response.headers:\n response.headers['X-Content-Type-Options'] = 'nosniff'\n\n # https://www.owasp.org/index.php/List_of_useful_HTTP_headers\n if 'X-XSS-Protection' not in response.headers:\n response.headers['X-XSS-Protection'] = '1; mode=block'\n\n # https://www.w3.org/TR/referrer-policy/\n if 'Referrer-Policy' not in response.headers:\n response.headers['Referrer-Policy'] = 'strict-origin-when-cross-origin'\n\n # https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP\n if 'content-security-policy-report-only' not in response.headers:\n response.headers['content-security-policy-report-only'] = (\n \"default-src 'self';\"\n \"script-src 'self' assets.gratipay.com 'unsafe-inline';\"\n \"style-src 'self' assets.gratipay.com downloads.gratipay.com cloud.typography.com;\"\n \"img-src *;\"\n \"font-src 'self' assets.gratipay.com cloud.typography.com data:;\"\n \"upgrade-insecure-requests;\"\n \"block-all-mixed-content;\"\n \"reflected-xss block;\"\n \"report-uri https://gratipay.report-uri.io/r/default/csp/reportOnly;\"\n )\n", "path": "gratipay/security/__init__.py"}], "after_files": [{"content": "from aspen import Response\n\n\n_requesting_asset = lambda r: r.path.raw.startswith('/assets/')\n\n\ndef only_allow_certain_methods(request):\n method = request.method.upper()\n whitelist = ('GET', 'HEAD') if _requesting_asset(request) else ('GET', 'HEAD', 'POST')\n # POSTing to /assets/ interferes with the csrf.* functions if we're not careful\n if method not in whitelist:\n raise Response(405)\n\n\ndef add_headers_to_response(response):\n \"\"\"Add security headers.\n \"\"\"\n\n # http://en.wikipedia.org/wiki/Clickjacking#X-Frame-Options\n if 'X-Frame-Options' not in response.headers:\n response.headers['X-Frame-Options'] = 'SAMEORIGIN'\n elif response.headers['X-Frame-Options'] == 'ALLOWALL':\n\n # ALLOWALL is non-standard. It's useful as a signal from a simplate\n # that it doesn't want X-Frame-Options set at all, but because it's\n # non-standard we don't send it. Instead we unset the header entirely,\n # which has the desired effect of allowing framing indiscriminately.\n #\n # Refs.:\n #\n # http://en.wikipedia.org/wiki/Clickjacking#X-Frame-Options\n # http://ipsec.pl/node/1094\n\n del response.headers['X-Frame-Options']\n\n # https://www.owasp.org/index.php/List_of_useful_HTTP_headers\n if 'X-Content-Type-Options' not in response.headers:\n response.headers['X-Content-Type-Options'] = 'nosniff'\n\n # https://www.owasp.org/index.php/List_of_useful_HTTP_headers\n if 'X-XSS-Protection' not in response.headers:\n response.headers['X-XSS-Protection'] = '1; mode=block'\n\n # https://www.w3.org/TR/referrer-policy/\n if 'Referrer-Policy' not in response.headers:\n response.headers['Referrer-Policy'] = \\\n 'no-referrer-when-downgrade, strict-origin-when-cross-origin'\n\n # https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP\n if 'content-security-policy-report-only' not in response.headers:\n response.headers['content-security-policy-report-only'] = (\n \"default-src 'self';\"\n \"script-src 'self' assets.gratipay.com 'unsafe-inline';\"\n \"style-src 'self' assets.gratipay.com downloads.gratipay.com cloud.typography.com;\"\n \"img-src *;\"\n \"font-src 'self' assets.gratipay.com cloud.typography.com data:;\"\n \"block-all-mixed-content;\"\n \"report-uri https://gratipay.report-uri.io/r/default/csp/reportOnly;\"\n )\n", "path": "gratipay/security/__init__.py"}]}
1,066
269
gh_patches_debug_3361
rasdani/github-patches
git_diff
scikit-image__scikit-image-5167
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- DOCS: Return dtype of _label2rgb_avg function ## Description With skimage release 0.18.0, and in particular after PR #4840, there is a mismatch between the return dtype of function `_label2rgb_avg` in the `color/colorlabel` module: it is stated to be the same dtype as the image, but using `np.zeros` instead of `np.zeros_like` makes the return type always to be float --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `skimage/color/colorlabel.py` Content: ``` 1 import itertools 2 3 import numpy as np 4 5 from .._shared.utils import warn, change_default_value 6 from ..util import img_as_float 7 from . import rgb_colors 8 from .colorconv import rgb2gray, gray2rgb 9 10 11 __all__ = ['color_dict', 'label2rgb', 'DEFAULT_COLORS'] 12 13 14 DEFAULT_COLORS = ('red', 'blue', 'yellow', 'magenta', 'green', 15 'indigo', 'darkorange', 'cyan', 'pink', 'yellowgreen') 16 17 18 color_dict = {k: v for k, v in rgb_colors.__dict__.items() 19 if isinstance(v, tuple)} 20 21 22 def _rgb_vector(color): 23 """Return RGB color as (1, 3) array. 24 25 This RGB array gets multiplied by masked regions of an RGB image, which are 26 partially flattened by masking (i.e. dimensions 2D + RGB -> 1D + RGB). 27 28 Parameters 29 ---------- 30 color : str or array 31 Color name in `color_dict` or RGB float values between [0, 1]. 32 """ 33 if isinstance(color, str): 34 color = color_dict[color] 35 # Slice to handle RGBA colors. 36 return np.array(color[:3]) 37 38 39 def _match_label_with_color(label, colors, bg_label, bg_color): 40 """Return `unique_labels` and `color_cycle` for label array and color list. 41 42 Colors are cycled for normal labels, but the background color should only 43 be used for the background. 44 """ 45 # Temporarily set background color; it will be removed later. 46 if bg_color is None: 47 bg_color = (0, 0, 0) 48 bg_color = _rgb_vector(bg_color) 49 50 # map labels to their ranks among all labels from small to large 51 unique_labels, mapped_labels = np.unique(label, return_inverse=True) 52 53 # get rank of bg_label 54 bg_label_rank_list = mapped_labels[label.flat == bg_label] 55 56 # The rank of each label is the index of the color it is matched to in 57 # color cycle. bg_label should always be mapped to the first color, so 58 # its rank must be 0. Other labels should be ranked from small to large 59 # from 1. 60 if len(bg_label_rank_list) > 0: 61 bg_label_rank = bg_label_rank_list[0] 62 mapped_labels[mapped_labels < bg_label_rank] += 1 63 mapped_labels[label.flat == bg_label] = 0 64 else: 65 mapped_labels += 1 66 67 # Modify labels and color cycle so background color is used only once. 68 color_cycle = itertools.cycle(colors) 69 color_cycle = itertools.chain([bg_color], color_cycle) 70 71 return mapped_labels, color_cycle 72 73 74 @change_default_value("bg_label", new_value=0, changed_version="0.19") 75 def label2rgb(label, image=None, colors=None, alpha=0.3, 76 bg_label=-1, bg_color=(0, 0, 0), image_alpha=1, kind='overlay'): 77 """Return an RGB image where color-coded labels are painted over the image. 78 79 Parameters 80 ---------- 81 label : array, shape (M, N) 82 Integer array of labels with the same shape as `image`. 83 image : array, shape (M, N, 3), optional 84 Image used as underlay for labels. If the input is an RGB image, it's 85 converted to grayscale before coloring. 86 colors : list, optional 87 List of colors. If the number of labels exceeds the number of colors, 88 then the colors are cycled. 89 alpha : float [0, 1], optional 90 Opacity of colorized labels. Ignored if image is `None`. 91 bg_label : int, optional 92 Label that's treated as the background. If `bg_label` is specified, 93 `bg_color` is `None`, and `kind` is `overlay`, 94 background is not painted by any colors. 95 bg_color : str or array, optional 96 Background color. Must be a name in `color_dict` or RGB float values 97 between [0, 1]. 98 image_alpha : float [0, 1], optional 99 Opacity of the image. 100 kind : string, one of {'overlay', 'avg'} 101 The kind of color image desired. 'overlay' cycles over defined colors 102 and overlays the colored labels over the original image. 'avg' replaces 103 each labeled segment with its average color, for a stained-class or 104 pastel painting appearance. 105 106 Returns 107 ------- 108 result : array of float, shape (M, N, 3) 109 The result of blending a cycling colormap (`colors`) for each distinct 110 value in `label` with the image, at a certain alpha value. 111 """ 112 if kind == 'overlay': 113 return _label2rgb_overlay(label, image, colors, alpha, bg_label, 114 bg_color, image_alpha) 115 elif kind == 'avg': 116 return _label2rgb_avg(label, image, bg_label, bg_color) 117 else: 118 raise ValueError("`kind` must be either 'overlay' or 'avg'.") 119 120 121 def _label2rgb_overlay(label, image=None, colors=None, alpha=0.3, 122 bg_label=-1, bg_color=None, image_alpha=1): 123 """Return an RGB image where color-coded labels are painted over the image. 124 125 Parameters 126 ---------- 127 label : array, shape (M, N) 128 Integer array of labels with the same shape as `image`. 129 image : array, shape (M, N, 3), optional 130 Image used as underlay for labels. If the input is an RGB image, it's 131 converted to grayscale before coloring. 132 colors : list, optional 133 List of colors. If the number of labels exceeds the number of colors, 134 then the colors are cycled. 135 alpha : float [0, 1], optional 136 Opacity of colorized labels. Ignored if image is `None`. 137 bg_label : int, optional 138 Label that's treated as the background. If `bg_label` is specified and 139 `bg_color` is `None`, background is not painted by any colors. 140 bg_color : str or array, optional 141 Background color. Must be a name in `color_dict` or RGB float values 142 between [0, 1]. 143 image_alpha : float [0, 1], optional 144 Opacity of the image. 145 146 Returns 147 ------- 148 result : array of float, shape (M, N, 3) 149 The result of blending a cycling colormap (`colors`) for each distinct 150 value in `label` with the image, at a certain alpha value. 151 """ 152 if colors is None: 153 colors = DEFAULT_COLORS 154 colors = [_rgb_vector(c) for c in colors] 155 156 if image is None: 157 image = np.zeros(label.shape + (3,), dtype=np.float64) 158 # Opacity doesn't make sense if no image exists. 159 alpha = 1 160 else: 161 if not image.shape[:2] == label.shape: 162 raise ValueError("`image` and `label` must be the same shape") 163 164 if image.min() < 0: 165 warn("Negative intensities in `image` are not supported") 166 167 if image.ndim > label.ndim: 168 image = img_as_float(rgb2gray(image)) 169 else: 170 image = img_as_float(image) 171 image = gray2rgb(image) * image_alpha + (1 - image_alpha) 172 173 # Ensure that all labels are non-negative so we can index into 174 # `label_to_color` correctly. 175 offset = min(label.min(), bg_label) 176 if offset != 0: 177 label = label - offset # Make sure you don't modify the input array. 178 bg_label -= offset 179 180 new_type = np.min_scalar_type(int(label.max())) 181 if new_type == bool: 182 new_type = np.uint8 183 label = label.astype(new_type) 184 185 mapped_labels_flat, color_cycle = _match_label_with_color(label, colors, 186 bg_label, bg_color) 187 188 if len(mapped_labels_flat) == 0: 189 return image 190 191 dense_labels = range(np.max(mapped_labels_flat) + 1) 192 193 label_to_color = np.stack([c for i, c in zip(dense_labels, color_cycle)]) 194 195 mapped_labels = label 196 mapped_labels.flat = mapped_labels_flat 197 result = label_to_color[mapped_labels] * alpha + image * (1 - alpha) 198 199 # Remove background label if its color was not specified. 200 remove_background = 0 in mapped_labels_flat and bg_color is None 201 if remove_background: 202 result[label == bg_label] = image[label == bg_label] 203 204 return result 205 206 207 def _label2rgb_avg(label_field, image, bg_label=0, bg_color=(0, 0, 0)): 208 """Visualise each segment in `label_field` with its mean color in `image`. 209 210 Parameters 211 ---------- 212 label_field : array of int 213 A segmentation of an image. 214 image : array, shape ``label_field.shape + (3,)`` 215 A color image of the same spatial shape as `label_field`. 216 bg_label : int, optional 217 A value in `label_field` to be treated as background. 218 bg_color : 3-tuple of int, optional 219 The color for the background label 220 221 Returns 222 ------- 223 out : array, same shape and type as `image` 224 The output visualization. 225 """ 226 out = np.zeros(label_field.shape + (3,)) 227 labels = np.unique(label_field) 228 bg = (labels == bg_label) 229 if bg.any(): 230 labels = labels[labels != bg_label] 231 mask = (label_field == bg_label).nonzero() 232 out[mask] = bg_color 233 for label in labels: 234 mask = (label_field == label).nonzero() 235 color = image[mask].mean(axis=0) 236 out[mask] = color 237 return out 238 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/skimage/color/colorlabel.py b/skimage/color/colorlabel.py --- a/skimage/color/colorlabel.py +++ b/skimage/color/colorlabel.py @@ -223,7 +223,7 @@ out : array, same shape and type as `image` The output visualization. """ - out = np.zeros(label_field.shape + (3,)) + out = np.zeros(label_field.shape + (3,), dtype=image.dtype) labels = np.unique(label_field) bg = (labels == bg_label) if bg.any():
{"golden_diff": "diff --git a/skimage/color/colorlabel.py b/skimage/color/colorlabel.py\n--- a/skimage/color/colorlabel.py\n+++ b/skimage/color/colorlabel.py\n@@ -223,7 +223,7 @@\n out : array, same shape and type as `image`\n The output visualization.\n \"\"\"\n- out = np.zeros(label_field.shape + (3,))\n+ out = np.zeros(label_field.shape + (3,), dtype=image.dtype)\n labels = np.unique(label_field)\n bg = (labels == bg_label)\n if bg.any():\n", "issue": "DOCS: Return dtype of _label2rgb_avg function\n## Description\r\n\r\nWith skimage release 0.18.0, and in particular after PR #4840, there is a mismatch between the return dtype of function `_label2rgb_avg` in the `color/colorlabel` module: it is stated to be the same dtype as the image, but using `np.zeros` instead of `np.zeros_like` makes the return type always to be float\r\n\n", "before_files": [{"content": "import itertools\n\nimport numpy as np\n\nfrom .._shared.utils import warn, change_default_value\nfrom ..util import img_as_float\nfrom . import rgb_colors\nfrom .colorconv import rgb2gray, gray2rgb\n\n\n__all__ = ['color_dict', 'label2rgb', 'DEFAULT_COLORS']\n\n\nDEFAULT_COLORS = ('red', 'blue', 'yellow', 'magenta', 'green',\n 'indigo', 'darkorange', 'cyan', 'pink', 'yellowgreen')\n\n\ncolor_dict = {k: v for k, v in rgb_colors.__dict__.items()\n if isinstance(v, tuple)}\n\n\ndef _rgb_vector(color):\n \"\"\"Return RGB color as (1, 3) array.\n\n This RGB array gets multiplied by masked regions of an RGB image, which are\n partially flattened by masking (i.e. dimensions 2D + RGB -> 1D + RGB).\n\n Parameters\n ----------\n color : str or array\n Color name in `color_dict` or RGB float values between [0, 1].\n \"\"\"\n if isinstance(color, str):\n color = color_dict[color]\n # Slice to handle RGBA colors.\n return np.array(color[:3])\n\n\ndef _match_label_with_color(label, colors, bg_label, bg_color):\n \"\"\"Return `unique_labels` and `color_cycle` for label array and color list.\n\n Colors are cycled for normal labels, but the background color should only\n be used for the background.\n \"\"\"\n # Temporarily set background color; it will be removed later.\n if bg_color is None:\n bg_color = (0, 0, 0)\n bg_color = _rgb_vector(bg_color)\n\n # map labels to their ranks among all labels from small to large\n unique_labels, mapped_labels = np.unique(label, return_inverse=True)\n\n # get rank of bg_label\n bg_label_rank_list = mapped_labels[label.flat == bg_label]\n\n # The rank of each label is the index of the color it is matched to in\n # color cycle. bg_label should always be mapped to the first color, so\n # its rank must be 0. Other labels should be ranked from small to large\n # from 1.\n if len(bg_label_rank_list) > 0:\n bg_label_rank = bg_label_rank_list[0]\n mapped_labels[mapped_labels < bg_label_rank] += 1\n mapped_labels[label.flat == bg_label] = 0\n else:\n mapped_labels += 1\n\n # Modify labels and color cycle so background color is used only once.\n color_cycle = itertools.cycle(colors)\n color_cycle = itertools.chain([bg_color], color_cycle)\n\n return mapped_labels, color_cycle\n\n\n@change_default_value(\"bg_label\", new_value=0, changed_version=\"0.19\")\ndef label2rgb(label, image=None, colors=None, alpha=0.3,\n bg_label=-1, bg_color=(0, 0, 0), image_alpha=1, kind='overlay'):\n \"\"\"Return an RGB image where color-coded labels are painted over the image.\n\n Parameters\n ----------\n label : array, shape (M, N)\n Integer array of labels with the same shape as `image`.\n image : array, shape (M, N, 3), optional\n Image used as underlay for labels. If the input is an RGB image, it's\n converted to grayscale before coloring.\n colors : list, optional\n List of colors. If the number of labels exceeds the number of colors,\n then the colors are cycled.\n alpha : float [0, 1], optional\n Opacity of colorized labels. Ignored if image is `None`.\n bg_label : int, optional\n Label that's treated as the background. If `bg_label` is specified,\n `bg_color` is `None`, and `kind` is `overlay`,\n background is not painted by any colors.\n bg_color : str or array, optional\n Background color. Must be a name in `color_dict` or RGB float values\n between [0, 1].\n image_alpha : float [0, 1], optional\n Opacity of the image.\n kind : string, one of {'overlay', 'avg'}\n The kind of color image desired. 'overlay' cycles over defined colors\n and overlays the colored labels over the original image. 'avg' replaces\n each labeled segment with its average color, for a stained-class or\n pastel painting appearance.\n\n Returns\n -------\n result : array of float, shape (M, N, 3)\n The result of blending a cycling colormap (`colors`) for each distinct\n value in `label` with the image, at a certain alpha value.\n \"\"\"\n if kind == 'overlay':\n return _label2rgb_overlay(label, image, colors, alpha, bg_label,\n bg_color, image_alpha)\n elif kind == 'avg':\n return _label2rgb_avg(label, image, bg_label, bg_color)\n else:\n raise ValueError(\"`kind` must be either 'overlay' or 'avg'.\")\n\n\ndef _label2rgb_overlay(label, image=None, colors=None, alpha=0.3,\n bg_label=-1, bg_color=None, image_alpha=1):\n \"\"\"Return an RGB image where color-coded labels are painted over the image.\n\n Parameters\n ----------\n label : array, shape (M, N)\n Integer array of labels with the same shape as `image`.\n image : array, shape (M, N, 3), optional\n Image used as underlay for labels. If the input is an RGB image, it's\n converted to grayscale before coloring.\n colors : list, optional\n List of colors. If the number of labels exceeds the number of colors,\n then the colors are cycled.\n alpha : float [0, 1], optional\n Opacity of colorized labels. Ignored if image is `None`.\n bg_label : int, optional\n Label that's treated as the background. If `bg_label` is specified and\n `bg_color` is `None`, background is not painted by any colors.\n bg_color : str or array, optional\n Background color. Must be a name in `color_dict` or RGB float values\n between [0, 1].\n image_alpha : float [0, 1], optional\n Opacity of the image.\n\n Returns\n -------\n result : array of float, shape (M, N, 3)\n The result of blending a cycling colormap (`colors`) for each distinct\n value in `label` with the image, at a certain alpha value.\n \"\"\"\n if colors is None:\n colors = DEFAULT_COLORS\n colors = [_rgb_vector(c) for c in colors]\n\n if image is None:\n image = np.zeros(label.shape + (3,), dtype=np.float64)\n # Opacity doesn't make sense if no image exists.\n alpha = 1\n else:\n if not image.shape[:2] == label.shape:\n raise ValueError(\"`image` and `label` must be the same shape\")\n\n if image.min() < 0:\n warn(\"Negative intensities in `image` are not supported\")\n\n if image.ndim > label.ndim:\n image = img_as_float(rgb2gray(image))\n else:\n image = img_as_float(image)\n image = gray2rgb(image) * image_alpha + (1 - image_alpha)\n\n # Ensure that all labels are non-negative so we can index into\n # `label_to_color` correctly.\n offset = min(label.min(), bg_label)\n if offset != 0:\n label = label - offset # Make sure you don't modify the input array.\n bg_label -= offset\n\n new_type = np.min_scalar_type(int(label.max()))\n if new_type == bool:\n new_type = np.uint8\n label = label.astype(new_type)\n\n mapped_labels_flat, color_cycle = _match_label_with_color(label, colors,\n bg_label, bg_color)\n\n if len(mapped_labels_flat) == 0:\n return image\n\n dense_labels = range(np.max(mapped_labels_flat) + 1)\n\n label_to_color = np.stack([c for i, c in zip(dense_labels, color_cycle)])\n\n mapped_labels = label\n mapped_labels.flat = mapped_labels_flat\n result = label_to_color[mapped_labels] * alpha + image * (1 - alpha)\n\n # Remove background label if its color was not specified.\n remove_background = 0 in mapped_labels_flat and bg_color is None\n if remove_background:\n result[label == bg_label] = image[label == bg_label]\n\n return result\n\n\ndef _label2rgb_avg(label_field, image, bg_label=0, bg_color=(0, 0, 0)):\n \"\"\"Visualise each segment in `label_field` with its mean color in `image`.\n\n Parameters\n ----------\n label_field : array of int\n A segmentation of an image.\n image : array, shape ``label_field.shape + (3,)``\n A color image of the same spatial shape as `label_field`.\n bg_label : int, optional\n A value in `label_field` to be treated as background.\n bg_color : 3-tuple of int, optional\n The color for the background label\n\n Returns\n -------\n out : array, same shape and type as `image`\n The output visualization.\n \"\"\"\n out = np.zeros(label_field.shape + (3,))\n labels = np.unique(label_field)\n bg = (labels == bg_label)\n if bg.any():\n labels = labels[labels != bg_label]\n mask = (label_field == bg_label).nonzero()\n out[mask] = bg_color\n for label in labels:\n mask = (label_field == label).nonzero()\n color = image[mask].mean(axis=0)\n out[mask] = color\n return out\n", "path": "skimage/color/colorlabel.py"}], "after_files": [{"content": "import itertools\n\nimport numpy as np\n\nfrom .._shared.utils import warn, change_default_value\nfrom ..util import img_as_float\nfrom . import rgb_colors\nfrom .colorconv import rgb2gray, gray2rgb\n\n\n__all__ = ['color_dict', 'label2rgb', 'DEFAULT_COLORS']\n\n\nDEFAULT_COLORS = ('red', 'blue', 'yellow', 'magenta', 'green',\n 'indigo', 'darkorange', 'cyan', 'pink', 'yellowgreen')\n\n\ncolor_dict = {k: v for k, v in rgb_colors.__dict__.items()\n if isinstance(v, tuple)}\n\n\ndef _rgb_vector(color):\n \"\"\"Return RGB color as (1, 3) array.\n\n This RGB array gets multiplied by masked regions of an RGB image, which are\n partially flattened by masking (i.e. dimensions 2D + RGB -> 1D + RGB).\n\n Parameters\n ----------\n color : str or array\n Color name in `color_dict` or RGB float values between [0, 1].\n \"\"\"\n if isinstance(color, str):\n color = color_dict[color]\n # Slice to handle RGBA colors.\n return np.array(color[:3])\n\n\ndef _match_label_with_color(label, colors, bg_label, bg_color):\n \"\"\"Return `unique_labels` and `color_cycle` for label array and color list.\n\n Colors are cycled for normal labels, but the background color should only\n be used for the background.\n \"\"\"\n # Temporarily set background color; it will be removed later.\n if bg_color is None:\n bg_color = (0, 0, 0)\n bg_color = _rgb_vector(bg_color)\n\n # map labels to their ranks among all labels from small to large\n unique_labels, mapped_labels = np.unique(label, return_inverse=True)\n\n # get rank of bg_label\n bg_label_rank_list = mapped_labels[label.flat == bg_label]\n\n # The rank of each label is the index of the color it is matched to in\n # color cycle. bg_label should always be mapped to the first color, so\n # its rank must be 0. Other labels should be ranked from small to large\n # from 1.\n if len(bg_label_rank_list) > 0:\n bg_label_rank = bg_label_rank_list[0]\n mapped_labels[mapped_labels < bg_label_rank] += 1\n mapped_labels[label.flat == bg_label] = 0\n else:\n mapped_labels += 1\n\n # Modify labels and color cycle so background color is used only once.\n color_cycle = itertools.cycle(colors)\n color_cycle = itertools.chain([bg_color], color_cycle)\n\n return mapped_labels, color_cycle\n\n\n@change_default_value(\"bg_label\", new_value=0, changed_version=\"0.19\")\ndef label2rgb(label, image=None, colors=None, alpha=0.3,\n bg_label=-1, bg_color=(0, 0, 0), image_alpha=1, kind='overlay'):\n \"\"\"Return an RGB image where color-coded labels are painted over the image.\n\n Parameters\n ----------\n label : array, shape (M, N)\n Integer array of labels with the same shape as `image`.\n image : array, shape (M, N, 3), optional\n Image used as underlay for labels. If the input is an RGB image, it's\n converted to grayscale before coloring.\n colors : list, optional\n List of colors. If the number of labels exceeds the number of colors,\n then the colors are cycled.\n alpha : float [0, 1], optional\n Opacity of colorized labels. Ignored if image is `None`.\n bg_label : int, optional\n Label that's treated as the background. If `bg_label` is specified,\n `bg_color` is `None`, and `kind` is `overlay`,\n background is not painted by any colors.\n bg_color : str or array, optional\n Background color. Must be a name in `color_dict` or RGB float values\n between [0, 1].\n image_alpha : float [0, 1], optional\n Opacity of the image.\n kind : string, one of {'overlay', 'avg'}\n The kind of color image desired. 'overlay' cycles over defined colors\n and overlays the colored labels over the original image. 'avg' replaces\n each labeled segment with its average color, for a stained-class or\n pastel painting appearance.\n\n Returns\n -------\n result : array of float, shape (M, N, 3)\n The result of blending a cycling colormap (`colors`) for each distinct\n value in `label` with the image, at a certain alpha value.\n \"\"\"\n if kind == 'overlay':\n return _label2rgb_overlay(label, image, colors, alpha, bg_label,\n bg_color, image_alpha)\n elif kind == 'avg':\n return _label2rgb_avg(label, image, bg_label, bg_color)\n else:\n raise ValueError(\"`kind` must be either 'overlay' or 'avg'.\")\n\n\ndef _label2rgb_overlay(label, image=None, colors=None, alpha=0.3,\n bg_label=-1, bg_color=None, image_alpha=1):\n \"\"\"Return an RGB image where color-coded labels are painted over the image.\n\n Parameters\n ----------\n label : array, shape (M, N)\n Integer array of labels with the same shape as `image`.\n image : array, shape (M, N, 3), optional\n Image used as underlay for labels. If the input is an RGB image, it's\n converted to grayscale before coloring.\n colors : list, optional\n List of colors. If the number of labels exceeds the number of colors,\n then the colors are cycled.\n alpha : float [0, 1], optional\n Opacity of colorized labels. Ignored if image is `None`.\n bg_label : int, optional\n Label that's treated as the background. If `bg_label` is specified and\n `bg_color` is `None`, background is not painted by any colors.\n bg_color : str or array, optional\n Background color. Must be a name in `color_dict` or RGB float values\n between [0, 1].\n image_alpha : float [0, 1], optional\n Opacity of the image.\n\n Returns\n -------\n result : array of float, shape (M, N, 3)\n The result of blending a cycling colormap (`colors`) for each distinct\n value in `label` with the image, at a certain alpha value.\n \"\"\"\n if colors is None:\n colors = DEFAULT_COLORS\n colors = [_rgb_vector(c) for c in colors]\n\n if image is None:\n image = np.zeros(label.shape + (3,), dtype=np.float64)\n # Opacity doesn't make sense if no image exists.\n alpha = 1\n else:\n if not image.shape[:2] == label.shape:\n raise ValueError(\"`image` and `label` must be the same shape\")\n\n if image.min() < 0:\n warn(\"Negative intensities in `image` are not supported\")\n\n if image.ndim > label.ndim:\n image = img_as_float(rgb2gray(image))\n else:\n image = img_as_float(image)\n image = gray2rgb(image) * image_alpha + (1 - image_alpha)\n\n # Ensure that all labels are non-negative so we can index into\n # `label_to_color` correctly.\n offset = min(label.min(), bg_label)\n if offset != 0:\n label = label - offset # Make sure you don't modify the input array.\n bg_label -= offset\n\n new_type = np.min_scalar_type(int(label.max()))\n if new_type == bool:\n new_type = np.uint8\n label = label.astype(new_type)\n\n mapped_labels_flat, color_cycle = _match_label_with_color(label, colors,\n bg_label, bg_color)\n\n if len(mapped_labels_flat) == 0:\n return image\n\n dense_labels = range(np.max(mapped_labels_flat) + 1)\n\n label_to_color = np.stack([c for i, c in zip(dense_labels, color_cycle)])\n\n mapped_labels = label\n mapped_labels.flat = mapped_labels_flat\n result = label_to_color[mapped_labels] * alpha + image * (1 - alpha)\n\n # Remove background label if its color was not specified.\n remove_background = 0 in mapped_labels_flat and bg_color is None\n if remove_background:\n result[label == bg_label] = image[label == bg_label]\n\n return result\n\n\ndef _label2rgb_avg(label_field, image, bg_label=0, bg_color=(0, 0, 0)):\n \"\"\"Visualise each segment in `label_field` with its mean color in `image`.\n\n Parameters\n ----------\n label_field : array of int\n A segmentation of an image.\n image : array, shape ``label_field.shape + (3,)``\n A color image of the same spatial shape as `label_field`.\n bg_label : int, optional\n A value in `label_field` to be treated as background.\n bg_color : 3-tuple of int, optional\n The color for the background label\n\n Returns\n -------\n out : array, same shape and type as `image`\n The output visualization.\n \"\"\"\n out = np.zeros(label_field.shape + (3,), dtype=image.dtype)\n labels = np.unique(label_field)\n bg = (labels == bg_label)\n if bg.any():\n labels = labels[labels != bg_label]\n mask = (label_field == bg_label).nonzero()\n out[mask] = bg_color\n for label in labels:\n mask = (label_field == label).nonzero()\n color = image[mask].mean(axis=0)\n out[mask] = color\n return out\n", "path": "skimage/color/colorlabel.py"}]}
3,131
126
gh_patches_debug_1920
rasdani/github-patches
git_diff
mozilla__bugbug-598
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Use new 'everchanged' operator instead of changedafter 1970 Depends on https://bugzilla.mozilla.org/show_bug.cgi?id=1546624. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scripts/get_type_labels.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # This Source Code Form is subject to the terms of the Mozilla Public 3 # License, v. 2.0. If a copy of the MPL was not distributed with this file, 4 # You can obtain one at http://mozilla.org/MPL/2.0/. 5 6 import argparse 7 import csv 8 import sys 9 10 import requests 11 12 13 def parse_args(args): 14 parser = argparse.ArgumentParser() 15 parser.add_argument( 16 "--types", 17 help="Types to retrieve", 18 default=["defect", "enhancement", "task"], 19 nargs="*", 20 ) 21 return parser.parse_args(args) 22 23 24 def main(args): 25 params = { 26 "columnlist": "bug_type", 27 "order": "bug_id", 28 "j_top": "OR", 29 "f1": "bug_type", 30 "o1": "changedafter", 31 "v1": "1970-01-01", 32 "f2": "OP", 33 "f3": "bug_type", 34 "o3": "anyexact", 35 "v3": "task,enhancement", 36 "f4": "bug_id", 37 "o4": "greaterthan", 38 "v4": 1540807, 39 "f5": "CP", 40 "ctype": "csv", 41 } 42 43 r = requests.get("https://bugzilla.mozilla.org/buglist.cgi", params=params) 44 r.raise_for_status() 45 46 with open("bugbug/labels/defect_enhancement_task_h.csv", "r") as f: 47 reader = csv.reader(f) 48 headers = next(reader) 49 bug_type_map = {int(row[0]): row[1] for row in reader} 50 51 # We add to our csv both labels that were changed, and labels that are in 52 # the list of requested types. 53 reader = csv.reader(r.text.splitlines()) 54 next(reader) 55 for row in reader: 56 if int(row[0]) in bug_type_map or row[1] in args.types: 57 bug_type_map[int(row[0])] = row[1] 58 59 with open("bugbug/labels/defect_enhancement_task_h.csv", "w") as f: 60 writer = csv.writer(f) 61 writer.writerow(headers) 62 writer.writerows(sorted(bug_type_map.items())) 63 64 65 if __name__ == "__main__": 66 main(parse_args(sys.argv[1:])) 67 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scripts/get_type_labels.py b/scripts/get_type_labels.py --- a/scripts/get_type_labels.py +++ b/scripts/get_type_labels.py @@ -27,8 +27,7 @@ "order": "bug_id", "j_top": "OR", "f1": "bug_type", - "o1": "changedafter", - "v1": "1970-01-01", + "o1": "everchanged", "f2": "OP", "f3": "bug_type", "o3": "anyexact",
{"golden_diff": "diff --git a/scripts/get_type_labels.py b/scripts/get_type_labels.py\n--- a/scripts/get_type_labels.py\n+++ b/scripts/get_type_labels.py\n@@ -27,8 +27,7 @@\n \"order\": \"bug_id\",\n \"j_top\": \"OR\",\n \"f1\": \"bug_type\",\n- \"o1\": \"changedafter\",\n- \"v1\": \"1970-01-01\",\n+ \"o1\": \"everchanged\",\n \"f2\": \"OP\",\n \"f3\": \"bug_type\",\n \"o3\": \"anyexact\",\n", "issue": "Use new 'everchanged' operator instead of changedafter 1970\nDepends on https://bugzilla.mozilla.org/show_bug.cgi?id=1546624.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport argparse\nimport csv\nimport sys\n\nimport requests\n\n\ndef parse_args(args):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--types\",\n help=\"Types to retrieve\",\n default=[\"defect\", \"enhancement\", \"task\"],\n nargs=\"*\",\n )\n return parser.parse_args(args)\n\n\ndef main(args):\n params = {\n \"columnlist\": \"bug_type\",\n \"order\": \"bug_id\",\n \"j_top\": \"OR\",\n \"f1\": \"bug_type\",\n \"o1\": \"changedafter\",\n \"v1\": \"1970-01-01\",\n \"f2\": \"OP\",\n \"f3\": \"bug_type\",\n \"o3\": \"anyexact\",\n \"v3\": \"task,enhancement\",\n \"f4\": \"bug_id\",\n \"o4\": \"greaterthan\",\n \"v4\": 1540807,\n \"f5\": \"CP\",\n \"ctype\": \"csv\",\n }\n\n r = requests.get(\"https://bugzilla.mozilla.org/buglist.cgi\", params=params)\n r.raise_for_status()\n\n with open(\"bugbug/labels/defect_enhancement_task_h.csv\", \"r\") as f:\n reader = csv.reader(f)\n headers = next(reader)\n bug_type_map = {int(row[0]): row[1] for row in reader}\n\n # We add to our csv both labels that were changed, and labels that are in\n # the list of requested types.\n reader = csv.reader(r.text.splitlines())\n next(reader)\n for row in reader:\n if int(row[0]) in bug_type_map or row[1] in args.types:\n bug_type_map[int(row[0])] = row[1]\n\n with open(\"bugbug/labels/defect_enhancement_task_h.csv\", \"w\") as f:\n writer = csv.writer(f)\n writer.writerow(headers)\n writer.writerows(sorted(bug_type_map.items()))\n\n\nif __name__ == \"__main__\":\n main(parse_args(sys.argv[1:]))\n", "path": "scripts/get_type_labels.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport argparse\nimport csv\nimport sys\n\nimport requests\n\n\ndef parse_args(args):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--types\",\n help=\"Types to retrieve\",\n default=[\"defect\", \"enhancement\", \"task\"],\n nargs=\"*\",\n )\n return parser.parse_args(args)\n\n\ndef main(args):\n params = {\n \"columnlist\": \"bug_type\",\n \"order\": \"bug_id\",\n \"j_top\": \"OR\",\n \"f1\": \"bug_type\",\n \"o1\": \"everchanged\",\n \"f2\": \"OP\",\n \"f3\": \"bug_type\",\n \"o3\": \"anyexact\",\n \"v3\": \"task,enhancement\",\n \"f4\": \"bug_id\",\n \"o4\": \"greaterthan\",\n \"v4\": 1540807,\n \"f5\": \"CP\",\n \"ctype\": \"csv\",\n }\n\n r = requests.get(\"https://bugzilla.mozilla.org/buglist.cgi\", params=params)\n r.raise_for_status()\n\n with open(\"bugbug/labels/defect_enhancement_task_h.csv\", \"r\") as f:\n reader = csv.reader(f)\n headers = next(reader)\n bug_type_map = {int(row[0]): row[1] for row in reader}\n\n # We add to our csv both labels that were changed, and labels that are in\n # the list of requested types.\n reader = csv.reader(r.text.splitlines())\n next(reader)\n for row in reader:\n if int(row[0]) in bug_type_map or row[1] in args.types:\n bug_type_map[int(row[0])] = row[1]\n\n with open(\"bugbug/labels/defect_enhancement_task_h.csv\", \"w\") as f:\n writer = csv.writer(f)\n writer.writerow(headers)\n writer.writerows(sorted(bug_type_map.items()))\n\n\nif __name__ == \"__main__\":\n main(parse_args(sys.argv[1:]))\n", "path": "scripts/get_type_labels.py"}]}
946
133
gh_patches_debug_25152
rasdani/github-patches
git_diff
ytdl-org__youtube-dl-14555
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Site Support on other Nickelodeon country website. (.dk, .no, .se) ## Please follow the guide below - You will be asked some questions and requested to provide some information, please read them **carefully** and answer honestly - Put an `x` into all the boxes [ ] relevant to your *issue* (like this: `[x]`) - Use the *Preview* tab to see what your issue will actually look like --- ### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.10.20*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected. - [x] I've **verified** and **I assure** that I'm running youtube-dl **2017.10.20** ### Before submitting an *issue* make sure you have: - [x] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections - [x] [Searched](https://github.com/rg3/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones ### What is the purpose of your *issue*? - [ ] Bug report (encountered problems with youtube-dl) - [x] Site support request (request for adding support for a new site) - [ ] Feature request (request for a new functionality) - [ ] Question - [ ] Other ### If the purpose of this *issue* is a *site support request* please provide all kinds of example URLs support for which should be included (replace following example URLs by **yours**): - Single video: http://www.nickelodeon.no/program/2626-bulderhuset/videoer/90947-femteklasse-veronica-vs-vanzilla - Single video: http://www.nickelodeon.dk/serier/2626-hojs-hus/videoer/761-tissepause - Single video: http://www.nickelodeon.se/serier/2626-lugn-i-stormen/videos/998- --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `youtube_dl/extractor/nick.py` Content: ``` 1 # coding: utf-8 2 from __future__ import unicode_literals 3 4 import re 5 6 from .mtv import MTVServicesInfoExtractor 7 from ..utils import update_url_query 8 9 10 class NickIE(MTVServicesInfoExtractor): 11 # None of videos on the website are still alive? 12 IE_NAME = 'nick.com' 13 _VALID_URL = r'https?://(?:(?:www|beta)\.)?nick(?:jr)?\.com/(?:[^/]+/)?(?:videos/clip|[^/]+/videos)/(?P<id>[^/?#.]+)' 14 _FEED_URL = 'http://udat.mtvnservices.com/service1/dispatch.htm' 15 _GEO_COUNTRIES = ['US'] 16 _TESTS = [{ 17 'url': 'http://www.nick.com/videos/clip/alvinnn-and-the-chipmunks-112-full-episode.html', 18 'playlist': [ 19 { 20 'md5': '6e5adc1e28253bbb1b28ab05403dd4d4', 21 'info_dict': { 22 'id': 'be6a17b0-412d-11e5-8ff7-0026b9414f30', 23 'ext': 'mp4', 24 'title': 'ALVINNN!!! and The Chipmunks: "Mojo Missing/Who\'s The Animal" S1', 25 'description': 'Alvin is convinced his mojo was in a cap he gave to a fan, and must find a way to get his hat back before the Chipmunks’ big concert.\nDuring a costume visit to the zoo, Alvin finds himself mistaken for the real Tasmanian devil.', 26 27 } 28 }, 29 { 30 'md5': 'd7be441fc53a1d4882fa9508a1e5b3ce', 31 'info_dict': { 32 'id': 'be6b8f96-412d-11e5-8ff7-0026b9414f30', 33 'ext': 'mp4', 34 'title': 'ALVINNN!!! and The Chipmunks: "Mojo Missing/Who\'s The Animal" S2', 35 'description': 'Alvin is convinced his mojo was in a cap he gave to a fan, and must find a way to get his hat back before the Chipmunks’ big concert.\nDuring a costume visit to the zoo, Alvin finds himself mistaken for the real Tasmanian devil.', 36 37 } 38 }, 39 { 40 'md5': 'efffe1728a234b2b0d2f2b343dd1946f', 41 'info_dict': { 42 'id': 'be6cf7e6-412d-11e5-8ff7-0026b9414f30', 43 'ext': 'mp4', 44 'title': 'ALVINNN!!! and The Chipmunks: "Mojo Missing/Who\'s The Animal" S3', 45 'description': 'Alvin is convinced his mojo was in a cap he gave to a fan, and must find a way to get his hat back before the Chipmunks’ big concert.\nDuring a costume visit to the zoo, Alvin finds himself mistaken for the real Tasmanian devil.', 46 } 47 }, 48 { 49 'md5': '1ec6690733ab9f41709e274a1d5c7556', 50 'info_dict': { 51 'id': 'be6e3354-412d-11e5-8ff7-0026b9414f30', 52 'ext': 'mp4', 53 'title': 'ALVINNN!!! and The Chipmunks: "Mojo Missing/Who\'s The Animal" S4', 54 'description': 'Alvin is convinced his mojo was in a cap he gave to a fan, and must find a way to get his hat back before the Chipmunks’ big concert.\nDuring a costume visit to the zoo, Alvin finds himself mistaken for the real Tasmanian devil.', 55 } 56 }, 57 ], 58 }, { 59 'url': 'http://www.nickjr.com/paw-patrol/videos/pups-save-a-goldrush-s3-ep302-full-episode/', 60 'only_matching': True, 61 }, { 62 'url': 'http://beta.nick.com/nicky-ricky-dicky-and-dawn/videos/nicky-ricky-dicky-dawn-301-full-episode/', 63 'only_matching': True, 64 }] 65 66 def _get_feed_query(self, uri): 67 return { 68 'feed': 'nick_arc_player_prime', 69 'mgid': uri, 70 } 71 72 def _extract_mgid(self, webpage): 73 return self._search_regex(r'data-contenturi="([^"]+)', webpage, 'mgid') 74 75 76 class NickDeIE(MTVServicesInfoExtractor): 77 IE_NAME = 'nick.de' 78 _VALID_URL = r'https?://(?:www\.)?(?P<host>nick\.(?:de|com\.pl)|nickelodeon\.(?:nl|at))/[^/]+/(?:[^/]+/)*(?P<id>[^/?#&]+)' 79 _TESTS = [{ 80 'url': 'http://www.nick.de/playlist/3773-top-videos/videos/episode/17306-zu-wasser-und-zu-land-rauchende-erdnusse', 81 'only_matching': True, 82 }, { 83 'url': 'http://www.nick.de/shows/342-icarly', 84 'only_matching': True, 85 }, { 86 'url': 'http://www.nickelodeon.nl/shows/474-spongebob/videos/17403-een-kijkje-in-de-keuken-met-sandy-van-binnenuit', 87 'only_matching': True, 88 }, { 89 'url': 'http://www.nickelodeon.at/playlist/3773-top-videos/videos/episode/77993-das-letzte-gefecht', 90 'only_matching': True, 91 }, { 92 'url': 'http://www.nick.com.pl/seriale/474-spongebob-kanciastoporty/wideo/17412-teatr-to-jest-to-rodeo-oszolom', 93 'only_matching': True, 94 }] 95 96 def _extract_mrss_url(self, webpage, host): 97 return update_url_query(self._search_regex( 98 r'data-mrss=(["\'])(?P<url>http.+?)\1', webpage, 'mrss url', group='url'), 99 {'siteKey': host}) 100 101 def _real_extract(self, url): 102 mobj = re.match(self._VALID_URL, url) 103 video_id = mobj.group('id') 104 host = mobj.group('host') 105 106 webpage = self._download_webpage(url, video_id) 107 108 mrss_url = self._extract_mrss_url(webpage, host) 109 110 return self._get_videos_info_from_url(mrss_url, video_id) 111 112 113 class NickNightIE(NickDeIE): 114 IE_NAME = 'nicknight' 115 _VALID_URL = r'https?://(?:www\.)(?P<host>nicknight\.(?:de|at|tv))/(?:playlist|shows)/(?:[^/]+/)*(?P<id>[^/?#&]+)' 116 _TESTS = [{ 117 'url': 'http://www.nicknight.at/shows/977-awkward/videos/85987-nimmer-beste-freunde', 118 'only_matching': True, 119 }, { 120 'url': 'http://www.nicknight.at/shows/977-awkward', 121 'only_matching': True, 122 }, { 123 'url': 'http://www.nicknight.at/shows/1900-faking-it', 124 'only_matching': True, 125 }] 126 127 def _extract_mrss_url(self, webpage, *args): 128 return self._search_regex( 129 r'mrss\s*:\s*(["\'])(?P<url>http.+?)\1', webpage, 130 'mrss url', group='url') 131 132 133 class NickRuIE(MTVServicesInfoExtractor): 134 IE_NAME = 'nickelodeonru' 135 _VALID_URL = r'https?://(?:www\.)nickelodeon\.ru/(?:playlist|shows|videos)/(?:[^/]+/)*(?P<id>[^/?#&]+)' 136 _TESTS = [{ 137 'url': 'http://www.nickelodeon.ru/shows/henrydanger/videos/episodes/3-sezon-15-seriya-licenziya-na-polyot/pmomfb#playlist/7airc6', 138 'only_matching': True, 139 }, { 140 'url': 'http://www.nickelodeon.ru/videos/smotri-na-nickelodeon-v-iyule/g9hvh7', 141 'only_matching': True, 142 }] 143 144 def _real_extract(self, url): 145 video_id = self._match_id(url) 146 webpage = self._download_webpage(url, video_id) 147 mgid = self._extract_mgid(webpage) 148 return self.url_result('http://media.mtvnservices.com/embed/%s' % mgid) 149 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/youtube_dl/extractor/nick.py b/youtube_dl/extractor/nick.py --- a/youtube_dl/extractor/nick.py +++ b/youtube_dl/extractor/nick.py @@ -75,7 +75,7 @@ class NickDeIE(MTVServicesInfoExtractor): IE_NAME = 'nick.de' - _VALID_URL = r'https?://(?:www\.)?(?P<host>nick\.(?:de|com\.pl)|nickelodeon\.(?:nl|at))/[^/]+/(?:[^/]+/)*(?P<id>[^/?#&]+)' + _VALID_URL = r'https?://(?:www\.)?(?P<host>nick\.(?:de|com\.pl)|nickelodeon\.(?:nl|at|dk|no|se))/[^/]+/(?:[^/]+/)*(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'http://www.nick.de/playlist/3773-top-videos/videos/episode/17306-zu-wasser-und-zu-land-rauchende-erdnusse', 'only_matching': True, @@ -91,6 +91,15 @@ }, { 'url': 'http://www.nick.com.pl/seriale/474-spongebob-kanciastoporty/wideo/17412-teatr-to-jest-to-rodeo-oszolom', 'only_matching': True, + }, { + 'url': 'http://www.nickelodeon.no/program/2626-bulderhuset/videoer/90947-femteklasse-veronica-vs-vanzilla', + 'only_matching': True, + }, { + 'url': 'http://www.nickelodeon.dk/serier/2626-hojs-hus/videoer/761-tissepause', + 'only_matching': True, + }, { + 'url': 'http://www.nickelodeon.se/serier/2626-lugn-i-stormen/videos/998-', + 'only_matching': True, }] def _extract_mrss_url(self, webpage, host):
{"golden_diff": "diff --git a/youtube_dl/extractor/nick.py b/youtube_dl/extractor/nick.py\n--- a/youtube_dl/extractor/nick.py\n+++ b/youtube_dl/extractor/nick.py\n@@ -75,7 +75,7 @@\n \n class NickDeIE(MTVServicesInfoExtractor):\n IE_NAME = 'nick.de'\n- _VALID_URL = r'https?://(?:www\\.)?(?P<host>nick\\.(?:de|com\\.pl)|nickelodeon\\.(?:nl|at))/[^/]+/(?:[^/]+/)*(?P<id>[^/?#&]+)'\n+ _VALID_URL = r'https?://(?:www\\.)?(?P<host>nick\\.(?:de|com\\.pl)|nickelodeon\\.(?:nl|at|dk|no|se))/[^/]+/(?:[^/]+/)*(?P<id>[^/?#&]+)'\n _TESTS = [{\n 'url': 'http://www.nick.de/playlist/3773-top-videos/videos/episode/17306-zu-wasser-und-zu-land-rauchende-erdnusse',\n 'only_matching': True,\n@@ -91,6 +91,15 @@\n }, {\n 'url': 'http://www.nick.com.pl/seriale/474-spongebob-kanciastoporty/wideo/17412-teatr-to-jest-to-rodeo-oszolom',\n 'only_matching': True,\n+ }, {\n+ 'url': 'http://www.nickelodeon.no/program/2626-bulderhuset/videoer/90947-femteklasse-veronica-vs-vanzilla',\n+ 'only_matching': True,\n+ }, {\n+ 'url': 'http://www.nickelodeon.dk/serier/2626-hojs-hus/videoer/761-tissepause',\n+ 'only_matching': True,\n+ }, {\n+ 'url': 'http://www.nickelodeon.se/serier/2626-lugn-i-stormen/videos/998-',\n+ 'only_matching': True,\n }]\n \n def _extract_mrss_url(self, webpage, host):\n", "issue": "Site Support on other Nickelodeon country website. (.dk, .no, .se)\n## Please follow the guide below\r\n\r\n- You will be asked some questions and requested to provide some information, please read them **carefully** and answer honestly\r\n- Put an `x` into all the boxes [ ] relevant to your *issue* (like this: `[x]`)\r\n- Use the *Preview* tab to see what your issue will actually look like\r\n\r\n---\r\n\r\n### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.10.20*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.\r\n- [x] I've **verified** and **I assure** that I'm running youtube-dl **2017.10.20**\r\n\r\n### Before submitting an *issue* make sure you have:\r\n- [x] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections\r\n- [x] [Searched](https://github.com/rg3/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones\r\n\r\n### What is the purpose of your *issue*?\r\n- [ ] Bug report (encountered problems with youtube-dl)\r\n- [x] Site support request (request for adding support for a new site)\r\n- [ ] Feature request (request for a new functionality)\r\n- [ ] Question\r\n- [ ] Other\r\n\r\n### If the purpose of this *issue* is a *site support request* please provide all kinds of example URLs support for which should be included (replace following example URLs by **yours**):\r\n- Single video: http://www.nickelodeon.no/program/2626-bulderhuset/videoer/90947-femteklasse-veronica-vs-vanzilla\r\n- Single video: http://www.nickelodeon.dk/serier/2626-hojs-hus/videoer/761-tissepause\r\n- Single video: http://www.nickelodeon.se/serier/2626-lugn-i-stormen/videos/998-\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport re\n\nfrom .mtv import MTVServicesInfoExtractor\nfrom ..utils import update_url_query\n\n\nclass NickIE(MTVServicesInfoExtractor):\n # None of videos on the website are still alive?\n IE_NAME = 'nick.com'\n _VALID_URL = r'https?://(?:(?:www|beta)\\.)?nick(?:jr)?\\.com/(?:[^/]+/)?(?:videos/clip|[^/]+/videos)/(?P<id>[^/?#.]+)'\n _FEED_URL = 'http://udat.mtvnservices.com/service1/dispatch.htm'\n _GEO_COUNTRIES = ['US']\n _TESTS = [{\n 'url': 'http://www.nick.com/videos/clip/alvinnn-and-the-chipmunks-112-full-episode.html',\n 'playlist': [\n {\n 'md5': '6e5adc1e28253bbb1b28ab05403dd4d4',\n 'info_dict': {\n 'id': 'be6a17b0-412d-11e5-8ff7-0026b9414f30',\n 'ext': 'mp4',\n 'title': 'ALVINNN!!! and The Chipmunks: \"Mojo Missing/Who\\'s The Animal\" S1',\n 'description': 'Alvin is convinced his mojo was in a cap he gave to a fan, and must find a way to get his hat back before the Chipmunks\u2019 big concert.\\nDuring a costume visit to the zoo, Alvin finds himself mistaken for the real Tasmanian devil.',\n\n }\n },\n {\n 'md5': 'd7be441fc53a1d4882fa9508a1e5b3ce',\n 'info_dict': {\n 'id': 'be6b8f96-412d-11e5-8ff7-0026b9414f30',\n 'ext': 'mp4',\n 'title': 'ALVINNN!!! and The Chipmunks: \"Mojo Missing/Who\\'s The Animal\" S2',\n 'description': 'Alvin is convinced his mojo was in a cap he gave to a fan, and must find a way to get his hat back before the Chipmunks\u2019 big concert.\\nDuring a costume visit to the zoo, Alvin finds himself mistaken for the real Tasmanian devil.',\n\n }\n },\n {\n 'md5': 'efffe1728a234b2b0d2f2b343dd1946f',\n 'info_dict': {\n 'id': 'be6cf7e6-412d-11e5-8ff7-0026b9414f30',\n 'ext': 'mp4',\n 'title': 'ALVINNN!!! and The Chipmunks: \"Mojo Missing/Who\\'s The Animal\" S3',\n 'description': 'Alvin is convinced his mojo was in a cap he gave to a fan, and must find a way to get his hat back before the Chipmunks\u2019 big concert.\\nDuring a costume visit to the zoo, Alvin finds himself mistaken for the real Tasmanian devil.',\n }\n },\n {\n 'md5': '1ec6690733ab9f41709e274a1d5c7556',\n 'info_dict': {\n 'id': 'be6e3354-412d-11e5-8ff7-0026b9414f30',\n 'ext': 'mp4',\n 'title': 'ALVINNN!!! and The Chipmunks: \"Mojo Missing/Who\\'s The Animal\" S4',\n 'description': 'Alvin is convinced his mojo was in a cap he gave to a fan, and must find a way to get his hat back before the Chipmunks\u2019 big concert.\\nDuring a costume visit to the zoo, Alvin finds himself mistaken for the real Tasmanian devil.',\n }\n },\n ],\n }, {\n 'url': 'http://www.nickjr.com/paw-patrol/videos/pups-save-a-goldrush-s3-ep302-full-episode/',\n 'only_matching': True,\n }, {\n 'url': 'http://beta.nick.com/nicky-ricky-dicky-and-dawn/videos/nicky-ricky-dicky-dawn-301-full-episode/',\n 'only_matching': True,\n }]\n\n def _get_feed_query(self, uri):\n return {\n 'feed': 'nick_arc_player_prime',\n 'mgid': uri,\n }\n\n def _extract_mgid(self, webpage):\n return self._search_regex(r'data-contenturi=\"([^\"]+)', webpage, 'mgid')\n\n\nclass NickDeIE(MTVServicesInfoExtractor):\n IE_NAME = 'nick.de'\n _VALID_URL = r'https?://(?:www\\.)?(?P<host>nick\\.(?:de|com\\.pl)|nickelodeon\\.(?:nl|at))/[^/]+/(?:[^/]+/)*(?P<id>[^/?#&]+)'\n _TESTS = [{\n 'url': 'http://www.nick.de/playlist/3773-top-videos/videos/episode/17306-zu-wasser-und-zu-land-rauchende-erdnusse',\n 'only_matching': True,\n }, {\n 'url': 'http://www.nick.de/shows/342-icarly',\n 'only_matching': True,\n }, {\n 'url': 'http://www.nickelodeon.nl/shows/474-spongebob/videos/17403-een-kijkje-in-de-keuken-met-sandy-van-binnenuit',\n 'only_matching': True,\n }, {\n 'url': 'http://www.nickelodeon.at/playlist/3773-top-videos/videos/episode/77993-das-letzte-gefecht',\n 'only_matching': True,\n }, {\n 'url': 'http://www.nick.com.pl/seriale/474-spongebob-kanciastoporty/wideo/17412-teatr-to-jest-to-rodeo-oszolom',\n 'only_matching': True,\n }]\n\n def _extract_mrss_url(self, webpage, host):\n return update_url_query(self._search_regex(\n r'data-mrss=([\"\\'])(?P<url>http.+?)\\1', webpage, 'mrss url', group='url'),\n {'siteKey': host})\n\n def _real_extract(self, url):\n mobj = re.match(self._VALID_URL, url)\n video_id = mobj.group('id')\n host = mobj.group('host')\n\n webpage = self._download_webpage(url, video_id)\n\n mrss_url = self._extract_mrss_url(webpage, host)\n\n return self._get_videos_info_from_url(mrss_url, video_id)\n\n\nclass NickNightIE(NickDeIE):\n IE_NAME = 'nicknight'\n _VALID_URL = r'https?://(?:www\\.)(?P<host>nicknight\\.(?:de|at|tv))/(?:playlist|shows)/(?:[^/]+/)*(?P<id>[^/?#&]+)'\n _TESTS = [{\n 'url': 'http://www.nicknight.at/shows/977-awkward/videos/85987-nimmer-beste-freunde',\n 'only_matching': True,\n }, {\n 'url': 'http://www.nicknight.at/shows/977-awkward',\n 'only_matching': True,\n }, {\n 'url': 'http://www.nicknight.at/shows/1900-faking-it',\n 'only_matching': True,\n }]\n\n def _extract_mrss_url(self, webpage, *args):\n return self._search_regex(\n r'mrss\\s*:\\s*([\"\\'])(?P<url>http.+?)\\1', webpage,\n 'mrss url', group='url')\n\n\nclass NickRuIE(MTVServicesInfoExtractor):\n IE_NAME = 'nickelodeonru'\n _VALID_URL = r'https?://(?:www\\.)nickelodeon\\.ru/(?:playlist|shows|videos)/(?:[^/]+/)*(?P<id>[^/?#&]+)'\n _TESTS = [{\n 'url': 'http://www.nickelodeon.ru/shows/henrydanger/videos/episodes/3-sezon-15-seriya-licenziya-na-polyot/pmomfb#playlist/7airc6',\n 'only_matching': True,\n }, {\n 'url': 'http://www.nickelodeon.ru/videos/smotri-na-nickelodeon-v-iyule/g9hvh7',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n webpage = self._download_webpage(url, video_id)\n mgid = self._extract_mgid(webpage)\n return self.url_result('http://media.mtvnservices.com/embed/%s' % mgid)\n", "path": "youtube_dl/extractor/nick.py"}], "after_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport re\n\nfrom .mtv import MTVServicesInfoExtractor\nfrom ..utils import update_url_query\n\n\nclass NickIE(MTVServicesInfoExtractor):\n # None of videos on the website are still alive?\n IE_NAME = 'nick.com'\n _VALID_URL = r'https?://(?:(?:www|beta)\\.)?nick(?:jr)?\\.com/(?:[^/]+/)?(?:videos/clip|[^/]+/videos)/(?P<id>[^/?#.]+)'\n _FEED_URL = 'http://udat.mtvnservices.com/service1/dispatch.htm'\n _GEO_COUNTRIES = ['US']\n _TESTS = [{\n 'url': 'http://www.nick.com/videos/clip/alvinnn-and-the-chipmunks-112-full-episode.html',\n 'playlist': [\n {\n 'md5': '6e5adc1e28253bbb1b28ab05403dd4d4',\n 'info_dict': {\n 'id': 'be6a17b0-412d-11e5-8ff7-0026b9414f30',\n 'ext': 'mp4',\n 'title': 'ALVINNN!!! and The Chipmunks: \"Mojo Missing/Who\\'s The Animal\" S1',\n 'description': 'Alvin is convinced his mojo was in a cap he gave to a fan, and must find a way to get his hat back before the Chipmunks\u2019 big concert.\\nDuring a costume visit to the zoo, Alvin finds himself mistaken for the real Tasmanian devil.',\n\n }\n },\n {\n 'md5': 'd7be441fc53a1d4882fa9508a1e5b3ce',\n 'info_dict': {\n 'id': 'be6b8f96-412d-11e5-8ff7-0026b9414f30',\n 'ext': 'mp4',\n 'title': 'ALVINNN!!! and The Chipmunks: \"Mojo Missing/Who\\'s The Animal\" S2',\n 'description': 'Alvin is convinced his mojo was in a cap he gave to a fan, and must find a way to get his hat back before the Chipmunks\u2019 big concert.\\nDuring a costume visit to the zoo, Alvin finds himself mistaken for the real Tasmanian devil.',\n\n }\n },\n {\n 'md5': 'efffe1728a234b2b0d2f2b343dd1946f',\n 'info_dict': {\n 'id': 'be6cf7e6-412d-11e5-8ff7-0026b9414f30',\n 'ext': 'mp4',\n 'title': 'ALVINNN!!! and The Chipmunks: \"Mojo Missing/Who\\'s The Animal\" S3',\n 'description': 'Alvin is convinced his mojo was in a cap he gave to a fan, and must find a way to get his hat back before the Chipmunks\u2019 big concert.\\nDuring a costume visit to the zoo, Alvin finds himself mistaken for the real Tasmanian devil.',\n }\n },\n {\n 'md5': '1ec6690733ab9f41709e274a1d5c7556',\n 'info_dict': {\n 'id': 'be6e3354-412d-11e5-8ff7-0026b9414f30',\n 'ext': 'mp4',\n 'title': 'ALVINNN!!! and The Chipmunks: \"Mojo Missing/Who\\'s The Animal\" S4',\n 'description': 'Alvin is convinced his mojo was in a cap he gave to a fan, and must find a way to get his hat back before the Chipmunks\u2019 big concert.\\nDuring a costume visit to the zoo, Alvin finds himself mistaken for the real Tasmanian devil.',\n }\n },\n ],\n }, {\n 'url': 'http://www.nickjr.com/paw-patrol/videos/pups-save-a-goldrush-s3-ep302-full-episode/',\n 'only_matching': True,\n }, {\n 'url': 'http://beta.nick.com/nicky-ricky-dicky-and-dawn/videos/nicky-ricky-dicky-dawn-301-full-episode/',\n 'only_matching': True,\n }]\n\n def _get_feed_query(self, uri):\n return {\n 'feed': 'nick_arc_player_prime',\n 'mgid': uri,\n }\n\n def _extract_mgid(self, webpage):\n return self._search_regex(r'data-contenturi=\"([^\"]+)', webpage, 'mgid')\n\n\nclass NickDeIE(MTVServicesInfoExtractor):\n IE_NAME = 'nick.de'\n _VALID_URL = r'https?://(?:www\\.)?(?P<host>nick\\.(?:de|com\\.pl)|nickelodeon\\.(?:nl|at|dk|no|se))/[^/]+/(?:[^/]+/)*(?P<id>[^/?#&]+)'\n _TESTS = [{\n 'url': 'http://www.nick.de/playlist/3773-top-videos/videos/episode/17306-zu-wasser-und-zu-land-rauchende-erdnusse',\n 'only_matching': True,\n }, {\n 'url': 'http://www.nick.de/shows/342-icarly',\n 'only_matching': True,\n }, {\n 'url': 'http://www.nickelodeon.nl/shows/474-spongebob/videos/17403-een-kijkje-in-de-keuken-met-sandy-van-binnenuit',\n 'only_matching': True,\n }, {\n 'url': 'http://www.nickelodeon.at/playlist/3773-top-videos/videos/episode/77993-das-letzte-gefecht',\n 'only_matching': True,\n }, {\n 'url': 'http://www.nick.com.pl/seriale/474-spongebob-kanciastoporty/wideo/17412-teatr-to-jest-to-rodeo-oszolom',\n 'only_matching': True,\n }, {\n 'url': 'http://www.nickelodeon.no/program/2626-bulderhuset/videoer/90947-femteklasse-veronica-vs-vanzilla',\n 'only_matching': True,\n }, {\n 'url': 'http://www.nickelodeon.dk/serier/2626-hojs-hus/videoer/761-tissepause',\n 'only_matching': True,\n }, {\n 'url': 'http://www.nickelodeon.se/serier/2626-lugn-i-stormen/videos/998-',\n 'only_matching': True,\n }]\n\n def _extract_mrss_url(self, webpage, host):\n return update_url_query(self._search_regex(\n r'data-mrss=([\"\\'])(?P<url>http.+?)\\1', webpage, 'mrss url', group='url'),\n {'siteKey': host})\n\n def _real_extract(self, url):\n mobj = re.match(self._VALID_URL, url)\n video_id = mobj.group('id')\n host = mobj.group('host')\n\n webpage = self._download_webpage(url, video_id)\n\n mrss_url = self._extract_mrss_url(webpage, host)\n\n return self._get_videos_info_from_url(mrss_url, video_id)\n\n\nclass NickNightIE(NickDeIE):\n IE_NAME = 'nicknight'\n _VALID_URL = r'https?://(?:www\\.)(?P<host>nicknight\\.(?:de|at|tv))/(?:playlist|shows)/(?:[^/]+/)*(?P<id>[^/?#&]+)'\n _TESTS = [{\n 'url': 'http://www.nicknight.at/shows/977-awkward/videos/85987-nimmer-beste-freunde',\n 'only_matching': True,\n }, {\n 'url': 'http://www.nicknight.at/shows/977-awkward',\n 'only_matching': True,\n }, {\n 'url': 'http://www.nicknight.at/shows/1900-faking-it',\n 'only_matching': True,\n }]\n\n def _extract_mrss_url(self, webpage, *args):\n return self._search_regex(\n r'mrss\\s*:\\s*([\"\\'])(?P<url>http.+?)\\1', webpage,\n 'mrss url', group='url')\n\n\nclass NickRuIE(MTVServicesInfoExtractor):\n IE_NAME = 'nickelodeonru'\n _VALID_URL = r'https?://(?:www\\.)nickelodeon\\.ru/(?:playlist|shows|videos)/(?:[^/]+/)*(?P<id>[^/?#&]+)'\n _TESTS = [{\n 'url': 'http://www.nickelodeon.ru/shows/henrydanger/videos/episodes/3-sezon-15-seriya-licenziya-na-polyot/pmomfb#playlist/7airc6',\n 'only_matching': True,\n }, {\n 'url': 'http://www.nickelodeon.ru/videos/smotri-na-nickelodeon-v-iyule/g9hvh7',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n webpage = self._download_webpage(url, video_id)\n mgid = self._extract_mgid(webpage)\n return self.url_result('http://media.mtvnservices.com/embed/%s' % mgid)\n", "path": "youtube_dl/extractor/nick.py"}]}
3,242
512
gh_patches_debug_16448
rasdani/github-patches
git_diff
pyodide__pyodide-127
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Numpy package does't include test files When examining the virtual filesystem in the built numpy package, the python tests files don't seem to be included (even when they would be in a regular CPython install). For instance, using the `selenium` fixture from tests, ```py selenium.load_package('numpy') selenium.run('from pathlib import Path') selenium.run('import os') selenium.run('import numpy as np') selenium.run('base_dir = Path(np.__file__).parent') selenium.run('print(base_dir)') selenium.run('print(list(sorted(os.listdir(base_dir))))') ``` produces, ```py /lib/python3.6/site-packages/numpy ['__config__.py', '__init__.py', '_distributor_init.py', '_globals.py', '_import_tools.py', 'add_newdocs.py', 'compat', 'conftest.py', 'core', 'ctypeslib.py', 'distutils', 'doc', 'dual.py', 'f2py', 'fft', 'lib', 'linalg', 'ma', 'matlib.py', 'matrixlib', 'polynomial', 'random', 'setup.py', 'testing', 'version.py'] ``` i.e. the `tests` folder is not included, even if it is included in `.zip`, ```sh $ unzip numpy-1.14.1.zip $ ls numpy-1.14.1/numpy __init__.py _globals.py compat ctypeslib.py dual.py lib matlib.py random tests _build_utils _import_tools.py conftest.py distutils f2py linalg matrixlib setup.py version.py _distributor_init.py add_newdocs.py core doc fft ma polynomial testing ``` and in a pip installed numpy from the same `.zip` ```sh $ ls ~/.miniconda3/envs/test23/lib/python3.6/site-packages/numpy/ LICENSE.txt __pycache__ _import_tools.py conftest.py distutils f2py linalg matrixlib setup.py version.py __config__.py _distributor_init.py add_newdocs.py core doc fft ma polynomial testing __init__.py _globals.py compat ctypeslib.py dual.py lib matlib.py random tests ``` This prevents running the corresponding tests with pytest https://github.com/iodide-project/pyodide/pull/95 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `tools/buildpkg.py` Content: ``` 1 #!/usr/bin/env python3 2 3 """ 4 Builds a Pyodide package. 5 """ 6 7 import argparse 8 import hashlib 9 import os 10 from pathlib import Path 11 import shutil 12 import subprocess 13 14 15 import common 16 17 18 ROOTDIR = Path(__file__).parent.resolve() 19 20 21 def check_checksum(path, pkg): 22 """ 23 Checks that a tarball matches the checksum in the package metadata. 24 """ 25 checksum_keys = {'md5', 'sha256'}.intersection(pkg['source']) 26 if not checksum_keys: 27 return 28 elif len(checksum_keys) != 1: 29 raise ValueError('Only one checksum should be included in a package ' 30 'setup; found {}.'.format(checksum_keys)) 31 checksum_algorithm = checksum_keys.pop() 32 checksum = pkg['source'][checksum_algorithm] 33 CHUNK_SIZE = 1 << 16 34 h = getattr(hashlib, checksum_algorithm)() 35 with open(path, 'rb') as fd: 36 while True: 37 chunk = fd.read(CHUNK_SIZE) 38 h.update(chunk) 39 if len(chunk) < CHUNK_SIZE: 40 break 41 if h.hexdigest() != checksum: 42 raise ValueError("Invalid {} checksum".format(checksum_algorithm)) 43 44 45 def download_and_extract(buildpath, packagedir, pkg, args): 46 tarballpath = buildpath / Path(pkg['source']['url']).name 47 if not tarballpath.is_file(): 48 subprocess.run([ 49 'wget', '-q', '-O', str(tarballpath), pkg['source']['url'] 50 ], check=True) 51 check_checksum(tarballpath, pkg) 52 srcpath = buildpath / packagedir 53 if not srcpath.is_dir(): 54 shutil.unpack_archive(str(tarballpath), str(buildpath)) 55 return srcpath 56 57 58 def patch(path, srcpath, pkg, args): 59 if (srcpath / '.patched').is_file(): 60 return 61 62 # Apply all of the patches 63 orig_dir = Path.cwd() 64 pkgdir = path.parent.resolve() 65 os.chdir(srcpath) 66 try: 67 for patch in pkg['source'].get('patches', []): 68 subprocess.run([ 69 'patch', '-p1', '--binary', '-i', pkgdir / patch 70 ], check=True) 71 finally: 72 os.chdir(orig_dir) 73 74 # Add any extra files 75 for src, dst in pkg['source'].get('extras', []): 76 shutil.copyfile(pkgdir / src, srcpath / dst) 77 78 with open(srcpath / '.patched', 'wb') as fd: 79 fd.write(b'\n') 80 81 82 def get_libdir(srcpath, args): 83 # Get the name of the build/lib.XXX directory that distutils wrote its 84 # output to 85 slug = subprocess.check_output([ 86 str(Path(args.host) / 'bin' / 'python3'), 87 '-c', 88 'import sysconfig, sys; ' 89 'print("{}-{}.{}".format(' 90 'sysconfig.get_platform(), ' 91 'sys.version_info[0], ' 92 'sys.version_info[1]))']).decode('ascii').strip() 93 purelib = srcpath / 'build' / 'lib' 94 if purelib.is_dir(): 95 libdir = purelib 96 else: 97 libdir = srcpath / 'build' / ('lib.' + slug) 98 return libdir 99 100 101 def compile(path, srcpath, pkg, args): 102 if (srcpath / '.built').is_file(): 103 return 104 105 orig_dir = Path.cwd() 106 os.chdir(srcpath) 107 try: 108 subprocess.run([ 109 str(Path(args.host) / 'bin' / 'python3'), 110 str(ROOTDIR / 'pywasmcross'), 111 '--cflags', 112 args.cflags + ' ' + 113 pkg.get('build', {}).get('cflags', ''), 114 '--ldflags', 115 args.ldflags + ' ' + 116 pkg.get('build', {}).get('ldflags', ''), 117 '--host', args.host, 118 '--target', args.target], check=True) 119 finally: 120 os.chdir(orig_dir) 121 122 post = pkg.get('build', {}).get('post') 123 if post is not None: 124 libdir = get_libdir(srcpath, args) 125 pkgdir = path.parent.resolve() 126 env = { 127 'BUILD': libdir, 128 'PKGDIR': pkgdir 129 } 130 subprocess.run([ 131 'bash', '-c', post], env=env, check=True) 132 133 with open(srcpath / '.built', 'wb') as fd: 134 fd.write(b'\n') 135 136 137 def package_files(buildpath, srcpath, pkg, args): 138 if (buildpath / '.pacakaged').is_file(): 139 return 140 141 name = pkg['package']['name'] 142 libdir = get_libdir(srcpath, args) 143 subprocess.run([ 144 'python', 145 Path(os.environ['EMSCRIPTEN']) / 'tools' / 'file_packager.py', 146 name + '.data', 147 '--preload', 148 '{}@/lib/python3.6/site-packages'.format(libdir), 149 '--js-output={}'.format(name + '.js'), 150 '--export-name=pyodide', 151 '--exclude', '*.wasm.pre', 152 '--exclude', '__pycache__', 153 '--use-preload-plugins'], 154 cwd=buildpath, check=True) 155 subprocess.run([ 156 'uglifyjs', 157 buildpath / (name + '.js'), 158 '-o', 159 buildpath / (name + '.js')], check=True) 160 161 with open(buildpath / '.packaged', 'wb') as fd: 162 fd.write(b'\n') 163 164 165 def build_package(path, args): 166 pkg = common.parse_package(path) 167 packagedir = pkg['package']['name'] + '-' + pkg['package']['version'] 168 dirpath = path.parent 169 orig_path = Path.cwd() 170 os.chdir(dirpath) 171 try: 172 buildpath = dirpath / 'build' 173 if not buildpath.is_dir(): 174 os.makedirs(buildpath) 175 srcpath = download_and_extract(buildpath, packagedir, pkg, args) 176 patch(path, srcpath, pkg, args) 177 compile(path, srcpath, pkg, args) 178 package_files(buildpath, srcpath, pkg, args) 179 finally: 180 os.chdir(orig_path) 181 182 183 def parse_args(): 184 parser = argparse.ArgumentParser('Build a pyodide package.') 185 parser.add_argument( 186 'package', type=str, nargs=1, 187 help="Path to meta.yaml package description") 188 parser.add_argument( 189 '--cflags', type=str, nargs='?', default=common.DEFAULTCFLAGS, 190 help='Extra compiling flags') 191 parser.add_argument( 192 '--ldflags', type=str, nargs='?', default=common.DEFAULTLDFLAGS, 193 help='Extra linking flags') 194 parser.add_argument( 195 '--host', type=str, nargs='?', default=common.HOSTPYTHON, 196 help='The path to the host Python installation') 197 parser.add_argument( 198 '--target', type=str, nargs='?', default=common.TARGETPYTHON, 199 help='The path to the target Python installation') 200 return parser.parse_args() 201 202 203 def main(args): 204 path = Path(args.package[0]).resolve() 205 build_package(path, args) 206 207 208 if __name__ == '__main__': 209 args = parse_args() 210 main(args) 211 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/tools/buildpkg.py b/tools/buildpkg.py --- a/tools/buildpkg.py +++ b/tools/buildpkg.py @@ -135,17 +135,17 @@ def package_files(buildpath, srcpath, pkg, args): - if (buildpath / '.pacakaged').is_file(): + if (buildpath / '.packaged').is_file(): return name = pkg['package']['name'] - libdir = get_libdir(srcpath, args) + install_prefix = (srcpath / 'install').resolve() subprocess.run([ 'python', Path(os.environ['EMSCRIPTEN']) / 'tools' / 'file_packager.py', name + '.data', '--preload', - '{}@/lib/python3.6/site-packages'.format(libdir), + '{}@/'.format(install_prefix), '--js-output={}'.format(name + '.js'), '--export-name=pyodide', '--exclude', '*.wasm.pre',
{"golden_diff": "diff --git a/tools/buildpkg.py b/tools/buildpkg.py\n--- a/tools/buildpkg.py\n+++ b/tools/buildpkg.py\n@@ -135,17 +135,17 @@\n \n \n def package_files(buildpath, srcpath, pkg, args):\n- if (buildpath / '.pacakaged').is_file():\n+ if (buildpath / '.packaged').is_file():\n return\n \n name = pkg['package']['name']\n- libdir = get_libdir(srcpath, args)\n+ install_prefix = (srcpath / 'install').resolve()\n subprocess.run([\n 'python',\n Path(os.environ['EMSCRIPTEN']) / 'tools' / 'file_packager.py',\n name + '.data',\n '--preload',\n- '{}@/lib/python3.6/site-packages'.format(libdir),\n+ '{}@/'.format(install_prefix),\n '--js-output={}'.format(name + '.js'),\n '--export-name=pyodide',\n '--exclude', '*.wasm.pre',\n", "issue": "Numpy package does't include test files\nWhen examining the virtual filesystem in the built numpy package, the python tests files don't seem to be included (even when they would be in a regular CPython install).\r\n\r\nFor instance, using the `selenium` fixture from tests,\r\n```py\r\n selenium.load_package('numpy')\r\n selenium.run('from pathlib import Path')\r\n selenium.run('import os')\r\n selenium.run('import numpy as np')\r\n selenium.run('base_dir = Path(np.__file__).parent')\r\n selenium.run('print(base_dir)')\r\n selenium.run('print(list(sorted(os.listdir(base_dir))))')\r\n```\r\nproduces,\r\n```py\r\n/lib/python3.6/site-packages/numpy\r\n['__config__.py', '__init__.py', '_distributor_init.py', '_globals.py', '_import_tools.py', 'add_newdocs.py', 'compat', 'conftest.py', 'core', 'ctypeslib.py', 'distutils', 'doc', 'dual.py', 'f2py', 'fft', 'lib', 'linalg', 'ma', 'matlib.py', 'matrixlib', 'polynomial', 'random', 'setup.py', 'testing', 'version.py']\r\n```\r\n\r\ni.e. the `tests` folder is not included, even if it is included in `.zip`,\r\n```sh\r\n$ unzip numpy-1.14.1.zip\r\n$ ls numpy-1.14.1/numpy\r\n__init__.py _globals.py compat ctypeslib.py dual.py lib matlib.py random tests\r\n_build_utils _import_tools.py conftest.py distutils f2py linalg matrixlib setup.py version.py\r\n_distributor_init.py add_newdocs.py core doc fft ma polynomial testing\r\n```\r\nand in a pip installed numpy from the same `.zip`\r\n```sh\r\n$ ls ~/.miniconda3/envs/test23/lib/python3.6/site-packages/numpy/ \r\nLICENSE.txt __pycache__ _import_tools.py conftest.py distutils f2py linalg matrixlib setup.py version.py\r\n__config__.py _distributor_init.py add_newdocs.py core doc fft ma polynomial testing\r\n__init__.py _globals.py compat ctypeslib.py dual.py lib matlib.py random tests\r\n```\r\nThis prevents running the corresponding tests with pytest https://github.com/iodide-project/pyodide/pull/95\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n\"\"\"\nBuilds a Pyodide package.\n\"\"\"\n\nimport argparse\nimport hashlib\nimport os\nfrom pathlib import Path\nimport shutil\nimport subprocess\n\n\nimport common\n\n\nROOTDIR = Path(__file__).parent.resolve()\n\n\ndef check_checksum(path, pkg):\n \"\"\"\n Checks that a tarball matches the checksum in the package metadata.\n \"\"\"\n checksum_keys = {'md5', 'sha256'}.intersection(pkg['source'])\n if not checksum_keys:\n return\n elif len(checksum_keys) != 1:\n raise ValueError('Only one checksum should be included in a package '\n 'setup; found {}.'.format(checksum_keys))\n checksum_algorithm = checksum_keys.pop()\n checksum = pkg['source'][checksum_algorithm]\n CHUNK_SIZE = 1 << 16\n h = getattr(hashlib, checksum_algorithm)()\n with open(path, 'rb') as fd:\n while True:\n chunk = fd.read(CHUNK_SIZE)\n h.update(chunk)\n if len(chunk) < CHUNK_SIZE:\n break\n if h.hexdigest() != checksum:\n raise ValueError(\"Invalid {} checksum\".format(checksum_algorithm))\n\n\ndef download_and_extract(buildpath, packagedir, pkg, args):\n tarballpath = buildpath / Path(pkg['source']['url']).name\n if not tarballpath.is_file():\n subprocess.run([\n 'wget', '-q', '-O', str(tarballpath), pkg['source']['url']\n ], check=True)\n check_checksum(tarballpath, pkg)\n srcpath = buildpath / packagedir\n if not srcpath.is_dir():\n shutil.unpack_archive(str(tarballpath), str(buildpath))\n return srcpath\n\n\ndef patch(path, srcpath, pkg, args):\n if (srcpath / '.patched').is_file():\n return\n\n # Apply all of the patches\n orig_dir = Path.cwd()\n pkgdir = path.parent.resolve()\n os.chdir(srcpath)\n try:\n for patch in pkg['source'].get('patches', []):\n subprocess.run([\n 'patch', '-p1', '--binary', '-i', pkgdir / patch\n ], check=True)\n finally:\n os.chdir(orig_dir)\n\n # Add any extra files\n for src, dst in pkg['source'].get('extras', []):\n shutil.copyfile(pkgdir / src, srcpath / dst)\n\n with open(srcpath / '.patched', 'wb') as fd:\n fd.write(b'\\n')\n\n\ndef get_libdir(srcpath, args):\n # Get the name of the build/lib.XXX directory that distutils wrote its\n # output to\n slug = subprocess.check_output([\n str(Path(args.host) / 'bin' / 'python3'),\n '-c',\n 'import sysconfig, sys; '\n 'print(\"{}-{}.{}\".format('\n 'sysconfig.get_platform(), '\n 'sys.version_info[0], '\n 'sys.version_info[1]))']).decode('ascii').strip()\n purelib = srcpath / 'build' / 'lib'\n if purelib.is_dir():\n libdir = purelib\n else:\n libdir = srcpath / 'build' / ('lib.' + slug)\n return libdir\n\n\ndef compile(path, srcpath, pkg, args):\n if (srcpath / '.built').is_file():\n return\n\n orig_dir = Path.cwd()\n os.chdir(srcpath)\n try:\n subprocess.run([\n str(Path(args.host) / 'bin' / 'python3'),\n str(ROOTDIR / 'pywasmcross'),\n '--cflags',\n args.cflags + ' ' +\n pkg.get('build', {}).get('cflags', ''),\n '--ldflags',\n args.ldflags + ' ' +\n pkg.get('build', {}).get('ldflags', ''),\n '--host', args.host,\n '--target', args.target], check=True)\n finally:\n os.chdir(orig_dir)\n\n post = pkg.get('build', {}).get('post')\n if post is not None:\n libdir = get_libdir(srcpath, args)\n pkgdir = path.parent.resolve()\n env = {\n 'BUILD': libdir,\n 'PKGDIR': pkgdir\n }\n subprocess.run([\n 'bash', '-c', post], env=env, check=True)\n\n with open(srcpath / '.built', 'wb') as fd:\n fd.write(b'\\n')\n\n\ndef package_files(buildpath, srcpath, pkg, args):\n if (buildpath / '.pacakaged').is_file():\n return\n\n name = pkg['package']['name']\n libdir = get_libdir(srcpath, args)\n subprocess.run([\n 'python',\n Path(os.environ['EMSCRIPTEN']) / 'tools' / 'file_packager.py',\n name + '.data',\n '--preload',\n '{}@/lib/python3.6/site-packages'.format(libdir),\n '--js-output={}'.format(name + '.js'),\n '--export-name=pyodide',\n '--exclude', '*.wasm.pre',\n '--exclude', '__pycache__',\n '--use-preload-plugins'],\n cwd=buildpath, check=True)\n subprocess.run([\n 'uglifyjs',\n buildpath / (name + '.js'),\n '-o',\n buildpath / (name + '.js')], check=True)\n\n with open(buildpath / '.packaged', 'wb') as fd:\n fd.write(b'\\n')\n\n\ndef build_package(path, args):\n pkg = common.parse_package(path)\n packagedir = pkg['package']['name'] + '-' + pkg['package']['version']\n dirpath = path.parent\n orig_path = Path.cwd()\n os.chdir(dirpath)\n try:\n buildpath = dirpath / 'build'\n if not buildpath.is_dir():\n os.makedirs(buildpath)\n srcpath = download_and_extract(buildpath, packagedir, pkg, args)\n patch(path, srcpath, pkg, args)\n compile(path, srcpath, pkg, args)\n package_files(buildpath, srcpath, pkg, args)\n finally:\n os.chdir(orig_path)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser('Build a pyodide package.')\n parser.add_argument(\n 'package', type=str, nargs=1,\n help=\"Path to meta.yaml package description\")\n parser.add_argument(\n '--cflags', type=str, nargs='?', default=common.DEFAULTCFLAGS,\n help='Extra compiling flags')\n parser.add_argument(\n '--ldflags', type=str, nargs='?', default=common.DEFAULTLDFLAGS,\n help='Extra linking flags')\n parser.add_argument(\n '--host', type=str, nargs='?', default=common.HOSTPYTHON,\n help='The path to the host Python installation')\n parser.add_argument(\n '--target', type=str, nargs='?', default=common.TARGETPYTHON,\n help='The path to the target Python installation')\n return parser.parse_args()\n\n\ndef main(args):\n path = Path(args.package[0]).resolve()\n build_package(path, args)\n\n\nif __name__ == '__main__':\n args = parse_args()\n main(args)\n", "path": "tools/buildpkg.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\n\"\"\"\nBuilds a Pyodide package.\n\"\"\"\n\nimport argparse\nimport hashlib\nimport os\nfrom pathlib import Path\nimport shutil\nimport subprocess\n\n\nimport common\n\n\nROOTDIR = Path(__file__).parent.resolve()\n\n\ndef check_checksum(path, pkg):\n \"\"\"\n Checks that a tarball matches the checksum in the package metadata.\n \"\"\"\n checksum_keys = {'md5', 'sha256'}.intersection(pkg['source'])\n if not checksum_keys:\n return\n elif len(checksum_keys) != 1:\n raise ValueError('Only one checksum should be included in a package '\n 'setup; found {}.'.format(checksum_keys))\n checksum_algorithm = checksum_keys.pop()\n checksum = pkg['source'][checksum_algorithm]\n CHUNK_SIZE = 1 << 16\n h = getattr(hashlib, checksum_algorithm)()\n with open(path, 'rb') as fd:\n while True:\n chunk = fd.read(CHUNK_SIZE)\n h.update(chunk)\n if len(chunk) < CHUNK_SIZE:\n break\n if h.hexdigest() != checksum:\n raise ValueError(\"Invalid {} checksum\".format(checksum_algorithm))\n\n\ndef download_and_extract(buildpath, packagedir, pkg, args):\n tarballpath = buildpath / Path(pkg['source']['url']).name\n if not tarballpath.is_file():\n subprocess.run([\n 'wget', '-q', '-O', str(tarballpath), pkg['source']['url']\n ], check=True)\n check_checksum(tarballpath, pkg)\n srcpath = buildpath / packagedir\n if not srcpath.is_dir():\n shutil.unpack_archive(str(tarballpath), str(buildpath))\n return srcpath\n\n\ndef patch(path, srcpath, pkg, args):\n if (srcpath / '.patched').is_file():\n return\n\n # Apply all of the patches\n orig_dir = Path.cwd()\n pkgdir = path.parent.resolve()\n os.chdir(srcpath)\n try:\n for patch in pkg['source'].get('patches', []):\n subprocess.run([\n 'patch', '-p1', '--binary', '-i', pkgdir / patch\n ], check=True)\n finally:\n os.chdir(orig_dir)\n\n # Add any extra files\n for src, dst in pkg['source'].get('extras', []):\n shutil.copyfile(pkgdir / src, srcpath / dst)\n\n with open(srcpath / '.patched', 'wb') as fd:\n fd.write(b'\\n')\n\n\ndef get_libdir(srcpath, args):\n # Get the name of the build/lib.XXX directory that distutils wrote its\n # output to\n slug = subprocess.check_output([\n str(Path(args.host) / 'bin' / 'python3'),\n '-c',\n 'import sysconfig, sys; '\n 'print(\"{}-{}.{}\".format('\n 'sysconfig.get_platform(), '\n 'sys.version_info[0], '\n 'sys.version_info[1]))']).decode('ascii').strip()\n purelib = srcpath / 'build' / 'lib'\n if purelib.is_dir():\n libdir = purelib\n else:\n libdir = srcpath / 'build' / ('lib.' + slug)\n return libdir\n\n\ndef compile(path, srcpath, pkg, args):\n if (srcpath / '.built').is_file():\n return\n\n orig_dir = Path.cwd()\n os.chdir(srcpath)\n try:\n subprocess.run([\n str(Path(args.host) / 'bin' / 'python3'),\n str(ROOTDIR / 'pywasmcross'),\n '--cflags',\n args.cflags + ' ' +\n pkg.get('build', {}).get('cflags', ''),\n '--ldflags',\n args.ldflags + ' ' +\n pkg.get('build', {}).get('ldflags', ''),\n '--host', args.host,\n '--target', args.target], check=True)\n finally:\n os.chdir(orig_dir)\n\n post = pkg.get('build', {}).get('post')\n if post is not None:\n libdir = get_libdir(srcpath, args)\n pkgdir = path.parent.resolve()\n env = {\n 'BUILD': libdir,\n 'PKGDIR': pkgdir\n }\n subprocess.run([\n 'bash', '-c', post], env=env, check=True)\n\n with open(srcpath / '.built', 'wb') as fd:\n fd.write(b'\\n')\n\n\ndef package_files(buildpath, srcpath, pkg, args):\n if (buildpath / '.packaged').is_file():\n return\n\n name = pkg['package']['name']\n install_prefix = (srcpath / 'install').resolve()\n subprocess.run([\n 'python',\n Path(os.environ['EMSCRIPTEN']) / 'tools' / 'file_packager.py',\n name + '.data',\n '--preload',\n '{}@/'.format(install_prefix),\n '--js-output={}'.format(name + '.js'),\n '--export-name=pyodide',\n '--exclude', '*.wasm.pre',\n '--exclude', '__pycache__',\n '--use-preload-plugins'],\n cwd=buildpath, check=True)\n subprocess.run([\n 'uglifyjs',\n buildpath / (name + '.js'),\n '-o',\n buildpath / (name + '.js')], check=True)\n\n with open(buildpath / '.packaged', 'wb') as fd:\n fd.write(b'\\n')\n\n\ndef build_package(path, args):\n pkg = common.parse_package(path)\n packagedir = pkg['package']['name'] + '-' + pkg['package']['version']\n dirpath = path.parent\n orig_path = Path.cwd()\n os.chdir(dirpath)\n try:\n buildpath = dirpath / 'build'\n if not buildpath.is_dir():\n os.makedirs(buildpath)\n srcpath = download_and_extract(buildpath, packagedir, pkg, args)\n patch(path, srcpath, pkg, args)\n compile(path, srcpath, pkg, args)\n package_files(buildpath, srcpath, pkg, args)\n finally:\n os.chdir(orig_path)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser('Build a pyodide package.')\n parser.add_argument(\n 'package', type=str, nargs=1,\n help=\"Path to meta.yaml package description\")\n parser.add_argument(\n '--cflags', type=str, nargs='?', default=common.DEFAULTCFLAGS,\n help='Extra compiling flags')\n parser.add_argument(\n '--ldflags', type=str, nargs='?', default=common.DEFAULTLDFLAGS,\n help='Extra linking flags')\n parser.add_argument(\n '--host', type=str, nargs='?', default=common.HOSTPYTHON,\n help='The path to the host Python installation')\n parser.add_argument(\n '--target', type=str, nargs='?', default=common.TARGETPYTHON,\n help='The path to the target Python installation')\n return parser.parse_args()\n\n\ndef main(args):\n path = Path(args.package[0]).resolve()\n build_package(path, args)\n\n\nif __name__ == '__main__':\n args = parse_args()\n main(args)\n", "path": "tools/buildpkg.py"}]}
2,889
221
gh_patches_debug_17121
rasdani/github-patches
git_diff
opendatacube__datacube-core-905
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Update release process documentation Many steps described in the document have since been automated, documentation should reflect that: - Upload to pypi is done by Travis - Updates for conda-forge are done by some bot that creates PR --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 3 from setuptools import setup, find_packages 4 5 tests_require = [ 6 'compliance-checker>=4.0.0', 7 'hypothesis', 8 'mock', 9 'pycodestyle', 10 'pylint', 11 'pytest', 12 'pytest-cov', 13 'pytest-timeout', 14 'pytest-httpserver', 15 'moto', 16 ] 17 18 extras_require = { 19 'performance': ['ciso8601', 'bottleneck'], 20 'interactive': ['matplotlib', 'fiona'], 21 'distributed': ['distributed', 'dask[distributed]'], 22 'doc': ['Sphinx', 'setuptools'], 23 'replicas': ['paramiko', 'sshtunnel', 'tqdm'], 24 'celery': ['celery>=4', 'redis'], 25 's3': ['boto3'], 26 'test': tests_require, 27 } 28 # An 'all' option, following ipython naming conventions. 29 extras_require['all'] = sorted(set(sum(extras_require.values(), []))) 30 31 extra_plugins = dict(read=[], write=[], index=[]) 32 33 setup( 34 name='datacube', 35 python_requires='>=3.5.2', 36 37 url='https://github.com/opendatacube/datacube-core', 38 author='Open Data Cube', 39 maintainer='Open Data Cube', 40 maintainer_email='', 41 description='An analysis environment for satellite and other earth observation data', 42 long_description=open('README.rst').read(), 43 long_description_content_type='text/x-rst', 44 license='Apache License 2.0', 45 classifiers=[ 46 "Development Status :: 4 - Beta", 47 "Intended Audience :: Developers", 48 "Intended Audience :: Science/Research", 49 "License :: OSI Approved :: Apache Software License", 50 "Natural Language :: English", 51 "Operating System :: MacOS :: MacOS X", 52 "Operating System :: POSIX", 53 "Operating System :: POSIX :: BSD", 54 "Operating System :: POSIX :: Linux", 55 "Operating System :: Microsoft :: Windows", 56 "Programming Language :: Python", 57 "Programming Language :: Python :: 3", 58 "Programming Language :: Python :: 3.5", 59 "Programming Language :: Python :: 3.6", 60 "Topic :: Scientific/Engineering :: GIS", 61 "Topic :: Scientific/Engineering :: Information Analysis", 62 ], 63 64 packages=find_packages( 65 exclude=('tests', 'tests.*', 66 'integration_tests', 'integration_tests.*') 67 ), 68 package_data={ 69 '': ['*.yaml', '*/*.yaml'], 70 }, 71 scripts=[ 72 'datacube_apps/scripts/pbs_helpers.sh' 73 ], 74 install_requires=[ 75 'affine', 76 'pyproj>=2.5', 77 'shapely>=1.6.4', 78 'cachetools', 79 'click>=5.0', 80 'cloudpickle>=0.4', 81 'dask[array]', 82 'distributed', 83 'jsonschema', 84 'netcdf4', 85 'numpy', 86 'psycopg2', 87 'lark-parser>=0.6.7', 88 'python-dateutil', 89 'pyyaml', 90 'rasterio>=1.0.2', # Multi-band re-project fixed in that version 91 'sqlalchemy', 92 'toolz', 93 'xarray>=0.9', # >0.9 fixes most problems with `crs` attributes being lost 94 ], 95 extras_require=extras_require, 96 tests_require=tests_require, 97 98 entry_points={ 99 'console_scripts': [ 100 'datacube = datacube.scripts.cli_app:cli', 101 'datacube-search = datacube.scripts.search_tool:cli', 102 'datacube-stacker = datacube_apps.stacker:main', 103 'datacube-worker = datacube.execution.worker:main', 104 'datacube-fixer = datacube_apps.stacker:fixer_main', 105 'datacube-ncml = datacube_apps.ncml:ncml_app', 106 'pixeldrill = datacube_apps.pixeldrill:main [interactive]', 107 'movie_generator = datacube_apps.movie_generator:main', 108 'datacube-simple-replica = datacube_apps.simple_replica:replicate [replicas]' 109 ], 110 'datacube.plugins.io.read': [ 111 'netcdf = datacube.drivers.netcdf.driver:reader_driver_init', 112 *extra_plugins['read'], 113 ], 114 'datacube.plugins.io.write': [ 115 'netcdf = datacube.drivers.netcdf.driver:writer_driver_init', 116 *extra_plugins['write'], 117 ], 118 'datacube.plugins.index': [ 119 'default = datacube.index.index:index_driver_init', 120 *extra_plugins['index'], 121 ], 122 }, 123 ) 124 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -32,7 +32,7 @@ setup( name='datacube', - python_requires='>=3.5.2', + python_requires='>=3.6.0', url='https://github.com/opendatacube/datacube-core', author='Open Data Cube', @@ -55,8 +55,8 @@ "Operating System :: Microsoft :: Windows", "Programming Language :: Python", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", "Topic :: Scientific/Engineering :: GIS", "Topic :: Scientific/Engineering :: Information Analysis", ],
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -32,7 +32,7 @@\n \n setup(\n name='datacube',\n- python_requires='>=3.5.2',\n+ python_requires='>=3.6.0',\n \n url='https://github.com/opendatacube/datacube-core',\n author='Open Data Cube',\n@@ -55,8 +55,8 @@\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n- \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n+ \"Programming Language :: Python :: 3.7\",\n \"Topic :: Scientific/Engineering :: GIS\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n ],\n", "issue": "Update release process documentation\nMany steps described in the document have since been automated, documentation should reflect that:\r\n\r\n- Upload to pypi is done by Travis\r\n- Updates for conda-forge are done by some bot that creates PR\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'compliance-checker>=4.0.0',\n 'hypothesis',\n 'mock',\n 'pycodestyle',\n 'pylint',\n 'pytest',\n 'pytest-cov',\n 'pytest-timeout',\n 'pytest-httpserver',\n 'moto',\n]\n\nextras_require = {\n 'performance': ['ciso8601', 'bottleneck'],\n 'interactive': ['matplotlib', 'fiona'],\n 'distributed': ['distributed', 'dask[distributed]'],\n 'doc': ['Sphinx', 'setuptools'],\n 'replicas': ['paramiko', 'sshtunnel', 'tqdm'],\n 'celery': ['celery>=4', 'redis'],\n 's3': ['boto3'],\n 'test': tests_require,\n}\n# An 'all' option, following ipython naming conventions.\nextras_require['all'] = sorted(set(sum(extras_require.values(), [])))\n\nextra_plugins = dict(read=[], write=[], index=[])\n\nsetup(\n name='datacube',\n python_requires='>=3.5.2',\n\n url='https://github.com/opendatacube/datacube-core',\n author='Open Data Cube',\n maintainer='Open Data Cube',\n maintainer_email='',\n description='An analysis environment for satellite and other earth observation data',\n long_description=open('README.rst').read(),\n long_description_content_type='text/x-rst',\n license='Apache License 2.0',\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Topic :: Scientific/Engineering :: GIS\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n ],\n\n packages=find_packages(\n exclude=('tests', 'tests.*',\n 'integration_tests', 'integration_tests.*')\n ),\n package_data={\n '': ['*.yaml', '*/*.yaml'],\n },\n scripts=[\n 'datacube_apps/scripts/pbs_helpers.sh'\n ],\n install_requires=[\n 'affine',\n 'pyproj>=2.5',\n 'shapely>=1.6.4',\n 'cachetools',\n 'click>=5.0',\n 'cloudpickle>=0.4',\n 'dask[array]',\n 'distributed',\n 'jsonschema',\n 'netcdf4',\n 'numpy',\n 'psycopg2',\n 'lark-parser>=0.6.7',\n 'python-dateutil',\n 'pyyaml',\n 'rasterio>=1.0.2', # Multi-band re-project fixed in that version\n 'sqlalchemy',\n 'toolz',\n 'xarray>=0.9', # >0.9 fixes most problems with `crs` attributes being lost\n ],\n extras_require=extras_require,\n tests_require=tests_require,\n\n entry_points={\n 'console_scripts': [\n 'datacube = datacube.scripts.cli_app:cli',\n 'datacube-search = datacube.scripts.search_tool:cli',\n 'datacube-stacker = datacube_apps.stacker:main',\n 'datacube-worker = datacube.execution.worker:main',\n 'datacube-fixer = datacube_apps.stacker:fixer_main',\n 'datacube-ncml = datacube_apps.ncml:ncml_app',\n 'pixeldrill = datacube_apps.pixeldrill:main [interactive]',\n 'movie_generator = datacube_apps.movie_generator:main',\n 'datacube-simple-replica = datacube_apps.simple_replica:replicate [replicas]'\n ],\n 'datacube.plugins.io.read': [\n 'netcdf = datacube.drivers.netcdf.driver:reader_driver_init',\n *extra_plugins['read'],\n ],\n 'datacube.plugins.io.write': [\n 'netcdf = datacube.drivers.netcdf.driver:writer_driver_init',\n *extra_plugins['write'],\n ],\n 'datacube.plugins.index': [\n 'default = datacube.index.index:index_driver_init',\n *extra_plugins['index'],\n ],\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'compliance-checker>=4.0.0',\n 'hypothesis',\n 'mock',\n 'pycodestyle',\n 'pylint',\n 'pytest',\n 'pytest-cov',\n 'pytest-timeout',\n 'pytest-httpserver',\n 'moto',\n]\n\nextras_require = {\n 'performance': ['ciso8601', 'bottleneck'],\n 'interactive': ['matplotlib', 'fiona'],\n 'distributed': ['distributed', 'dask[distributed]'],\n 'doc': ['Sphinx', 'setuptools'],\n 'replicas': ['paramiko', 'sshtunnel', 'tqdm'],\n 'celery': ['celery>=4', 'redis'],\n 's3': ['boto3'],\n 'test': tests_require,\n}\n# An 'all' option, following ipython naming conventions.\nextras_require['all'] = sorted(set(sum(extras_require.values(), [])))\n\nextra_plugins = dict(read=[], write=[], index=[])\n\nsetup(\n name='datacube',\n python_requires='>=3.6.0',\n\n url='https://github.com/opendatacube/datacube-core',\n author='Open Data Cube',\n maintainer='Open Data Cube',\n maintainer_email='',\n description='An analysis environment for satellite and other earth observation data',\n long_description=open('README.rst').read(),\n long_description_content_type='text/x-rst',\n license='Apache License 2.0',\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Scientific/Engineering :: GIS\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n ],\n\n packages=find_packages(\n exclude=('tests', 'tests.*',\n 'integration_tests', 'integration_tests.*')\n ),\n package_data={\n '': ['*.yaml', '*/*.yaml'],\n },\n scripts=[\n 'datacube_apps/scripts/pbs_helpers.sh'\n ],\n install_requires=[\n 'affine',\n 'pyproj>=2.5',\n 'shapely>=1.6.4',\n 'cachetools',\n 'click>=5.0',\n 'cloudpickle>=0.4',\n 'dask[array]',\n 'distributed',\n 'jsonschema',\n 'netcdf4',\n 'numpy',\n 'psycopg2',\n 'lark-parser>=0.6.7',\n 'python-dateutil',\n 'pyyaml',\n 'rasterio>=1.0.2', # Multi-band re-project fixed in that version\n 'sqlalchemy',\n 'toolz',\n 'xarray>=0.9', # >0.9 fixes most problems with `crs` attributes being lost\n ],\n extras_require=extras_require,\n tests_require=tests_require,\n\n entry_points={\n 'console_scripts': [\n 'datacube = datacube.scripts.cli_app:cli',\n 'datacube-search = datacube.scripts.search_tool:cli',\n 'datacube-stacker = datacube_apps.stacker:main',\n 'datacube-worker = datacube.execution.worker:main',\n 'datacube-fixer = datacube_apps.stacker:fixer_main',\n 'datacube-ncml = datacube_apps.ncml:ncml_app',\n 'pixeldrill = datacube_apps.pixeldrill:main [interactive]',\n 'movie_generator = datacube_apps.movie_generator:main',\n 'datacube-simple-replica = datacube_apps.simple_replica:replicate [replicas]'\n ],\n 'datacube.plugins.io.read': [\n 'netcdf = datacube.drivers.netcdf.driver:reader_driver_init',\n *extra_plugins['read'],\n ],\n 'datacube.plugins.io.write': [\n 'netcdf = datacube.drivers.netcdf.driver:writer_driver_init',\n *extra_plugins['write'],\n ],\n 'datacube.plugins.index': [\n 'default = datacube.index.index:index_driver_init',\n *extra_plugins['index'],\n ],\n },\n)\n", "path": "setup.py"}]}
1,565
187
gh_patches_debug_27280
rasdani/github-patches
git_diff
Pylons__pyramid-2620
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- pcreate -s shows wrong link to tutorials after a ``` pcreate -s alchemy scaffold-alchemy ``` I see a link to tutorials, but this link is a 404: ``` Tutorials: http://docs.pylonsproject.org/projects/pyramid_tutorials ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pyramid/scaffolds/__init__.py` Content: ``` 1 import binascii 2 import os 3 from textwrap import dedent 4 5 from pyramid.compat import native_ 6 7 from pyramid.scaffolds.template import Template # API 8 9 class PyramidTemplate(Template): 10 """ 11 A class that can be used as a base class for Pyramid scaffolding 12 templates. 13 """ 14 def pre(self, command, output_dir, vars): 15 """ Overrides :meth:`pyramid.scaffolds.template.Template.pre`, adding 16 several variables to the default variables list (including 17 ``random_string``, and ``package_logger``). It also prevents common 18 misnamings (such as naming a package "site" or naming a package 19 logger "root". 20 """ 21 vars['random_string'] = native_(binascii.hexlify(os.urandom(20))) 22 package_logger = vars['package'] 23 if package_logger == 'root': 24 # Rename the app logger in the rare case a project is named 'root' 25 package_logger = 'app' 26 vars['package_logger'] = package_logger 27 return Template.pre(self, command, output_dir, vars) 28 29 def post(self, command, output_dir, vars): # pragma: no cover 30 """ Overrides :meth:`pyramid.scaffolds.template.Template.post`, to 31 print "Welcome to Pyramid. Sorry for the convenience." after a 32 successful scaffolding rendering.""" 33 34 separator = "=" * 79 35 msg = dedent( 36 """ 37 %(separator)s 38 Tutorials: http://docs.pylonsproject.org/projects/pyramid_tutorials 39 Documentation: http://docs.pylonsproject.org/projects/pyramid 40 41 Twitter (tips & updates): http://twitter.com/pylons 42 Mailing List: http://groups.google.com/group/pylons-discuss 43 44 Welcome to Pyramid. Sorry for the convenience. 45 %(separator)s 46 """ % {'separator': separator}) 47 48 self.out(msg) 49 return Template.post(self, command, output_dir, vars) 50 51 def out(self, msg): # pragma: no cover (replaceable testing hook) 52 print(msg) 53 54 class StarterProjectTemplate(PyramidTemplate): 55 _template_dir = 'starter' 56 summary = 'Pyramid starter project' 57 58 class ZODBProjectTemplate(PyramidTemplate): 59 _template_dir = 'zodb' 60 summary = 'Pyramid ZODB project using traversal' 61 62 class AlchemyProjectTemplate(PyramidTemplate): 63 _template_dir = 'alchemy' 64 summary = 'Pyramid SQLAlchemy project using url dispatch' 65 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pyramid/scaffolds/__init__.py b/pyramid/scaffolds/__init__.py --- a/pyramid/scaffolds/__init__.py +++ b/pyramid/scaffolds/__init__.py @@ -35,11 +35,10 @@ msg = dedent( """ %(separator)s - Tutorials: http://docs.pylonsproject.org/projects/pyramid_tutorials - Documentation: http://docs.pylonsproject.org/projects/pyramid - - Twitter (tips & updates): http://twitter.com/pylons - Mailing List: http://groups.google.com/group/pylons-discuss + Tutorials: http://docs.pylonsproject.org/projects/pyramid_tutorials/en/latest/ + Documentation: http://docs.pylonsproject.org/projects/pyramid/en/latest/ + Twitter: https://twitter.com/trypyramid + Mailing List: https://groups.google.com/forum/#!forum/pylons-discuss Welcome to Pyramid. Sorry for the convenience. %(separator)s @@ -53,12 +52,13 @@ class StarterProjectTemplate(PyramidTemplate): _template_dir = 'starter' - summary = 'Pyramid starter project' + summary = 'Pyramid starter project using URL dispatch and Chameleon' class ZODBProjectTemplate(PyramidTemplate): _template_dir = 'zodb' - summary = 'Pyramid ZODB project using traversal' + summary = 'Pyramid project using ZODB, traversal, and Chameleon' class AlchemyProjectTemplate(PyramidTemplate): _template_dir = 'alchemy' - summary = 'Pyramid SQLAlchemy project using url dispatch' + summary = 'Pyramid project using SQLAlchemy, SQLite, URL dispatch, and' + ' Chameleon'
{"golden_diff": "diff --git a/pyramid/scaffolds/__init__.py b/pyramid/scaffolds/__init__.py\n--- a/pyramid/scaffolds/__init__.py\n+++ b/pyramid/scaffolds/__init__.py\n@@ -35,11 +35,10 @@\n msg = dedent(\n \"\"\"\n %(separator)s\n- Tutorials: http://docs.pylonsproject.org/projects/pyramid_tutorials\n- Documentation: http://docs.pylonsproject.org/projects/pyramid\n-\n- Twitter (tips & updates): http://twitter.com/pylons\n- Mailing List: http://groups.google.com/group/pylons-discuss\n+ Tutorials: http://docs.pylonsproject.org/projects/pyramid_tutorials/en/latest/\n+ Documentation: http://docs.pylonsproject.org/projects/pyramid/en/latest/\n+ Twitter: https://twitter.com/trypyramid\n+ Mailing List: https://groups.google.com/forum/#!forum/pylons-discuss\n \n Welcome to Pyramid. Sorry for the convenience.\n %(separator)s\n@@ -53,12 +52,13 @@\n \n class StarterProjectTemplate(PyramidTemplate):\n _template_dir = 'starter'\n- summary = 'Pyramid starter project'\n+ summary = 'Pyramid starter project using URL dispatch and Chameleon'\n \n class ZODBProjectTemplate(PyramidTemplate):\n _template_dir = 'zodb'\n- summary = 'Pyramid ZODB project using traversal'\n+ summary = 'Pyramid project using ZODB, traversal, and Chameleon'\n \n class AlchemyProjectTemplate(PyramidTemplate):\n _template_dir = 'alchemy'\n- summary = 'Pyramid SQLAlchemy project using url dispatch'\n+ summary = 'Pyramid project using SQLAlchemy, SQLite, URL dispatch, and'\n+ ' Chameleon'\n", "issue": "pcreate -s shows wrong link to tutorials\nafter a \n\n```\npcreate -s alchemy scaffold-alchemy\n```\n\nI see a link to tutorials, but this link is a 404: \n\n```\nTutorials: http://docs.pylonsproject.org/projects/pyramid_tutorials\n```\n\n", "before_files": [{"content": "import binascii\nimport os\nfrom textwrap import dedent\n\nfrom pyramid.compat import native_\n\nfrom pyramid.scaffolds.template import Template # API\n\nclass PyramidTemplate(Template):\n \"\"\"\n A class that can be used as a base class for Pyramid scaffolding\n templates.\n \"\"\"\n def pre(self, command, output_dir, vars):\n \"\"\" Overrides :meth:`pyramid.scaffolds.template.Template.pre`, adding\n several variables to the default variables list (including\n ``random_string``, and ``package_logger``). It also prevents common\n misnamings (such as naming a package \"site\" or naming a package\n logger \"root\".\n \"\"\"\n vars['random_string'] = native_(binascii.hexlify(os.urandom(20)))\n package_logger = vars['package']\n if package_logger == 'root':\n # Rename the app logger in the rare case a project is named 'root'\n package_logger = 'app'\n vars['package_logger'] = package_logger\n return Template.pre(self, command, output_dir, vars)\n\n def post(self, command, output_dir, vars): # pragma: no cover\n \"\"\" Overrides :meth:`pyramid.scaffolds.template.Template.post`, to\n print \"Welcome to Pyramid. Sorry for the convenience.\" after a\n successful scaffolding rendering.\"\"\"\n\n separator = \"=\" * 79\n msg = dedent(\n \"\"\"\n %(separator)s\n Tutorials: http://docs.pylonsproject.org/projects/pyramid_tutorials\n Documentation: http://docs.pylonsproject.org/projects/pyramid\n\n Twitter (tips & updates): http://twitter.com/pylons\n Mailing List: http://groups.google.com/group/pylons-discuss\n\n Welcome to Pyramid. Sorry for the convenience.\n %(separator)s\n \"\"\" % {'separator': separator})\n\n self.out(msg)\n return Template.post(self, command, output_dir, vars)\n\n def out(self, msg): # pragma: no cover (replaceable testing hook)\n print(msg)\n\nclass StarterProjectTemplate(PyramidTemplate):\n _template_dir = 'starter'\n summary = 'Pyramid starter project'\n\nclass ZODBProjectTemplate(PyramidTemplate):\n _template_dir = 'zodb'\n summary = 'Pyramid ZODB project using traversal'\n\nclass AlchemyProjectTemplate(PyramidTemplate):\n _template_dir = 'alchemy'\n summary = 'Pyramid SQLAlchemy project using url dispatch'\n", "path": "pyramid/scaffolds/__init__.py"}], "after_files": [{"content": "import binascii\nimport os\nfrom textwrap import dedent\n\nfrom pyramid.compat import native_\n\nfrom pyramid.scaffolds.template import Template # API\n\nclass PyramidTemplate(Template):\n \"\"\"\n A class that can be used as a base class for Pyramid scaffolding\n templates.\n \"\"\"\n def pre(self, command, output_dir, vars):\n \"\"\" Overrides :meth:`pyramid.scaffolds.template.Template.pre`, adding\n several variables to the default variables list (including\n ``random_string``, and ``package_logger``). It also prevents common\n misnamings (such as naming a package \"site\" or naming a package\n logger \"root\".\n \"\"\"\n vars['random_string'] = native_(binascii.hexlify(os.urandom(20)))\n package_logger = vars['package']\n if package_logger == 'root':\n # Rename the app logger in the rare case a project is named 'root'\n package_logger = 'app'\n vars['package_logger'] = package_logger\n return Template.pre(self, command, output_dir, vars)\n\n def post(self, command, output_dir, vars): # pragma: no cover\n \"\"\" Overrides :meth:`pyramid.scaffolds.template.Template.post`, to\n print \"Welcome to Pyramid. Sorry for the convenience.\" after a\n successful scaffolding rendering.\"\"\"\n\n separator = \"=\" * 79\n msg = dedent(\n \"\"\"\n %(separator)s\n Tutorials: http://docs.pylonsproject.org/projects/pyramid_tutorials/en/latest/\n Documentation: http://docs.pylonsproject.org/projects/pyramid/en/latest/\n Twitter: https://twitter.com/trypyramid\n Mailing List: https://groups.google.com/forum/#!forum/pylons-discuss\n\n Welcome to Pyramid. Sorry for the convenience.\n %(separator)s\n \"\"\" % {'separator': separator})\n\n self.out(msg)\n return Template.post(self, command, output_dir, vars)\n\n def out(self, msg): # pragma: no cover (replaceable testing hook)\n print(msg)\n\nclass StarterProjectTemplate(PyramidTemplate):\n _template_dir = 'starter'\n summary = 'Pyramid starter project using URL dispatch and Chameleon'\n\nclass ZODBProjectTemplate(PyramidTemplate):\n _template_dir = 'zodb'\n summary = 'Pyramid project using ZODB, traversal, and Chameleon'\n\nclass AlchemyProjectTemplate(PyramidTemplate):\n _template_dir = 'alchemy'\n summary = 'Pyramid project using SQLAlchemy, SQLite, URL dispatch, and'\n ' Chameleon'\n", "path": "pyramid/scaffolds/__init__.py"}]}
979
398
gh_patches_debug_61017
rasdani/github-patches
git_diff
lnbits__lnbits-2283
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [Feature request] Add server url to "API keys and API docs" section **Is your feature request related to a problem? Please describe.** When linking lnbits with external services, (e.g. [zaprite](https://zaprite.com/)) one needs to specify two things: node url and invoice key. ![image](https://github.com/lnbits/lnbits/assets/19181985/64920942-d120-4d50-951f-99aa8e6b1cca) Invoice key is clearly visible in the "API keys and API docs" section, but it's sometimes unclear what my "LNbits Node URL" is. ![image](https://github.com/lnbits/lnbits/assets/19181985/9ae7086b-f48b-4b56-b2aa-6f4a3f42fd96) **Describe the solution you'd like** Display "LNbits Node URL" in "Node URL, API keys and docs" --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `tools/i18n-ai-tool.py` Content: ``` 1 # 1. Always check the results of the procedure 2 # 2. Always run "npx prettier -w lnbits/static/i18n/XX.js" to reformat the result 3 4 import os 5 import re 6 import sys 7 8 import json5 9 from openai import OpenAI 10 11 if len(sys.argv) < 2: 12 print("Usage: python3 tools/i18n-tool.py <code> [language]") 13 sys.exit(1) 14 lang = sys.argv[1] 15 16 17 def load_language(lang): 18 s = open(f"lnbits/static/i18n/{lang}.js", "rt").read() 19 prefix = "window.localisation.%s = {\n" % lang 20 assert s.startswith(prefix) 21 s = s[len(prefix) - 2 :] 22 return json5.loads(s) 23 24 25 def save_language(lang, data): 26 with open(f"lnbits/static/i18n/{lang}.js", "wt") as f: 27 f.write("window.localisation.%s = {\n" % lang) 28 row = 0 29 for k, v in data.items(): 30 row += 1 31 f.write(" %s:\n" % k) 32 if "'" in v: 33 f.write(' "%s"' % v) 34 else: 35 f.write(" '%s'" % v) 36 if row == len(data): 37 f.write("\n") 38 else: 39 f.write(",\n") 40 f.write("}\n") 41 42 43 def string_variables_match(str1, str2): 44 pat = re.compile(r"%\{[a-z0-9_]*\}") 45 m1 = re.findall(pat, str1) 46 m2 = re.findall(pat, str2) 47 return sorted(m1) == sorted(m2) 48 49 50 def translate_string(lang_from, lang_to, text): 51 target = { 52 "de": "German", 53 "es": "Spanish", 54 "jp": "Japan", 55 "cn": "Chinese", 56 "fr": "French", 57 "it": "Italian", 58 "pi": "Pirate", 59 "nl": "Dutch", 60 "we": "Welsh", 61 "pl": "Polish", 62 "pt": "Portuguese", 63 "br": "Brazilian Portugese", 64 "cs": "Czech", 65 "sk": "Slovak", 66 "kr": "Korean", 67 }[lang_to] 68 assert os.getenv("OPENAI_API_KEY"), "OPENAI_API_KEY env var not set" 69 client = OpenAI() 70 try: 71 chat_completion = client.chat.completions.create( 72 messages=[ 73 { 74 "role": "system", 75 "content": "You are a language expert that speaks all languages in the world. You are about to translate text from English to another language. The text is a part of the software you are translating. If the given text contains a phrase enclosed by curly preceded with a percent sign, do not translate the given phrase, just keep it verbatim. So for example, the phrase %{amount} translated to target language should still be kept as %{amount}. Never output anything else, just the translated string.", # noqa: E501 76 }, 77 { 78 "role": "user", 79 "content": f"Translate the following string from English to {target}: {text}", # noqa: E501 80 }, 81 ], 82 model="gpt-4-1106-preview", # aka GPT-4 Turbo 83 ) 84 translated = chat_completion.choices[0].message.content.strip() 85 # return translated string only if variables were not broken 86 if string_variables_match(text, translated): 87 return translated 88 else: 89 return None 90 except Exception: 91 return None 92 93 94 data_en = load_language("en") 95 data = load_language(lang) 96 97 missing = set(data_en.keys()) - set(data.keys()) 98 print(f"Missing {len(missing)} keys in language '{lang}'") 99 100 if len(missing) > 0: 101 new = {} 102 for k in data_en: 103 if k in data: 104 new[k] = data[k] 105 else: 106 print(f"Translating key '{k}'") 107 print(f"{data_en[k]}") 108 translated = translate_string("en", lang, data_en[k]) 109 print("->") 110 if translated: 111 print(f"{translated}") 112 new[k] = translated 113 else: 114 print("ERROR") 115 print() 116 save_language(lang, new) 117 else: 118 # check whether variables match for each string 119 for k in data_en: 120 if not string_variables_match(data_en[k], data[k]): 121 print(f"Variables mismatch ({k}):") 122 print(data_en[k]) 123 print(data[k]) 124 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/tools/i18n-ai-tool.py b/tools/i18n-ai-tool.py --- a/tools/i18n-ai-tool.py +++ b/tools/i18n-ai-tool.py @@ -64,6 +64,7 @@ "cs": "Czech", "sk": "Slovak", "kr": "Korean", + "fi": "Finnish", }[lang_to] assert os.getenv("OPENAI_API_KEY"), "OPENAI_API_KEY env var not set" client = OpenAI()
{"golden_diff": "diff --git a/tools/i18n-ai-tool.py b/tools/i18n-ai-tool.py\n--- a/tools/i18n-ai-tool.py\n+++ b/tools/i18n-ai-tool.py\n@@ -64,6 +64,7 @@\n \"cs\": \"Czech\",\n \"sk\": \"Slovak\",\n \"kr\": \"Korean\",\n+ \"fi\": \"Finnish\",\n }[lang_to]\n assert os.getenv(\"OPENAI_API_KEY\"), \"OPENAI_API_KEY env var not set\"\n client = OpenAI()\n", "issue": "[Feature request] Add server url to \"API keys and API docs\" section\n**Is your feature request related to a problem? Please describe.**\r\nWhen linking lnbits with external services, (e.g. [zaprite](https://zaprite.com/)) one needs to specify two things: node url and invoice key. \r\n\r\n![image](https://github.com/lnbits/lnbits/assets/19181985/64920942-d120-4d50-951f-99aa8e6b1cca)\r\n\r\nInvoice key is clearly visible in the \"API keys and API docs\" section, but it's sometimes unclear what my \"LNbits Node URL\" is. \r\n\r\n![image](https://github.com/lnbits/lnbits/assets/19181985/9ae7086b-f48b-4b56-b2aa-6f4a3f42fd96)\r\n\r\n**Describe the solution you'd like**\r\nDisplay \"LNbits Node URL\" in \"Node URL, API keys and docs\"\n", "before_files": [{"content": "# 1. Always check the results of the procedure\n# 2. Always run \"npx prettier -w lnbits/static/i18n/XX.js\" to reformat the result\n\nimport os\nimport re\nimport sys\n\nimport json5\nfrom openai import OpenAI\n\nif len(sys.argv) < 2:\n print(\"Usage: python3 tools/i18n-tool.py <code> [language]\")\n sys.exit(1)\nlang = sys.argv[1]\n\n\ndef load_language(lang):\n s = open(f\"lnbits/static/i18n/{lang}.js\", \"rt\").read()\n prefix = \"window.localisation.%s = {\\n\" % lang\n assert s.startswith(prefix)\n s = s[len(prefix) - 2 :]\n return json5.loads(s)\n\n\ndef save_language(lang, data):\n with open(f\"lnbits/static/i18n/{lang}.js\", \"wt\") as f:\n f.write(\"window.localisation.%s = {\\n\" % lang)\n row = 0\n for k, v in data.items():\n row += 1\n f.write(\" %s:\\n\" % k)\n if \"'\" in v:\n f.write(' \"%s\"' % v)\n else:\n f.write(\" '%s'\" % v)\n if row == len(data):\n f.write(\"\\n\")\n else:\n f.write(\",\\n\")\n f.write(\"}\\n\")\n\n\ndef string_variables_match(str1, str2):\n pat = re.compile(r\"%\\{[a-z0-9_]*\\}\")\n m1 = re.findall(pat, str1)\n m2 = re.findall(pat, str2)\n return sorted(m1) == sorted(m2)\n\n\ndef translate_string(lang_from, lang_to, text):\n target = {\n \"de\": \"German\",\n \"es\": \"Spanish\",\n \"jp\": \"Japan\",\n \"cn\": \"Chinese\",\n \"fr\": \"French\",\n \"it\": \"Italian\",\n \"pi\": \"Pirate\",\n \"nl\": \"Dutch\",\n \"we\": \"Welsh\",\n \"pl\": \"Polish\",\n \"pt\": \"Portuguese\",\n \"br\": \"Brazilian Portugese\",\n \"cs\": \"Czech\",\n \"sk\": \"Slovak\",\n \"kr\": \"Korean\",\n }[lang_to]\n assert os.getenv(\"OPENAI_API_KEY\"), \"OPENAI_API_KEY env var not set\"\n client = OpenAI()\n try:\n chat_completion = client.chat.completions.create(\n messages=[\n {\n \"role\": \"system\",\n \"content\": \"You are a language expert that speaks all languages in the world. You are about to translate text from English to another language. The text is a part of the software you are translating. If the given text contains a phrase enclosed by curly preceded with a percent sign, do not translate the given phrase, just keep it verbatim. So for example, the phrase %{amount} translated to target language should still be kept as %{amount}. Never output anything else, just the translated string.\", # noqa: E501\n },\n {\n \"role\": \"user\",\n \"content\": f\"Translate the following string from English to {target}: {text}\", # noqa: E501\n },\n ],\n model=\"gpt-4-1106-preview\", # aka GPT-4 Turbo\n )\n translated = chat_completion.choices[0].message.content.strip()\n # return translated string only if variables were not broken\n if string_variables_match(text, translated):\n return translated\n else:\n return None\n except Exception:\n return None\n\n\ndata_en = load_language(\"en\")\ndata = load_language(lang)\n\nmissing = set(data_en.keys()) - set(data.keys())\nprint(f\"Missing {len(missing)} keys in language '{lang}'\")\n\nif len(missing) > 0:\n new = {}\n for k in data_en:\n if k in data:\n new[k] = data[k]\n else:\n print(f\"Translating key '{k}'\")\n print(f\"{data_en[k]}\")\n translated = translate_string(\"en\", lang, data_en[k])\n print(\"->\")\n if translated:\n print(f\"{translated}\")\n new[k] = translated\n else:\n print(\"ERROR\")\n print()\n save_language(lang, new)\nelse:\n # check whether variables match for each string\n for k in data_en:\n if not string_variables_match(data_en[k], data[k]):\n print(f\"Variables mismatch ({k}):\")\n print(data_en[k])\n print(data[k])\n", "path": "tools/i18n-ai-tool.py"}], "after_files": [{"content": "# 1. Always check the results of the procedure\n# 2. Always run \"npx prettier -w lnbits/static/i18n/XX.js\" to reformat the result\n\nimport os\nimport re\nimport sys\n\nimport json5\nfrom openai import OpenAI\n\nif len(sys.argv) < 2:\n print(\"Usage: python3 tools/i18n-tool.py <code> [language]\")\n sys.exit(1)\nlang = sys.argv[1]\n\n\ndef load_language(lang):\n s = open(f\"lnbits/static/i18n/{lang}.js\", \"rt\").read()\n prefix = \"window.localisation.%s = {\\n\" % lang\n assert s.startswith(prefix)\n s = s[len(prefix) - 2 :]\n return json5.loads(s)\n\n\ndef save_language(lang, data):\n with open(f\"lnbits/static/i18n/{lang}.js\", \"wt\") as f:\n f.write(\"window.localisation.%s = {\\n\" % lang)\n row = 0\n for k, v in data.items():\n row += 1\n f.write(\" %s:\\n\" % k)\n if \"'\" in v:\n f.write(' \"%s\"' % v)\n else:\n f.write(\" '%s'\" % v)\n if row == len(data):\n f.write(\"\\n\")\n else:\n f.write(\",\\n\")\n f.write(\"}\\n\")\n\n\ndef string_variables_match(str1, str2):\n pat = re.compile(r\"%\\{[a-z0-9_]*\\}\")\n m1 = re.findall(pat, str1)\n m2 = re.findall(pat, str2)\n return sorted(m1) == sorted(m2)\n\n\ndef translate_string(lang_from, lang_to, text):\n target = {\n \"de\": \"German\",\n \"es\": \"Spanish\",\n \"jp\": \"Japan\",\n \"cn\": \"Chinese\",\n \"fr\": \"French\",\n \"it\": \"Italian\",\n \"pi\": \"Pirate\",\n \"nl\": \"Dutch\",\n \"we\": \"Welsh\",\n \"pl\": \"Polish\",\n \"pt\": \"Portuguese\",\n \"br\": \"Brazilian Portugese\",\n \"cs\": \"Czech\",\n \"sk\": \"Slovak\",\n \"kr\": \"Korean\",\n \"fi\": \"Finnish\",\n }[lang_to]\n assert os.getenv(\"OPENAI_API_KEY\"), \"OPENAI_API_KEY env var not set\"\n client = OpenAI()\n try:\n chat_completion = client.chat.completions.create(\n messages=[\n {\n \"role\": \"system\",\n \"content\": \"You are a language expert that speaks all languages in the world. You are about to translate text from English to another language. The text is a part of the software you are translating. If the given text contains a phrase enclosed by curly preceded with a percent sign, do not translate the given phrase, just keep it verbatim. So for example, the phrase %{amount} translated to target language should still be kept as %{amount}. Never output anything else, just the translated string.\", # noqa: E501\n },\n {\n \"role\": \"user\",\n \"content\": f\"Translate the following string from English to {target}: {text}\", # noqa: E501\n },\n ],\n model=\"gpt-4-1106-preview\", # aka GPT-4 Turbo\n )\n translated = chat_completion.choices[0].message.content.strip()\n # return translated string only if variables were not broken\n if string_variables_match(text, translated):\n return translated\n else:\n return None\n except Exception:\n return None\n\n\ndata_en = load_language(\"en\")\ndata = load_language(lang)\n\nmissing = set(data_en.keys()) - set(data.keys())\nprint(f\"Missing {len(missing)} keys in language '{lang}'\")\n\nif len(missing) > 0:\n new = {}\n for k in data_en:\n if k in data:\n new[k] = data[k]\n else:\n print(f\"Translating key '{k}'\")\n print(f\"{data_en[k]}\")\n translated = translate_string(\"en\", lang, data_en[k])\n print(\"->\")\n if translated:\n print(f\"{translated}\")\n new[k] = translated\n else:\n print(\"ERROR\")\n print()\n save_language(lang, new)\nelse:\n # check whether variables match for each string\n for k in data_en:\n if not string_variables_match(data_en[k], data[k]):\n print(f\"Variables mismatch ({k}):\")\n print(data_en[k])\n print(data[k])\n", "path": "tools/i18n-ai-tool.py"}]}
1,784
126
gh_patches_debug_16816
rasdani/github-patches
git_diff
open-mmlab__mmcv-889
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- 'ConvModule' object has no attribute 'norm' when using torch.jit.trace environment: python3.6.6 pytorch1.7.0 code: ``` import torch from mmcv.cnn.bricks import ConvModule conv = ConvModule(3, 8, 2, norm_cfg=dict(type='BN')) input_example = torch.randn((1, 3, 224, 224)) ts = torch.jit.trace(func=conv.eval(), example_inputs=input_example) torch.jit.save(ts, 'conv_module.ts') ``` It work well, but when set `norm_cfg=None`, failed. ``` torch.nn.modules.module.ModuleAttributeError: 'ConvModule' object has no attribute 'norm' ``` Any help? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mmcv/cnn/bricks/conv_module.py` Content: ``` 1 import warnings 2 3 import torch.nn as nn 4 5 from ..utils import constant_init, kaiming_init 6 from .activation import build_activation_layer 7 from .conv import build_conv_layer 8 from .norm import build_norm_layer 9 from .padding import build_padding_layer 10 from .registry import PLUGIN_LAYERS 11 12 13 @PLUGIN_LAYERS.register_module() 14 class ConvModule(nn.Module): 15 """A conv block that bundles conv/norm/activation layers. 16 17 This block simplifies the usage of convolution layers, which are commonly 18 used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU). 19 It is based upon three build methods: `build_conv_layer()`, 20 `build_norm_layer()` and `build_activation_layer()`. 21 22 Besides, we add some additional features in this module. 23 1. Automatically set `bias` of the conv layer. 24 2. Spectral norm is supported. 25 3. More padding modes are supported. Before PyTorch 1.5, nn.Conv2d only 26 supports zero and circular padding, and we add "reflect" padding mode. 27 28 Args: 29 in_channels (int): Number of channels in the input feature map. 30 Same as that in ``nn._ConvNd``. 31 out_channels (int): Number of channels produced by the convolution. 32 Same as that in ``nn._ConvNd``. 33 kernel_size (int | tuple[int]): Size of the convolving kernel. 34 Same as that in ``nn._ConvNd``. 35 stride (int | tuple[int]): Stride of the convolution. 36 Same as that in ``nn._ConvNd``. 37 padding (int | tuple[int]): Zero-padding added to both sides of 38 the input. Same as that in ``nn._ConvNd``. 39 dilation (int | tuple[int]): Spacing between kernel elements. 40 Same as that in ``nn._ConvNd``. 41 groups (int): Number of blocked connections from input channels to 42 output channels. Same as that in ``nn._ConvNd``. 43 bias (bool | str): If specified as `auto`, it will be decided by the 44 norm_cfg. Bias will be set as True if `norm_cfg` is None, otherwise 45 False. Default: "auto". 46 conv_cfg (dict): Config dict for convolution layer. Default: None, 47 which means using conv2d. 48 norm_cfg (dict): Config dict for normalization layer. Default: None. 49 act_cfg (dict): Config dict for activation layer. 50 Default: dict(type='ReLU'). 51 inplace (bool): Whether to use inplace mode for activation. 52 Default: True. 53 with_spectral_norm (bool): Whether use spectral norm in conv module. 54 Default: False. 55 padding_mode (str): If the `padding_mode` has not been supported by 56 current `Conv2d` in PyTorch, we will use our own padding layer 57 instead. Currently, we support ['zeros', 'circular'] with official 58 implementation and ['reflect'] with our own implementation. 59 Default: 'zeros'. 60 order (tuple[str]): The order of conv/norm/activation layers. It is a 61 sequence of "conv", "norm" and "act". Common examples are 62 ("conv", "norm", "act") and ("act", "conv", "norm"). 63 Default: ('conv', 'norm', 'act'). 64 """ 65 66 _abbr_ = 'conv_block' 67 68 def __init__(self, 69 in_channels, 70 out_channels, 71 kernel_size, 72 stride=1, 73 padding=0, 74 dilation=1, 75 groups=1, 76 bias='auto', 77 conv_cfg=None, 78 norm_cfg=None, 79 act_cfg=dict(type='ReLU'), 80 inplace=True, 81 with_spectral_norm=False, 82 padding_mode='zeros', 83 order=('conv', 'norm', 'act')): 84 super(ConvModule, self).__init__() 85 assert conv_cfg is None or isinstance(conv_cfg, dict) 86 assert norm_cfg is None or isinstance(norm_cfg, dict) 87 assert act_cfg is None or isinstance(act_cfg, dict) 88 official_padding_mode = ['zeros', 'circular'] 89 self.conv_cfg = conv_cfg 90 self.norm_cfg = norm_cfg 91 self.act_cfg = act_cfg 92 self.inplace = inplace 93 self.with_spectral_norm = with_spectral_norm 94 self.with_explicit_padding = padding_mode not in official_padding_mode 95 self.order = order 96 assert isinstance(self.order, tuple) and len(self.order) == 3 97 assert set(order) == set(['conv', 'norm', 'act']) 98 99 self.with_norm = norm_cfg is not None 100 self.with_activation = act_cfg is not None 101 # if the conv layer is before a norm layer, bias is unnecessary. 102 if bias == 'auto': 103 bias = not self.with_norm 104 self.with_bias = bias 105 106 if self.with_norm and self.with_bias: 107 warnings.warn('ConvModule has norm and bias at the same time') 108 109 if self.with_explicit_padding: 110 pad_cfg = dict(type=padding_mode) 111 self.padding_layer = build_padding_layer(pad_cfg, padding) 112 113 # reset padding to 0 for conv module 114 conv_padding = 0 if self.with_explicit_padding else padding 115 # build convolution layer 116 self.conv = build_conv_layer( 117 conv_cfg, 118 in_channels, 119 out_channels, 120 kernel_size, 121 stride=stride, 122 padding=conv_padding, 123 dilation=dilation, 124 groups=groups, 125 bias=bias) 126 # export the attributes of self.conv to a higher level for convenience 127 self.in_channels = self.conv.in_channels 128 self.out_channels = self.conv.out_channels 129 self.kernel_size = self.conv.kernel_size 130 self.stride = self.conv.stride 131 self.padding = padding 132 self.dilation = self.conv.dilation 133 self.transposed = self.conv.transposed 134 self.output_padding = self.conv.output_padding 135 self.groups = self.conv.groups 136 137 if self.with_spectral_norm: 138 self.conv = nn.utils.spectral_norm(self.conv) 139 140 # build normalization layers 141 if self.with_norm: 142 # norm layer is after conv layer 143 if order.index('norm') > order.index('conv'): 144 norm_channels = out_channels 145 else: 146 norm_channels = in_channels 147 self.norm_name, norm = build_norm_layer(norm_cfg, norm_channels) 148 self.add_module(self.norm_name, norm) 149 150 # build activation layer 151 if self.with_activation: 152 act_cfg_ = act_cfg.copy() 153 # nn.Tanh has no 'inplace' argument 154 if act_cfg_['type'] not in [ 155 'Tanh', 'PReLU', 'Sigmoid', 'HSigmoid', 'Swish' 156 ]: 157 act_cfg_.setdefault('inplace', inplace) 158 self.activate = build_activation_layer(act_cfg_) 159 160 # Use msra init by default 161 self.init_weights() 162 163 @property 164 def norm(self): 165 return getattr(self, self.norm_name) 166 167 def init_weights(self): 168 # 1. It is mainly for customized conv layers with their own 169 # initialization manners by calling their own ``init_weights()``, 170 # and we do not want ConvModule to override the initialization. 171 # 2. For customized conv layers without their own initialization 172 # manners (that is, they don't have their own ``init_weights()``) 173 # and PyTorch's conv layers, they will be initialized by 174 # this method with default ``kaiming_init``. 175 # Note: For PyTorch's conv layers, they will be overwritten by our 176 # initialization implementation using default ``kaiming_init``. 177 if not hasattr(self.conv, 'init_weights'): 178 if self.with_activation and self.act_cfg['type'] == 'LeakyReLU': 179 nonlinearity = 'leaky_relu' 180 a = self.act_cfg.get('negative_slope', 0.01) 181 else: 182 nonlinearity = 'relu' 183 a = 0 184 kaiming_init(self.conv, a=a, nonlinearity=nonlinearity) 185 if self.with_norm: 186 constant_init(self.norm, 1, bias=0) 187 188 def forward(self, x, activate=True, norm=True): 189 for layer in self.order: 190 if layer == 'conv': 191 if self.with_explicit_padding: 192 x = self.padding_layer(x) 193 x = self.conv(x) 194 elif layer == 'norm' and norm and self.with_norm: 195 x = self.norm(x) 196 elif layer == 'act' and activate and self.with_activation: 197 x = self.activate(x) 198 return x 199 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mmcv/cnn/bricks/conv_module.py b/mmcv/cnn/bricks/conv_module.py --- a/mmcv/cnn/bricks/conv_module.py +++ b/mmcv/cnn/bricks/conv_module.py @@ -146,6 +146,8 @@ norm_channels = in_channels self.norm_name, norm = build_norm_layer(norm_cfg, norm_channels) self.add_module(self.norm_name, norm) + else: + self.norm_name = None # build activation layer if self.with_activation: @@ -162,7 +164,10 @@ @property def norm(self): - return getattr(self, self.norm_name) + if self.norm_name: + return getattr(self, self.norm_name) + else: + return None def init_weights(self): # 1. It is mainly for customized conv layers with their own
{"golden_diff": "diff --git a/mmcv/cnn/bricks/conv_module.py b/mmcv/cnn/bricks/conv_module.py\n--- a/mmcv/cnn/bricks/conv_module.py\n+++ b/mmcv/cnn/bricks/conv_module.py\n@@ -146,6 +146,8 @@\n norm_channels = in_channels\n self.norm_name, norm = build_norm_layer(norm_cfg, norm_channels)\n self.add_module(self.norm_name, norm)\n+ else:\n+ self.norm_name = None\n \n # build activation layer\n if self.with_activation:\n@@ -162,7 +164,10 @@\n \n @property\n def norm(self):\n- return getattr(self, self.norm_name)\n+ if self.norm_name:\n+ return getattr(self, self.norm_name)\n+ else:\n+ return None\n \n def init_weights(self):\n # 1. It is mainly for customized conv layers with their own\n", "issue": "'ConvModule' object has no attribute 'norm' when using torch.jit.trace\nenvironment: python3.6.6 pytorch1.7.0\r\n\r\ncode:\r\n\r\n```\r\nimport torch\r\nfrom mmcv.cnn.bricks import ConvModule\r\n\r\nconv = ConvModule(3, 8, 2, norm_cfg=dict(type='BN'))\r\ninput_example = torch.randn((1, 3, 224, 224))\r\n\r\nts = torch.jit.trace(func=conv.eval(), example_inputs=input_example)\r\ntorch.jit.save(ts, 'conv_module.ts')\r\n```\r\n\r\nIt work well, but when set `norm_cfg=None`, failed.\r\n\r\n```\r\ntorch.nn.modules.module.ModuleAttributeError: 'ConvModule' object has no attribute 'norm'\r\n```\r\n\r\nAny help?\n", "before_files": [{"content": "import warnings\n\nimport torch.nn as nn\n\nfrom ..utils import constant_init, kaiming_init\nfrom .activation import build_activation_layer\nfrom .conv import build_conv_layer\nfrom .norm import build_norm_layer\nfrom .padding import build_padding_layer\nfrom .registry import PLUGIN_LAYERS\n\n\n@PLUGIN_LAYERS.register_module()\nclass ConvModule(nn.Module):\n \"\"\"A conv block that bundles conv/norm/activation layers.\n\n This block simplifies the usage of convolution layers, which are commonly\n used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU).\n It is based upon three build methods: `build_conv_layer()`,\n `build_norm_layer()` and `build_activation_layer()`.\n\n Besides, we add some additional features in this module.\n 1. Automatically set `bias` of the conv layer.\n 2. Spectral norm is supported.\n 3. More padding modes are supported. Before PyTorch 1.5, nn.Conv2d only\n supports zero and circular padding, and we add \"reflect\" padding mode.\n\n Args:\n in_channels (int): Number of channels in the input feature map.\n Same as that in ``nn._ConvNd``.\n out_channels (int): Number of channels produced by the convolution.\n Same as that in ``nn._ConvNd``.\n kernel_size (int | tuple[int]): Size of the convolving kernel.\n Same as that in ``nn._ConvNd``.\n stride (int | tuple[int]): Stride of the convolution.\n Same as that in ``nn._ConvNd``.\n padding (int | tuple[int]): Zero-padding added to both sides of\n the input. Same as that in ``nn._ConvNd``.\n dilation (int | tuple[int]): Spacing between kernel elements.\n Same as that in ``nn._ConvNd``.\n groups (int): Number of blocked connections from input channels to\n output channels. Same as that in ``nn._ConvNd``.\n bias (bool | str): If specified as `auto`, it will be decided by the\n norm_cfg. Bias will be set as True if `norm_cfg` is None, otherwise\n False. Default: \"auto\".\n conv_cfg (dict): Config dict for convolution layer. Default: None,\n which means using conv2d.\n norm_cfg (dict): Config dict for normalization layer. Default: None.\n act_cfg (dict): Config dict for activation layer.\n Default: dict(type='ReLU').\n inplace (bool): Whether to use inplace mode for activation.\n Default: True.\n with_spectral_norm (bool): Whether use spectral norm in conv module.\n Default: False.\n padding_mode (str): If the `padding_mode` has not been supported by\n current `Conv2d` in PyTorch, we will use our own padding layer\n instead. Currently, we support ['zeros', 'circular'] with official\n implementation and ['reflect'] with our own implementation.\n Default: 'zeros'.\n order (tuple[str]): The order of conv/norm/activation layers. It is a\n sequence of \"conv\", \"norm\" and \"act\". Common examples are\n (\"conv\", \"norm\", \"act\") and (\"act\", \"conv\", \"norm\").\n Default: ('conv', 'norm', 'act').\n \"\"\"\n\n _abbr_ = 'conv_block'\n\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride=1,\n padding=0,\n dilation=1,\n groups=1,\n bias='auto',\n conv_cfg=None,\n norm_cfg=None,\n act_cfg=dict(type='ReLU'),\n inplace=True,\n with_spectral_norm=False,\n padding_mode='zeros',\n order=('conv', 'norm', 'act')):\n super(ConvModule, self).__init__()\n assert conv_cfg is None or isinstance(conv_cfg, dict)\n assert norm_cfg is None or isinstance(norm_cfg, dict)\n assert act_cfg is None or isinstance(act_cfg, dict)\n official_padding_mode = ['zeros', 'circular']\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n self.act_cfg = act_cfg\n self.inplace = inplace\n self.with_spectral_norm = with_spectral_norm\n self.with_explicit_padding = padding_mode not in official_padding_mode\n self.order = order\n assert isinstance(self.order, tuple) and len(self.order) == 3\n assert set(order) == set(['conv', 'norm', 'act'])\n\n self.with_norm = norm_cfg is not None\n self.with_activation = act_cfg is not None\n # if the conv layer is before a norm layer, bias is unnecessary.\n if bias == 'auto':\n bias = not self.with_norm\n self.with_bias = bias\n\n if self.with_norm and self.with_bias:\n warnings.warn('ConvModule has norm and bias at the same time')\n\n if self.with_explicit_padding:\n pad_cfg = dict(type=padding_mode)\n self.padding_layer = build_padding_layer(pad_cfg, padding)\n\n # reset padding to 0 for conv module\n conv_padding = 0 if self.with_explicit_padding else padding\n # build convolution layer\n self.conv = build_conv_layer(\n conv_cfg,\n in_channels,\n out_channels,\n kernel_size,\n stride=stride,\n padding=conv_padding,\n dilation=dilation,\n groups=groups,\n bias=bias)\n # export the attributes of self.conv to a higher level for convenience\n self.in_channels = self.conv.in_channels\n self.out_channels = self.conv.out_channels\n self.kernel_size = self.conv.kernel_size\n self.stride = self.conv.stride\n self.padding = padding\n self.dilation = self.conv.dilation\n self.transposed = self.conv.transposed\n self.output_padding = self.conv.output_padding\n self.groups = self.conv.groups\n\n if self.with_spectral_norm:\n self.conv = nn.utils.spectral_norm(self.conv)\n\n # build normalization layers\n if self.with_norm:\n # norm layer is after conv layer\n if order.index('norm') > order.index('conv'):\n norm_channels = out_channels\n else:\n norm_channels = in_channels\n self.norm_name, norm = build_norm_layer(norm_cfg, norm_channels)\n self.add_module(self.norm_name, norm)\n\n # build activation layer\n if self.with_activation:\n act_cfg_ = act_cfg.copy()\n # nn.Tanh has no 'inplace' argument\n if act_cfg_['type'] not in [\n 'Tanh', 'PReLU', 'Sigmoid', 'HSigmoid', 'Swish'\n ]:\n act_cfg_.setdefault('inplace', inplace)\n self.activate = build_activation_layer(act_cfg_)\n\n # Use msra init by default\n self.init_weights()\n\n @property\n def norm(self):\n return getattr(self, self.norm_name)\n\n def init_weights(self):\n # 1. It is mainly for customized conv layers with their own\n # initialization manners by calling their own ``init_weights()``,\n # and we do not want ConvModule to override the initialization.\n # 2. For customized conv layers without their own initialization\n # manners (that is, they don't have their own ``init_weights()``)\n # and PyTorch's conv layers, they will be initialized by\n # this method with default ``kaiming_init``.\n # Note: For PyTorch's conv layers, they will be overwritten by our\n # initialization implementation using default ``kaiming_init``.\n if not hasattr(self.conv, 'init_weights'):\n if self.with_activation and self.act_cfg['type'] == 'LeakyReLU':\n nonlinearity = 'leaky_relu'\n a = self.act_cfg.get('negative_slope', 0.01)\n else:\n nonlinearity = 'relu'\n a = 0\n kaiming_init(self.conv, a=a, nonlinearity=nonlinearity)\n if self.with_norm:\n constant_init(self.norm, 1, bias=0)\n\n def forward(self, x, activate=True, norm=True):\n for layer in self.order:\n if layer == 'conv':\n if self.with_explicit_padding:\n x = self.padding_layer(x)\n x = self.conv(x)\n elif layer == 'norm' and norm and self.with_norm:\n x = self.norm(x)\n elif layer == 'act' and activate and self.with_activation:\n x = self.activate(x)\n return x\n", "path": "mmcv/cnn/bricks/conv_module.py"}], "after_files": [{"content": "import warnings\n\nimport torch.nn as nn\n\nfrom ..utils import constant_init, kaiming_init\nfrom .activation import build_activation_layer\nfrom .conv import build_conv_layer\nfrom .norm import build_norm_layer\nfrom .padding import build_padding_layer\nfrom .registry import PLUGIN_LAYERS\n\n\n@PLUGIN_LAYERS.register_module()\nclass ConvModule(nn.Module):\n \"\"\"A conv block that bundles conv/norm/activation layers.\n\n This block simplifies the usage of convolution layers, which are commonly\n used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU).\n It is based upon three build methods: `build_conv_layer()`,\n `build_norm_layer()` and `build_activation_layer()`.\n\n Besides, we add some additional features in this module.\n 1. Automatically set `bias` of the conv layer.\n 2. Spectral norm is supported.\n 3. More padding modes are supported. Before PyTorch 1.5, nn.Conv2d only\n supports zero and circular padding, and we add \"reflect\" padding mode.\n\n Args:\n in_channels (int): Number of channels in the input feature map.\n Same as that in ``nn._ConvNd``.\n out_channels (int): Number of channels produced by the convolution.\n Same as that in ``nn._ConvNd``.\n kernel_size (int | tuple[int]): Size of the convolving kernel.\n Same as that in ``nn._ConvNd``.\n stride (int | tuple[int]): Stride of the convolution.\n Same as that in ``nn._ConvNd``.\n padding (int | tuple[int]): Zero-padding added to both sides of\n the input. Same as that in ``nn._ConvNd``.\n dilation (int | tuple[int]): Spacing between kernel elements.\n Same as that in ``nn._ConvNd``.\n groups (int): Number of blocked connections from input channels to\n output channels. Same as that in ``nn._ConvNd``.\n bias (bool | str): If specified as `auto`, it will be decided by the\n norm_cfg. Bias will be set as True if `norm_cfg` is None, otherwise\n False. Default: \"auto\".\n conv_cfg (dict): Config dict for convolution layer. Default: None,\n which means using conv2d.\n norm_cfg (dict): Config dict for normalization layer. Default: None.\n act_cfg (dict): Config dict for activation layer.\n Default: dict(type='ReLU').\n inplace (bool): Whether to use inplace mode for activation.\n Default: True.\n with_spectral_norm (bool): Whether use spectral norm in conv module.\n Default: False.\n padding_mode (str): If the `padding_mode` has not been supported by\n current `Conv2d` in PyTorch, we will use our own padding layer\n instead. Currently, we support ['zeros', 'circular'] with official\n implementation and ['reflect'] with our own implementation.\n Default: 'zeros'.\n order (tuple[str]): The order of conv/norm/activation layers. It is a\n sequence of \"conv\", \"norm\" and \"act\". Common examples are\n (\"conv\", \"norm\", \"act\") and (\"act\", \"conv\", \"norm\").\n Default: ('conv', 'norm', 'act').\n \"\"\"\n\n _abbr_ = 'conv_block'\n\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride=1,\n padding=0,\n dilation=1,\n groups=1,\n bias='auto',\n conv_cfg=None,\n norm_cfg=None,\n act_cfg=dict(type='ReLU'),\n inplace=True,\n with_spectral_norm=False,\n padding_mode='zeros',\n order=('conv', 'norm', 'act')):\n super(ConvModule, self).__init__()\n assert conv_cfg is None or isinstance(conv_cfg, dict)\n assert norm_cfg is None or isinstance(norm_cfg, dict)\n assert act_cfg is None or isinstance(act_cfg, dict)\n official_padding_mode = ['zeros', 'circular']\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n self.act_cfg = act_cfg\n self.inplace = inplace\n self.with_spectral_norm = with_spectral_norm\n self.with_explicit_padding = padding_mode not in official_padding_mode\n self.order = order\n assert isinstance(self.order, tuple) and len(self.order) == 3\n assert set(order) == set(['conv', 'norm', 'act'])\n\n self.with_norm = norm_cfg is not None\n self.with_activation = act_cfg is not None\n # if the conv layer is before a norm layer, bias is unnecessary.\n if bias == 'auto':\n bias = not self.with_norm\n self.with_bias = bias\n\n if self.with_norm and self.with_bias:\n warnings.warn('ConvModule has norm and bias at the same time')\n\n if self.with_explicit_padding:\n pad_cfg = dict(type=padding_mode)\n self.padding_layer = build_padding_layer(pad_cfg, padding)\n\n # reset padding to 0 for conv module\n conv_padding = 0 if self.with_explicit_padding else padding\n # build convolution layer\n self.conv = build_conv_layer(\n conv_cfg,\n in_channels,\n out_channels,\n kernel_size,\n stride=stride,\n padding=conv_padding,\n dilation=dilation,\n groups=groups,\n bias=bias)\n # export the attributes of self.conv to a higher level for convenience\n self.in_channels = self.conv.in_channels\n self.out_channels = self.conv.out_channels\n self.kernel_size = self.conv.kernel_size\n self.stride = self.conv.stride\n self.padding = padding\n self.dilation = self.conv.dilation\n self.transposed = self.conv.transposed\n self.output_padding = self.conv.output_padding\n self.groups = self.conv.groups\n\n if self.with_spectral_norm:\n self.conv = nn.utils.spectral_norm(self.conv)\n\n # build normalization layers\n if self.with_norm:\n # norm layer is after conv layer\n if order.index('norm') > order.index('conv'):\n norm_channels = out_channels\n else:\n norm_channels = in_channels\n self.norm_name, norm = build_norm_layer(norm_cfg, norm_channels)\n self.add_module(self.norm_name, norm)\n else:\n self.norm_name = None\n\n # build activation layer\n if self.with_activation:\n act_cfg_ = act_cfg.copy()\n # nn.Tanh has no 'inplace' argument\n if act_cfg_['type'] not in [\n 'Tanh', 'PReLU', 'Sigmoid', 'HSigmoid', 'Swish'\n ]:\n act_cfg_.setdefault('inplace', inplace)\n self.activate = build_activation_layer(act_cfg_)\n\n # Use msra init by default\n self.init_weights()\n\n @property\n def norm(self):\n if self.norm_name:\n return getattr(self, self.norm_name)\n else:\n return None\n\n def init_weights(self):\n # 1. It is mainly for customized conv layers with their own\n # initialization manners by calling their own ``init_weights()``,\n # and we do not want ConvModule to override the initialization.\n # 2. For customized conv layers without their own initialization\n # manners (that is, they don't have their own ``init_weights()``)\n # and PyTorch's conv layers, they will be initialized by\n # this method with default ``kaiming_init``.\n # Note: For PyTorch's conv layers, they will be overwritten by our\n # initialization implementation using default ``kaiming_init``.\n if not hasattr(self.conv, 'init_weights'):\n if self.with_activation and self.act_cfg['type'] == 'LeakyReLU':\n nonlinearity = 'leaky_relu'\n a = self.act_cfg.get('negative_slope', 0.01)\n else:\n nonlinearity = 'relu'\n a = 0\n kaiming_init(self.conv, a=a, nonlinearity=nonlinearity)\n if self.with_norm:\n constant_init(self.norm, 1, bias=0)\n\n def forward(self, x, activate=True, norm=True):\n for layer in self.order:\n if layer == 'conv':\n if self.with_explicit_padding:\n x = self.padding_layer(x)\n x = self.conv(x)\n elif layer == 'norm' and norm and self.with_norm:\n x = self.norm(x)\n elif layer == 'act' and activate and self.with_activation:\n x = self.activate(x)\n return x\n", "path": "mmcv/cnn/bricks/conv_module.py"}]}
2,782
214
gh_patches_debug_9196
rasdani/github-patches
git_diff
conda__conda-build-1470
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- conda metapackage Hello, I was wondering why the behaviour of `conda metapackage` has changed. Previously, it outputted helpful information about the location of the recently created package. However, this is the output now: ``` BUILD START: cgat-devel-0.4-py27r3.2.2_6 Package: cgat-devel-0.4-py27r3.2.2_6 source tree in: /sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476780260959/work number of files: 1 Fixing permissions Detected hard-coded path in text file bin/cgat Fixing permissions ``` Moreover, the command also creates temporary folders that are left empty after the package has been built: ``` sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476720264845 /sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476695297317 /sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476718035758 /sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476718312877 /sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476721899323 /sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476698228374 /sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476696744782 /sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476719724225 /sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476720123351 /sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476780047095 ``` Is this required? Here is additional info about my environment: ``` $ conda info Current conda install: platform : linux-64 conda version : 4.2.9 conda is private : False conda-env version : 4.2.9 conda-build version : 2.0.6 python version : 2.7.12.final.0 requests version : 2.11.1 root environment : /sebastian/conda/conda-build/build-testing (writable) default environment : /sebastian/conda/conda-build/build-testing envs directories : /sebastian/conda/conda-build/build-testing/envs package cache : /sebastian/conda/conda-build/build-testing/pkgs channel URLs : https://conda.anaconda.org/cgat/linux-64/ https://conda.anaconda.org/cgat/noarch/ https://repo.continuum.io/pkgs/free/linux-64/ https://repo.continuum.io/pkgs/free/noarch/ https://repo.continuum.io/pkgs/pro/linux-64/ https://repo.continuum.io/pkgs/pro/noarch/ https://conda.anaconda.org/conda-forge/linux-64/ https://conda.anaconda.org/conda-forge/noarch/ https://conda.anaconda.org/r/linux-64/ https://conda.anaconda.org/r/noarch/ https://conda.anaconda.org/bioconda/linux-64/ https://conda.anaconda.org/bioconda/noarch/ config file : /ifs/home/sebastian/.condarc offline mode : False ``` Many thanks, Sebastian --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `conda_build/metapackage.py` Content: ``` 1 from collections import defaultdict 2 from conda_build.config import Config 3 from conda_build.metadata import MetaData 4 5 6 def create_metapackage(name, version, entry_points=(), build_string=None, build_number=0, 7 dependencies=(), home=None, license_name=None, summary=None, config=None): 8 # local import to avoid circular import, we provid create_metapackage in api 9 from conda_build.build import build 10 11 if not config: 12 config = Config() 13 14 d = defaultdict(dict) 15 d['package']['name'] = name 16 d['package']['version'] = version 17 d['build']['number'] = build_number 18 d['build']['entry_points'] = entry_points 19 # MetaData does the auto stuff if the build string is None 20 d['build']['string'] = build_string 21 d['requirements']['run'] = dependencies 22 d['about']['home'] = home 23 d['about']['license'] = license_name 24 d['about']['summary'] = summary 25 d = dict(d) 26 m = MetaData.fromdict(d, config=config) 27 config.compute_build_id(m.name()) 28 29 return build(m, config=config, need_source_download=False) 30 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/conda_build/metapackage.py b/conda_build/metapackage.py --- a/conda_build/metapackage.py +++ b/conda_build/metapackage.py @@ -6,7 +6,7 @@ def create_metapackage(name, version, entry_points=(), build_string=None, build_number=0, dependencies=(), home=None, license_name=None, summary=None, config=None): # local import to avoid circular import, we provid create_metapackage in api - from conda_build.build import build + from conda_build.api import build if not config: config = Config()
{"golden_diff": "diff --git a/conda_build/metapackage.py b/conda_build/metapackage.py\n--- a/conda_build/metapackage.py\n+++ b/conda_build/metapackage.py\n@@ -6,7 +6,7 @@\n def create_metapackage(name, version, entry_points=(), build_string=None, build_number=0,\n dependencies=(), home=None, license_name=None, summary=None, config=None):\n # local import to avoid circular import, we provid create_metapackage in api\n- from conda_build.build import build\n+ from conda_build.api import build\n \n if not config:\n config = Config()\n", "issue": "conda metapackage \nHello,\n\nI was wondering why the behaviour of `conda metapackage` has changed. Previously, it outputted helpful information about the location of the recently created package. However, this is the output now:\n\n```\nBUILD START: cgat-devel-0.4-py27r3.2.2_6\nPackage: cgat-devel-0.4-py27r3.2.2_6\nsource tree in: /sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476780260959/work\nnumber of files: 1\nFixing permissions\nDetected hard-coded path in text file bin/cgat\nFixing permissions\n```\n\nMoreover, the command also creates temporary folders that are left empty after the package has been built:\n\n```\nsebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476720264845\n/sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476695297317\n/sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476718035758\n/sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476718312877\n/sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476721899323\n/sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476698228374\n/sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476696744782\n/sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476719724225\n/sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476720123351\n/sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476780047095\n```\n\nIs this required?\n\nHere is additional info about my environment:\n\n```\n$ conda info\nCurrent conda install:\n\n platform : linux-64\n conda version : 4.2.9\n conda is private : False\n conda-env version : 4.2.9\n conda-build version : 2.0.6\n python version : 2.7.12.final.0\n requests version : 2.11.1\n root environment : /sebastian/conda/conda-build/build-testing (writable)\n default environment : /sebastian/conda/conda-build/build-testing\n envs directories : /sebastian/conda/conda-build/build-testing/envs\n package cache : /sebastian/conda/conda-build/build-testing/pkgs\n channel URLs : https://conda.anaconda.org/cgat/linux-64/\n https://conda.anaconda.org/cgat/noarch/\n https://repo.continuum.io/pkgs/free/linux-64/\n https://repo.continuum.io/pkgs/free/noarch/\n https://repo.continuum.io/pkgs/pro/linux-64/\n https://repo.continuum.io/pkgs/pro/noarch/\n https://conda.anaconda.org/conda-forge/linux-64/\n https://conda.anaconda.org/conda-forge/noarch/\n https://conda.anaconda.org/r/linux-64/\n https://conda.anaconda.org/r/noarch/\n https://conda.anaconda.org/bioconda/linux-64/\n https://conda.anaconda.org/bioconda/noarch/\n config file : /ifs/home/sebastian/.condarc\n offline mode : False\n```\n\nMany thanks,\nSebastian\n\n", "before_files": [{"content": "from collections import defaultdict\nfrom conda_build.config import Config\nfrom conda_build.metadata import MetaData\n\n\ndef create_metapackage(name, version, entry_points=(), build_string=None, build_number=0,\n dependencies=(), home=None, license_name=None, summary=None, config=None):\n # local import to avoid circular import, we provid create_metapackage in api\n from conda_build.build import build\n\n if not config:\n config = Config()\n\n d = defaultdict(dict)\n d['package']['name'] = name\n d['package']['version'] = version\n d['build']['number'] = build_number\n d['build']['entry_points'] = entry_points\n # MetaData does the auto stuff if the build string is None\n d['build']['string'] = build_string\n d['requirements']['run'] = dependencies\n d['about']['home'] = home\n d['about']['license'] = license_name\n d['about']['summary'] = summary\n d = dict(d)\n m = MetaData.fromdict(d, config=config)\n config.compute_build_id(m.name())\n\n return build(m, config=config, need_source_download=False)\n", "path": "conda_build/metapackage.py"}], "after_files": [{"content": "from collections import defaultdict\nfrom conda_build.config import Config\nfrom conda_build.metadata import MetaData\n\n\ndef create_metapackage(name, version, entry_points=(), build_string=None, build_number=0,\n dependencies=(), home=None, license_name=None, summary=None, config=None):\n # local import to avoid circular import, we provid create_metapackage in api\n from conda_build.api import build\n\n if not config:\n config = Config()\n\n d = defaultdict(dict)\n d['package']['name'] = name\n d['package']['version'] = version\n d['build']['number'] = build_number\n d['build']['entry_points'] = entry_points\n # MetaData does the auto stuff if the build string is None\n d['build']['string'] = build_string\n d['requirements']['run'] = dependencies\n d['about']['home'] = home\n d['about']['license'] = license_name\n d['about']['summary'] = summary\n d = dict(d)\n m = MetaData.fromdict(d, config=config)\n config.compute_build_id(m.name())\n\n return build(m, config=config, need_source_download=False)\n", "path": "conda_build/metapackage.py"}]}
1,461
137
gh_patches_debug_3382
rasdani/github-patches
git_diff
cocotb__cocotb-275
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Typo in BusMonitor Causes python Exception In the bus monitor function in_reset(), there is a typo causing a problem. The code at lines 168-169, tests if self._reset is valid, but then it accesses self._reset_n when it should be accessing self._reset. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cocotb/monitors/__init__.py` Content: ``` 1 #!/bin/env python 2 3 ''' Copyright (c) 2013 Potential Ventures Ltd 4 Copyright (c) 2013 SolarFlare Communications Inc 5 All rights reserved. 6 7 Redistribution and use in source and binary forms, with or without 8 modification, are permitted provided that the following conditions are met: 9 * Redistributions of source code must retain the above copyright 10 notice, this list of conditions and the following disclaimer. 11 * Redistributions in binary form must reproduce the above copyright 12 notice, this list of conditions and the following disclaimer in the 13 documentation and/or other materials provided with the distribution. 14 * Neither the name of Potential Ventures Ltd, 15 SolarFlare Communications Inc nor the 16 names of its contributors may be used to endorse or promote products 17 derived from this software without specific prior written permission. 18 19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 20 ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 21 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY 23 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 24 (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 25 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 26 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 28 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ''' 29 30 """ 31 32 Class defining the standard interface for a monitor within a testbench 33 34 The monitor is responsible for watching the pins of the DUT and recreating 35 the transactions 36 """ 37 38 import math 39 40 import cocotb 41 from cocotb.decorators import coroutine 42 from cocotb.triggers import Edge, Event, RisingEdge, ReadOnly, Timer 43 from cocotb.binary import BinaryValue 44 from cocotb.bus import Bus 45 from cocotb.log import SimLog 46 from cocotb.result import ReturnValue 47 48 49 class MonitorStatistics(object): 50 """Wrapper class for storing Monitor statistics""" 51 def __init__(self): 52 self.received_transactions = 0 53 54 55 class Monitor(object): 56 57 def __init__(self, callback=None, event=None): 58 """ 59 Constructor for a monitor instance 60 61 callback will be called with each recovered transaction as the argument 62 63 If the callback isn't used, received transactions will be placed on a 64 queue and the event used to notify any consumers. 65 """ 66 self._event = event 67 self._wait_event = None 68 self._recvQ = [] 69 self._callbacks = [] 70 self.stats = MonitorStatistics() 71 self._wait_event = Event() 72 73 # Subclasses may already set up logging 74 if not hasattr(self, "log"): 75 self.log = SimLog("cocotb.monitor.%s" % (self.__class__.__name__)) 76 77 if callback is not None: 78 self.add_callback(callback) 79 80 # Create an independent coroutine which can receive stuff 81 self._thread = cocotb.scheduler.add(self._monitor_recv()) 82 83 def kill(self): 84 if self._thread: 85 self._thread.kill() 86 self._thread = None 87 88 def __len__(self): 89 return len(self._recvQ) 90 91 def __getitem__(self, idx): 92 return self._recvQ[idx] 93 94 def add_callback(self, callback): 95 self.log.debug("Adding callback of function %s to monitor" % 96 (callback.__name__)) 97 self._callbacks.append(callback) 98 99 @coroutine 100 def wait_for_recv(self, timeout=None): 101 if timeout: 102 t = Timer(timeout) 103 fired = yield [self._wait_event.wait(), t] 104 if fired is t: 105 raise ReturnValue(None) 106 else: 107 yield self._wait_event.wait() 108 109 pkt = self._wait_event.data 110 raise ReturnValue(pkt) 111 112 @coroutine 113 def _monitor_recv(self): 114 """ 115 actual impementation of the receiver 116 117 subclasses should override this method to implement the actual receive 118 routine and call self._recv() with the recovered transaction 119 """ 120 raise NotImplementedError("Attempt to use base monitor class without " 121 "providing a _monitor_recv method") 122 123 def _recv(self, transaction): 124 """Common handling of a received transaction.""" 125 126 self.stats.received_transactions += 1 127 128 # either callback based consumer 129 for callback in self._callbacks: 130 callback(transaction) 131 132 # Or queued with a notification 133 if not self._callbacks: 134 self._recvQ.append(transaction) 135 136 if self._event is not None: 137 self._event.set() 138 139 # If anyone was waiting then let them know 140 if self._wait_event is not None: 141 self._wait_event.set(data=transaction) 142 self._wait_event.clear() 143 144 145 class BusMonitor(Monitor): 146 """ 147 Wrapper providing common functionality for monitoring busses 148 """ 149 _signals = [] 150 _optional_signals = [] 151 152 def __init__(self, entity, name, clock, reset=None, reset_n=None, 153 callback=None, event=None): 154 self.log = SimLog("cocotb.%s.%s" % (entity.name, name)) 155 self.entity = entity 156 self.name = name 157 self.clock = clock 158 self.bus = Bus(self.entity, self.name, self._signals, 159 optional_signals=self._optional_signals) 160 self._reset = reset 161 self._reset_n = reset_n 162 Monitor.__init__(self, callback=callback, event=event) 163 164 @property 165 def in_reset(self): 166 if self._reset_n is not None: 167 return not bool(self._reset_n.value.integer) 168 if self._reset is not None: 169 return bool(self._reset_n.value.integer) 170 return False 171 172 def __str__(self): 173 return "%s(%s)" % (self.__class__.__name__, self.name) 174 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cocotb/monitors/__init__.py b/cocotb/monitors/__init__.py --- a/cocotb/monitors/__init__.py +++ b/cocotb/monitors/__init__.py @@ -166,7 +166,7 @@ if self._reset_n is not None: return not bool(self._reset_n.value.integer) if self._reset is not None: - return bool(self._reset_n.value.integer) + return bool(self._reset.value.integer) return False def __str__(self):
{"golden_diff": "diff --git a/cocotb/monitors/__init__.py b/cocotb/monitors/__init__.py\n--- a/cocotb/monitors/__init__.py\n+++ b/cocotb/monitors/__init__.py\n@@ -166,7 +166,7 @@\n if self._reset_n is not None:\n return not bool(self._reset_n.value.integer)\n if self._reset is not None:\n- return bool(self._reset_n.value.integer)\n+ return bool(self._reset.value.integer)\n return False\n \n def __str__(self):\n", "issue": "Typo in BusMonitor Causes python Exception\nIn the bus monitor function in_reset(), there is a typo causing a problem.\n\nThe code at lines 168-169, tests if self._reset is valid, but then it accesses self._reset_n when it should be accessing self._reset.\n\n", "before_files": [{"content": "#!/bin/env python\n\n''' Copyright (c) 2013 Potential Ventures Ltd\nCopyright (c) 2013 SolarFlare Communications Inc\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither the name of Potential Ventures Ltd,\n SolarFlare Communications Inc nor the\n names of its contributors may be used to endorse or promote products\n derived from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\nDIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\nON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. '''\n\n\"\"\"\n\n Class defining the standard interface for a monitor within a testbench\n\n The monitor is responsible for watching the pins of the DUT and recreating\n the transactions\n\"\"\"\n\nimport math\n\nimport cocotb\nfrom cocotb.decorators import coroutine\nfrom cocotb.triggers import Edge, Event, RisingEdge, ReadOnly, Timer\nfrom cocotb.binary import BinaryValue\nfrom cocotb.bus import Bus\nfrom cocotb.log import SimLog\nfrom cocotb.result import ReturnValue\n\n\nclass MonitorStatistics(object):\n \"\"\"Wrapper class for storing Monitor statistics\"\"\"\n def __init__(self):\n self.received_transactions = 0\n\n\nclass Monitor(object):\n\n def __init__(self, callback=None, event=None):\n \"\"\"\n Constructor for a monitor instance\n\n callback will be called with each recovered transaction as the argument\n\n If the callback isn't used, received transactions will be placed on a\n queue and the event used to notify any consumers.\n \"\"\"\n self._event = event\n self._wait_event = None\n self._recvQ = []\n self._callbacks = []\n self.stats = MonitorStatistics()\n self._wait_event = Event()\n\n # Subclasses may already set up logging\n if not hasattr(self, \"log\"):\n self.log = SimLog(\"cocotb.monitor.%s\" % (self.__class__.__name__))\n\n if callback is not None:\n self.add_callback(callback)\n\n # Create an independent coroutine which can receive stuff\n self._thread = cocotb.scheduler.add(self._monitor_recv())\n\n def kill(self):\n if self._thread:\n self._thread.kill()\n self._thread = None\n\n def __len__(self):\n return len(self._recvQ)\n\n def __getitem__(self, idx):\n return self._recvQ[idx]\n\n def add_callback(self, callback):\n self.log.debug(\"Adding callback of function %s to monitor\" %\n (callback.__name__))\n self._callbacks.append(callback)\n\n @coroutine\n def wait_for_recv(self, timeout=None):\n if timeout:\n t = Timer(timeout)\n fired = yield [self._wait_event.wait(), t]\n if fired is t:\n raise ReturnValue(None)\n else:\n yield self._wait_event.wait()\n\n pkt = self._wait_event.data\n raise ReturnValue(pkt)\n\n @coroutine\n def _monitor_recv(self):\n \"\"\"\n actual impementation of the receiver\n\n subclasses should override this method to implement the actual receive\n routine and call self._recv() with the recovered transaction\n \"\"\"\n raise NotImplementedError(\"Attempt to use base monitor class without \"\n \"providing a _monitor_recv method\")\n\n def _recv(self, transaction):\n \"\"\"Common handling of a received transaction.\"\"\"\n\n self.stats.received_transactions += 1\n\n # either callback based consumer\n for callback in self._callbacks:\n callback(transaction)\n\n # Or queued with a notification\n if not self._callbacks:\n self._recvQ.append(transaction)\n\n if self._event is not None:\n self._event.set()\n\n # If anyone was waiting then let them know\n if self._wait_event is not None:\n self._wait_event.set(data=transaction)\n self._wait_event.clear()\n\n\nclass BusMonitor(Monitor):\n \"\"\"\n Wrapper providing common functionality for monitoring busses\n \"\"\"\n _signals = []\n _optional_signals = []\n\n def __init__(self, entity, name, clock, reset=None, reset_n=None,\n callback=None, event=None):\n self.log = SimLog(\"cocotb.%s.%s\" % (entity.name, name))\n self.entity = entity\n self.name = name\n self.clock = clock\n self.bus = Bus(self.entity, self.name, self._signals,\n optional_signals=self._optional_signals)\n self._reset = reset\n self._reset_n = reset_n\n Monitor.__init__(self, callback=callback, event=event)\n\n @property\n def in_reset(self):\n if self._reset_n is not None:\n return not bool(self._reset_n.value.integer)\n if self._reset is not None:\n return bool(self._reset_n.value.integer)\n return False\n\n def __str__(self):\n return \"%s(%s)\" % (self.__class__.__name__, self.name)\n", "path": "cocotb/monitors/__init__.py"}], "after_files": [{"content": "#!/bin/env python\n\n''' Copyright (c) 2013 Potential Ventures Ltd\nCopyright (c) 2013 SolarFlare Communications Inc\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither the name of Potential Ventures Ltd,\n SolarFlare Communications Inc nor the\n names of its contributors may be used to endorse or promote products\n derived from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\nDIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\nON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. '''\n\n\"\"\"\n\n Class defining the standard interface for a monitor within a testbench\n\n The monitor is responsible for watching the pins of the DUT and recreating\n the transactions\n\"\"\"\n\nimport math\n\nimport cocotb\nfrom cocotb.decorators import coroutine\nfrom cocotb.triggers import Edge, Event, RisingEdge, ReadOnly, Timer\nfrom cocotb.binary import BinaryValue\nfrom cocotb.bus import Bus\nfrom cocotb.log import SimLog\nfrom cocotb.result import ReturnValue\n\n\nclass MonitorStatistics(object):\n \"\"\"Wrapper class for storing Monitor statistics\"\"\"\n def __init__(self):\n self.received_transactions = 0\n\n\nclass Monitor(object):\n\n def __init__(self, callback=None, event=None):\n \"\"\"\n Constructor for a monitor instance\n\n callback will be called with each recovered transaction as the argument\n\n If the callback isn't used, received transactions will be placed on a\n queue and the event used to notify any consumers.\n \"\"\"\n self._event = event\n self._wait_event = None\n self._recvQ = []\n self._callbacks = []\n self.stats = MonitorStatistics()\n self._wait_event = Event()\n\n # Subclasses may already set up logging\n if not hasattr(self, \"log\"):\n self.log = SimLog(\"cocotb.monitor.%s\" % (self.__class__.__name__))\n\n if callback is not None:\n self.add_callback(callback)\n\n # Create an independent coroutine which can receive stuff\n self._thread = cocotb.scheduler.add(self._monitor_recv())\n\n def kill(self):\n if self._thread:\n self._thread.kill()\n self._thread = None\n\n def __len__(self):\n return len(self._recvQ)\n\n def __getitem__(self, idx):\n return self._recvQ[idx]\n\n def add_callback(self, callback):\n self.log.debug(\"Adding callback of function %s to monitor\" %\n (callback.__name__))\n self._callbacks.append(callback)\n\n @coroutine\n def wait_for_recv(self, timeout=None):\n if timeout:\n t = Timer(timeout)\n fired = yield [self._wait_event.wait(), t]\n if fired is t:\n raise ReturnValue(None)\n else:\n yield self._wait_event.wait()\n\n pkt = self._wait_event.data\n raise ReturnValue(pkt)\n\n @coroutine\n def _monitor_recv(self):\n \"\"\"\n actual impementation of the receiver\n\n subclasses should override this method to implement the actual receive\n routine and call self._recv() with the recovered transaction\n \"\"\"\n raise NotImplementedError(\"Attempt to use base monitor class without \"\n \"providing a _monitor_recv method\")\n\n def _recv(self, transaction):\n \"\"\"Common handling of a received transaction.\"\"\"\n\n self.stats.received_transactions += 1\n\n # either callback based consumer\n for callback in self._callbacks:\n callback(transaction)\n\n # Or queued with a notification\n if not self._callbacks:\n self._recvQ.append(transaction)\n\n if self._event is not None:\n self._event.set()\n\n # If anyone was waiting then let them know\n if self._wait_event is not None:\n self._wait_event.set(data=transaction)\n self._wait_event.clear()\n\n\nclass BusMonitor(Monitor):\n \"\"\"\n Wrapper providing common functionality for monitoring busses\n \"\"\"\n _signals = []\n _optional_signals = []\n\n def __init__(self, entity, name, clock, reset=None, reset_n=None,\n callback=None, event=None):\n self.log = SimLog(\"cocotb.%s.%s\" % (entity.name, name))\n self.entity = entity\n self.name = name\n self.clock = clock\n self.bus = Bus(self.entity, self.name, self._signals,\n optional_signals=self._optional_signals)\n self._reset = reset\n self._reset_n = reset_n\n Monitor.__init__(self, callback=callback, event=event)\n\n @property\n def in_reset(self):\n if self._reset_n is not None:\n return not bool(self._reset_n.value.integer)\n if self._reset is not None:\n return bool(self._reset.value.integer)\n return False\n\n def __str__(self):\n return \"%s(%s)\" % (self.__class__.__name__, self.name)\n", "path": "cocotb/monitors/__init__.py"}]}
2,016
132
gh_patches_debug_62584
rasdani/github-patches
git_diff
microsoft__Qcodes-82
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- DiskIO discards absolute path information ``` python > my_io = qcodes.DiskIO('/home/eendebakpt/tmp') > print(my_io) <DiskIO, base_location=/mounts/d3/home/eendebakpt/svn/qtt/home/eendebakpt/tmp> ``` The DiskIO object converts my absolute path to a relative path. The problem is in `def _normalize_slashes(self, location)` from `qcodes/data/io.py`. I am not sure about what `_normalize_slashes` should do, so I am not sure how to fix this --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `qcodes/data/io.py` Content: ``` 1 ''' 2 IO managers for QCodes 3 4 IO managers wrap whatever physical storage layer the user wants to use 5 in an interface mimicking the built-in <open> context manager, with 6 some restrictions to minimize the overhead in creating new IO managers. 7 8 The main thing these managers need to implement is the open context manager: 9 - Only the context manager needs to be implemented, not separate 10 open function and close methods. 11 12 - open takes the standard parameters: 13 filename: (string) 14 mode: (string) only 'r' (read), 'w' (write), and 'a' (append) are 15 expected to be implemented. As with normal file objects, the only 16 difference between write and append is that write empties the file 17 before adding new data, and append leaves the existing contents in 18 place but starts writing at the end. 19 20 - the file-like object returned should implement a minimal set of operations. 21 22 In read mode: 23 read([size]): read to the end or at most size bytes into a string 24 readline([size]): read until a newline or up to size bytes, into a string 25 iter(): usually return self, but can be any iterator over lines 26 next(): assuming iter() returns self, this yields the next line. 27 (note: iter and next can be constructed automatically by FileWrapper 28 if you implement readline.) 29 30 In write or append mode: 31 write(s): add string s to the end of the file. 32 writelines(seq): add a sequence of strings (can be constructed 33 automatically if you use FileWrapper) 34 35 IO managers should also implement: 36 - a join method, ala os.path.join(*args). 37 - a list method, that returns all objects matching location 38 - a remove method, ala os.remove(path) except that it will remove directories 39 as well as files, since we're allowing "locations" to be directories 40 or files. 41 ''' 42 43 from contextlib import contextmanager 44 import os 45 import re 46 import shutil 47 48 ALLOWED_OPEN_MODES = ('r', 'w', 'a') 49 50 51 class DiskIO: 52 ''' 53 Simple IO object to wrap disk operations with a custom base location 54 55 Also accepts both forward and backward slashes at any point, and 56 normalizes both to the OS we are currently on 57 ''' 58 def __init__(self, base_location): 59 base_location = self._normalize_slashes(base_location) 60 self.base_location = os.path.abspath(base_location) 61 62 @contextmanager 63 def open(self, filename, mode): 64 ''' 65 mimics the interface of the built in open context manager 66 filename: string, relative to base_location 67 mode: 'r' (read), 'w' (write), or 'a' (append) 68 other open modes are not supported because we don't want 69 to force all IO managers to support others. 70 ''' 71 if mode not in ALLOWED_OPEN_MODES: 72 raise ValueError('mode {} not allowed in IO managers'.format(mode)) 73 74 filepath = self._add_base(filename) 75 76 # make directories if needed 77 dirpath = os.path.dirname(filepath) 78 if not os.path.exists(dirpath): 79 os.makedirs(dirpath) 80 81 # normally we'd construct this context manager with try/finally, but 82 # here we already have a context manager for open so we just wrap it 83 with open(filepath, mode) as f: 84 yield f 85 86 def _normalize_slashes(self, location): 87 return os.path.join(*re.split('[\\\\/]', location)) 88 89 def _add_base(self, location): 90 location = self._normalize_slashes(location) 91 return os.path.join(self.base_location, location) 92 93 def _strip_base(self, path): 94 return os.path.relpath(path, self.base_location) 95 96 def __repr__(self): 97 return '<DiskIO, base_location={}>'.format(self.base_location) 98 99 def join(self, *args): 100 ''' 101 the context-dependent version of os.path.join for this io manager 102 ''' 103 return os.path.join(*args) 104 105 def isfile(self, location): 106 ''' 107 does `location` match a file? 108 ''' 109 path = self._add_base(location) 110 return os.path.isfile(path) 111 112 def list(self, location, maxdepth=1): 113 ''' 114 return all files that match location, either files 115 whose names match up to an arbitrary extension 116 or any files within an exactly matching directory name, 117 nested as far as maxdepth (default 1) levels 118 ''' 119 location = self._normalize_slashes(location) 120 base_location, pattern = os.path.split(location) 121 path = self._add_base(base_location) 122 123 if not os.path.isdir(path): 124 return [] 125 126 matches = [fn for fn in os.listdir(path) if fn.startswith(pattern)] 127 out = [] 128 129 for match in matches: 130 matchpath = self.join(path, match) 131 if os.path.isdir(matchpath) and match == pattern and maxdepth > 0: 132 # exact directory match - walk down to maxdepth 133 for root, dirs, files in os.walk(matchpath, topdown=True): 134 depth = root[len(path):].count(os.path.sep) 135 if depth == maxdepth: 136 dirs[:] = [] # don't recurse any further 137 for fn in files: 138 out.append(self._strip_base(self.join(root, fn))) 139 140 elif (os.path.isfile(matchpath) and 141 (match == pattern or os.path.splitext(match)[0] == pattern)): 142 # exact filename match, or match up to an extension 143 # note that we need match == pattern in addition to the 144 # splitext test to cover the case of the base filename itself 145 # containing a dot. 146 out.append(self.join(base_location, match)) 147 148 return out 149 150 def remove(self, filename): 151 ''' 152 delete this file/folder and prune the directory tree 153 ''' 154 path = self._add_base(filename) 155 if(os.path.isdir(path)): 156 shutil.rmtree(path) 157 else: 158 os.remove(path) 159 160 filepath = os.path.split(path)[0] 161 try: 162 os.removedirs(filepath) 163 except OSError: 164 # directory was not empty - good that we're not removing it! 165 pass 166 167 def remove_all(self, location): 168 ''' 169 delete all files/directories in the dataset at this location, 170 and prune the directory tree 171 ''' 172 for fn in self.list(location): 173 self.remove(fn) 174 175 176 class FileWrapper: 177 def read(self, size=None): 178 raise NotImplementedError 179 180 def readline(self, size=None): 181 raise NotImplementedError 182 183 def __iter__(self): 184 return self 185 186 def __next__(self): 187 line = self.readline() 188 if line: 189 return line 190 else: 191 raise StopIteration 192 193 def write(self, s): 194 raise NotImplementedError 195 196 def writelines(self, seq): 197 for s in seq: 198 self.write(s) 199 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/qcodes/data/io.py b/qcodes/data/io.py --- a/qcodes/data/io.py +++ b/qcodes/data/io.py @@ -84,7 +84,10 @@ yield f def _normalize_slashes(self, location): - return os.path.join(*re.split('[\\\\/]', location)) + # note that this is NOT os.path.join - the difference is os.path.join + # discards empty strings, so if you use it on a re.split absolute + # path you will get a relative path! + return os.sep.join(re.split('[\\\\/]', location)) def _add_base(self, location): location = self._normalize_slashes(location)
{"golden_diff": "diff --git a/qcodes/data/io.py b/qcodes/data/io.py\n--- a/qcodes/data/io.py\n+++ b/qcodes/data/io.py\n@@ -84,7 +84,10 @@\n yield f\n \n def _normalize_slashes(self, location):\n- return os.path.join(*re.split('[\\\\\\\\/]', location))\n+ # note that this is NOT os.path.join - the difference is os.path.join\n+ # discards empty strings, so if you use it on a re.split absolute\n+ # path you will get a relative path!\n+ return os.sep.join(re.split('[\\\\\\\\/]', location))\n \n def _add_base(self, location):\n location = self._normalize_slashes(location)\n", "issue": "DiskIO discards absolute path information\n``` python\n> my_io = qcodes.DiskIO('/home/eendebakpt/tmp')\n> print(my_io)\n<DiskIO, base_location=/mounts/d3/home/eendebakpt/svn/qtt/home/eendebakpt/tmp>\n```\n\nThe DiskIO object converts my absolute path to a relative path. The problem is in `def _normalize_slashes(self, location)` from `qcodes/data/io.py`. \nI am not sure about what `_normalize_slashes` should do, so I am not sure how to fix this\n\n", "before_files": [{"content": "'''\nIO managers for QCodes\n\nIO managers wrap whatever physical storage layer the user wants to use\nin an interface mimicking the built-in <open> context manager, with\nsome restrictions to minimize the overhead in creating new IO managers.\n\nThe main thing these managers need to implement is the open context manager:\n- Only the context manager needs to be implemented, not separate\n open function and close methods.\n\n- open takes the standard parameters:\n filename: (string)\n mode: (string) only 'r' (read), 'w' (write), and 'a' (append) are\n expected to be implemented. As with normal file objects, the only\n difference between write and append is that write empties the file\n before adding new data, and append leaves the existing contents in\n place but starts writing at the end.\n\n- the file-like object returned should implement a minimal set of operations.\n\n In read mode:\n read([size]): read to the end or at most size bytes into a string\n readline([size]): read until a newline or up to size bytes, into a string\n iter(): usually return self, but can be any iterator over lines\n next(): assuming iter() returns self, this yields the next line.\n (note: iter and next can be constructed automatically by FileWrapper\n if you implement readline.)\n\n In write or append mode:\n write(s): add string s to the end of the file.\n writelines(seq): add a sequence of strings (can be constructed\n automatically if you use FileWrapper)\n\nIO managers should also implement:\n- a join method, ala os.path.join(*args).\n- a list method, that returns all objects matching location\n- a remove method, ala os.remove(path) except that it will remove directories\n as well as files, since we're allowing \"locations\" to be directories\n or files.\n'''\n\nfrom contextlib import contextmanager\nimport os\nimport re\nimport shutil\n\nALLOWED_OPEN_MODES = ('r', 'w', 'a')\n\n\nclass DiskIO:\n '''\n Simple IO object to wrap disk operations with a custom base location\n\n Also accepts both forward and backward slashes at any point, and\n normalizes both to the OS we are currently on\n '''\n def __init__(self, base_location):\n base_location = self._normalize_slashes(base_location)\n self.base_location = os.path.abspath(base_location)\n\n @contextmanager\n def open(self, filename, mode):\n '''\n mimics the interface of the built in open context manager\n filename: string, relative to base_location\n mode: 'r' (read), 'w' (write), or 'a' (append)\n other open modes are not supported because we don't want\n to force all IO managers to support others.\n '''\n if mode not in ALLOWED_OPEN_MODES:\n raise ValueError('mode {} not allowed in IO managers'.format(mode))\n\n filepath = self._add_base(filename)\n\n # make directories if needed\n dirpath = os.path.dirname(filepath)\n if not os.path.exists(dirpath):\n os.makedirs(dirpath)\n\n # normally we'd construct this context manager with try/finally, but\n # here we already have a context manager for open so we just wrap it\n with open(filepath, mode) as f:\n yield f\n\n def _normalize_slashes(self, location):\n return os.path.join(*re.split('[\\\\\\\\/]', location))\n\n def _add_base(self, location):\n location = self._normalize_slashes(location)\n return os.path.join(self.base_location, location)\n\n def _strip_base(self, path):\n return os.path.relpath(path, self.base_location)\n\n def __repr__(self):\n return '<DiskIO, base_location={}>'.format(self.base_location)\n\n def join(self, *args):\n '''\n the context-dependent version of os.path.join for this io manager\n '''\n return os.path.join(*args)\n\n def isfile(self, location):\n '''\n does `location` match a file?\n '''\n path = self._add_base(location)\n return os.path.isfile(path)\n\n def list(self, location, maxdepth=1):\n '''\n return all files that match location, either files\n whose names match up to an arbitrary extension\n or any files within an exactly matching directory name,\n nested as far as maxdepth (default 1) levels\n '''\n location = self._normalize_slashes(location)\n base_location, pattern = os.path.split(location)\n path = self._add_base(base_location)\n\n if not os.path.isdir(path):\n return []\n\n matches = [fn for fn in os.listdir(path) if fn.startswith(pattern)]\n out = []\n\n for match in matches:\n matchpath = self.join(path, match)\n if os.path.isdir(matchpath) and match == pattern and maxdepth > 0:\n # exact directory match - walk down to maxdepth\n for root, dirs, files in os.walk(matchpath, topdown=True):\n depth = root[len(path):].count(os.path.sep)\n if depth == maxdepth:\n dirs[:] = [] # don't recurse any further\n for fn in files:\n out.append(self._strip_base(self.join(root, fn)))\n\n elif (os.path.isfile(matchpath) and\n (match == pattern or os.path.splitext(match)[0] == pattern)):\n # exact filename match, or match up to an extension\n # note that we need match == pattern in addition to the\n # splitext test to cover the case of the base filename itself\n # containing a dot.\n out.append(self.join(base_location, match))\n\n return out\n\n def remove(self, filename):\n '''\n delete this file/folder and prune the directory tree\n '''\n path = self._add_base(filename)\n if(os.path.isdir(path)):\n shutil.rmtree(path)\n else:\n os.remove(path)\n\n filepath = os.path.split(path)[0]\n try:\n os.removedirs(filepath)\n except OSError:\n # directory was not empty - good that we're not removing it!\n pass\n\n def remove_all(self, location):\n '''\n delete all files/directories in the dataset at this location,\n and prune the directory tree\n '''\n for fn in self.list(location):\n self.remove(fn)\n\n\nclass FileWrapper:\n def read(self, size=None):\n raise NotImplementedError\n\n def readline(self, size=None):\n raise NotImplementedError\n\n def __iter__(self):\n return self\n\n def __next__(self):\n line = self.readline()\n if line:\n return line\n else:\n raise StopIteration\n\n def write(self, s):\n raise NotImplementedError\n\n def writelines(self, seq):\n for s in seq:\n self.write(s)\n", "path": "qcodes/data/io.py"}], "after_files": [{"content": "'''\nIO managers for QCodes\n\nIO managers wrap whatever physical storage layer the user wants to use\nin an interface mimicking the built-in <open> context manager, with\nsome restrictions to minimize the overhead in creating new IO managers.\n\nThe main thing these managers need to implement is the open context manager:\n- Only the context manager needs to be implemented, not separate\n open function and close methods.\n\n- open takes the standard parameters:\n filename: (string)\n mode: (string) only 'r' (read), 'w' (write), and 'a' (append) are\n expected to be implemented. As with normal file objects, the only\n difference between write and append is that write empties the file\n before adding new data, and append leaves the existing contents in\n place but starts writing at the end.\n\n- the file-like object returned should implement a minimal set of operations.\n\n In read mode:\n read([size]): read to the end or at most size bytes into a string\n readline([size]): read until a newline or up to size bytes, into a string\n iter(): usually return self, but can be any iterator over lines\n next(): assuming iter() returns self, this yields the next line.\n (note: iter and next can be constructed automatically by FileWrapper\n if you implement readline.)\n\n In write or append mode:\n write(s): add string s to the end of the file.\n writelines(seq): add a sequence of strings (can be constructed\n automatically if you use FileWrapper)\n\nIO managers should also implement:\n- a join method, ala os.path.join(*args).\n- a list method, that returns all objects matching location\n- a remove method, ala os.remove(path) except that it will remove directories\n as well as files, since we're allowing \"locations\" to be directories\n or files.\n'''\n\nfrom contextlib import contextmanager\nimport os\nimport re\nimport shutil\n\nALLOWED_OPEN_MODES = ('r', 'w', 'a')\n\n\nclass DiskIO:\n '''\n Simple IO object to wrap disk operations with a custom base location\n\n Also accepts both forward and backward slashes at any point, and\n normalizes both to the OS we are currently on\n '''\n def __init__(self, base_location):\n base_location = self._normalize_slashes(base_location)\n self.base_location = os.path.abspath(base_location)\n\n @contextmanager\n def open(self, filename, mode):\n '''\n mimics the interface of the built in open context manager\n filename: string, relative to base_location\n mode: 'r' (read), 'w' (write), or 'a' (append)\n other open modes are not supported because we don't want\n to force all IO managers to support others.\n '''\n if mode not in ALLOWED_OPEN_MODES:\n raise ValueError('mode {} not allowed in IO managers'.format(mode))\n\n filepath = self._add_base(filename)\n\n # make directories if needed\n dirpath = os.path.dirname(filepath)\n if not os.path.exists(dirpath):\n os.makedirs(dirpath)\n\n # normally we'd construct this context manager with try/finally, but\n # here we already have a context manager for open so we just wrap it\n with open(filepath, mode) as f:\n yield f\n\n def _normalize_slashes(self, location):\n # note that this is NOT os.path.join - the difference is os.path.join\n # discards empty strings, so if you use it on a re.split absolute\n # path you will get a relative path!\n return os.sep.join(re.split('[\\\\\\\\/]', location))\n\n def _add_base(self, location):\n location = self._normalize_slashes(location)\n return os.path.join(self.base_location, location)\n\n def _strip_base(self, path):\n return os.path.relpath(path, self.base_location)\n\n def __repr__(self):\n return '<DiskIO, base_location={}>'.format(self.base_location)\n\n def join(self, *args):\n '''\n the context-dependent version of os.path.join for this io manager\n '''\n return os.path.join(*args)\n\n def isfile(self, location):\n '''\n does `location` match a file?\n '''\n path = self._add_base(location)\n return os.path.isfile(path)\n\n def list(self, location, maxdepth=1):\n '''\n return all files that match location, either files\n whose names match up to an arbitrary extension\n or any files within an exactly matching directory name,\n nested as far as maxdepth (default 1) levels\n '''\n location = self._normalize_slashes(location)\n base_location, pattern = os.path.split(location)\n path = self._add_base(base_location)\n\n if not os.path.isdir(path):\n return []\n\n matches = [fn for fn in os.listdir(path) if fn.startswith(pattern)]\n out = []\n\n for match in matches:\n matchpath = self.join(path, match)\n if os.path.isdir(matchpath) and match == pattern and maxdepth > 0:\n # exact directory match - walk down to maxdepth\n for root, dirs, files in os.walk(matchpath, topdown=True):\n depth = root[len(path):].count(os.path.sep)\n if depth == maxdepth:\n dirs[:] = [] # don't recurse any further\n for fn in files:\n out.append(self._strip_base(self.join(root, fn)))\n\n elif (os.path.isfile(matchpath) and\n (match == pattern or os.path.splitext(match)[0] == pattern)):\n # exact filename match, or match up to an extension\n # note that we need match == pattern in addition to the\n # splitext test to cover the case of the base filename itself\n # containing a dot.\n out.append(self.join(base_location, match))\n\n return out\n\n def remove(self, filename):\n '''\n delete this file/folder and prune the directory tree\n '''\n path = self._add_base(filename)\n if(os.path.isdir(path)):\n shutil.rmtree(path)\n else:\n os.remove(path)\n\n filepath = os.path.split(path)[0]\n try:\n os.removedirs(filepath)\n except OSError:\n # directory was not empty - good that we're not removing it!\n pass\n\n def remove_all(self, location):\n '''\n delete all files/directories in the dataset at this location,\n and prune the directory tree\n '''\n for fn in self.list(location):\n self.remove(fn)\n\n\nclass FileWrapper:\n def read(self, size=None):\n raise NotImplementedError\n\n def readline(self, size=None):\n raise NotImplementedError\n\n def __iter__(self):\n return self\n\n def __next__(self):\n line = self.readline()\n if line:\n return line\n else:\n raise StopIteration\n\n def write(self, s):\n raise NotImplementedError\n\n def writelines(self, seq):\n for s in seq:\n self.write(s)\n", "path": "qcodes/data/io.py"}]}
2,356
157
gh_patches_debug_17275
rasdani/github-patches
git_diff
zestedesavoir__zds-site-5892
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Supprimer les messages privés de l'interface d'administration de Django À l'heure actuelle, un super-admin peut, via l'interface de Django, lire tous les MPs du site. Certes, l'interface est peu pratique pour ça (aucune notion de fil, etc.), mais je trouve tout de même bien peu souhaitable. ![Regardez-moi tous ces MPs dans l'interface d'administration de Django.](https://user-images.githubusercontent.com/1417570/88059844-b199bf00-cb65-11ea-8dc9-fc78310001c7.png) Après discussion avec @gcodeur sur ce sujet, je propose donc de **supprimer les MPs de l'interface d'administration de Django**. Une personne avec les accès prod pourrait toujours les lire (vu qu'ils ne sont pas chiffrés de bout en bout), mais ça limiterait d'autant l'exposition. Supprimer les messages privés de l'interface d'administration de Django À l'heure actuelle, un super-admin peut, via l'interface de Django, lire tous les MPs du site. Certes, l'interface est peu pratique pour ça (aucune notion de fil, etc.), mais je trouve tout de même bien peu souhaitable. ![Regardez-moi tous ces MPs dans l'interface d'administration de Django.](https://user-images.githubusercontent.com/1417570/88059844-b199bf00-cb65-11ea-8dc9-fc78310001c7.png) Après discussion avec @gcodeur sur ce sujet, je propose donc de **supprimer les MPs de l'interface d'administration de Django**. Une personne avec les accès prod pourrait toujours les lire (vu qu'ils ne sont pas chiffrés de bout en bout), mais ça limiterait d'autant l'exposition. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `zds/mp/admin.py` Content: ``` 1 from django.contrib import admin 2 3 from .models import PrivatePost, PrivateTopic, PrivateTopicRead 4 5 6 class PrivatePostAdmin(admin.ModelAdmin): 7 8 """Representation of PrivatePost model in the admin interface.""" 9 10 list_display = ('privatetopic', 'author', 'pubdate', 'update', 'position_in_topic') 11 raw_id_fields = ('privatetopic', 'author') 12 13 14 class PrivateTopicAdmin(admin.ModelAdmin): 15 16 """Representation of PrivateTopic model in the admin interface.""" 17 18 list_display = ('title', 'subtitle', 'author', 'last_message', 'pubdate') 19 raw_id_fields = ('author', 'participants', 'last_message') 20 21 22 class PrivateTopicReadAdmin(admin.ModelAdmin): 23 24 """Representation of PrivateTopicRead model in the admin interface.""" 25 26 list_display = ('privatetopic', 'privatepost', 'user') 27 raw_id_fields = ('privatetopic', 'privatepost', 'user') 28 29 30 admin.site.register(PrivatePost, PrivatePostAdmin) 31 admin.site.register(PrivateTopic, PrivateTopicAdmin) 32 admin.site.register(PrivateTopicRead, PrivateTopicReadAdmin) 33 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/zds/mp/admin.py b/zds/mp/admin.py deleted file mode 100644 --- a/zds/mp/admin.py +++ /dev/null @@ -1,32 +0,0 @@ -from django.contrib import admin - -from .models import PrivatePost, PrivateTopic, PrivateTopicRead - - -class PrivatePostAdmin(admin.ModelAdmin): - - """Representation of PrivatePost model in the admin interface.""" - - list_display = ('privatetopic', 'author', 'pubdate', 'update', 'position_in_topic') - raw_id_fields = ('privatetopic', 'author') - - -class PrivateTopicAdmin(admin.ModelAdmin): - - """Representation of PrivateTopic model in the admin interface.""" - - list_display = ('title', 'subtitle', 'author', 'last_message', 'pubdate') - raw_id_fields = ('author', 'participants', 'last_message') - - -class PrivateTopicReadAdmin(admin.ModelAdmin): - - """Representation of PrivateTopicRead model in the admin interface.""" - - list_display = ('privatetopic', 'privatepost', 'user') - raw_id_fields = ('privatetopic', 'privatepost', 'user') - - -admin.site.register(PrivatePost, PrivatePostAdmin) -admin.site.register(PrivateTopic, PrivateTopicAdmin) -admin.site.register(PrivateTopicRead, PrivateTopicReadAdmin)
{"golden_diff": "diff --git a/zds/mp/admin.py b/zds/mp/admin.py\ndeleted file mode 100644\n--- a/zds/mp/admin.py\n+++ /dev/null\n@@ -1,32 +0,0 @@\n-from django.contrib import admin\n-\n-from .models import PrivatePost, PrivateTopic, PrivateTopicRead\n-\n-\n-class PrivatePostAdmin(admin.ModelAdmin):\n-\n- \"\"\"Representation of PrivatePost model in the admin interface.\"\"\"\n-\n- list_display = ('privatetopic', 'author', 'pubdate', 'update', 'position_in_topic')\n- raw_id_fields = ('privatetopic', 'author')\n-\n-\n-class PrivateTopicAdmin(admin.ModelAdmin):\n-\n- \"\"\"Representation of PrivateTopic model in the admin interface.\"\"\"\n-\n- list_display = ('title', 'subtitle', 'author', 'last_message', 'pubdate')\n- raw_id_fields = ('author', 'participants', 'last_message')\n-\n-\n-class PrivateTopicReadAdmin(admin.ModelAdmin):\n-\n- \"\"\"Representation of PrivateTopicRead model in the admin interface.\"\"\"\n-\n- list_display = ('privatetopic', 'privatepost', 'user')\n- raw_id_fields = ('privatetopic', 'privatepost', 'user')\n-\n-\n-admin.site.register(PrivatePost, PrivatePostAdmin)\n-admin.site.register(PrivateTopic, PrivateTopicAdmin)\n-admin.site.register(PrivateTopicRead, PrivateTopicReadAdmin)\n", "issue": "Supprimer les messages priv\u00e9s de l'interface d'administration de Django\n\u00c0 l'heure actuelle, un super-admin peut, via l'interface de Django, lire tous les MPs du site. Certes, l'interface est peu pratique pour \u00e7a (aucune notion de fil, etc.), mais je trouve tout de m\u00eame bien peu souhaitable.\r\n\r\n![Regardez-moi tous ces MPs dans l'interface d'administration de Django.](https://user-images.githubusercontent.com/1417570/88059844-b199bf00-cb65-11ea-8dc9-fc78310001c7.png)\r\n\r\nApr\u00e8s discussion avec @gcodeur sur ce sujet, je propose donc de **supprimer les MPs de l'interface d'administration de Django**. Une personne avec les acc\u00e8s prod pourrait toujours les lire (vu qu'ils ne sont pas chiffr\u00e9s de bout en bout), mais \u00e7a limiterait d'autant l'exposition.\nSupprimer les messages priv\u00e9s de l'interface d'administration de Django\n\u00c0 l'heure actuelle, un super-admin peut, via l'interface de Django, lire tous les MPs du site. Certes, l'interface est peu pratique pour \u00e7a (aucune notion de fil, etc.), mais je trouve tout de m\u00eame bien peu souhaitable.\r\n\r\n![Regardez-moi tous ces MPs dans l'interface d'administration de Django.](https://user-images.githubusercontent.com/1417570/88059844-b199bf00-cb65-11ea-8dc9-fc78310001c7.png)\r\n\r\nApr\u00e8s discussion avec @gcodeur sur ce sujet, je propose donc de **supprimer les MPs de l'interface d'administration de Django**. Une personne avec les acc\u00e8s prod pourrait toujours les lire (vu qu'ils ne sont pas chiffr\u00e9s de bout en bout), mais \u00e7a limiterait d'autant l'exposition.\n", "before_files": [{"content": "from django.contrib import admin\n\nfrom .models import PrivatePost, PrivateTopic, PrivateTopicRead\n\n\nclass PrivatePostAdmin(admin.ModelAdmin):\n\n \"\"\"Representation of PrivatePost model in the admin interface.\"\"\"\n\n list_display = ('privatetopic', 'author', 'pubdate', 'update', 'position_in_topic')\n raw_id_fields = ('privatetopic', 'author')\n\n\nclass PrivateTopicAdmin(admin.ModelAdmin):\n\n \"\"\"Representation of PrivateTopic model in the admin interface.\"\"\"\n\n list_display = ('title', 'subtitle', 'author', 'last_message', 'pubdate')\n raw_id_fields = ('author', 'participants', 'last_message')\n\n\nclass PrivateTopicReadAdmin(admin.ModelAdmin):\n\n \"\"\"Representation of PrivateTopicRead model in the admin interface.\"\"\"\n\n list_display = ('privatetopic', 'privatepost', 'user')\n raw_id_fields = ('privatetopic', 'privatepost', 'user')\n\n\nadmin.site.register(PrivatePost, PrivatePostAdmin)\nadmin.site.register(PrivateTopic, PrivateTopicAdmin)\nadmin.site.register(PrivateTopicRead, PrivateTopicReadAdmin)\n", "path": "zds/mp/admin.py"}], "after_files": [{"content": null, "path": "zds/mp/admin.py"}]}
997
299
gh_patches_debug_14796
rasdani/github-patches
git_diff
espnet__espnet-3262
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Error when "--field -5" is passed to espnet2.bin.tokenize_text **Describe the bug** ``` D:\Anaconda\python.exe D:/repos/espnet/espnet2/bin/tokenize_text.py --token_type phn --input tmp.txt --output tmp.phn --field -2 --cleaner none --g2p g2p_en --add_symbol '$<blank>:0' --add_symbol '<oov>:1' --add_symbol '<sos/eos>:-1' --write_vocabulary false --keep_all_fields true Traceback (most recent call last): File "D:/repos/espnet/espnet2/bin/tokenize_text.py", line 297, in <module> main() File "D:/repos/espnet/espnet2/bin/tokenize_text.py", line 293, in main tokenize(**kwargs) File "D:/repos/espnet/espnet2/bin/tokenize_text.py", line 112, in tokenize field = field2slice(field) File "D:/repos/espnet/espnet2/bin/tokenize_text.py", line 59, in field2slice slic = slice(s1 - 1, s2) TypeError: unsupported operand type(s) for -: 'NoneType' and 'int' ``` This is because of a missing None check [here](https://github.com/espnet/espnet/blob/master/espnet2/bin/tokenize_text.py#L59) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `espnet2/bin/tokenize_text.py` Content: ``` 1 #!/usr/bin/env python3 2 import argparse 3 from collections import Counter 4 import logging 5 from pathlib import Path 6 import sys 7 from typing import List 8 from typing import Optional 9 10 from typeguard import check_argument_types 11 12 from espnet.utils.cli_utils import get_commandline_args 13 from espnet2.text.build_tokenizer import build_tokenizer 14 from espnet2.text.cleaner import TextCleaner 15 from espnet2.utils.types import str2bool 16 from espnet2.utils.types import str_or_none 17 18 19 def field2slice(field: Optional[str]) -> slice: 20 """Convert field string to slice 21 22 Note that field string accepts 1-based integer. 23 24 Examples: 25 >>> field2slice("1-") 26 slice(0, None, None) 27 >>> field2slice("1-3") 28 slice(0, 3, None) 29 >>> field2slice("-3") 30 slice(None, 3, None) 31 32 """ 33 field = field.strip() 34 try: 35 if "-" in field: 36 # e.g. "2-" or "2-5" or "-7" 37 s1, s2 = field.split("-", maxsplit=1) 38 if s1.strip() == "": 39 s1 = None 40 else: 41 s1 = int(s1) 42 if s1 == 0: 43 raise ValueError("1-based string") 44 if s2.strip() == "": 45 s2 = None 46 else: 47 s2 = int(s2) 48 else: 49 # e.g. "2" 50 s1 = int(field) 51 s2 = s1 + 1 52 if s1 == 0: 53 raise ValueError("must be 1 or more value") 54 except ValueError: 55 raise RuntimeError(f"Format error: e.g. '2-', '2-5', or '-5': {field}") 56 57 # -1 because of 1-based integer following "cut" command 58 # e.g "1-3" -> slice(0, 3) 59 slic = slice(s1 - 1, s2) 60 return slic 61 62 63 def tokenize( 64 input: str, 65 output: str, 66 field: Optional[str], 67 delimiter: Optional[str], 68 token_type: str, 69 space_symbol: str, 70 non_linguistic_symbols: Optional[str], 71 bpemodel: Optional[str], 72 log_level: str, 73 write_vocabulary: bool, 74 vocabulary_size: int, 75 remove_non_linguistic_symbols: bool, 76 cutoff: int, 77 add_symbol: List[str], 78 cleaner: Optional[str], 79 g2p: Optional[str], 80 ): 81 assert check_argument_types() 82 83 logging.basicConfig( 84 level=log_level, 85 format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s", 86 ) 87 if input == "-": 88 fin = sys.stdin 89 else: 90 fin = Path(input).open("r", encoding="utf-8") 91 if output == "-": 92 fout = sys.stdout 93 else: 94 p = Path(output) 95 p.parent.mkdir(parents=True, exist_ok=True) 96 fout = p.open("w", encoding="utf-8") 97 98 cleaner = TextCleaner(cleaner) 99 tokenizer = build_tokenizer( 100 token_type=token_type, 101 bpemodel=bpemodel, 102 delimiter=delimiter, 103 space_symbol=space_symbol, 104 non_linguistic_symbols=non_linguistic_symbols, 105 remove_non_linguistic_symbols=remove_non_linguistic_symbols, 106 g2p_type=g2p, 107 ) 108 109 counter = Counter() 110 if field is not None: 111 field = field2slice(field) 112 113 for line in fin: 114 line = line.rstrip() 115 if field is not None: 116 # e.g. field="2-" 117 # uttidA hello world!! -> hello world!! 118 tokens = line.split(delimiter) 119 tokens = tokens[field] 120 if delimiter is None: 121 line = " ".join(tokens) 122 else: 123 line = delimiter.join(tokens) 124 125 line = cleaner(line) 126 tokens = tokenizer.text2tokens(line) 127 if not write_vocabulary: 128 fout.write(" ".join(tokens) + "\n") 129 else: 130 for t in tokens: 131 counter[t] += 1 132 133 if not write_vocabulary: 134 return 135 136 # ======= write_vocabulary mode from here ======= 137 # Sort by the number of occurrences in descending order 138 # and filter lower frequency words than cutoff value 139 words_and_counts = list( 140 filter(lambda x: x[1] > cutoff, sorted(counter.items(), key=lambda x: -x[1])) 141 ) 142 # Restrict the vocabulary size 143 if vocabulary_size > 0: 144 if vocabulary_size < len(add_symbol): 145 raise RuntimeError(f"vocabulary_size is too small: {vocabulary_size}") 146 words_and_counts = words_and_counts[: vocabulary_size - len(add_symbol)] 147 148 # Parse the values of --add_symbol 149 for symbol_and_id in add_symbol: 150 # e.g symbol="<blank>:0" 151 try: 152 symbol, idx = symbol_and_id.split(":") 153 idx = int(idx) 154 except ValueError: 155 raise RuntimeError(f"Format error: e.g. '<blank>:0': {symbol_and_id}") 156 symbol = symbol.strip() 157 158 # e.g. idx=0 -> append as the first symbol 159 # e.g. idx=-1 -> append as the last symbol 160 if idx < 0: 161 idx = len(words_and_counts) + 1 + idx 162 words_and_counts.insert(idx, (symbol, None)) 163 164 # Write words 165 for w, c in words_and_counts: 166 fout.write(w + "\n") 167 168 # Logging 169 total_count = sum(counter.values()) 170 invocab_count = sum(c for w, c in words_and_counts if c is not None) 171 logging.info(f"OOV rate = {(total_count - invocab_count) / total_count * 100} %") 172 173 174 def get_parser() -> argparse.ArgumentParser: 175 parser = argparse.ArgumentParser( 176 description="Tokenize texts", 177 formatter_class=argparse.ArgumentDefaultsHelpFormatter, 178 ) 179 parser.add_argument( 180 "--log_level", 181 type=lambda x: x.upper(), 182 default="INFO", 183 choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"), 184 help="The verbose level of logging", 185 ) 186 187 parser.add_argument( 188 "--input", "-i", required=True, help="Input text. - indicates sys.stdin" 189 ) 190 parser.add_argument( 191 "--output", "-o", required=True, help="Output text. - indicates sys.stdout" 192 ) 193 parser.add_argument( 194 "--field", 195 "-f", 196 help="The target columns of the input text as 1-based integer. e.g 2-", 197 ) 198 parser.add_argument( 199 "--token_type", 200 "-t", 201 default="char", 202 choices=["char", "bpe", "word", "phn"], 203 help="Token type", 204 ) 205 parser.add_argument("--delimiter", "-d", default=None, help="The delimiter") 206 parser.add_argument("--space_symbol", default="<space>", help="The space symbol") 207 parser.add_argument("--bpemodel", default=None, help="The bpemodel file path") 208 parser.add_argument( 209 "--non_linguistic_symbols", 210 type=str_or_none, 211 help="non_linguistic_symbols file path", 212 ) 213 parser.add_argument( 214 "--remove_non_linguistic_symbols", 215 type=str2bool, 216 default=False, 217 help="Remove non-language-symbols from tokens", 218 ) 219 parser.add_argument( 220 "--cleaner", 221 type=str_or_none, 222 choices=[None, "tacotron", "jaconv", "vietnamese"], 223 default=None, 224 help="Apply text cleaning", 225 ) 226 parser.add_argument( 227 "--g2p", 228 type=str_or_none, 229 choices=[ 230 None, 231 "g2p_en", 232 "g2p_en_no_space", 233 "pyopenjtalk", 234 "pyopenjtalk_kana", 235 "pyopenjtalk_accent", 236 "pyopenjtalk_accent_with_pause", 237 "pypinyin_g2p", 238 "pypinyin_g2p_phone", 239 "espeak_ng_arabic", 240 ], 241 default=None, 242 help="Specify g2p method if --token_type=phn", 243 ) 244 245 group = parser.add_argument_group("write_vocabulary mode related") 246 group.add_argument( 247 "--write_vocabulary", 248 type=str2bool, 249 default=False, 250 help="Write tokens list instead of tokenized text per line", 251 ) 252 group.add_argument("--vocabulary_size", type=int, default=0, help="Vocabulary size") 253 group.add_argument( 254 "--cutoff", 255 default=0, 256 type=int, 257 help="cut-off frequency used for write-vocabulary mode", 258 ) 259 group.add_argument( 260 "--add_symbol", 261 type=str, 262 default=[], 263 action="append", 264 help="Append symbol e.g. --add_symbol '<blank>:0' --add_symbol '<unk>:1'", 265 ) 266 267 return parser 268 269 270 def main(cmd=None): 271 print(get_commandline_args(), file=sys.stderr) 272 parser = get_parser() 273 args = parser.parse_args(cmd) 274 kwargs = vars(args) 275 tokenize(**kwargs) 276 277 278 if __name__ == "__main__": 279 main() 280 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/espnet2/bin/tokenize_text.py b/espnet2/bin/tokenize_text.py --- a/espnet2/bin/tokenize_text.py +++ b/espnet2/bin/tokenize_text.py @@ -28,7 +28,6 @@ slice(0, 3, None) >>> field2slice("-3") slice(None, 3, None) - """ field = field.strip() try: @@ -54,9 +53,12 @@ except ValueError: raise RuntimeError(f"Format error: e.g. '2-', '2-5', or '-5': {field}") - # -1 because of 1-based integer following "cut" command - # e.g "1-3" -> slice(0, 3) - slic = slice(s1 - 1, s2) + if s1 is None: + slic = slice(None, s2) + else: + # -1 because of 1-based integer following "cut" command + # e.g "1-3" -> slice(0, 3) + slic = slice(s1 - 1, s2) return slic
{"golden_diff": "diff --git a/espnet2/bin/tokenize_text.py b/espnet2/bin/tokenize_text.py\n--- a/espnet2/bin/tokenize_text.py\n+++ b/espnet2/bin/tokenize_text.py\n@@ -28,7 +28,6 @@\n slice(0, 3, None)\n >>> field2slice(\"-3\")\n slice(None, 3, None)\n-\n \"\"\"\n field = field.strip()\n try:\n@@ -54,9 +53,12 @@\n except ValueError:\n raise RuntimeError(f\"Format error: e.g. '2-', '2-5', or '-5': {field}\")\n \n- # -1 because of 1-based integer following \"cut\" command\n- # e.g \"1-3\" -> slice(0, 3)\n- slic = slice(s1 - 1, s2)\n+ if s1 is None:\n+ slic = slice(None, s2)\n+ else:\n+ # -1 because of 1-based integer following \"cut\" command\n+ # e.g \"1-3\" -> slice(0, 3)\n+ slic = slice(s1 - 1, s2)\n return slic\n", "issue": "Error when \"--field -5\" is passed to espnet2.bin.tokenize_text\n**Describe the bug**\r\n\r\n```\r\nD:\\Anaconda\\python.exe D:/repos/espnet/espnet2/bin/tokenize_text.py --token_type phn --input tmp.txt --output tmp.phn --field -2 --cleaner none --g2p g2p_en --add_symbol '$<blank>:0' --add_symbol '<oov>:1' --add_symbol '<sos/eos>:-1' --write_vocabulary false --keep_all_fields true\r\nTraceback (most recent call last):\r\n File \"D:/repos/espnet/espnet2/bin/tokenize_text.py\", line 297, in <module>\r\n main()\r\n File \"D:/repos/espnet/espnet2/bin/tokenize_text.py\", line 293, in main\r\n tokenize(**kwargs)\r\n File \"D:/repos/espnet/espnet2/bin/tokenize_text.py\", line 112, in tokenize\r\n field = field2slice(field)\r\n File \"D:/repos/espnet/espnet2/bin/tokenize_text.py\", line 59, in field2slice\r\n slic = slice(s1 - 1, s2)\r\nTypeError: unsupported operand type(s) for -: 'NoneType' and 'int'\r\n```\r\nThis is because of a missing None check [here](https://github.com/espnet/espnet/blob/master/espnet2/bin/tokenize_text.py#L59)\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\nimport argparse\nfrom collections import Counter\nimport logging\nfrom pathlib import Path\nimport sys\nfrom typing import List\nfrom typing import Optional\n\nfrom typeguard import check_argument_types\n\nfrom espnet.utils.cli_utils import get_commandline_args\nfrom espnet2.text.build_tokenizer import build_tokenizer\nfrom espnet2.text.cleaner import TextCleaner\nfrom espnet2.utils.types import str2bool\nfrom espnet2.utils.types import str_or_none\n\n\ndef field2slice(field: Optional[str]) -> slice:\n \"\"\"Convert field string to slice\n\n Note that field string accepts 1-based integer.\n\n Examples:\n >>> field2slice(\"1-\")\n slice(0, None, None)\n >>> field2slice(\"1-3\")\n slice(0, 3, None)\n >>> field2slice(\"-3\")\n slice(None, 3, None)\n\n \"\"\"\n field = field.strip()\n try:\n if \"-\" in field:\n # e.g. \"2-\" or \"2-5\" or \"-7\"\n s1, s2 = field.split(\"-\", maxsplit=1)\n if s1.strip() == \"\":\n s1 = None\n else:\n s1 = int(s1)\n if s1 == 0:\n raise ValueError(\"1-based string\")\n if s2.strip() == \"\":\n s2 = None\n else:\n s2 = int(s2)\n else:\n # e.g. \"2\"\n s1 = int(field)\n s2 = s1 + 1\n if s1 == 0:\n raise ValueError(\"must be 1 or more value\")\n except ValueError:\n raise RuntimeError(f\"Format error: e.g. '2-', '2-5', or '-5': {field}\")\n\n # -1 because of 1-based integer following \"cut\" command\n # e.g \"1-3\" -> slice(0, 3)\n slic = slice(s1 - 1, s2)\n return slic\n\n\ndef tokenize(\n input: str,\n output: str,\n field: Optional[str],\n delimiter: Optional[str],\n token_type: str,\n space_symbol: str,\n non_linguistic_symbols: Optional[str],\n bpemodel: Optional[str],\n log_level: str,\n write_vocabulary: bool,\n vocabulary_size: int,\n remove_non_linguistic_symbols: bool,\n cutoff: int,\n add_symbol: List[str],\n cleaner: Optional[str],\n g2p: Optional[str],\n):\n assert check_argument_types()\n\n logging.basicConfig(\n level=log_level,\n format=\"%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s\",\n )\n if input == \"-\":\n fin = sys.stdin\n else:\n fin = Path(input).open(\"r\", encoding=\"utf-8\")\n if output == \"-\":\n fout = sys.stdout\n else:\n p = Path(output)\n p.parent.mkdir(parents=True, exist_ok=True)\n fout = p.open(\"w\", encoding=\"utf-8\")\n\n cleaner = TextCleaner(cleaner)\n tokenizer = build_tokenizer(\n token_type=token_type,\n bpemodel=bpemodel,\n delimiter=delimiter,\n space_symbol=space_symbol,\n non_linguistic_symbols=non_linguistic_symbols,\n remove_non_linguistic_symbols=remove_non_linguistic_symbols,\n g2p_type=g2p,\n )\n\n counter = Counter()\n if field is not None:\n field = field2slice(field)\n\n for line in fin:\n line = line.rstrip()\n if field is not None:\n # e.g. field=\"2-\"\n # uttidA hello world!! -> hello world!!\n tokens = line.split(delimiter)\n tokens = tokens[field]\n if delimiter is None:\n line = \" \".join(tokens)\n else:\n line = delimiter.join(tokens)\n\n line = cleaner(line)\n tokens = tokenizer.text2tokens(line)\n if not write_vocabulary:\n fout.write(\" \".join(tokens) + \"\\n\")\n else:\n for t in tokens:\n counter[t] += 1\n\n if not write_vocabulary:\n return\n\n # ======= write_vocabulary mode from here =======\n # Sort by the number of occurrences in descending order\n # and filter lower frequency words than cutoff value\n words_and_counts = list(\n filter(lambda x: x[1] > cutoff, sorted(counter.items(), key=lambda x: -x[1]))\n )\n # Restrict the vocabulary size\n if vocabulary_size > 0:\n if vocabulary_size < len(add_symbol):\n raise RuntimeError(f\"vocabulary_size is too small: {vocabulary_size}\")\n words_and_counts = words_and_counts[: vocabulary_size - len(add_symbol)]\n\n # Parse the values of --add_symbol\n for symbol_and_id in add_symbol:\n # e.g symbol=\"<blank>:0\"\n try:\n symbol, idx = symbol_and_id.split(\":\")\n idx = int(idx)\n except ValueError:\n raise RuntimeError(f\"Format error: e.g. '<blank>:0': {symbol_and_id}\")\n symbol = symbol.strip()\n\n # e.g. idx=0 -> append as the first symbol\n # e.g. idx=-1 -> append as the last symbol\n if idx < 0:\n idx = len(words_and_counts) + 1 + idx\n words_and_counts.insert(idx, (symbol, None))\n\n # Write words\n for w, c in words_and_counts:\n fout.write(w + \"\\n\")\n\n # Logging\n total_count = sum(counter.values())\n invocab_count = sum(c for w, c in words_and_counts if c is not None)\n logging.info(f\"OOV rate = {(total_count - invocab_count) / total_count * 100} %\")\n\n\ndef get_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser(\n description=\"Tokenize texts\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n parser.add_argument(\n \"--log_level\",\n type=lambda x: x.upper(),\n default=\"INFO\",\n choices=(\"CRITICAL\", \"ERROR\", \"WARNING\", \"INFO\", \"DEBUG\", \"NOTSET\"),\n help=\"The verbose level of logging\",\n )\n\n parser.add_argument(\n \"--input\", \"-i\", required=True, help=\"Input text. - indicates sys.stdin\"\n )\n parser.add_argument(\n \"--output\", \"-o\", required=True, help=\"Output text. - indicates sys.stdout\"\n )\n parser.add_argument(\n \"--field\",\n \"-f\",\n help=\"The target columns of the input text as 1-based integer. e.g 2-\",\n )\n parser.add_argument(\n \"--token_type\",\n \"-t\",\n default=\"char\",\n choices=[\"char\", \"bpe\", \"word\", \"phn\"],\n help=\"Token type\",\n )\n parser.add_argument(\"--delimiter\", \"-d\", default=None, help=\"The delimiter\")\n parser.add_argument(\"--space_symbol\", default=\"<space>\", help=\"The space symbol\")\n parser.add_argument(\"--bpemodel\", default=None, help=\"The bpemodel file path\")\n parser.add_argument(\n \"--non_linguistic_symbols\",\n type=str_or_none,\n help=\"non_linguistic_symbols file path\",\n )\n parser.add_argument(\n \"--remove_non_linguistic_symbols\",\n type=str2bool,\n default=False,\n help=\"Remove non-language-symbols from tokens\",\n )\n parser.add_argument(\n \"--cleaner\",\n type=str_or_none,\n choices=[None, \"tacotron\", \"jaconv\", \"vietnamese\"],\n default=None,\n help=\"Apply text cleaning\",\n )\n parser.add_argument(\n \"--g2p\",\n type=str_or_none,\n choices=[\n None,\n \"g2p_en\",\n \"g2p_en_no_space\",\n \"pyopenjtalk\",\n \"pyopenjtalk_kana\",\n \"pyopenjtalk_accent\",\n \"pyopenjtalk_accent_with_pause\",\n \"pypinyin_g2p\",\n \"pypinyin_g2p_phone\",\n \"espeak_ng_arabic\",\n ],\n default=None,\n help=\"Specify g2p method if --token_type=phn\",\n )\n\n group = parser.add_argument_group(\"write_vocabulary mode related\")\n group.add_argument(\n \"--write_vocabulary\",\n type=str2bool,\n default=False,\n help=\"Write tokens list instead of tokenized text per line\",\n )\n group.add_argument(\"--vocabulary_size\", type=int, default=0, help=\"Vocabulary size\")\n group.add_argument(\n \"--cutoff\",\n default=0,\n type=int,\n help=\"cut-off frequency used for write-vocabulary mode\",\n )\n group.add_argument(\n \"--add_symbol\",\n type=str,\n default=[],\n action=\"append\",\n help=\"Append symbol e.g. --add_symbol '<blank>:0' --add_symbol '<unk>:1'\",\n )\n\n return parser\n\n\ndef main(cmd=None):\n print(get_commandline_args(), file=sys.stderr)\n parser = get_parser()\n args = parser.parse_args(cmd)\n kwargs = vars(args)\n tokenize(**kwargs)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "espnet2/bin/tokenize_text.py"}], "after_files": [{"content": "#!/usr/bin/env python3\nimport argparse\nfrom collections import Counter\nimport logging\nfrom pathlib import Path\nimport sys\nfrom typing import List\nfrom typing import Optional\n\nfrom typeguard import check_argument_types\n\nfrom espnet.utils.cli_utils import get_commandline_args\nfrom espnet2.text.build_tokenizer import build_tokenizer\nfrom espnet2.text.cleaner import TextCleaner\nfrom espnet2.utils.types import str2bool\nfrom espnet2.utils.types import str_or_none\n\n\ndef field2slice(field: Optional[str]) -> slice:\n \"\"\"Convert field string to slice\n\n Note that field string accepts 1-based integer.\n\n Examples:\n >>> field2slice(\"1-\")\n slice(0, None, None)\n >>> field2slice(\"1-3\")\n slice(0, 3, None)\n >>> field2slice(\"-3\")\n slice(None, 3, None)\n \"\"\"\n field = field.strip()\n try:\n if \"-\" in field:\n # e.g. \"2-\" or \"2-5\" or \"-7\"\n s1, s2 = field.split(\"-\", maxsplit=1)\n if s1.strip() == \"\":\n s1 = None\n else:\n s1 = int(s1)\n if s1 == 0:\n raise ValueError(\"1-based string\")\n if s2.strip() == \"\":\n s2 = None\n else:\n s2 = int(s2)\n else:\n # e.g. \"2\"\n s1 = int(field)\n s2 = s1 + 1\n if s1 == 0:\n raise ValueError(\"must be 1 or more value\")\n except ValueError:\n raise RuntimeError(f\"Format error: e.g. '2-', '2-5', or '-5': {field}\")\n\n if s1 is None:\n slic = slice(None, s2)\n else:\n # -1 because of 1-based integer following \"cut\" command\n # e.g \"1-3\" -> slice(0, 3)\n slic = slice(s1 - 1, s2)\n return slic\n\n\ndef tokenize(\n input: str,\n output: str,\n field: Optional[str],\n delimiter: Optional[str],\n token_type: str,\n space_symbol: str,\n non_linguistic_symbols: Optional[str],\n bpemodel: Optional[str],\n log_level: str,\n write_vocabulary: bool,\n vocabulary_size: int,\n remove_non_linguistic_symbols: bool,\n cutoff: int,\n add_symbol: List[str],\n cleaner: Optional[str],\n g2p: Optional[str],\n):\n assert check_argument_types()\n\n logging.basicConfig(\n level=log_level,\n format=\"%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s\",\n )\n if input == \"-\":\n fin = sys.stdin\n else:\n fin = Path(input).open(\"r\", encoding=\"utf-8\")\n if output == \"-\":\n fout = sys.stdout\n else:\n p = Path(output)\n p.parent.mkdir(parents=True, exist_ok=True)\n fout = p.open(\"w\", encoding=\"utf-8\")\n\n cleaner = TextCleaner(cleaner)\n tokenizer = build_tokenizer(\n token_type=token_type,\n bpemodel=bpemodel,\n delimiter=delimiter,\n space_symbol=space_symbol,\n non_linguistic_symbols=non_linguistic_symbols,\n remove_non_linguistic_symbols=remove_non_linguistic_symbols,\n g2p_type=g2p,\n )\n\n counter = Counter()\n if field is not None:\n field = field2slice(field)\n\n for line in fin:\n line = line.rstrip()\n if field is not None:\n # e.g. field=\"2-\"\n # uttidA hello world!! -> hello world!!\n tokens = line.split(delimiter)\n tokens = tokens[field]\n if delimiter is None:\n line = \" \".join(tokens)\n else:\n line = delimiter.join(tokens)\n\n line = cleaner(line)\n tokens = tokenizer.text2tokens(line)\n if not write_vocabulary:\n fout.write(\" \".join(tokens) + \"\\n\")\n else:\n for t in tokens:\n counter[t] += 1\n\n if not write_vocabulary:\n return\n\n # ======= write_vocabulary mode from here =======\n # Sort by the number of occurrences in descending order\n # and filter lower frequency words than cutoff value\n words_and_counts = list(\n filter(lambda x: x[1] > cutoff, sorted(counter.items(), key=lambda x: -x[1]))\n )\n # Restrict the vocabulary size\n if vocabulary_size > 0:\n if vocabulary_size < len(add_symbol):\n raise RuntimeError(f\"vocabulary_size is too small: {vocabulary_size}\")\n words_and_counts = words_and_counts[: vocabulary_size - len(add_symbol)]\n\n # Parse the values of --add_symbol\n for symbol_and_id in add_symbol:\n # e.g symbol=\"<blank>:0\"\n try:\n symbol, idx = symbol_and_id.split(\":\")\n idx = int(idx)\n except ValueError:\n raise RuntimeError(f\"Format error: e.g. '<blank>:0': {symbol_and_id}\")\n symbol = symbol.strip()\n\n # e.g. idx=0 -> append as the first symbol\n # e.g. idx=-1 -> append as the last symbol\n if idx < 0:\n idx = len(words_and_counts) + 1 + idx\n words_and_counts.insert(idx, (symbol, None))\n\n # Write words\n for w, c in words_and_counts:\n fout.write(w + \"\\n\")\n\n # Logging\n total_count = sum(counter.values())\n invocab_count = sum(c for w, c in words_and_counts if c is not None)\n logging.info(f\"OOV rate = {(total_count - invocab_count) / total_count * 100} %\")\n\n\ndef get_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser(\n description=\"Tokenize texts\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n parser.add_argument(\n \"--log_level\",\n type=lambda x: x.upper(),\n default=\"INFO\",\n choices=(\"CRITICAL\", \"ERROR\", \"WARNING\", \"INFO\", \"DEBUG\", \"NOTSET\"),\n help=\"The verbose level of logging\",\n )\n\n parser.add_argument(\n \"--input\", \"-i\", required=True, help=\"Input text. - indicates sys.stdin\"\n )\n parser.add_argument(\n \"--output\", \"-o\", required=True, help=\"Output text. - indicates sys.stdout\"\n )\n parser.add_argument(\n \"--field\",\n \"-f\",\n help=\"The target columns of the input text as 1-based integer. e.g 2-\",\n )\n parser.add_argument(\n \"--token_type\",\n \"-t\",\n default=\"char\",\n choices=[\"char\", \"bpe\", \"word\", \"phn\"],\n help=\"Token type\",\n )\n parser.add_argument(\"--delimiter\", \"-d\", default=None, help=\"The delimiter\")\n parser.add_argument(\"--space_symbol\", default=\"<space>\", help=\"The space symbol\")\n parser.add_argument(\"--bpemodel\", default=None, help=\"The bpemodel file path\")\n parser.add_argument(\n \"--non_linguistic_symbols\",\n type=str_or_none,\n help=\"non_linguistic_symbols file path\",\n )\n parser.add_argument(\n \"--remove_non_linguistic_symbols\",\n type=str2bool,\n default=False,\n help=\"Remove non-language-symbols from tokens\",\n )\n parser.add_argument(\n \"--cleaner\",\n type=str_or_none,\n choices=[None, \"tacotron\", \"jaconv\", \"vietnamese\"],\n default=None,\n help=\"Apply text cleaning\",\n )\n parser.add_argument(\n \"--g2p\",\n type=str_or_none,\n choices=[\n None,\n \"g2p_en\",\n \"g2p_en_no_space\",\n \"pyopenjtalk\",\n \"pyopenjtalk_kana\",\n \"pyopenjtalk_accent\",\n \"pyopenjtalk_accent_with_pause\",\n \"pypinyin_g2p\",\n \"pypinyin_g2p_phone\",\n \"espeak_ng_arabic\",\n ],\n default=None,\n help=\"Specify g2p method if --token_type=phn\",\n )\n\n group = parser.add_argument_group(\"write_vocabulary mode related\")\n group.add_argument(\n \"--write_vocabulary\",\n type=str2bool,\n default=False,\n help=\"Write tokens list instead of tokenized text per line\",\n )\n group.add_argument(\"--vocabulary_size\", type=int, default=0, help=\"Vocabulary size\")\n group.add_argument(\n \"--cutoff\",\n default=0,\n type=int,\n help=\"cut-off frequency used for write-vocabulary mode\",\n )\n group.add_argument(\n \"--add_symbol\",\n type=str,\n default=[],\n action=\"append\",\n help=\"Append symbol e.g. --add_symbol '<blank>:0' --add_symbol '<unk>:1'\",\n )\n\n return parser\n\n\ndef main(cmd=None):\n print(get_commandline_args(), file=sys.stderr)\n parser = get_parser()\n args = parser.parse_args(cmd)\n kwargs = vars(args)\n tokenize(**kwargs)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "espnet2/bin/tokenize_text.py"}]}
3,345
267
gh_patches_debug_38380
rasdani/github-patches
git_diff
StackStorm__st2-5059
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ST2 logs writing "(unknown file)" in log prefix, instead of module name ## SUMMARY In ST2 v3.2, the st2sensorcontainer.log, and in v3.3 all st2 logs have the `module` name replaced with `(unknown file)` ### STACKSTORM VERSION v3.2, v3.3, EL8 and U18.04 Python 3.6 ### OS, environment, install method One Line and Ansible so far. ## Steps to reproduce the problem Show how to reproduce the problem, using a minimal test-case. Make sure to include any content (pack content - workflows, actions, etc.) which are needed to reproduce the problem. ## Expected Results No `(unknown file)`, but the python module name. ## Actual Results ``` 2020-10-13 17:55:13,787 140460262501416 INFO (unknown file) [-] Sensor linux.FileWatchSensor started 2020-10-13 17:55:15,444 139725927337456 INFO (unknown file) [-] No config found for sensor "FileWatchSensor" 2020-10-13 17:55:15,446 139725927337456 INFO (unknown file) [-] Watcher started 2020-10-13 17:55:15,446 139725927337456 INFO (unknown file) [-] Running sensor initialization code 2020-10-13 17:55:15,454 139725927337456 INFO (unknown file) [-] Running sensor in passive mode ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `st2common/st2common/log.py` Content: ``` 1 # Copyright 2020 The StackStorm Authors. 2 # Copyright 2019 Extreme Networks, Inc. 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 16 from __future__ import absolute_import 17 18 import os 19 import sys 20 import logging 21 import logging.config 22 import logging.handlers 23 import traceback 24 from functools import wraps 25 26 import six 27 28 from st2common.logging.filters import LoggerNameExclusionFilter 29 30 # Those are here for backward compatibility reasons 31 from st2common.logging.handlers import FormatNamedFileHandler 32 from st2common.logging.handlers import ConfigurableSyslogHandler 33 from st2common.util.misc import prefix_dict_keys 34 from st2common.util.misc import get_normalized_file_path 35 36 __all__ = [ 37 'getLogger', 38 'setup', 39 40 'FormatNamedFileHandler', 41 'ConfigurableSyslogHandler', 42 43 'LoggingStream', 44 45 'ignore_lib2to3_log_messages', 46 'ignore_statsd_log_messages' 47 ] 48 49 # NOTE: We set AUDIT to the highest log level which means AUDIT log messages will always be 50 # included (e.g. also if log level is set to INFO). To avoid that, we need to explicitly filter 51 # out AUDIT log level in service setup code. 52 logging.AUDIT = logging.CRITICAL + 10 53 logging.addLevelName(logging.AUDIT, 'AUDIT') 54 55 LOGGER_KEYS = [ 56 'debug', 57 'info', 58 'warning', 59 'error', 60 'critical', 61 'exception', 62 'log', 63 64 'audit' 65 ] 66 67 # Note: This attribute is used by "find_caller" so it can correctly exclude this file when looking 68 # for the logger method caller frame. 69 _srcfile = get_normalized_file_path(__file__) 70 71 72 def find_caller(*args, **kwargs): 73 """ 74 Find the stack frame of the caller so that we can note the source file name, line number and 75 function name. 76 77 Note: This is based on logging/__init__.py:findCaller and modified so it takes into account 78 this file - https://hg.python.org/cpython/file/2.7/Lib/logging/__init__.py#l1233 79 """ 80 rv = '(unknown file)', 0, '(unknown function)' 81 82 try: 83 f = logging.currentframe().f_back 84 while hasattr(f, 'f_code'): 85 co = f.f_code 86 filename = os.path.normcase(co.co_filename) 87 if filename in (_srcfile, logging._srcfile): # This line is modified. 88 f = f.f_back 89 continue 90 rv = (filename, f.f_lineno, co.co_name) 91 break 92 except Exception: 93 pass 94 95 return rv 96 97 98 def decorate_log_method(func): 99 @wraps(func) 100 def func_wrapper(*args, **kwargs): 101 # Prefix extra keys with underscore 102 if 'extra' in kwargs: 103 kwargs['extra'] = prefix_dict_keys(dictionary=kwargs['extra'], prefix='_') 104 105 try: 106 return func(*args, **kwargs) 107 except TypeError as e: 108 # In some version of Python 2.7, logger.exception doesn't take any kwargs so we need 109 # this hack :/ 110 # See: 111 # - https://docs.python.org/release/2.7.3/library/logging.html#logging.Logger.exception 112 # - https://docs.python.org/release/2.7.7/library/logging.html#logging.Logger.exception 113 if 'got an unexpected keyword argument \'extra\'' in six.text_type(e): 114 kwargs.pop('extra', None) 115 return func(*args, **kwargs) 116 raise e 117 return func_wrapper 118 119 120 def decorate_logger_methods(logger): 121 """ 122 Decorate all the logger methods so all the keys in the extra dictionary are 123 automatically prefixed with an underscore to avoid clashes with standard log 124 record attributes. 125 """ 126 127 # Note: We override findCaller with our custom implementation which takes into account this 128 # module. 129 # This way filename, module, funcName and lineno LogRecord attributes contain correct values 130 # instead of all pointing to decorate_log_method. 131 logger.findCaller = find_caller 132 for key in LOGGER_KEYS: 133 log_method = getattr(logger, key) 134 log_method = decorate_log_method(log_method) 135 setattr(logger, key, log_method) 136 137 return logger 138 139 140 def getLogger(name): 141 # make sure that prefix isn't appended multiple times to preserve logging name hierarchy 142 prefix = 'st2.' 143 if name.startswith(prefix): 144 logger = logging.getLogger(name) 145 else: 146 logger_name = '{}{}'.format(prefix, name) 147 logger = logging.getLogger(logger_name) 148 149 logger = decorate_logger_methods(logger=logger) 150 return logger 151 152 153 class LoggingStream(object): 154 155 def __init__(self, name, level=logging.ERROR): 156 self._logger = getLogger(name) 157 self._level = level 158 159 def write(self, message): 160 self._logger._log(self._level, message, None) 161 162 def flush(self): 163 pass 164 165 166 def _audit(logger, msg, *args, **kwargs): 167 if logger.isEnabledFor(logging.AUDIT): 168 logger._log(logging.AUDIT, msg, args, **kwargs) 169 170 171 logging.Logger.audit = _audit 172 173 174 def _add_exclusion_filters(handlers, excludes=None): 175 if excludes: 176 for h in handlers: 177 h.addFilter(LoggerNameExclusionFilter(excludes)) 178 179 180 def _redirect_stderr(): 181 # It is ok to redirect stderr as none of the st2 handlers write to stderr. 182 sys.stderr = LoggingStream('STDERR') 183 184 185 def setup(config_file, redirect_stderr=True, excludes=None, disable_existing_loggers=False, 186 st2_conf_path=None): 187 """ 188 Configure logging from file. 189 190 :param st2_conf_path: Optional path to st2.conf file. If provided and "config_file" path is 191 relative to st2.conf path, the config_file path will get resolved to full 192 absolute path relative to st2.conf. 193 :type st2_conf_path: ``str`` 194 """ 195 if st2_conf_path and config_file[:2] == './' and not os.path.isfile(config_file): 196 # Logging config path is relative to st2.conf, resolve it to full absolute path 197 directory = os.path.dirname(st2_conf_path) 198 config_file_name = os.path.basename(config_file) 199 config_file = os.path.join(directory, config_file_name) 200 201 try: 202 logging.config.fileConfig(config_file, 203 defaults=None, 204 disable_existing_loggers=disable_existing_loggers) 205 handlers = logging.getLoggerClass().manager.root.handlers 206 _add_exclusion_filters(handlers=handlers, excludes=excludes) 207 if redirect_stderr: 208 _redirect_stderr() 209 except Exception as exc: 210 exc_cls = type(exc) 211 tb_msg = traceback.format_exc() 212 213 msg = str(exc) 214 msg += '\n\n' + tb_msg 215 216 # revert stderr redirection since there is no logger in place. 217 sys.stderr = sys.__stderr__ 218 219 # No logger yet therefore write to stderr 220 sys.stderr.write('ERROR: %s' % (msg)) 221 222 raise exc_cls(six.text_type(msg)) 223 224 225 def ignore_lib2to3_log_messages(): 226 """ 227 Work around to ignore "Generating grammar tables from" log messages which are logged under 228 INFO by default by libraries such as networkx which use 2to3. 229 """ 230 import lib2to3.pgen2.driver 231 232 class MockLoggingModule(object): 233 def getLogger(self, *args, **kwargs): 234 return logging.getLogger('lib2to3') 235 236 lib2to3.pgen2.driver.logging = MockLoggingModule() 237 logging.getLogger('lib2to3').setLevel(logging.ERROR) 238 239 240 def ignore_statsd_log_messages(): 241 """ 242 By default statsd client logs all the operations under INFO and that causes a lot of noise. 243 244 This pull request silences all the statsd INFO log messages. 245 """ 246 import statsd.connection 247 import statsd.client 248 249 class MockLoggingModule(object): 250 def getLogger(self, *args, **kwargs): 251 return logging.getLogger('statsd') 252 253 statsd.connection.logging = MockLoggingModule() 254 statsd.client.logging = MockLoggingModule() 255 logging.getLogger('statsd').setLevel(logging.ERROR) 256 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/st2common/st2common/log.py b/st2common/st2common/log.py --- a/st2common/st2common/log.py +++ b/st2common/st2common/log.py @@ -15,6 +15,7 @@ from __future__ import absolute_import +import io import os import sys import logging @@ -69,25 +70,64 @@ _srcfile = get_normalized_file_path(__file__) -def find_caller(*args, **kwargs): +def find_caller(stack_info=False, stacklevel=1): """ Find the stack frame of the caller so that we can note the source file name, line number and function name. Note: This is based on logging/__init__.py:findCaller and modified so it takes into account - this file - https://hg.python.org/cpython/file/2.7/Lib/logging/__init__.py#l1233 + this file: + https://github.com/python/cpython/blob/2.7/Lib/logging/__init__.py#L1240-L1259 + + The Python 3.x implementation adds in a new argument `stack_info` and `stacklevel` + and expects a 4-element tuple to be returned, rather than a 3-element tuple in + the python 2 implementation. + We derived our implementation from the Python 3.9 source code here: + https://github.com/python/cpython/blob/3.9/Lib/logging/__init__.py#L1502-L1536 + + We've made the appropriate changes so that we're python 2 and python 3 compatible depending + on what runtine we're working in. """ - rv = '(unknown file)', 0, '(unknown function)' + if six.PY2: + rv = '(unknown file)', 0, '(unknown function)' + else: + # python 3, has extra tuple element at the end for stack information + rv = '(unknown file)', 0, '(unknown function)', None try: - f = logging.currentframe().f_back + f = logging.currentframe() + # On some versions of IronPython, currentframe() returns None if + # IronPython isn't run with -X:Frames. + if f is not None: + f = f.f_back + orig_f = f + while f and stacklevel > 1: + f = f.f_back + stacklevel -= 1 + if not f: + f = orig_f + while hasattr(f, 'f_code'): co = f.f_code filename = os.path.normcase(co.co_filename) if filename in (_srcfile, logging._srcfile): # This line is modified. f = f.f_back continue - rv = (filename, f.f_lineno, co.co_name) + + if six.PY2: + rv = (filename, f.f_lineno, co.co_name) + else: + # python 3, new stack_info processing and extra tuple return value + sinfo = None + if stack_info: + sio = io.StringIO() + sio.write('Stack (most recent call last):\n') + traceback.print_stack(f, file=sio) + sinfo = sio.getvalue() + if sinfo[-1] == '\n': + sinfo = sinfo[:-1] + sio.close() + rv = (filename, f.f_lineno, co.co_name, sinfo) break except Exception: pass
{"golden_diff": "diff --git a/st2common/st2common/log.py b/st2common/st2common/log.py\n--- a/st2common/st2common/log.py\n+++ b/st2common/st2common/log.py\n@@ -15,6 +15,7 @@\n \n from __future__ import absolute_import\n \n+import io\n import os\n import sys\n import logging\n@@ -69,25 +70,64 @@\n _srcfile = get_normalized_file_path(__file__)\n \n \n-def find_caller(*args, **kwargs):\n+def find_caller(stack_info=False, stacklevel=1):\n \"\"\"\n Find the stack frame of the caller so that we can note the source file name, line number and\n function name.\n \n Note: This is based on logging/__init__.py:findCaller and modified so it takes into account\n- this file - https://hg.python.org/cpython/file/2.7/Lib/logging/__init__.py#l1233\n+ this file:\n+ https://github.com/python/cpython/blob/2.7/Lib/logging/__init__.py#L1240-L1259\n+\n+ The Python 3.x implementation adds in a new argument `stack_info` and `stacklevel`\n+ and expects a 4-element tuple to be returned, rather than a 3-element tuple in\n+ the python 2 implementation.\n+ We derived our implementation from the Python 3.9 source code here:\n+ https://github.com/python/cpython/blob/3.9/Lib/logging/__init__.py#L1502-L1536\n+\n+ We've made the appropriate changes so that we're python 2 and python 3 compatible depending\n+ on what runtine we're working in.\n \"\"\"\n- rv = '(unknown file)', 0, '(unknown function)'\n+ if six.PY2:\n+ rv = '(unknown file)', 0, '(unknown function)'\n+ else:\n+ # python 3, has extra tuple element at the end for stack information\n+ rv = '(unknown file)', 0, '(unknown function)', None\n \n try:\n- f = logging.currentframe().f_back\n+ f = logging.currentframe()\n+ # On some versions of IronPython, currentframe() returns None if\n+ # IronPython isn't run with -X:Frames.\n+ if f is not None:\n+ f = f.f_back\n+ orig_f = f\n+ while f and stacklevel > 1:\n+ f = f.f_back\n+ stacklevel -= 1\n+ if not f:\n+ f = orig_f\n+\n while hasattr(f, 'f_code'):\n co = f.f_code\n filename = os.path.normcase(co.co_filename)\n if filename in (_srcfile, logging._srcfile): # This line is modified.\n f = f.f_back\n continue\n- rv = (filename, f.f_lineno, co.co_name)\n+\n+ if six.PY2:\n+ rv = (filename, f.f_lineno, co.co_name)\n+ else:\n+ # python 3, new stack_info processing and extra tuple return value\n+ sinfo = None\n+ if stack_info:\n+ sio = io.StringIO()\n+ sio.write('Stack (most recent call last):\\n')\n+ traceback.print_stack(f, file=sio)\n+ sinfo = sio.getvalue()\n+ if sinfo[-1] == '\\n':\n+ sinfo = sinfo[:-1]\n+ sio.close()\n+ rv = (filename, f.f_lineno, co.co_name, sinfo)\n break\n except Exception:\n pass\n", "issue": "ST2 logs writing \"(unknown file)\" in log prefix, instead of module name\n## SUMMARY\r\n\r\nIn ST2 v3.2, the st2sensorcontainer.log, and in v3.3 all st2 logs have the `module` name replaced with `(unknown file)`\r\n\r\n\r\n### STACKSTORM VERSION\r\n\r\nv3.2, v3.3, EL8 and U18.04\r\nPython 3.6\r\n\r\n### OS, environment, install method\r\n\r\nOne Line and Ansible so far. \r\n\r\n## Steps to reproduce the problem\r\n\r\nShow how to reproduce the problem, using a minimal test-case. Make sure to include any content\r\n(pack content - workflows, actions, etc.) which are needed to reproduce the problem.\r\n\r\n## Expected Results\r\n\r\nNo `(unknown file)`, but the python module name. \r\n\r\n## Actual Results\r\n```\r\n2020-10-13 17:55:13,787 140460262501416 INFO (unknown file) [-] Sensor linux.FileWatchSensor started\r\n2020-10-13 17:55:15,444 139725927337456 INFO (unknown file) [-] No config found for sensor \"FileWatchSensor\"\r\n2020-10-13 17:55:15,446 139725927337456 INFO (unknown file) [-] Watcher started\r\n2020-10-13 17:55:15,446 139725927337456 INFO (unknown file) [-] Running sensor initialization code\r\n2020-10-13 17:55:15,454 139725927337456 INFO (unknown file) [-] Running sensor in passive mode\r\n```\r\n\r\n\n", "before_files": [{"content": "# Copyright 2020 The StackStorm Authors.\n# Copyright 2019 Extreme Networks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nimport os\nimport sys\nimport logging\nimport logging.config\nimport logging.handlers\nimport traceback\nfrom functools import wraps\n\nimport six\n\nfrom st2common.logging.filters import LoggerNameExclusionFilter\n\n# Those are here for backward compatibility reasons\nfrom st2common.logging.handlers import FormatNamedFileHandler\nfrom st2common.logging.handlers import ConfigurableSyslogHandler\nfrom st2common.util.misc import prefix_dict_keys\nfrom st2common.util.misc import get_normalized_file_path\n\n__all__ = [\n 'getLogger',\n 'setup',\n\n 'FormatNamedFileHandler',\n 'ConfigurableSyslogHandler',\n\n 'LoggingStream',\n\n 'ignore_lib2to3_log_messages',\n 'ignore_statsd_log_messages'\n]\n\n# NOTE: We set AUDIT to the highest log level which means AUDIT log messages will always be\n# included (e.g. also if log level is set to INFO). To avoid that, we need to explicitly filter\n# out AUDIT log level in service setup code.\nlogging.AUDIT = logging.CRITICAL + 10\nlogging.addLevelName(logging.AUDIT, 'AUDIT')\n\nLOGGER_KEYS = [\n 'debug',\n 'info',\n 'warning',\n 'error',\n 'critical',\n 'exception',\n 'log',\n\n 'audit'\n]\n\n# Note: This attribute is used by \"find_caller\" so it can correctly exclude this file when looking\n# for the logger method caller frame.\n_srcfile = get_normalized_file_path(__file__)\n\n\ndef find_caller(*args, **kwargs):\n \"\"\"\n Find the stack frame of the caller so that we can note the source file name, line number and\n function name.\n\n Note: This is based on logging/__init__.py:findCaller and modified so it takes into account\n this file - https://hg.python.org/cpython/file/2.7/Lib/logging/__init__.py#l1233\n \"\"\"\n rv = '(unknown file)', 0, '(unknown function)'\n\n try:\n f = logging.currentframe().f_back\n while hasattr(f, 'f_code'):\n co = f.f_code\n filename = os.path.normcase(co.co_filename)\n if filename in (_srcfile, logging._srcfile): # This line is modified.\n f = f.f_back\n continue\n rv = (filename, f.f_lineno, co.co_name)\n break\n except Exception:\n pass\n\n return rv\n\n\ndef decorate_log_method(func):\n @wraps(func)\n def func_wrapper(*args, **kwargs):\n # Prefix extra keys with underscore\n if 'extra' in kwargs:\n kwargs['extra'] = prefix_dict_keys(dictionary=kwargs['extra'], prefix='_')\n\n try:\n return func(*args, **kwargs)\n except TypeError as e:\n # In some version of Python 2.7, logger.exception doesn't take any kwargs so we need\n # this hack :/\n # See:\n # - https://docs.python.org/release/2.7.3/library/logging.html#logging.Logger.exception\n # - https://docs.python.org/release/2.7.7/library/logging.html#logging.Logger.exception\n if 'got an unexpected keyword argument \\'extra\\'' in six.text_type(e):\n kwargs.pop('extra', None)\n return func(*args, **kwargs)\n raise e\n return func_wrapper\n\n\ndef decorate_logger_methods(logger):\n \"\"\"\n Decorate all the logger methods so all the keys in the extra dictionary are\n automatically prefixed with an underscore to avoid clashes with standard log\n record attributes.\n \"\"\"\n\n # Note: We override findCaller with our custom implementation which takes into account this\n # module.\n # This way filename, module, funcName and lineno LogRecord attributes contain correct values\n # instead of all pointing to decorate_log_method.\n logger.findCaller = find_caller\n for key in LOGGER_KEYS:\n log_method = getattr(logger, key)\n log_method = decorate_log_method(log_method)\n setattr(logger, key, log_method)\n\n return logger\n\n\ndef getLogger(name):\n # make sure that prefix isn't appended multiple times to preserve logging name hierarchy\n prefix = 'st2.'\n if name.startswith(prefix):\n logger = logging.getLogger(name)\n else:\n logger_name = '{}{}'.format(prefix, name)\n logger = logging.getLogger(logger_name)\n\n logger = decorate_logger_methods(logger=logger)\n return logger\n\n\nclass LoggingStream(object):\n\n def __init__(self, name, level=logging.ERROR):\n self._logger = getLogger(name)\n self._level = level\n\n def write(self, message):\n self._logger._log(self._level, message, None)\n\n def flush(self):\n pass\n\n\ndef _audit(logger, msg, *args, **kwargs):\n if logger.isEnabledFor(logging.AUDIT):\n logger._log(logging.AUDIT, msg, args, **kwargs)\n\n\nlogging.Logger.audit = _audit\n\n\ndef _add_exclusion_filters(handlers, excludes=None):\n if excludes:\n for h in handlers:\n h.addFilter(LoggerNameExclusionFilter(excludes))\n\n\ndef _redirect_stderr():\n # It is ok to redirect stderr as none of the st2 handlers write to stderr.\n sys.stderr = LoggingStream('STDERR')\n\n\ndef setup(config_file, redirect_stderr=True, excludes=None, disable_existing_loggers=False,\n st2_conf_path=None):\n \"\"\"\n Configure logging from file.\n\n :param st2_conf_path: Optional path to st2.conf file. If provided and \"config_file\" path is\n relative to st2.conf path, the config_file path will get resolved to full\n absolute path relative to st2.conf.\n :type st2_conf_path: ``str``\n \"\"\"\n if st2_conf_path and config_file[:2] == './' and not os.path.isfile(config_file):\n # Logging config path is relative to st2.conf, resolve it to full absolute path\n directory = os.path.dirname(st2_conf_path)\n config_file_name = os.path.basename(config_file)\n config_file = os.path.join(directory, config_file_name)\n\n try:\n logging.config.fileConfig(config_file,\n defaults=None,\n disable_existing_loggers=disable_existing_loggers)\n handlers = logging.getLoggerClass().manager.root.handlers\n _add_exclusion_filters(handlers=handlers, excludes=excludes)\n if redirect_stderr:\n _redirect_stderr()\n except Exception as exc:\n exc_cls = type(exc)\n tb_msg = traceback.format_exc()\n\n msg = str(exc)\n msg += '\\n\\n' + tb_msg\n\n # revert stderr redirection since there is no logger in place.\n sys.stderr = sys.__stderr__\n\n # No logger yet therefore write to stderr\n sys.stderr.write('ERROR: %s' % (msg))\n\n raise exc_cls(six.text_type(msg))\n\n\ndef ignore_lib2to3_log_messages():\n \"\"\"\n Work around to ignore \"Generating grammar tables from\" log messages which are logged under\n INFO by default by libraries such as networkx which use 2to3.\n \"\"\"\n import lib2to3.pgen2.driver\n\n class MockLoggingModule(object):\n def getLogger(self, *args, **kwargs):\n return logging.getLogger('lib2to3')\n\n lib2to3.pgen2.driver.logging = MockLoggingModule()\n logging.getLogger('lib2to3').setLevel(logging.ERROR)\n\n\ndef ignore_statsd_log_messages():\n \"\"\"\n By default statsd client logs all the operations under INFO and that causes a lot of noise.\n\n This pull request silences all the statsd INFO log messages.\n \"\"\"\n import statsd.connection\n import statsd.client\n\n class MockLoggingModule(object):\n def getLogger(self, *args, **kwargs):\n return logging.getLogger('statsd')\n\n statsd.connection.logging = MockLoggingModule()\n statsd.client.logging = MockLoggingModule()\n logging.getLogger('statsd').setLevel(logging.ERROR)\n", "path": "st2common/st2common/log.py"}], "after_files": [{"content": "# Copyright 2020 The StackStorm Authors.\n# Copyright 2019 Extreme Networks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nimport io\nimport os\nimport sys\nimport logging\nimport logging.config\nimport logging.handlers\nimport traceback\nfrom functools import wraps\n\nimport six\n\nfrom st2common.logging.filters import LoggerNameExclusionFilter\n\n# Those are here for backward compatibility reasons\nfrom st2common.logging.handlers import FormatNamedFileHandler\nfrom st2common.logging.handlers import ConfigurableSyslogHandler\nfrom st2common.util.misc import prefix_dict_keys\nfrom st2common.util.misc import get_normalized_file_path\n\n__all__ = [\n 'getLogger',\n 'setup',\n\n 'FormatNamedFileHandler',\n 'ConfigurableSyslogHandler',\n\n 'LoggingStream',\n\n 'ignore_lib2to3_log_messages',\n 'ignore_statsd_log_messages'\n]\n\n# NOTE: We set AUDIT to the highest log level which means AUDIT log messages will always be\n# included (e.g. also if log level is set to INFO). To avoid that, we need to explicitly filter\n# out AUDIT log level in service setup code.\nlogging.AUDIT = logging.CRITICAL + 10\nlogging.addLevelName(logging.AUDIT, 'AUDIT')\n\nLOGGER_KEYS = [\n 'debug',\n 'info',\n 'warning',\n 'error',\n 'critical',\n 'exception',\n 'log',\n\n 'audit'\n]\n\n# Note: This attribute is used by \"find_caller\" so it can correctly exclude this file when looking\n# for the logger method caller frame.\n_srcfile = get_normalized_file_path(__file__)\n\n\ndef find_caller(stack_info=False, stacklevel=1):\n \"\"\"\n Find the stack frame of the caller so that we can note the source file name, line number and\n function name.\n\n Note: This is based on logging/__init__.py:findCaller and modified so it takes into account\n this file:\n https://github.com/python/cpython/blob/2.7/Lib/logging/__init__.py#L1240-L1259\n\n The Python 3.x implementation adds in a new argument `stack_info` and `stacklevel`\n and expects a 4-element tuple to be returned, rather than a 3-element tuple in\n the python 2 implementation.\n We derived our implementation from the Python 3.9 source code here:\n https://github.com/python/cpython/blob/3.9/Lib/logging/__init__.py#L1502-L1536\n\n We've made the appropriate changes so that we're python 2 and python 3 compatible depending\n on what runtine we're working in.\n \"\"\"\n if six.PY2:\n rv = '(unknown file)', 0, '(unknown function)'\n else:\n # python 3, has extra tuple element at the end for stack information\n rv = '(unknown file)', 0, '(unknown function)', None\n\n try:\n f = logging.currentframe()\n # On some versions of IronPython, currentframe() returns None if\n # IronPython isn't run with -X:Frames.\n if f is not None:\n f = f.f_back\n orig_f = f\n while f and stacklevel > 1:\n f = f.f_back\n stacklevel -= 1\n if not f:\n f = orig_f\n\n while hasattr(f, 'f_code'):\n co = f.f_code\n filename = os.path.normcase(co.co_filename)\n if filename in (_srcfile, logging._srcfile): # This line is modified.\n f = f.f_back\n continue\n\n if six.PY2:\n rv = (filename, f.f_lineno, co.co_name)\n else:\n # python 3, new stack_info processing and extra tuple return value\n sinfo = None\n if stack_info:\n sio = io.StringIO()\n sio.write('Stack (most recent call last):\\n')\n traceback.print_stack(f, file=sio)\n sinfo = sio.getvalue()\n if sinfo[-1] == '\\n':\n sinfo = sinfo[:-1]\n sio.close()\n rv = (filename, f.f_lineno, co.co_name, sinfo)\n break\n except Exception:\n pass\n\n return rv\n\n\ndef decorate_log_method(func):\n @wraps(func)\n def func_wrapper(*args, **kwargs):\n # Prefix extra keys with underscore\n if 'extra' in kwargs:\n kwargs['extra'] = prefix_dict_keys(dictionary=kwargs['extra'], prefix='_')\n\n try:\n return func(*args, **kwargs)\n except TypeError as e:\n # In some version of Python 2.7, logger.exception doesn't take any kwargs so we need\n # this hack :/\n # See:\n # - https://docs.python.org/release/2.7.3/library/logging.html#logging.Logger.exception\n # - https://docs.python.org/release/2.7.7/library/logging.html#logging.Logger.exception\n if 'got an unexpected keyword argument \\'extra\\'' in six.text_type(e):\n kwargs.pop('extra', None)\n return func(*args, **kwargs)\n raise e\n return func_wrapper\n\n\ndef decorate_logger_methods(logger):\n \"\"\"\n Decorate all the logger methods so all the keys in the extra dictionary are\n automatically prefixed with an underscore to avoid clashes with standard log\n record attributes.\n \"\"\"\n\n # Note: We override findCaller with our custom implementation which takes into account this\n # module.\n # This way filename, module, funcName and lineno LogRecord attributes contain correct values\n # instead of all pointing to decorate_log_method.\n logger.findCaller = find_caller\n for key in LOGGER_KEYS:\n log_method = getattr(logger, key)\n log_method = decorate_log_method(log_method)\n setattr(logger, key, log_method)\n\n return logger\n\n\ndef getLogger(name):\n # make sure that prefix isn't appended multiple times to preserve logging name hierarchy\n prefix = 'st2.'\n if name.startswith(prefix):\n logger = logging.getLogger(name)\n else:\n logger_name = '{}{}'.format(prefix, name)\n logger = logging.getLogger(logger_name)\n\n logger = decorate_logger_methods(logger=logger)\n return logger\n\n\nclass LoggingStream(object):\n\n def __init__(self, name, level=logging.ERROR):\n self._logger = getLogger(name)\n self._level = level\n\n def write(self, message):\n self._logger._log(self._level, message, None)\n\n def flush(self):\n pass\n\n\ndef _audit(logger, msg, *args, **kwargs):\n if logger.isEnabledFor(logging.AUDIT):\n logger._log(logging.AUDIT, msg, args, **kwargs)\n\n\nlogging.Logger.audit = _audit\n\n\ndef _add_exclusion_filters(handlers, excludes=None):\n if excludes:\n for h in handlers:\n h.addFilter(LoggerNameExclusionFilter(excludes))\n\n\ndef _redirect_stderr():\n # It is ok to redirect stderr as none of the st2 handlers write to stderr.\n sys.stderr = LoggingStream('STDERR')\n\n\ndef setup(config_file, redirect_stderr=True, excludes=None, disable_existing_loggers=False,\n st2_conf_path=None):\n \"\"\"\n Configure logging from file.\n\n :param st2_conf_path: Optional path to st2.conf file. If provided and \"config_file\" path is\n relative to st2.conf path, the config_file path will get resolved to full\n absolute path relative to st2.conf.\n :type st2_conf_path: ``str``\n \"\"\"\n if st2_conf_path and config_file[:2] == './' and not os.path.isfile(config_file):\n # Logging config path is relative to st2.conf, resolve it to full absolute path\n directory = os.path.dirname(st2_conf_path)\n config_file_name = os.path.basename(config_file)\n config_file = os.path.join(directory, config_file_name)\n\n try:\n logging.config.fileConfig(config_file,\n defaults=None,\n disable_existing_loggers=disable_existing_loggers)\n handlers = logging.getLoggerClass().manager.root.handlers\n _add_exclusion_filters(handlers=handlers, excludes=excludes)\n if redirect_stderr:\n _redirect_stderr()\n except Exception as exc:\n exc_cls = type(exc)\n tb_msg = traceback.format_exc()\n\n msg = str(exc)\n msg += '\\n\\n' + tb_msg\n\n # revert stderr redirection since there is no logger in place.\n sys.stderr = sys.__stderr__\n\n # No logger yet therefore write to stderr\n sys.stderr.write('ERROR: %s' % (msg))\n\n raise exc_cls(six.text_type(msg))\n\n\ndef ignore_lib2to3_log_messages():\n \"\"\"\n Work around to ignore \"Generating grammar tables from\" log messages which are logged under\n INFO by default by libraries such as networkx which use 2to3.\n \"\"\"\n import lib2to3.pgen2.driver\n\n class MockLoggingModule(object):\n def getLogger(self, *args, **kwargs):\n return logging.getLogger('lib2to3')\n\n lib2to3.pgen2.driver.logging = MockLoggingModule()\n logging.getLogger('lib2to3').setLevel(logging.ERROR)\n\n\ndef ignore_statsd_log_messages():\n \"\"\"\n By default statsd client logs all the operations under INFO and that causes a lot of noise.\n\n This pull request silences all the statsd INFO log messages.\n \"\"\"\n import statsd.connection\n import statsd.client\n\n class MockLoggingModule(object):\n def getLogger(self, *args, **kwargs):\n return logging.getLogger('statsd')\n\n statsd.connection.logging = MockLoggingModule()\n statsd.client.logging = MockLoggingModule()\n logging.getLogger('statsd').setLevel(logging.ERROR)\n", "path": "st2common/st2common/log.py"}]}
3,249
813
gh_patches_debug_32430
rasdani/github-patches
git_diff
mampfes__hacs_waste_collection_schedule-351
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Min Renovasjon for county_id 0301 does not work. Seem like there are many different types of garbage in my county. List collected of types of garbage: [ { "Id": 1, "Navn": "Rest- og matavfall (Optibag)", "Ikon": "https://komteksky.norkart.no/komtek.renovasjonwebapi/Ikoner/restavfall.png" }, { "Id": 2, "Navn": "Papir", "Ikon": "https://komteksky.norkart.no/komtek.renovasjonwebapi/Ikoner/papir.png" }, { "Id": 4, "Navn": "Glass/Metallembalsje", "Ikon": "https://komteksky.norkart.no/komtek.renovasjonwebapi/Ikoner/glassogmetallemballasje.png" }, { "Id": 6, "Navn": "Spesialavfall", "Ikon": "https://komteksky.norkart.no/komtek.renovasjonwebapi/Ikoner/farligavfall.png" }, { "Id": 7, "Navn": "Plast", "Ikon": "https://komteksky.norkart.no/komtek.renovasjonwebapi/Ikoner/plastemballasje.png" }, { "Id": 8, "Navn": "Trevirke", "Ikon": "https://komteksky.norkart.no/komtek.renovasjonwebapi/Ikoner/trevirke.png" }, { "Id": 9, "Navn": "Tekstiler", "Ikon": "https://komteksky.norkart.no/komtek.renovasjonwebapi/Ikoner/klaerogsko.png" }, { "Id": 10, "Navn": "Hageavfall", "Ikon": "https://komteksky.norkart.no/komtek.renovasjonwebapi/Ikoner/hageavfall.png" }, { "Id": 11, "Navn": "Metaller", "Ikon": "https://komteksky.norkart.no/komtek.renovasjonwebapi/Ikoner/metall.png" }, { "Id": 12, "Navn": "Hvitevarer/EE-avfall", "Ikon": "https://komteksky.norkart.no/komtek.renovasjonwebapi/Ikoner/elektriskogelektronisk.png" }, { "Id": 13, "Navn": "Papp", "Ikon": "https://komteksky.norkart.no/komtek.renovasjonwebapi/Ikoner/pappogkartong.png" }, { "Id": 15, "Navn": "Fretex", "Ikon": "https://komteksky.norkart.no/komtek.renovasjonwebapi/Ikoner/brush.png" }, { "Id": 16, "Navn": "Mobil Gjenbruksstasjon", "Ikon": "https://komteksky.norkart.no/komtek.renovasjonwebapi/Ikoner/brush.png" }, { "Id": 17, "Navn": "Farlig Avfall", "Ikon": "https://komteksky.norkart.no/komtek.renovasjonwebapi/Ikoner/brush.png" }, { "Id": 18, "Navn": "Matavfall/ Organisk", "Ikon": "https://komteksky.norkart.no/komtek.renovasjonwebapi/Ikoner/brush.png" }, { "Id": 19, "Navn": "Rødboks- Farlig avfall/ småelektrisk", "Ikon": "https://komteksky.norkart.no/komtek.renovasjonwebapi/Ikoner/farligavfall.png" }, { "Id": 20, "Navn": "Restavfall", "Ikon": "https://komteksky.norkart.no/komtek.renovasjonwebapi/Ikoner/brush.png" }, { "Id": 21, "Navn": "Plastemballasje", "Ikon": "https://komteksky.norkart.no/komtek.renovasjonwebapi/Ikoner/brush.png" } ] --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `custom_components/waste_collection_schedule/waste_collection_schedule/source/minrenovasjon_no.py` Content: ``` 1 import requests 2 import urllib.parse 3 import json 4 import datetime 5 import re 6 7 from waste_collection_schedule import Collection # type: ignore[attr-defined] 8 from pprint import pprint 9 10 TITLE = "Min Renovasjon" 11 DESCRIPTION = "Source for Norkart Komtek MinRenovasjon (Norway)." 12 URL = "https://www.norkart.no/komtek/renovasjon/" 13 14 # **street_code:** \ 15 # **county_id:** \ 16 # Can be found with this REST-API call. 17 # ``` 18 # https://ws.geonorge.no/adresser/v1/#/default/get_sok 19 # https://ws.geonorge.no/adresser/v1/sok?sok=Min%20Gate%2012 20 # ``` 21 # "street_code" equals to "adressekode" and "county_id" equals to "kommunenummer". 22 23 TEST_CASES = { 24 "Sandvika Rådhus": { 25 "street_name": "Rådhustorget", 26 "house_number": 2, 27 "street_code": 2469, 28 "county_id": 3024 29 } 30 } 31 32 BASE_URL = "https://komteksky.norkart.no/komtek.renovasjonwebapi/api/" 33 APP_KEY = "AE13DEEC-804F-4615-A74E-B4FAC11F0A30" 34 35 class Source: 36 def __init__(self, street_name, house_number, street_code, county_id): 37 self._street_name = street_name 38 self._house_number = house_number 39 self._street_code = street_code 40 self._county_id = county_id 41 self._icon_map = { 42 "": "mdi:trash-can", 43 "brush": "mdi:trash-can", 44 "farligavfall": "mdi:trash-can", 45 "glassogmetallemballasje": "mdi:trash-can", 46 "hageavfall": "mdi:leaf", 47 "klaerogsko": "mdi:hanger", 48 "matavfall": "mdi:trash-can", 49 "matrestavfall": "mdi:trash-can", 50 "papir": "mdi:newspaper-variant-multiple", 51 "plastemballasje": "mdi:trash-can", 52 "restavfall": "mdi:trash-can" 53 } 54 55 def fetch(self): 56 headers = { 57 'Kommunenr': str(self._county_id), 58 'RenovasjonAppKey': APP_KEY, 59 'user-agent': 'Home-Assitant-waste-col-sched/0.1' 60 } 61 args = {} 62 63 r = requests.get(BASE_URL + 'fraksjoner', params = args, headers = headers) 64 65 type = {} 66 for f in json.loads(r.content): 67 # pprint(f) 68 icon = self._icon_map[re.sub(r"^.*?/(\w+)\.\w{3,4}$", "\\1", f['Ikon'])] 69 type[f['Id']] = { 70 'name': f['Navn'], 71 'image': f['Ikon'], 72 'icon': icon 73 } 74 75 args = { 76 'gatenavn': self._street_name, 77 'husnr': self._house_number, 78 'gatekode': self._street_code, 79 80 } 81 82 r = requests.get(BASE_URL + 'tommekalender', params = args, headers = headers) 83 84 entries = [] 85 for f in json.loads(r.content): 86 for d in f['Tommedatoer']: 87 entries.append( 88 Collection( 89 date = datetime.datetime.strptime( 90 d, "%Y-%m-%dT%H:%M:%S" 91 ).date(), 92 t = type[f['FraksjonId']]['name'], 93 icon = type[f['FraksjonId']]['icon'], 94 picture = type[f['FraksjonId']]['image'] 95 ) 96 ) 97 98 return entries 99 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/minrenovasjon_no.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/minrenovasjon_no.py --- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/minrenovasjon_no.py +++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/minrenovasjon_no.py @@ -41,15 +41,20 @@ self._icon_map = { "": "mdi:trash-can", "brush": "mdi:trash-can", + "elektriskogelektronisk": "mdi:chip", "farligavfall": "mdi:trash-can", "glassogmetallemballasje": "mdi:trash-can", "hageavfall": "mdi:leaf", "klaerogsko": "mdi:hanger", "matavfall": "mdi:trash-can", "matrestavfall": "mdi:trash-can", + "metall": "mdi:trash-can", "papir": "mdi:newspaper-variant-multiple", + "pappogkartong": "mdi:archive", "plastemballasje": "mdi:trash-can", - "restavfall": "mdi:trash-can" + "restavfall": "mdi:trash-can", + "trevirke": "mdi:trash-can" + } def fetch(self): @@ -65,7 +70,10 @@ type = {} for f in json.loads(r.content): # pprint(f) - icon = self._icon_map[re.sub(r"^.*?/(\w+)\.\w{3,4}$", "\\1", f['Ikon'])] + icon = "mdi:trash-can" + icon_name = re.sub(r"^.*?/(\w+)\.\w{3,4}$", "\\1", f['Ikon']) + if icon_name in self._icon_map: + icon = self._icon_map[icon_name] type[f['Id']] = { 'name': f['Navn'], 'image': f['Ikon'],
{"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/minrenovasjon_no.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/minrenovasjon_no.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/minrenovasjon_no.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/minrenovasjon_no.py\n@@ -41,15 +41,20 @@\n self._icon_map = {\n \"\": \"mdi:trash-can\",\n \"brush\": \"mdi:trash-can\",\n+ \"elektriskogelektronisk\": \"mdi:chip\",\n \"farligavfall\": \"mdi:trash-can\",\n \"glassogmetallemballasje\": \"mdi:trash-can\",\n \"hageavfall\": \"mdi:leaf\",\n \"klaerogsko\": \"mdi:hanger\",\n \"matavfall\": \"mdi:trash-can\",\n \"matrestavfall\": \"mdi:trash-can\",\n+ \"metall\": \"mdi:trash-can\",\n \"papir\": \"mdi:newspaper-variant-multiple\",\n+ \"pappogkartong\": \"mdi:archive\",\n \"plastemballasje\": \"mdi:trash-can\",\n- \"restavfall\": \"mdi:trash-can\"\n+ \"restavfall\": \"mdi:trash-can\",\n+ \"trevirke\": \"mdi:trash-can\"\n+\n } \n \n def fetch(self):\n@@ -65,7 +70,10 @@\n type = {}\n for f in json.loads(r.content):\n # pprint(f)\n- icon = self._icon_map[re.sub(r\"^.*?/(\\w+)\\.\\w{3,4}$\", \"\\\\1\", f['Ikon'])]\n+ icon = \"mdi:trash-can\"\n+ icon_name = re.sub(r\"^.*?/(\\w+)\\.\\w{3,4}$\", \"\\\\1\", f['Ikon'])\n+ if icon_name in self._icon_map:\n+ icon = self._icon_map[icon_name]\n type[f['Id']] = {\n 'name': f['Navn'],\n 'image': f['Ikon'],\n", "issue": "Min Renovasjon for county_id 0301 does not work.\nSeem like there are many different types of garbage in my county.\r\nList collected of types of garbage:\r\n[\r\n {\r\n \"Id\": 1,\r\n \"Navn\": \"Rest- og matavfall (Optibag)\",\r\n \"Ikon\": \"https://komteksky.norkart.no/komtek.renovasjonwebapi/Ikoner/restavfall.png\"\r\n },\r\n {\r\n \"Id\": 2,\r\n \"Navn\": \"Papir\",\r\n \"Ikon\": \"https://komteksky.norkart.no/komtek.renovasjonwebapi/Ikoner/papir.png\"\r\n },\r\n {\r\n \"Id\": 4,\r\n \"Navn\": \"Glass/Metallembalsje\",\r\n \"Ikon\": \"https://komteksky.norkart.no/komtek.renovasjonwebapi/Ikoner/glassogmetallemballasje.png\"\r\n },\r\n {\r\n \"Id\": 6,\r\n \"Navn\": \"Spesialavfall\",\r\n \"Ikon\": \"https://komteksky.norkart.no/komtek.renovasjonwebapi/Ikoner/farligavfall.png\"\r\n },\r\n {\r\n \"Id\": 7,\r\n \"Navn\": \"Plast\",\r\n \"Ikon\": \"https://komteksky.norkart.no/komtek.renovasjonwebapi/Ikoner/plastemballasje.png\"\r\n },\r\n {\r\n \"Id\": 8,\r\n \"Navn\": \"Trevirke\",\r\n \"Ikon\": \"https://komteksky.norkart.no/komtek.renovasjonwebapi/Ikoner/trevirke.png\"\r\n },\r\n {\r\n \"Id\": 9,\r\n \"Navn\": \"Tekstiler\",\r\n \"Ikon\": \"https://komteksky.norkart.no/komtek.renovasjonwebapi/Ikoner/klaerogsko.png\"\r\n },\r\n {\r\n \"Id\": 10,\r\n \"Navn\": \"Hageavfall\",\r\n \"Ikon\": \"https://komteksky.norkart.no/komtek.renovasjonwebapi/Ikoner/hageavfall.png\"\r\n },\r\n {\r\n \"Id\": 11,\r\n \"Navn\": \"Metaller\",\r\n \"Ikon\": \"https://komteksky.norkart.no/komtek.renovasjonwebapi/Ikoner/metall.png\"\r\n },\r\n {\r\n \"Id\": 12,\r\n \"Navn\": \"Hvitevarer/EE-avfall\",\r\n \"Ikon\": \"https://komteksky.norkart.no/komtek.renovasjonwebapi/Ikoner/elektriskogelektronisk.png\"\r\n },\r\n {\r\n \"Id\": 13,\r\n \"Navn\": \"Papp\",\r\n \"Ikon\": \"https://komteksky.norkart.no/komtek.renovasjonwebapi/Ikoner/pappogkartong.png\"\r\n },\r\n {\r\n \"Id\": 15,\r\n \"Navn\": \"Fretex\",\r\n \"Ikon\": \"https://komteksky.norkart.no/komtek.renovasjonwebapi/Ikoner/brush.png\"\r\n },\r\n {\r\n \"Id\": 16,\r\n \"Navn\": \"Mobil Gjenbruksstasjon\",\r\n \"Ikon\": \"https://komteksky.norkart.no/komtek.renovasjonwebapi/Ikoner/brush.png\"\r\n },\r\n {\r\n \"Id\": 17,\r\n \"Navn\": \"Farlig Avfall\",\r\n \"Ikon\": \"https://komteksky.norkart.no/komtek.renovasjonwebapi/Ikoner/brush.png\"\r\n },\r\n {\r\n \"Id\": 18,\r\n \"Navn\": \"Matavfall/ Organisk\",\r\n \"Ikon\": \"https://komteksky.norkart.no/komtek.renovasjonwebapi/Ikoner/brush.png\"\r\n },\r\n {\r\n \"Id\": 19,\r\n \"Navn\": \"R\u00f8dboks- Farlig avfall/ sm\u00e5elektrisk\",\r\n \"Ikon\": \"https://komteksky.norkart.no/komtek.renovasjonwebapi/Ikoner/farligavfall.png\"\r\n },\r\n {\r\n \"Id\": 20,\r\n \"Navn\": \"Restavfall\",\r\n \"Ikon\": \"https://komteksky.norkart.no/komtek.renovasjonwebapi/Ikoner/brush.png\"\r\n },\r\n {\r\n \"Id\": 21,\r\n \"Navn\": \"Plastemballasje\",\r\n \"Ikon\": \"https://komteksky.norkart.no/komtek.renovasjonwebapi/Ikoner/brush.png\"\r\n }\r\n]\r\n\n", "before_files": [{"content": "import requests\nimport urllib.parse\nimport json\nimport datetime\nimport re\n\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom pprint import pprint\n\nTITLE = \"Min Renovasjon\"\nDESCRIPTION = \"Source for Norkart Komtek MinRenovasjon (Norway).\"\nURL = \"https://www.norkart.no/komtek/renovasjon/\"\n\n# **street_code:** \\\n# **county_id:** \\\n# Can be found with this REST-API call.\n# ```\n# https://ws.geonorge.no/adresser/v1/#/default/get_sok\n# https://ws.geonorge.no/adresser/v1/sok?sok=Min%20Gate%2012\n# ```\n# \"street_code\" equals to \"adressekode\" and \"county_id\" equals to \"kommunenummer\".\n\nTEST_CASES = {\n \"Sandvika R\u00e5dhus\": {\n \"street_name\": \"R\u00e5dhustorget\",\n \"house_number\": 2,\n \"street_code\": 2469,\n \"county_id\": 3024\n }\n}\n\nBASE_URL = \"https://komteksky.norkart.no/komtek.renovasjonwebapi/api/\"\nAPP_KEY = \"AE13DEEC-804F-4615-A74E-B4FAC11F0A30\"\n\nclass Source:\n def __init__(self, street_name, house_number, street_code, county_id):\n self._street_name = street_name\n self._house_number = house_number\n self._street_code = street_code\n self._county_id = county_id\n self._icon_map = {\n \"\": \"mdi:trash-can\",\n \"brush\": \"mdi:trash-can\",\n \"farligavfall\": \"mdi:trash-can\",\n \"glassogmetallemballasje\": \"mdi:trash-can\",\n \"hageavfall\": \"mdi:leaf\",\n \"klaerogsko\": \"mdi:hanger\",\n \"matavfall\": \"mdi:trash-can\",\n \"matrestavfall\": \"mdi:trash-can\",\n \"papir\": \"mdi:newspaper-variant-multiple\",\n \"plastemballasje\": \"mdi:trash-can\",\n \"restavfall\": \"mdi:trash-can\"\n } \n\n def fetch(self):\n headers = {\n 'Kommunenr': str(self._county_id),\n 'RenovasjonAppKey': APP_KEY,\n 'user-agent': 'Home-Assitant-waste-col-sched/0.1'\n }\n args = {}\n\n r = requests.get(BASE_URL + 'fraksjoner', params = args, headers = headers)\n\n type = {}\n for f in json.loads(r.content):\n # pprint(f)\n icon = self._icon_map[re.sub(r\"^.*?/(\\w+)\\.\\w{3,4}$\", \"\\\\1\", f['Ikon'])]\n type[f['Id']] = {\n 'name': f['Navn'],\n 'image': f['Ikon'],\n 'icon': icon\n }\n\n args = {\n 'gatenavn': self._street_name,\n 'husnr': self._house_number,\n 'gatekode': self._street_code,\n\n }\n\n r = requests.get(BASE_URL + 'tommekalender', params = args, headers = headers)\n\n entries = []\n for f in json.loads(r.content):\n for d in f['Tommedatoer']:\n entries.append(\n Collection(\n date = datetime.datetime.strptime(\n d, \"%Y-%m-%dT%H:%M:%S\"\n ).date(),\n t = type[f['FraksjonId']]['name'],\n icon = type[f['FraksjonId']]['icon'],\n picture = type[f['FraksjonId']]['image']\n )\n )\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/minrenovasjon_no.py"}], "after_files": [{"content": "import requests\nimport urllib.parse\nimport json\nimport datetime\nimport re\n\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom pprint import pprint\n\nTITLE = \"Min Renovasjon\"\nDESCRIPTION = \"Source for Norkart Komtek MinRenovasjon (Norway).\"\nURL = \"https://www.norkart.no/komtek/renovasjon/\"\n\n# **street_code:** \\\n# **county_id:** \\\n# Can be found with this REST-API call.\n# ```\n# https://ws.geonorge.no/adresser/v1/#/default/get_sok\n# https://ws.geonorge.no/adresser/v1/sok?sok=Min%20Gate%2012\n# ```\n# \"street_code\" equals to \"adressekode\" and \"county_id\" equals to \"kommunenummer\".\n\nTEST_CASES = {\n \"Sandvika R\u00e5dhus\": {\n \"street_name\": \"R\u00e5dhustorget\",\n \"house_number\": 2,\n \"street_code\": 2469,\n \"county_id\": 3024\n }\n}\n\nBASE_URL = \"https://komteksky.norkart.no/komtek.renovasjonwebapi/api/\"\nAPP_KEY = \"AE13DEEC-804F-4615-A74E-B4FAC11F0A30\"\n\nclass Source:\n def __init__(self, street_name, house_number, street_code, county_id):\n self._street_name = street_name\n self._house_number = house_number\n self._street_code = street_code\n self._county_id = county_id\n self._icon_map = {\n \"\": \"mdi:trash-can\",\n \"brush\": \"mdi:trash-can\",\n \"elektriskogelektronisk\": \"mdi:chip\",\n \"farligavfall\": \"mdi:trash-can\",\n \"glassogmetallemballasje\": \"mdi:trash-can\",\n \"hageavfall\": \"mdi:leaf\",\n \"klaerogsko\": \"mdi:hanger\",\n \"matavfall\": \"mdi:trash-can\",\n \"matrestavfall\": \"mdi:trash-can\",\n \"metall\": \"mdi:trash-can\",\n \"papir\": \"mdi:newspaper-variant-multiple\",\n \"pappogkartong\": \"mdi:archive\",\n \"plastemballasje\": \"mdi:trash-can\",\n \"restavfall\": \"mdi:trash-can\",\n \"trevirke\": \"mdi:trash-can\"\n\n } \n\n def fetch(self):\n headers = {\n 'Kommunenr': str(self._county_id),\n 'RenovasjonAppKey': APP_KEY,\n 'user-agent': 'Home-Assitant-waste-col-sched/0.1'\n }\n args = {}\n\n r = requests.get(BASE_URL + 'fraksjoner', params = args, headers = headers)\n\n type = {}\n for f in json.loads(r.content):\n # pprint(f)\n icon = \"mdi:trash-can\"\n icon_name = re.sub(r\"^.*?/(\\w+)\\.\\w{3,4}$\", \"\\\\1\", f['Ikon'])\n if icon_name in self._icon_map:\n icon = self._icon_map[icon_name]\n type[f['Id']] = {\n 'name': f['Navn'],\n 'image': f['Ikon'],\n 'icon': icon\n }\n\n args = {\n 'gatenavn': self._street_name,\n 'husnr': self._house_number,\n 'gatekode': self._street_code,\n\n }\n\n r = requests.get(BASE_URL + 'tommekalender', params = args, headers = headers)\n\n entries = []\n for f in json.loads(r.content):\n for d in f['Tommedatoer']:\n entries.append(\n Collection(\n date = datetime.datetime.strptime(\n d, \"%Y-%m-%dT%H:%M:%S\"\n ).date(),\n t = type[f['FraksjonId']]['name'],\n icon = type[f['FraksjonId']]['icon'],\n picture = type[f['FraksjonId']]['image']\n )\n )\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/minrenovasjon_no.py"}]}
2,394
496
gh_patches_debug_49870
rasdani/github-patches
git_diff
fossasia__open-event-server-4398
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Attendee : user/<id>/attendee gives Error 400 **I'm submitting a ...** (check one with "x") - [x] bug report - [ ] feature request - [ ] support request => Please do not submit support requests here, instead ask your query in out Gitter channel at https://gitter.im/fossasia/open-event-orga-server URL ``` https://open-event-api.herokuapp.com/v1/users/5/attendees?include=ticket,event,order ``` ERROR ``` { "errors":[ { "title":"Invalid include querystring parameter.", "source":{ "parameter":"include" }, "status":400, "detail":"AttendeeSchemaPublic has no attribute ticket" } ], "jsonapi":{ "version":"1.0" } } ``` Related Front-end route ``` https://open-event-frontend.herokuapp.com/my-tickets ``` Due to recent changes the URL gives ERROR 400. @poush @shubham-padia @enigmaeth @magdalenesuo Please have a look at it --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `app/api/attendees.py` Content: ``` 1 from flask_jwt import current_identity 2 from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship 3 4 from app.api.bootstrap import api 5 from app.api.helpers.db import safe_query 6 from app.api.helpers.exceptions import ForbiddenException 7 from app.api.helpers.permission_manager import has_access 8 from app.api.helpers.permissions import jwt_required 9 from app.api.helpers.query import event_query 10 from app.api.helpers.utilities import require_relationship 11 from app.api.schema.attendees import AttendeeSchema, AttendeeSchemaPublic 12 from app.models import db 13 from app.models.order import Order 14 from app.models.ticket import Ticket 15 from app.models.ticket_holder import TicketHolder 16 from app.models.user import User 17 18 19 class AttendeeListPost(ResourceList): 20 """ 21 List and create Attendees through direct URL 22 """ 23 24 def before_post(self, args, kwargs, data): 25 require_relationship(['ticket', 'event'], data) 26 if not has_access('is_coorganizer', event_id=data['event']): 27 raise ForbiddenException({'source': 'event_id'}, "Access Forbidden") 28 29 methods = ['POST'] 30 schema = AttendeeSchema 31 data_layer = {'session': db.session, 32 'model': TicketHolder} 33 34 35 class AttendeeList(ResourceList): 36 """ 37 List Attendees 38 """ 39 def before_get(self, args, kwargs): 40 if kwargs.get('user_id'): 41 self.schema = AttendeeSchemaPublic 42 43 def query(self, view_kwargs): 44 query_ = self.session.query(TicketHolder) 45 46 if view_kwargs.get('order_identifier'): 47 order = safe_query(self, Order, 'identifier', view_kwargs['order_identifier'], 'order_identifier') 48 if not has_access('is_registrar', event_id=order.event_id) or not has_access('is_user_itself', 49 id=order.user_id): 50 raise ForbiddenException({'source': ''}, 'Access Forbidden') 51 query_ = query_.join(Order).filter(Order.id == order.id) 52 53 if view_kwargs.get('ticket_id'): 54 ticket = safe_query(self, Ticket, 'id', view_kwargs['ticket_id'], 'ticket_id') 55 if not has_access('is_registrar', event_id=ticket.event_id): 56 raise ForbiddenException({'source': ''}, 'Access Forbidden') 57 query_ = query_.join(Ticket).filter(Ticket.id == ticket.id) 58 59 if view_kwargs.get('user_id'): 60 user = safe_query(self, User, 'id', view_kwargs['user_id'], 'user_id') 61 if not has_access('is_user_itself', user_id=user.id): 62 raise ForbiddenException({'source': ''}, 'Access Forbidden') 63 query_ = query_.join(User, User.email == TicketHolder.email).filter(User.id == user.id) 64 65 query_ = event_query(self, query_, view_kwargs, permission='is_registrar') 66 return query_ 67 68 view_kwargs = True 69 methods = ['GET', ] 70 schema = AttendeeSchema 71 data_layer = {'session': db.session, 72 'model': TicketHolder, 73 'methods': { 74 'query': query 75 }} 76 77 78 class AttendeeDetail(ResourceDetail): 79 """ 80 Attendee detail by id 81 """ 82 def before_get_object(self, view_kwargs): 83 attendee = safe_query(self, TicketHolder, 'id', view_kwargs['id'], 'attendee_id') 84 if not has_access('is_registrar_or_user_itself', user_id=current_identity.id, event_id=attendee.event_id): 85 raise ForbiddenException({'source': 'User'}, 'You are not authorized to access this.') 86 87 def before_delete_object(self, obj, kwargs): 88 if not has_access('is_registrar', event_id=obj.event_id): 89 raise ForbiddenException({'source': 'User'}, 'You are not authorized to access this.') 90 91 def before_update_object(self, obj, data, kwargs): 92 if not has_access('is_registrar', event_id=obj.event_id): 93 raise ForbiddenException({'source': 'User'}, 'You are not authorized to access this.') 94 95 decorators = (jwt_required,) 96 schema = AttendeeSchema 97 data_layer = {'session': db.session, 98 'model': TicketHolder, 99 'methods': { 100 'before_get_object': before_get_object, 101 'before_update_object': before_update_object, 102 'before_delete_object': before_delete_object 103 }} 104 105 106 class AttendeeRelationshipRequired(ResourceRelationship): 107 """ 108 Attendee Relationship (Required) 109 """ 110 decorators = (jwt_required,) 111 methods = ['GET', 'PATCH'] 112 schema = AttendeeSchema 113 data_layer = {'session': db.session, 114 'model': TicketHolder} 115 116 117 class AttendeeRelationshipOptional(ResourceRelationship): 118 """ 119 Attendee Relationship(Optional) 120 """ 121 decorators = (api.has_permission('is_user_itself', fetch="user_id", fetch_as="id", model=TicketHolder),) 122 schema = AttendeeSchema 123 data_layer = {'session': db.session, 124 'model': TicketHolder} 125 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/app/api/attendees.py b/app/api/attendees.py --- a/app/api/attendees.py +++ b/app/api/attendees.py @@ -36,10 +36,6 @@ """ List Attendees """ - def before_get(self, args, kwargs): - if kwargs.get('user_id'): - self.schema = AttendeeSchemaPublic - def query(self, view_kwargs): query_ = self.session.query(TicketHolder)
{"golden_diff": "diff --git a/app/api/attendees.py b/app/api/attendees.py\n--- a/app/api/attendees.py\n+++ b/app/api/attendees.py\n@@ -36,10 +36,6 @@\n \"\"\"\n List Attendees\n \"\"\"\n- def before_get(self, args, kwargs):\n- if kwargs.get('user_id'):\n- self.schema = AttendeeSchemaPublic\n-\n def query(self, view_kwargs):\n query_ = self.session.query(TicketHolder)\n", "issue": "Attendee : user/<id>/attendee gives Error 400\n**I'm submitting a ...** (check one with \"x\")\r\n- [x] bug report\r\n- [ ] feature request\r\n- [ ] support request => Please do not submit support requests here, instead ask your query in out Gitter channel at https://gitter.im/fossasia/open-event-orga-server\r\n\r\nURL\r\n```\r\nhttps://open-event-api.herokuapp.com/v1/users/5/attendees?include=ticket,event,order\r\n```\r\n\r\nERROR\r\n```\r\n{\r\n \"errors\":[\r\n {\r\n \"title\":\"Invalid include querystring parameter.\",\r\n \"source\":{\r\n \"parameter\":\"include\"\r\n },\r\n \"status\":400,\r\n \"detail\":\"AttendeeSchemaPublic has no attribute ticket\"\r\n }\r\n ],\r\n \"jsonapi\":{\r\n \"version\":\"1.0\"\r\n }\r\n}\r\n```\r\nRelated Front-end route\r\n```\r\nhttps://open-event-frontend.herokuapp.com/my-tickets\r\n```\r\nDue to recent changes the URL gives ERROR 400.\r\n@poush @shubham-padia @enigmaeth @magdalenesuo Please have a look at it\n", "before_files": [{"content": "from flask_jwt import current_identity\nfrom flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\n\nfrom app.api.bootstrap import api\nfrom app.api.helpers.db import safe_query\nfrom app.api.helpers.exceptions import ForbiddenException\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.permissions import jwt_required\nfrom app.api.helpers.query import event_query\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.schema.attendees import AttendeeSchema, AttendeeSchemaPublic\nfrom app.models import db\nfrom app.models.order import Order\nfrom app.models.ticket import Ticket\nfrom app.models.ticket_holder import TicketHolder\nfrom app.models.user import User\n\n\nclass AttendeeListPost(ResourceList):\n \"\"\"\n List and create Attendees through direct URL\n \"\"\"\n\n def before_post(self, args, kwargs, data):\n require_relationship(['ticket', 'event'], data)\n if not has_access('is_coorganizer', event_id=data['event']):\n raise ForbiddenException({'source': 'event_id'}, \"Access Forbidden\")\n\n methods = ['POST']\n schema = AttendeeSchema\n data_layer = {'session': db.session,\n 'model': TicketHolder}\n\n\nclass AttendeeList(ResourceList):\n \"\"\"\n List Attendees\n \"\"\"\n def before_get(self, args, kwargs):\n if kwargs.get('user_id'):\n self.schema = AttendeeSchemaPublic\n\n def query(self, view_kwargs):\n query_ = self.session.query(TicketHolder)\n\n if view_kwargs.get('order_identifier'):\n order = safe_query(self, Order, 'identifier', view_kwargs['order_identifier'], 'order_identifier')\n if not has_access('is_registrar', event_id=order.event_id) or not has_access('is_user_itself',\n id=order.user_id):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n query_ = query_.join(Order).filter(Order.id == order.id)\n\n if view_kwargs.get('ticket_id'):\n ticket = safe_query(self, Ticket, 'id', view_kwargs['ticket_id'], 'ticket_id')\n if not has_access('is_registrar', event_id=ticket.event_id):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n query_ = query_.join(Ticket).filter(Ticket.id == ticket.id)\n\n if view_kwargs.get('user_id'):\n user = safe_query(self, User, 'id', view_kwargs['user_id'], 'user_id')\n if not has_access('is_user_itself', user_id=user.id):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n query_ = query_.join(User, User.email == TicketHolder.email).filter(User.id == user.id)\n\n query_ = event_query(self, query_, view_kwargs, permission='is_registrar')\n return query_\n\n view_kwargs = True\n methods = ['GET', ]\n schema = AttendeeSchema\n data_layer = {'session': db.session,\n 'model': TicketHolder,\n 'methods': {\n 'query': query\n }}\n\n\nclass AttendeeDetail(ResourceDetail):\n \"\"\"\n Attendee detail by id\n \"\"\"\n def before_get_object(self, view_kwargs):\n attendee = safe_query(self, TicketHolder, 'id', view_kwargs['id'], 'attendee_id')\n if not has_access('is_registrar_or_user_itself', user_id=current_identity.id, event_id=attendee.event_id):\n raise ForbiddenException({'source': 'User'}, 'You are not authorized to access this.')\n\n def before_delete_object(self, obj, kwargs):\n if not has_access('is_registrar', event_id=obj.event_id):\n raise ForbiddenException({'source': 'User'}, 'You are not authorized to access this.')\n\n def before_update_object(self, obj, data, kwargs):\n if not has_access('is_registrar', event_id=obj.event_id):\n raise ForbiddenException({'source': 'User'}, 'You are not authorized to access this.')\n\n decorators = (jwt_required,)\n schema = AttendeeSchema\n data_layer = {'session': db.session,\n 'model': TicketHolder,\n 'methods': {\n 'before_get_object': before_get_object,\n 'before_update_object': before_update_object,\n 'before_delete_object': before_delete_object\n }}\n\n\nclass AttendeeRelationshipRequired(ResourceRelationship):\n \"\"\"\n Attendee Relationship (Required)\n \"\"\"\n decorators = (jwt_required,)\n methods = ['GET', 'PATCH']\n schema = AttendeeSchema\n data_layer = {'session': db.session,\n 'model': TicketHolder}\n\n\nclass AttendeeRelationshipOptional(ResourceRelationship):\n \"\"\"\n Attendee Relationship(Optional)\n \"\"\"\n decorators = (api.has_permission('is_user_itself', fetch=\"user_id\", fetch_as=\"id\", model=TicketHolder),)\n schema = AttendeeSchema\n data_layer = {'session': db.session,\n 'model': TicketHolder}\n", "path": "app/api/attendees.py"}], "after_files": [{"content": "from flask_jwt import current_identity\nfrom flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\n\nfrom app.api.bootstrap import api\nfrom app.api.helpers.db import safe_query\nfrom app.api.helpers.exceptions import ForbiddenException\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.permissions import jwt_required\nfrom app.api.helpers.query import event_query\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.schema.attendees import AttendeeSchema, AttendeeSchemaPublic\nfrom app.models import db\nfrom app.models.order import Order\nfrom app.models.ticket import Ticket\nfrom app.models.ticket_holder import TicketHolder\nfrom app.models.user import User\n\n\nclass AttendeeListPost(ResourceList):\n \"\"\"\n List and create Attendees through direct URL\n \"\"\"\n\n def before_post(self, args, kwargs, data):\n require_relationship(['ticket', 'event'], data)\n if not has_access('is_coorganizer', event_id=data['event']):\n raise ForbiddenException({'source': 'event_id'}, \"Access Forbidden\")\n\n methods = ['POST']\n schema = AttendeeSchema\n data_layer = {'session': db.session,\n 'model': TicketHolder}\n\n\nclass AttendeeList(ResourceList):\n \"\"\"\n List Attendees\n \"\"\"\n def query(self, view_kwargs):\n query_ = self.session.query(TicketHolder)\n\n if view_kwargs.get('order_identifier'):\n order = safe_query(self, Order, 'identifier', view_kwargs['order_identifier'], 'order_identifier')\n if not has_access('is_registrar', event_id=order.event_id) or not has_access('is_user_itself',\n id=order.user_id):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n query_ = query_.join(Order).filter(Order.id == order.id)\n\n if view_kwargs.get('ticket_id'):\n ticket = safe_query(self, Ticket, 'id', view_kwargs['ticket_id'], 'ticket_id')\n if not has_access('is_registrar', event_id=ticket.event_id):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n query_ = query_.join(Ticket).filter(Ticket.id == ticket.id)\n\n if view_kwargs.get('user_id'):\n user = safe_query(self, User, 'id', view_kwargs['user_id'], 'user_id')\n if not has_access('is_user_itself', user_id=user.id):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n query_ = query_.join(User, User.email == TicketHolder.email).filter(User.id == user.id)\n\n query_ = event_query(self, query_, view_kwargs, permission='is_registrar')\n return query_\n\n view_kwargs = True\n methods = ['GET', ]\n schema = AttendeeSchema\n data_layer = {'session': db.session,\n 'model': TicketHolder,\n 'methods': {\n 'query': query\n }}\n\n\nclass AttendeeDetail(ResourceDetail):\n \"\"\"\n Attendee detail by id\n \"\"\"\n def before_get_object(self, view_kwargs):\n attendee = safe_query(self, TicketHolder, 'id', view_kwargs['id'], 'attendee_id')\n if not has_access('is_registrar_or_user_itself', user_id=current_identity.id, event_id=attendee.event_id):\n raise ForbiddenException({'source': 'User'}, 'You are not authorized to access this.')\n\n def before_delete_object(self, obj, kwargs):\n if not has_access('is_registrar', event_id=obj.event_id):\n raise ForbiddenException({'source': 'User'}, 'You are not authorized to access this.')\n\n def before_update_object(self, obj, data, kwargs):\n if not has_access('is_registrar', event_id=obj.event_id):\n raise ForbiddenException({'source': 'User'}, 'You are not authorized to access this.')\n\n decorators = (jwt_required,)\n schema = AttendeeSchema\n data_layer = {'session': db.session,\n 'model': TicketHolder,\n 'methods': {\n 'before_get_object': before_get_object,\n 'before_update_object': before_update_object,\n 'before_delete_object': before_delete_object\n }}\n\n\nclass AttendeeRelationshipRequired(ResourceRelationship):\n \"\"\"\n Attendee Relationship (Required)\n \"\"\"\n decorators = (jwt_required,)\n methods = ['GET', 'PATCH']\n schema = AttendeeSchema\n data_layer = {'session': db.session,\n 'model': TicketHolder}\n\n\nclass AttendeeRelationshipOptional(ResourceRelationship):\n \"\"\"\n Attendee Relationship(Optional)\n \"\"\"\n decorators = (api.has_permission('is_user_itself', fetch=\"user_id\", fetch_as=\"id\", model=TicketHolder),)\n schema = AttendeeSchema\n data_layer = {'session': db.session,\n 'model': TicketHolder}\n", "path": "app/api/attendees.py"}]}
1,825
108
gh_patches_debug_19996
rasdani/github-patches
git_diff
MycroftAI__mycroft-core-1539
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Allow comments in dialog and voc files Having a way to comment dialog and voc files would ease the translation process as the original developer can provide context useful for the translator, and the translator can explain things that motivate his/her choice of words. I am thinking for instance of voc files where similar words to the target word are used to trigger the skill in order to workaround speech to text errors. Documenting that the meaning of those words should not be translated in a comment would be practical. Example: the "high" word triggers the hello world skill because "high" is similar to "hi". In the Spanish translation we have "hola" (for "hello") and we should have "ola" (literally "wave") because it sounds like "hola" Other file formats usually used for translation purposes allow developer/translator comments (see the .po file format for instance) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mycroft/skills/skill_data.py` Content: ``` 1 # Copyright 2018 Mycroft AI Inc. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 # 15 16 """Module containing methods needed to load skill 17 data such as dialogs, intents and regular expressions. 18 """ 19 20 from os import listdir 21 from os.path import splitext, join 22 import re 23 24 from mycroft.messagebus.message import Message 25 26 27 def load_vocab_from_file(path, vocab_type, emitter): 28 """Load Mycroft vocabulary from file 29 The vocab is sent to the intent handler using the message bus 30 31 Args: 32 path: path to vocabulary file (*.voc) 33 vocab_type: keyword name 34 emitter: emitter to access the message bus 35 skill_id(str): skill id 36 """ 37 if path.endswith('.voc'): 38 with open(path, 'r') as voc_file: 39 for line in voc_file.readlines(): 40 parts = line.strip().split("|") 41 entity = parts[0] 42 emitter.emit(Message("register_vocab", { 43 'start': entity, 'end': vocab_type 44 })) 45 for alias in parts[1:]: 46 emitter.emit(Message("register_vocab", { 47 'start': alias, 'end': vocab_type, 'alias_of': entity 48 })) 49 50 51 def load_regex_from_file(path, emitter, skill_id): 52 """Load regex from file 53 The regex is sent to the intent handler using the message bus 54 55 Args: 56 path: path to vocabulary file (*.voc) 57 emitter: emitter to access the message bus 58 """ 59 if path.endswith('.rx'): 60 with open(path, 'r') as reg_file: 61 for line in reg_file.readlines(): 62 re.compile(munge_regex(line.strip(), skill_id)) 63 emitter.emit( 64 Message("register_vocab", 65 {'regex': munge_regex(line.strip(), skill_id)})) 66 67 68 def load_vocabulary(basedir, emitter, skill_id): 69 """Load vocabulary from all files in the specified directory. 70 71 Args: 72 basedir (str): path of directory to load from 73 emitter (messagebus emitter): websocket used to send the vocab to 74 the intent service 75 skill_id: skill the data belongs to 76 """ 77 for vocab_file in listdir(basedir): 78 if vocab_file.endswith(".voc"): 79 vocab_type = to_letters(skill_id) + splitext(vocab_file)[0] 80 load_vocab_from_file( 81 join(basedir, vocab_file), vocab_type, emitter) 82 83 84 def load_regex(basedir, emitter, skill_id): 85 """Load regex from all files in the specified directory. 86 87 Args: 88 basedir (str): path of directory to load from 89 emitter (messagebus emitter): websocket used to send the vocab to 90 the intent service 91 skill_id (int): skill identifier 92 """ 93 for regex_type in listdir(basedir): 94 if regex_type.endswith(".rx"): 95 load_regex_from_file( 96 join(basedir, regex_type), emitter, skill_id) 97 98 99 def to_letters(number): 100 """Convert number to string of letters. 101 102 0 -> A, 1 -> B, etc. 103 104 Args: 105 number (int): number to be converted 106 Returns: 107 (str) String of letters 108 """ 109 ret = '' 110 for n in str(number).strip('-'): 111 ret += chr(65 + int(n)) 112 return ret 113 114 115 def munge_regex(regex, skill_id): 116 """Insert skill id as letters into match groups. 117 118 Args: 119 regex (str): regex string 120 skill_id (int): skill identifier 121 Returns: 122 (str) munged regex 123 """ 124 base = '(?P<' + to_letters(skill_id) 125 return base.join(regex.split('(?P<')) 126 127 128 def munge_intent_parser(intent_parser, name, skill_id): 129 """Rename intent keywords to make them skill exclusive 130 This gives the intent parser an exclusive name in the 131 format <skill_id>:<name>. The keywords are given unique 132 names in the format <Skill id as letters><Intent name>. 133 134 The function will not munge instances that's already been 135 munged 136 137 Args: 138 intent_parser: (IntentParser) object to update 139 name: (str) Skill name 140 skill_id: (int) skill identifier 141 """ 142 # Munge parser name 143 if str(skill_id) + ':' not in name: 144 intent_parser.name = str(skill_id) + ':' + name 145 else: 146 intent_parser.name = name 147 148 # Munge keywords 149 skill_id = to_letters(skill_id) 150 # Munge required keyword 151 reqs = [] 152 for i in intent_parser.requires: 153 if skill_id not in i[0]: 154 kw = (skill_id + i[0], skill_id + i[0]) 155 reqs.append(kw) 156 else: 157 reqs.append(i) 158 intent_parser.requires = reqs 159 160 # Munge optional keywords 161 opts = [] 162 for i in intent_parser.optional: 163 if skill_id not in i[0]: 164 kw = (skill_id + i[0], skill_id + i[0]) 165 opts.append(kw) 166 else: 167 opts.append(i) 168 intent_parser.optional = opts 169 170 # Munge at_least_one keywords 171 at_least_one = [] 172 for i in intent_parser.at_least_one: 173 element = [skill_id + e.replace(skill_id, '') for e in i] 174 at_least_one.append(tuple(element)) 175 intent_parser.at_least_one = at_least_one 176 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mycroft/skills/skill_data.py b/mycroft/skills/skill_data.py --- a/mycroft/skills/skill_data.py +++ b/mycroft/skills/skill_data.py @@ -37,6 +37,8 @@ if path.endswith('.voc'): with open(path, 'r') as voc_file: for line in voc_file.readlines(): + if line.startswith("#"): + continue parts = line.strip().split("|") entity = parts[0] emitter.emit(Message("register_vocab", { @@ -59,6 +61,8 @@ if path.endswith('.rx'): with open(path, 'r') as reg_file: for line in reg_file.readlines(): + if line.startswith("#"): + continue re.compile(munge_regex(line.strip(), skill_id)) emitter.emit( Message("register_vocab",
{"golden_diff": "diff --git a/mycroft/skills/skill_data.py b/mycroft/skills/skill_data.py\n--- a/mycroft/skills/skill_data.py\n+++ b/mycroft/skills/skill_data.py\n@@ -37,6 +37,8 @@\n if path.endswith('.voc'):\n with open(path, 'r') as voc_file:\n for line in voc_file.readlines():\n+ if line.startswith(\"#\"):\n+ continue\n parts = line.strip().split(\"|\")\n entity = parts[0]\n emitter.emit(Message(\"register_vocab\", {\n@@ -59,6 +61,8 @@\n if path.endswith('.rx'):\n with open(path, 'r') as reg_file:\n for line in reg_file.readlines():\n+ if line.startswith(\"#\"):\n+ continue\n re.compile(munge_regex(line.strip(), skill_id))\n emitter.emit(\n Message(\"register_vocab\",\n", "issue": "Allow comments in dialog and voc files\nHaving a way to comment dialog and voc files would ease the translation process as the original developer can provide context useful for the translator, and the translator can explain things that motivate his/her choice of words.\r\n\r\nI am thinking for instance of voc files where similar words to the target word are used to trigger the skill in order to workaround speech to text errors. Documenting that the meaning of those words should not be translated in a comment would be practical.\r\n\r\nExample: the \"high\" word triggers the hello world skill because \"high\" is similar to \"hi\". In the Spanish translation we have \"hola\" (for \"hello\") and we should have \"ola\" (literally \"wave\") because it sounds like \"hola\"\r\n\r\nOther file formats usually used for translation purposes allow developer/translator comments (see the .po file format for instance)\n", "before_files": [{"content": "# Copyright 2018 Mycroft AI Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"Module containing methods needed to load skill\ndata such as dialogs, intents and regular expressions.\n\"\"\"\n\nfrom os import listdir\nfrom os.path import splitext, join\nimport re\n\nfrom mycroft.messagebus.message import Message\n\n\ndef load_vocab_from_file(path, vocab_type, emitter):\n \"\"\"Load Mycroft vocabulary from file\n The vocab is sent to the intent handler using the message bus\n\n Args:\n path: path to vocabulary file (*.voc)\n vocab_type: keyword name\n emitter: emitter to access the message bus\n skill_id(str): skill id\n \"\"\"\n if path.endswith('.voc'):\n with open(path, 'r') as voc_file:\n for line in voc_file.readlines():\n parts = line.strip().split(\"|\")\n entity = parts[0]\n emitter.emit(Message(\"register_vocab\", {\n 'start': entity, 'end': vocab_type\n }))\n for alias in parts[1:]:\n emitter.emit(Message(\"register_vocab\", {\n 'start': alias, 'end': vocab_type, 'alias_of': entity\n }))\n\n\ndef load_regex_from_file(path, emitter, skill_id):\n \"\"\"Load regex from file\n The regex is sent to the intent handler using the message bus\n\n Args:\n path: path to vocabulary file (*.voc)\n emitter: emitter to access the message bus\n \"\"\"\n if path.endswith('.rx'):\n with open(path, 'r') as reg_file:\n for line in reg_file.readlines():\n re.compile(munge_regex(line.strip(), skill_id))\n emitter.emit(\n Message(\"register_vocab\",\n {'regex': munge_regex(line.strip(), skill_id)}))\n\n\ndef load_vocabulary(basedir, emitter, skill_id):\n \"\"\"Load vocabulary from all files in the specified directory.\n\n Args:\n basedir (str): path of directory to load from\n emitter (messagebus emitter): websocket used to send the vocab to\n the intent service\n skill_id: skill the data belongs to\n \"\"\"\n for vocab_file in listdir(basedir):\n if vocab_file.endswith(\".voc\"):\n vocab_type = to_letters(skill_id) + splitext(vocab_file)[0]\n load_vocab_from_file(\n join(basedir, vocab_file), vocab_type, emitter)\n\n\ndef load_regex(basedir, emitter, skill_id):\n \"\"\"Load regex from all files in the specified directory.\n\n Args:\n basedir (str): path of directory to load from\n emitter (messagebus emitter): websocket used to send the vocab to\n the intent service\n skill_id (int): skill identifier\n \"\"\"\n for regex_type in listdir(basedir):\n if regex_type.endswith(\".rx\"):\n load_regex_from_file(\n join(basedir, regex_type), emitter, skill_id)\n\n\ndef to_letters(number):\n \"\"\"Convert number to string of letters.\n\n 0 -> A, 1 -> B, etc.\n\n Args:\n number (int): number to be converted\n Returns:\n (str) String of letters\n \"\"\"\n ret = ''\n for n in str(number).strip('-'):\n ret += chr(65 + int(n))\n return ret\n\n\ndef munge_regex(regex, skill_id):\n \"\"\"Insert skill id as letters into match groups.\n\n Args:\n regex (str): regex string\n skill_id (int): skill identifier\n Returns:\n (str) munged regex\n \"\"\"\n base = '(?P<' + to_letters(skill_id)\n return base.join(regex.split('(?P<'))\n\n\ndef munge_intent_parser(intent_parser, name, skill_id):\n \"\"\"Rename intent keywords to make them skill exclusive\n This gives the intent parser an exclusive name in the\n format <skill_id>:<name>. The keywords are given unique\n names in the format <Skill id as letters><Intent name>.\n\n The function will not munge instances that's already been\n munged\n\n Args:\n intent_parser: (IntentParser) object to update\n name: (str) Skill name\n skill_id: (int) skill identifier\n \"\"\"\n # Munge parser name\n if str(skill_id) + ':' not in name:\n intent_parser.name = str(skill_id) + ':' + name\n else:\n intent_parser.name = name\n\n # Munge keywords\n skill_id = to_letters(skill_id)\n # Munge required keyword\n reqs = []\n for i in intent_parser.requires:\n if skill_id not in i[0]:\n kw = (skill_id + i[0], skill_id + i[0])\n reqs.append(kw)\n else:\n reqs.append(i)\n intent_parser.requires = reqs\n\n # Munge optional keywords\n opts = []\n for i in intent_parser.optional:\n if skill_id not in i[0]:\n kw = (skill_id + i[0], skill_id + i[0])\n opts.append(kw)\n else:\n opts.append(i)\n intent_parser.optional = opts\n\n # Munge at_least_one keywords\n at_least_one = []\n for i in intent_parser.at_least_one:\n element = [skill_id + e.replace(skill_id, '') for e in i]\n at_least_one.append(tuple(element))\n intent_parser.at_least_one = at_least_one\n", "path": "mycroft/skills/skill_data.py"}], "after_files": [{"content": "# Copyright 2018 Mycroft AI Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"Module containing methods needed to load skill\ndata such as dialogs, intents and regular expressions.\n\"\"\"\n\nfrom os import listdir\nfrom os.path import splitext, join\nimport re\n\nfrom mycroft.messagebus.message import Message\n\n\ndef load_vocab_from_file(path, vocab_type, emitter):\n \"\"\"Load Mycroft vocabulary from file\n The vocab is sent to the intent handler using the message bus\n\n Args:\n path: path to vocabulary file (*.voc)\n vocab_type: keyword name\n emitter: emitter to access the message bus\n skill_id(str): skill id\n \"\"\"\n if path.endswith('.voc'):\n with open(path, 'r') as voc_file:\n for line in voc_file.readlines():\n if line.startswith(\"#\"):\n continue\n parts = line.strip().split(\"|\")\n entity = parts[0]\n emitter.emit(Message(\"register_vocab\", {\n 'start': entity, 'end': vocab_type\n }))\n for alias in parts[1:]:\n emitter.emit(Message(\"register_vocab\", {\n 'start': alias, 'end': vocab_type, 'alias_of': entity\n }))\n\n\ndef load_regex_from_file(path, emitter, skill_id):\n \"\"\"Load regex from file\n The regex is sent to the intent handler using the message bus\n\n Args:\n path: path to vocabulary file (*.voc)\n emitter: emitter to access the message bus\n \"\"\"\n if path.endswith('.rx'):\n with open(path, 'r') as reg_file:\n for line in reg_file.readlines():\n if line.startswith(\"#\"):\n continue\n re.compile(munge_regex(line.strip(), skill_id))\n emitter.emit(\n Message(\"register_vocab\",\n {'regex': munge_regex(line.strip(), skill_id)}))\n\n\ndef load_vocabulary(basedir, emitter, skill_id):\n \"\"\"Load vocabulary from all files in the specified directory.\n\n Args:\n basedir (str): path of directory to load from\n emitter (messagebus emitter): websocket used to send the vocab to\n the intent service\n skill_id: skill the data belongs to\n \"\"\"\n for vocab_file in listdir(basedir):\n if vocab_file.endswith(\".voc\"):\n vocab_type = to_letters(skill_id) + splitext(vocab_file)[0]\n load_vocab_from_file(\n join(basedir, vocab_file), vocab_type, emitter)\n\n\ndef load_regex(basedir, emitter, skill_id):\n \"\"\"Load regex from all files in the specified directory.\n\n Args:\n basedir (str): path of directory to load from\n emitter (messagebus emitter): websocket used to send the vocab to\n the intent service\n skill_id (int): skill identifier\n \"\"\"\n for regex_type in listdir(basedir):\n if regex_type.endswith(\".rx\"):\n load_regex_from_file(\n join(basedir, regex_type), emitter, skill_id)\n\n\ndef to_letters(number):\n \"\"\"Convert number to string of letters.\n\n 0 -> A, 1 -> B, etc.\n\n Args:\n number (int): number to be converted\n Returns:\n (str) String of letters\n \"\"\"\n ret = ''\n for n in str(number).strip('-'):\n ret += chr(65 + int(n))\n return ret\n\n\ndef munge_regex(regex, skill_id):\n \"\"\"Insert skill id as letters into match groups.\n\n Args:\n regex (str): regex string\n skill_id (int): skill identifier\n Returns:\n (str) munged regex\n \"\"\"\n base = '(?P<' + to_letters(skill_id)\n return base.join(regex.split('(?P<'))\n\n\ndef munge_intent_parser(intent_parser, name, skill_id):\n \"\"\"Rename intent keywords to make them skill exclusive\n This gives the intent parser an exclusive name in the\n format <skill_id>:<name>. The keywords are given unique\n names in the format <Skill id as letters><Intent name>.\n\n The function will not munge instances that's already been\n munged\n\n Args:\n intent_parser: (IntentParser) object to update\n name: (str) Skill name\n skill_id: (int) skill identifier\n \"\"\"\n # Munge parser name\n if str(skill_id) + ':' not in name:\n intent_parser.name = str(skill_id) + ':' + name\n else:\n intent_parser.name = name\n\n # Munge keywords\n skill_id = to_letters(skill_id)\n # Munge required keyword\n reqs = []\n for i in intent_parser.requires:\n if skill_id not in i[0]:\n kw = (skill_id + i[0], skill_id + i[0])\n reqs.append(kw)\n else:\n reqs.append(i)\n intent_parser.requires = reqs\n\n # Munge optional keywords\n opts = []\n for i in intent_parser.optional:\n if skill_id not in i[0]:\n kw = (skill_id + i[0], skill_id + i[0])\n opts.append(kw)\n else:\n opts.append(i)\n intent_parser.optional = opts\n\n # Munge at_least_one keywords\n at_least_one = []\n for i in intent_parser.at_least_one:\n element = [skill_id + e.replace(skill_id, '') for e in i]\n at_least_one.append(tuple(element))\n intent_parser.at_least_one = at_least_one\n", "path": "mycroft/skills/skill_data.py"}]}
2,158
192
gh_patches_debug_15406
rasdani/github-patches
git_diff
vega__altair-3303
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Verify versions of both VegaFusion packages See https://github.com/altair-viz/altair/pull/3281#issuecomment-1867599879 We should check the version of `vegafusion-python-embed` as well as the version of `vegafusion` since it's possible for these to get out of sync. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `altair/utils/_importers.py` Content: ``` 1 from types import ModuleType 2 from packaging.version import Version 3 from importlib.metadata import version as importlib_version 4 5 6 def import_vegafusion() -> ModuleType: 7 min_version = "1.5.0" 8 try: 9 version = importlib_version("vegafusion") 10 if Version(version) < Version(min_version): 11 raise RuntimeError( 12 f"The vegafusion package must be version {min_version} or greater. " 13 f"Found version {version}" 14 ) 15 import vegafusion as vf # type: ignore 16 17 return vf 18 except ImportError as err: 19 raise ImportError( 20 'The "vegafusion" data transformer and chart.transformed_data feature requires\n' 21 f"version {min_version} or greater of the 'vegafusion-python-embed' and 'vegafusion' packages.\n" 22 "These can be installed with pip using:\n" 23 f' pip install "vegafusion[embed]>={min_version}"\n' 24 "Or with conda using:\n" 25 f' conda install -c conda-forge "vegafusion-python-embed>={min_version}" ' 26 f'"vegafusion>={min_version}"\n\n' 27 f"ImportError: {err.args[0]}" 28 ) from err 29 30 31 def import_vl_convert() -> ModuleType: 32 min_version = "1.1.0" 33 try: 34 version = importlib_version("vl-convert-python") 35 if Version(version) < Version(min_version): 36 raise RuntimeError( 37 f"The vl-convert-python package must be version {min_version} or greater. " 38 f"Found version {version}" 39 ) 40 import vl_convert as vlc 41 42 return vlc 43 except ImportError as err: 44 raise ImportError( 45 f"The vl-convert Vega-Lite compiler and file export feature requires\n" 46 f"version {min_version} or greater of the 'vl-convert-python' package. \n" 47 f"This can be installed with pip using:\n" 48 f' pip install "vl-convert-python>={min_version}"\n' 49 "or conda:\n" 50 f' conda install -c conda-forge "vl-convert-python>={min_version}"\n\n' 51 f"ImportError: {err.args[0]}" 52 ) from err 53 54 55 def vl_version_for_vl_convert() -> str: 56 from ..vegalite import SCHEMA_VERSION 57 58 # Compute VlConvert's vl_version string (of the form 'v5_2') 59 # from SCHEMA_VERSION (of the form 'v5.2.0') 60 return "_".join(SCHEMA_VERSION.split(".")[:2]) 61 62 63 def import_pyarrow_interchange() -> ModuleType: 64 min_version = "11.0.0" 65 try: 66 version = importlib_version("pyarrow") 67 68 if Version(version) < Version(min_version): 69 raise RuntimeError( 70 f"The pyarrow package must be version {min_version} or greater. " 71 f"Found version {version}" 72 ) 73 import pyarrow.interchange as pi 74 75 return pi 76 except ImportError as err: 77 raise ImportError( 78 f"Usage of the DataFrame Interchange Protocol requires\n" 79 f"version {min_version} or greater of the pyarrow package. \n" 80 f"This can be installed with pip using:\n" 81 f' pip install "pyarrow>={min_version}"\n' 82 "or conda:\n" 83 f' conda install -c conda-forge "pyarrow>={min_version}"\n\n' 84 f"ImportError: {err.args[0]}" 85 ) from err 86 87 88 def pyarrow_available() -> bool: 89 try: 90 import_pyarrow_interchange() 91 return True 92 except ImportError: 93 return False 94 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/altair/utils/_importers.py b/altair/utils/_importers.py --- a/altair/utils/_importers.py +++ b/altair/utils/_importers.py @@ -7,10 +7,14 @@ min_version = "1.5.0" try: version = importlib_version("vegafusion") - if Version(version) < Version(min_version): + embed_version = importlib_version("vegafusion-python-embed") + if version != embed_version or Version(version) < Version(min_version): raise RuntimeError( - f"The vegafusion package must be version {min_version} or greater. " - f"Found version {version}" + "The versions of the vegafusion and vegafusion-python-embed packages must match\n" + f"and must be version {min_version} or greater.\n" + f"Found:\n" + f" - vegafusion=={version}\n" + f" - vegafusion-python-embed=={embed_version}\n" ) import vegafusion as vf # type: ignore
{"golden_diff": "diff --git a/altair/utils/_importers.py b/altair/utils/_importers.py\n--- a/altair/utils/_importers.py\n+++ b/altair/utils/_importers.py\n@@ -7,10 +7,14 @@\n min_version = \"1.5.0\"\n try:\n version = importlib_version(\"vegafusion\")\n- if Version(version) < Version(min_version):\n+ embed_version = importlib_version(\"vegafusion-python-embed\")\n+ if version != embed_version or Version(version) < Version(min_version):\n raise RuntimeError(\n- f\"The vegafusion package must be version {min_version} or greater. \"\n- f\"Found version {version}\"\n+ \"The versions of the vegafusion and vegafusion-python-embed packages must match\\n\"\n+ f\"and must be version {min_version} or greater.\\n\"\n+ f\"Found:\\n\"\n+ f\" - vegafusion=={version}\\n\"\n+ f\" - vegafusion-python-embed=={embed_version}\\n\"\n )\n import vegafusion as vf # type: ignore\n", "issue": "Verify versions of both VegaFusion packages\nSee https://github.com/altair-viz/altair/pull/3281#issuecomment-1867599879\r\n\r\nWe should check the version of `vegafusion-python-embed` as well as the version of `vegafusion` since it's possible for these to get out of sync.\r\n\r\n\n", "before_files": [{"content": "from types import ModuleType\nfrom packaging.version import Version\nfrom importlib.metadata import version as importlib_version\n\n\ndef import_vegafusion() -> ModuleType:\n min_version = \"1.5.0\"\n try:\n version = importlib_version(\"vegafusion\")\n if Version(version) < Version(min_version):\n raise RuntimeError(\n f\"The vegafusion package must be version {min_version} or greater. \"\n f\"Found version {version}\"\n )\n import vegafusion as vf # type: ignore\n\n return vf\n except ImportError as err:\n raise ImportError(\n 'The \"vegafusion\" data transformer and chart.transformed_data feature requires\\n'\n f\"version {min_version} or greater of the 'vegafusion-python-embed' and 'vegafusion' packages.\\n\"\n \"These can be installed with pip using:\\n\"\n f' pip install \"vegafusion[embed]>={min_version}\"\\n'\n \"Or with conda using:\\n\"\n f' conda install -c conda-forge \"vegafusion-python-embed>={min_version}\" '\n f'\"vegafusion>={min_version}\"\\n\\n'\n f\"ImportError: {err.args[0]}\"\n ) from err\n\n\ndef import_vl_convert() -> ModuleType:\n min_version = \"1.1.0\"\n try:\n version = importlib_version(\"vl-convert-python\")\n if Version(version) < Version(min_version):\n raise RuntimeError(\n f\"The vl-convert-python package must be version {min_version} or greater. \"\n f\"Found version {version}\"\n )\n import vl_convert as vlc\n\n return vlc\n except ImportError as err:\n raise ImportError(\n f\"The vl-convert Vega-Lite compiler and file export feature requires\\n\"\n f\"version {min_version} or greater of the 'vl-convert-python' package. \\n\"\n f\"This can be installed with pip using:\\n\"\n f' pip install \"vl-convert-python>={min_version}\"\\n'\n \"or conda:\\n\"\n f' conda install -c conda-forge \"vl-convert-python>={min_version}\"\\n\\n'\n f\"ImportError: {err.args[0]}\"\n ) from err\n\n\ndef vl_version_for_vl_convert() -> str:\n from ..vegalite import SCHEMA_VERSION\n\n # Compute VlConvert's vl_version string (of the form 'v5_2')\n # from SCHEMA_VERSION (of the form 'v5.2.0')\n return \"_\".join(SCHEMA_VERSION.split(\".\")[:2])\n\n\ndef import_pyarrow_interchange() -> ModuleType:\n min_version = \"11.0.0\"\n try:\n version = importlib_version(\"pyarrow\")\n\n if Version(version) < Version(min_version):\n raise RuntimeError(\n f\"The pyarrow package must be version {min_version} or greater. \"\n f\"Found version {version}\"\n )\n import pyarrow.interchange as pi\n\n return pi\n except ImportError as err:\n raise ImportError(\n f\"Usage of the DataFrame Interchange Protocol requires\\n\"\n f\"version {min_version} or greater of the pyarrow package. \\n\"\n f\"This can be installed with pip using:\\n\"\n f' pip install \"pyarrow>={min_version}\"\\n'\n \"or conda:\\n\"\n f' conda install -c conda-forge \"pyarrow>={min_version}\"\\n\\n'\n f\"ImportError: {err.args[0]}\"\n ) from err\n\n\ndef pyarrow_available() -> bool:\n try:\n import_pyarrow_interchange()\n return True\n except ImportError:\n return False\n", "path": "altair/utils/_importers.py"}], "after_files": [{"content": "from types import ModuleType\nfrom packaging.version import Version\nfrom importlib.metadata import version as importlib_version\n\n\ndef import_vegafusion() -> ModuleType:\n min_version = \"1.5.0\"\n try:\n version = importlib_version(\"vegafusion\")\n embed_version = importlib_version(\"vegafusion-python-embed\")\n if version != embed_version or Version(version) < Version(min_version):\n raise RuntimeError(\n \"The versions of the vegafusion and vegafusion-python-embed packages must match\\n\"\n f\"and must be version {min_version} or greater.\\n\"\n f\"Found:\\n\"\n f\" - vegafusion=={version}\\n\"\n f\" - vegafusion-python-embed=={embed_version}\\n\"\n )\n import vegafusion as vf # type: ignore\n\n return vf\n except ImportError as err:\n raise ImportError(\n 'The \"vegafusion\" data transformer and chart.transformed_data feature requires\\n'\n f\"version {min_version} or greater of the 'vegafusion-python-embed' and 'vegafusion' packages.\\n\"\n \"These can be installed with pip using:\\n\"\n f' pip install \"vegafusion[embed]>={min_version}\"\\n'\n \"Or with conda using:\\n\"\n f' conda install -c conda-forge \"vegafusion-python-embed>={min_version}\" '\n f'\"vegafusion>={min_version}\"\\n\\n'\n f\"ImportError: {err.args[0]}\"\n ) from err\n\n\ndef import_vl_convert() -> ModuleType:\n min_version = \"1.1.0\"\n try:\n version = importlib_version(\"vl-convert-python\")\n if Version(version) < Version(min_version):\n raise RuntimeError(\n f\"The vl-convert-python package must be version {min_version} or greater. \"\n f\"Found version {version}\"\n )\n import vl_convert as vlc\n\n return vlc\n except ImportError as err:\n raise ImportError(\n f\"The vl-convert Vega-Lite compiler and file export feature requires\\n\"\n f\"version {min_version} or greater of the 'vl-convert-python' package. \\n\"\n f\"This can be installed with pip using:\\n\"\n f' pip install \"vl-convert-python>={min_version}\"\\n'\n \"or conda:\\n\"\n f' conda install -c conda-forge \"vl-convert-python>={min_version}\"\\n\\n'\n f\"ImportError: {err.args[0]}\"\n ) from err\n\n\ndef vl_version_for_vl_convert() -> str:\n from ..vegalite import SCHEMA_VERSION\n\n # Compute VlConvert's vl_version string (of the form 'v5_2')\n # from SCHEMA_VERSION (of the form 'v5.2.0')\n return \"_\".join(SCHEMA_VERSION.split(\".\")[:2])\n\n\ndef import_pyarrow_interchange() -> ModuleType:\n min_version = \"11.0.0\"\n try:\n version = importlib_version(\"pyarrow\")\n\n if Version(version) < Version(min_version):\n raise RuntimeError(\n f\"The pyarrow package must be version {min_version} or greater. \"\n f\"Found version {version}\"\n )\n import pyarrow.interchange as pi\n\n return pi\n except ImportError as err:\n raise ImportError(\n f\"Usage of the DataFrame Interchange Protocol requires\\n\"\n f\"version {min_version} or greater of the pyarrow package. \\n\"\n f\"This can be installed with pip using:\\n\"\n f' pip install \"pyarrow>={min_version}\"\\n'\n \"or conda:\\n\"\n f' conda install -c conda-forge \"pyarrow>={min_version}\"\\n\\n'\n f\"ImportError: {err.args[0]}\"\n ) from err\n\n\ndef pyarrow_available() -> bool:\n try:\n import_pyarrow_interchange()\n return True\n except ImportError:\n return False\n", "path": "altair/utils/_importers.py"}]}
1,363
252
gh_patches_debug_39632
rasdani/github-patches
git_diff
cltk__cltk-880
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Remove pandas as a dependency pandas is only used in the IndianSyllabifier and only for reading a csv file before its data in converted to a numpy array. pandas is also the largest external dependency (e.g. slowest to install in travis-ci). The csv reading and conversion to numpy can be done with the standard libraries, spec. ```csv``` Remove pandas as a dependency pandas is only used in the IndianSyllabifier and only for reading a csv file before its data in converted to a numpy array. pandas is also the largest external dependency (e.g. slowest to install in travis-ci). The csv reading and conversion to numpy can be done with the standard libraries, spec. ```csv``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cltk/stem/sanskrit/indian_syllabifier.py` Content: ``` 1 """Every phonetic of every language is given similar positions in the vectors. Therefore transliterations 2 happen when each offset is calculated relative to the ranges of the languages specified. 3 Every phonetic has a dedicated phonetic vector which describes all the facets of the character, whether it is 4 a vowel or a consonant whe ther it has a halanta, etc. 5 6 Source: https://github.com/anoopkunchukuttan/indic_nlp_library/blob/master/src/indicnlp/script/indic_scripts.py 7 """ 8 9 import os 10 11 try: 12 import numpy as np 13 import pandas as pd 14 except ImportError: 15 print('"pandas" and "numpy" libraries not installed.') 16 raise 17 18 __author__ = ['Anoop Kunchukuttan'] 19 __license__ = 'GPLv3' 20 21 22 # Indexes into the phonetic vector 23 PVIDX_BT_VOWEL = 0 24 PVIDX_BT_CONSONANT = 1 25 PVIDX_BT_NUKTA = 2 26 PVIDX_BT_HALANT = 3 27 PVIDX_BT_ANUSVAAR = 4 28 PVIDX_BT_MISC = 5 29 PVIDX_BT_S = PVIDX_BT_VOWEL 30 PVIDX_BT_E = PVIDX_BT_MISC + 1 31 32 PVIDX_VSTAT_DEP = 12 33 34 LC_TA = 'ta' 35 36 LANGUAGE_NAME_TO_CODE = {'hindi': 'hi', 'sanskrit': 'sa', 'punjabi': 'pa', 'gujarati': 'gu', 'oriya': 'or', 37 'tamil': 'ta', 'telegu': 'te', 'kannada': 'kn', 'malayalam': 'ml', 'sinhalese': 'si', 38 'marathi': 'mr', 'konkan': 'kk', 'nepali': 'ne', 'sindhi': 'sd', 'bengali': 'bn', 39 'assamese': 'as'} 40 41 42 # The phonetics of every script exist in the ranges of the dictionary mentioned below 43 SCRIPT_RANGES = { 44 'pa': [0x0a00, 0x0a7f], 45 'gu': [0x0a80, 0x0aff], 46 'or': [0x0b00, 0x0b7f], 47 'ta': [0x0b80, 0x0bff], 48 'te': [0x0c00, 0x0c7f], 49 'kn': [0x0c80, 0x0cff], 50 'ml': [0x0d00, 0x0d7f], 51 'si': [0x0d80, 0x0dff], 52 'hi': [0x0900, 0x097f], 53 'mr': [0x0900, 0x097f], 54 'kk': [0x0900, 0x097f], 55 'sa': [0x0900, 0x097f], 56 'ne': [0x0900, 0x097f], 57 'sd': [0x0900, 0x097f], 58 'bn': [0x0980, 0x09ff], 59 'as': [0x0980, 0x09ff], 60 } 61 62 COORDINATED_RANGE_START_INCLUSIVE = 0 63 COORDINATED_RANGE_END_INCLUSIVE = 0x6f 64 65 PV_PROP_RANGES = dict(basic_type=[0, 6], vowel_length=[6, 8], vowel_strength=[8, 11], vowel_status=[11, 13], 66 consonant_type=[13, 18], articulation_place=[18, 23], aspiration=[23, 25], voicing=[25, 27], 67 nasalization=[27, 29], vowel_horizontal=[29, 32], vowel_vertical=[32, 36], 68 vowel_roundness=[36, 38]) 69 70 PHONETIC_VECTOR_START_OFFSET = 6 71 72 73 class Syllabifier: 74 """Class for syllabalizing Indian language words.""" 75 76 def __init__(self, lang_name): 77 """Setup values.""" 78 79 self.lang_name = lang_name 80 assert self.lang_name in LANGUAGE_NAME_TO_CODE.keys(), 'Language not available' 81 self.lang = LANGUAGE_NAME_TO_CODE[lang_name] 82 83 assert self.lang in SCRIPT_RANGES.keys() 84 85 self.all_phonetic_data, self.tamil_phonetic_data, self.all_phonetic_vectors, self.tamil_phonetic_vectors, self.phonetic_vector_length = self.get_lang_data() 86 87 def get_lang_data(self): 88 """Define and call data for future use. Initializes and defines all 89 variables which define the phonetic vectors. 90 """ 91 92 root = os.path.expanduser('~') 93 csv_dir_path = os.path.join(root, 'cltk_data/sanskrit/model/sanskrit_models_cltk/phonetics') 94 95 all_phonetic_csv = os.path.join(csv_dir_path, 'all_script_phonetic_data.csv') 96 all_phonetic_data = pd.read_csv(all_phonetic_csv, encoding='utf-8') 97 tamil_csv = os.path.join(csv_dir_path, 'tamil_script_phonetic_data.csv') 98 tamil_phonetic_data = pd.read_csv(tamil_csv, encoding='utf-8') 99 100 all_phonetic_vectors = all_phonetic_data.ix[:, PHONETIC_VECTOR_START_OFFSET:].values 101 tamil_phonetic_vectors = tamil_phonetic_data.ix[:, PHONETIC_VECTOR_START_OFFSET:].values 102 103 phonetic_vector_length = all_phonetic_vectors.shape[1] 104 105 return all_phonetic_data, tamil_phonetic_data, all_phonetic_vectors, tamil_phonetic_vectors, phonetic_vector_length 106 107 @staticmethod 108 def in_coordinated_range_offset(c_offset): 109 """Applicable to Brahmi derived Indic scripts. Used to determine 110 whether offset is of a alphabetic character or not. 111 """ 112 return COORDINATED_RANGE_START_INCLUSIVE <= c_offset <= COORDINATED_RANGE_END_INCLUSIVE 113 114 def get_offset(self, c, lang): 115 """Gets the offset; that is the relative position in the range of the 116 specified language. 117 """ 118 return ord(c) - SCRIPT_RANGES[lang][0] 119 120 def invalid_vector(self): 121 """Returns an zero array of length 38""" 122 return np.array([0] * self.phonetic_vector_length) 123 124 def get_phonetic_info(self, lang): 125 """For a specified language (lang), it returns the matrix and the vecto 126 containing specifications of the characters. 127 """ 128 phonetic_data = self.all_phonetic_data if lang != LC_TA else self.tamil_phonetic_data 129 phonetic_vectors = self.all_phonetic_vectors if lang != LC_TA else self.tamil_phonetic_vectors 130 131 return phonetic_data, phonetic_vectors 132 133 def get_phonetic_feature_vector(self, c, lang): 134 """For a given character in a language, it gathers all the information related to it 135 (eg: whether fricative, plosive,etc)""" 136 137 offset = self.get_offset(c, lang) 138 if not self.in_coordinated_range_offset(offset): 139 return self.invalid_vector() 140 141 phonetic_data, phonetic_vectors = self.get_phonetic_info(lang) 142 143 if phonetic_data.ix[offset, 'Valid Vector Representation'] == 0: 144 return self.invalid_vector() 145 146 return phonetic_vectors[offset] 147 148 def get_property_vector(self, v, prop_name): 149 """Returns the part of the vector corresponding to the required property""" 150 return v[PV_PROP_RANGES[prop_name][0]:PV_PROP_RANGES[prop_name][1]] 151 152 153 def is_consonant(self, v): 154 """Checks the property of the character (of being a consonant) 155 selected against its phonetic vector. 156 """ 157 return v[PVIDX_BT_CONSONANT] == 1 158 159 160 def is_misc(self,v): 161 """Checks the property of the character (of being miscellenous) 162 selected against its phonetic vector. 163 """ 164 return v[PVIDX_BT_MISC] == 1 165 166 def is_valid(self, v): 167 """Checks if the character entered is valid, by checking against the 168 phonetic vector. At least 1 of the 38 properties have to be 169 satisfied for a valid vector. 170 """ 171 return np.sum(v) > 0 172 173 def is_vowel(self, v): 174 """Checks the property of the character (of being a vowel) selected against its phonetic vector 175 """ 176 return v[PVIDX_BT_VOWEL] == 1 177 178 def is_anusvaar(self, v): 179 """Checks the property of the character (of having an anusvaar) 180 selected against its phonetic vector. 181 """ 182 return v[PVIDX_BT_ANUSVAAR] == 1 183 184 def is_plosive(self, v): 185 """Checks the property of the character (of being a plosive 186 character) selected against its phonetic vector. 187 """ 188 return self.is_consonant(v) and self.get_property_vector(v, 'consonant_type')[0] == 1 189 190 def is_nukta(self,v): 191 """Checks the property of the character (of having a nukta) selected 192 against its phonetic vector. 193 """ 194 return v[PVIDX_BT_NUKTA] == 1 195 196 def is_dependent_vowel(self, v): 197 """Checks the property of the character (if it is a dependent 198 vowel) selected against its phonetic vector. 199 """ 200 return self.is_vowel(v) and v[PVIDX_VSTAT_DEP] == 1 201 202 def orthographic_syllabify(self, word): 203 """Main syllablic function.""" 204 p_vectors = [self.get_phonetic_feature_vector(c, self.lang) for c in word] 205 206 syllables = [] 207 208 for i in range(len(word)): 209 v = p_vectors[i] 210 211 syllables.append(word[i]) 212 213 if i + 1 < len(word) and (not self.is_valid(p_vectors[i + 1]) or self.is_misc(p_vectors[i + 1])): 214 syllables.append(u' ') 215 216 elif not self.is_valid(v) or self.is_misc(v): 217 syllables.append(u' ') 218 219 elif self.is_vowel(v): 220 221 anu_nonplos = (i + 2 < len(word) and 222 self.is_anusvaar(p_vectors[i + 1]) and 223 not self.is_plosive(p_vectors[i + 2]) 224 ) 225 226 anu_eow = (i + 2 == len(word) and 227 self.is_anusvaar(p_vectors[i + 1])) 228 229 if not (anu_nonplos or anu_eow): 230 syllables.append(u' ') 231 232 elif i + 1 < len(word) and (self.is_consonant(v) or self.is_nukta(v)): 233 if self.is_consonant(p_vectors[i + 1]): 234 syllables.append(u' ') 235 elif self.is_vowel(p_vectors[i + 1]) and not self.is_dependent_vowel(p_vectors[i + 1]): 236 syllables.append(u' ') 237 elif self.is_anusvaar(p_vectors[i + 1]): 238 anu_nonplos = (i + 2 < len(word) and not self.is_plosive(p_vectors[i + 2])) 239 240 anu_eow = i + 2 == len(word) 241 242 if not (anu_nonplos or anu_eow): 243 syllables.append(u' ') 244 245 return u''.join(syllables).strip().split(u' ') 246 247 248 if __name__ == '__main__': 249 syllabifier = Syllabifier('hindi') 250 current = syllabifier.orthographic_syllabify('नमस्ते') 251 print(current) 252 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cltk/stem/sanskrit/indian_syllabifier.py b/cltk/stem/sanskrit/indian_syllabifier.py --- a/cltk/stem/sanskrit/indian_syllabifier.py +++ b/cltk/stem/sanskrit/indian_syllabifier.py @@ -7,12 +7,12 @@ """ import os +import csv try: import numpy as np - import pandas as pd except ImportError: - print('"pandas" and "numpy" libraries not installed.') + print('"numpy" is not installed.') raise __author__ = ['Anoop Kunchukuttan'] @@ -93,12 +93,26 @@ csv_dir_path = os.path.join(root, 'cltk_data/sanskrit/model/sanskrit_models_cltk/phonetics') all_phonetic_csv = os.path.join(csv_dir_path, 'all_script_phonetic_data.csv') - all_phonetic_data = pd.read_csv(all_phonetic_csv, encoding='utf-8') tamil_csv = os.path.join(csv_dir_path, 'tamil_script_phonetic_data.csv') - tamil_phonetic_data = pd.read_csv(tamil_csv, encoding='utf-8') - all_phonetic_vectors = all_phonetic_data.ix[:, PHONETIC_VECTOR_START_OFFSET:].values - tamil_phonetic_vectors = tamil_phonetic_data.ix[:, PHONETIC_VECTOR_START_OFFSET:].values + # Make helper function for this + with open(all_phonetic_csv,'r') as f: + reader = csv.reader(f, delimiter = ',', quotechar = '"') + next(reader, None) # Skip headers + all_phonetic_data = [row for row in reader] + + with open(tamil_csv,'r') as f: + reader = csv.reader(f, delimiter = ',', quotechar = '"') + next(reader, None) # Skip headers + # tamil_phonetic_data = [row[PHONETIC_VECTOR_START_OFFSET:] for row in reader] + tamil_phonetic_data = [row for row in reader] + + # Handle better? + all_phonetic_data = [[int(cell) if cell=='0' or cell=='1' else cell for cell in row] for row in all_phonetic_data] + tamil_phonetic_data = [[int(cell) if cell=='0' or cell=='1' else cell for cell in row] for row in tamil_phonetic_data] + + all_phonetic_vectors = np.array([row[PHONETIC_VECTOR_START_OFFSET:] for row in all_phonetic_data]) + tamil_phonetic_vectors = np.array([row[PHONETIC_VECTOR_START_OFFSET:] for row in tamil_phonetic_data]) phonetic_vector_length = all_phonetic_vectors.shape[1] @@ -106,7 +120,7 @@ @staticmethod def in_coordinated_range_offset(c_offset): - """Applicable to Brahmi derived Indic scripts. Used to determine + """Applicable to Brahmi derived Indic scripts. Used to determine whether offset is of a alphabetic character or not. """ return COORDINATED_RANGE_START_INCLUSIVE <= c_offset <= COORDINATED_RANGE_END_INCLUSIVE @@ -140,7 +154,8 @@ phonetic_data, phonetic_vectors = self.get_phonetic_info(lang) - if phonetic_data.ix[offset, 'Valid Vector Representation'] == 0: + # 'Valid Vector Representation' is the [5] column + if phonetic_data[offset][5] == 0: return self.invalid_vector() return phonetic_vectors[offset]
{"golden_diff": "diff --git a/cltk/stem/sanskrit/indian_syllabifier.py b/cltk/stem/sanskrit/indian_syllabifier.py\n--- a/cltk/stem/sanskrit/indian_syllabifier.py\n+++ b/cltk/stem/sanskrit/indian_syllabifier.py\n@@ -7,12 +7,12 @@\n \"\"\"\n \n import os\n+import csv\n \n try:\n import numpy as np\n- import pandas as pd\n except ImportError:\n- print('\"pandas\" and \"numpy\" libraries not installed.')\n+ print('\"numpy\" is not installed.')\n raise\n \n __author__ = ['Anoop Kunchukuttan']\n@@ -93,12 +93,26 @@\n csv_dir_path = os.path.join(root, 'cltk_data/sanskrit/model/sanskrit_models_cltk/phonetics')\n \n all_phonetic_csv = os.path.join(csv_dir_path, 'all_script_phonetic_data.csv')\n- all_phonetic_data = pd.read_csv(all_phonetic_csv, encoding='utf-8')\n tamil_csv = os.path.join(csv_dir_path, 'tamil_script_phonetic_data.csv')\n- tamil_phonetic_data = pd.read_csv(tamil_csv, encoding='utf-8')\n \n- all_phonetic_vectors = all_phonetic_data.ix[:, PHONETIC_VECTOR_START_OFFSET:].values\n- tamil_phonetic_vectors = tamil_phonetic_data.ix[:, PHONETIC_VECTOR_START_OFFSET:].values\n+ # Make helper function for this\n+ with open(all_phonetic_csv,'r') as f:\n+ reader = csv.reader(f, delimiter = ',', quotechar = '\"')\n+ next(reader, None) # Skip headers\n+ all_phonetic_data = [row for row in reader]\n+\n+ with open(tamil_csv,'r') as f:\n+ reader = csv.reader(f, delimiter = ',', quotechar = '\"')\n+ next(reader, None) # Skip headers\n+ # tamil_phonetic_data = [row[PHONETIC_VECTOR_START_OFFSET:] for row in reader]\n+ tamil_phonetic_data = [row for row in reader]\n+\n+ # Handle better?\n+ all_phonetic_data = [[int(cell) if cell=='0' or cell=='1' else cell for cell in row] for row in all_phonetic_data]\n+ tamil_phonetic_data = [[int(cell) if cell=='0' or cell=='1' else cell for cell in row] for row in tamil_phonetic_data]\n+\n+ all_phonetic_vectors = np.array([row[PHONETIC_VECTOR_START_OFFSET:] for row in all_phonetic_data])\n+ tamil_phonetic_vectors = np.array([row[PHONETIC_VECTOR_START_OFFSET:] for row in tamil_phonetic_data])\n \n phonetic_vector_length = all_phonetic_vectors.shape[1]\n \n@@ -106,7 +120,7 @@\n \n @staticmethod\n def in_coordinated_range_offset(c_offset):\n- \"\"\"Applicable to Brahmi derived Indic scripts. Used to determine \n+ \"\"\"Applicable to Brahmi derived Indic scripts. Used to determine\n whether offset is of a alphabetic character or not.\n \"\"\"\n return COORDINATED_RANGE_START_INCLUSIVE <= c_offset <= COORDINATED_RANGE_END_INCLUSIVE\n@@ -140,7 +154,8 @@\n \n phonetic_data, phonetic_vectors = self.get_phonetic_info(lang)\n \n- if phonetic_data.ix[offset, 'Valid Vector Representation'] == 0:\n+ # 'Valid Vector Representation' is the [5] column\n+ if phonetic_data[offset][5] == 0:\n return self.invalid_vector()\n \n return phonetic_vectors[offset]\n", "issue": "Remove pandas as a dependency\npandas is only used in the IndianSyllabifier and only for reading a csv file before its data in converted to a numpy array. pandas is also the largest external dependency (e.g. slowest to install in travis-ci). The csv reading and conversion to numpy can be done with the standard libraries, spec. ```csv```\nRemove pandas as a dependency\npandas is only used in the IndianSyllabifier and only for reading a csv file before its data in converted to a numpy array. pandas is also the largest external dependency (e.g. slowest to install in travis-ci). The csv reading and conversion to numpy can be done with the standard libraries, spec. ```csv```\n", "before_files": [{"content": "\"\"\"Every phonetic of every language is given similar positions in the vectors. Therefore transliterations\nhappen when each offset is calculated relative to the ranges of the languages specified.\nEvery phonetic has a dedicated phonetic vector which describes all the facets of the character, whether it is\na vowel or a consonant whe ther it has a halanta, etc.\n\nSource: https://github.com/anoopkunchukuttan/indic_nlp_library/blob/master/src/indicnlp/script/indic_scripts.py\n\"\"\"\n\nimport os\n\ntry:\n import numpy as np\n import pandas as pd\nexcept ImportError:\n print('\"pandas\" and \"numpy\" libraries not installed.')\n raise\n\n__author__ = ['Anoop Kunchukuttan']\n__license__ = 'GPLv3'\n\n\n# Indexes into the phonetic vector\nPVIDX_BT_VOWEL = 0\nPVIDX_BT_CONSONANT = 1\nPVIDX_BT_NUKTA = 2\nPVIDX_BT_HALANT = 3\nPVIDX_BT_ANUSVAAR = 4\nPVIDX_BT_MISC = 5\nPVIDX_BT_S = PVIDX_BT_VOWEL\nPVIDX_BT_E = PVIDX_BT_MISC + 1\n\nPVIDX_VSTAT_DEP = 12\n\nLC_TA = 'ta'\n\nLANGUAGE_NAME_TO_CODE = {'hindi': 'hi', 'sanskrit': 'sa', 'punjabi': 'pa', 'gujarati': 'gu', 'oriya': 'or',\n 'tamil': 'ta', 'telegu': 'te', 'kannada': 'kn', 'malayalam': 'ml', 'sinhalese': 'si',\n 'marathi': 'mr', 'konkan': 'kk', 'nepali': 'ne', 'sindhi': 'sd', 'bengali': 'bn',\n 'assamese': 'as'}\n\n\n# The phonetics of every script exist in the ranges of the dictionary mentioned below\nSCRIPT_RANGES = {\n 'pa': [0x0a00, 0x0a7f],\n 'gu': [0x0a80, 0x0aff],\n 'or': [0x0b00, 0x0b7f],\n 'ta': [0x0b80, 0x0bff],\n 'te': [0x0c00, 0x0c7f],\n 'kn': [0x0c80, 0x0cff],\n 'ml': [0x0d00, 0x0d7f],\n 'si': [0x0d80, 0x0dff],\n 'hi': [0x0900, 0x097f],\n 'mr': [0x0900, 0x097f],\n 'kk': [0x0900, 0x097f],\n 'sa': [0x0900, 0x097f],\n 'ne': [0x0900, 0x097f],\n 'sd': [0x0900, 0x097f],\n 'bn': [0x0980, 0x09ff],\n 'as': [0x0980, 0x09ff],\n}\n\nCOORDINATED_RANGE_START_INCLUSIVE = 0\nCOORDINATED_RANGE_END_INCLUSIVE = 0x6f\n\nPV_PROP_RANGES = dict(basic_type=[0, 6], vowel_length=[6, 8], vowel_strength=[8, 11], vowel_status=[11, 13],\n consonant_type=[13, 18], articulation_place=[18, 23], aspiration=[23, 25], voicing=[25, 27],\n nasalization=[27, 29], vowel_horizontal=[29, 32], vowel_vertical=[32, 36],\n vowel_roundness=[36, 38])\n\nPHONETIC_VECTOR_START_OFFSET = 6\n\n\nclass Syllabifier:\n \"\"\"Class for syllabalizing Indian language words.\"\"\"\n\n def __init__(self, lang_name):\n \"\"\"Setup values.\"\"\"\n\n self.lang_name = lang_name\n assert self.lang_name in LANGUAGE_NAME_TO_CODE.keys(), 'Language not available'\n self.lang = LANGUAGE_NAME_TO_CODE[lang_name]\n\n assert self.lang in SCRIPT_RANGES.keys()\n\n self.all_phonetic_data, self.tamil_phonetic_data, self.all_phonetic_vectors, self.tamil_phonetic_vectors, self.phonetic_vector_length = self.get_lang_data()\n\n def get_lang_data(self):\n \"\"\"Define and call data for future use. Initializes and defines all\n variables which define the phonetic vectors.\n \"\"\"\n\n root = os.path.expanduser('~')\n csv_dir_path = os.path.join(root, 'cltk_data/sanskrit/model/sanskrit_models_cltk/phonetics')\n\n all_phonetic_csv = os.path.join(csv_dir_path, 'all_script_phonetic_data.csv')\n all_phonetic_data = pd.read_csv(all_phonetic_csv, encoding='utf-8')\n tamil_csv = os.path.join(csv_dir_path, 'tamil_script_phonetic_data.csv')\n tamil_phonetic_data = pd.read_csv(tamil_csv, encoding='utf-8')\n\n all_phonetic_vectors = all_phonetic_data.ix[:, PHONETIC_VECTOR_START_OFFSET:].values\n tamil_phonetic_vectors = tamil_phonetic_data.ix[:, PHONETIC_VECTOR_START_OFFSET:].values\n\n phonetic_vector_length = all_phonetic_vectors.shape[1]\n\n return all_phonetic_data, tamil_phonetic_data, all_phonetic_vectors, tamil_phonetic_vectors, phonetic_vector_length\n\n @staticmethod\n def in_coordinated_range_offset(c_offset):\n \"\"\"Applicable to Brahmi derived Indic scripts. Used to determine \n whether offset is of a alphabetic character or not.\n \"\"\"\n return COORDINATED_RANGE_START_INCLUSIVE <= c_offset <= COORDINATED_RANGE_END_INCLUSIVE\n\n def get_offset(self, c, lang):\n \"\"\"Gets the offset; that is the relative position in the range of the\n specified language.\n \"\"\"\n return ord(c) - SCRIPT_RANGES[lang][0]\n\n def invalid_vector(self):\n \"\"\"Returns an zero array of length 38\"\"\"\n return np.array([0] * self.phonetic_vector_length)\n\n def get_phonetic_info(self, lang):\n \"\"\"For a specified language (lang), it returns the matrix and the vecto\n containing specifications of the characters.\n \"\"\"\n phonetic_data = self.all_phonetic_data if lang != LC_TA else self.tamil_phonetic_data\n phonetic_vectors = self.all_phonetic_vectors if lang != LC_TA else self.tamil_phonetic_vectors\n\n return phonetic_data, phonetic_vectors\n\n def get_phonetic_feature_vector(self, c, lang):\n \"\"\"For a given character in a language, it gathers all the information related to it\n (eg: whether fricative, plosive,etc)\"\"\"\n\n offset = self.get_offset(c, lang)\n if not self.in_coordinated_range_offset(offset):\n return self.invalid_vector()\n\n phonetic_data, phonetic_vectors = self.get_phonetic_info(lang)\n\n if phonetic_data.ix[offset, 'Valid Vector Representation'] == 0:\n return self.invalid_vector()\n\n return phonetic_vectors[offset]\n\n def get_property_vector(self, v, prop_name):\n \"\"\"Returns the part of the vector corresponding to the required property\"\"\"\n return v[PV_PROP_RANGES[prop_name][0]:PV_PROP_RANGES[prop_name][1]]\n\n\n def is_consonant(self, v):\n \"\"\"Checks the property of the character (of being a consonant)\n selected against its phonetic vector.\n \"\"\"\n return v[PVIDX_BT_CONSONANT] == 1\n\n\n def is_misc(self,v):\n \"\"\"Checks the property of the character (of being miscellenous)\n selected against its phonetic vector.\n \"\"\"\n return v[PVIDX_BT_MISC] == 1\n\n def is_valid(self, v):\n \"\"\"Checks if the character entered is valid, by checking against the\n phonetic vector. At least 1 of the 38 properties have to be\n satisfied for a valid vector.\n \"\"\"\n return np.sum(v) > 0\n\n def is_vowel(self, v):\n \"\"\"Checks the property of the character (of being a vowel) selected against its phonetic vector\n \"\"\"\n return v[PVIDX_BT_VOWEL] == 1\n\n def is_anusvaar(self, v):\n \"\"\"Checks the property of the character (of having an anusvaar)\n selected against its phonetic vector.\n \"\"\"\n return v[PVIDX_BT_ANUSVAAR] == 1\n\n def is_plosive(self, v):\n \"\"\"Checks the property of the character (of being a plosive\n character) selected against its phonetic vector.\n \"\"\"\n return self.is_consonant(v) and self.get_property_vector(v, 'consonant_type')[0] == 1\n\n def is_nukta(self,v):\n \"\"\"Checks the property of the character (of having a nukta) selected\n against its phonetic vector.\n \"\"\"\n return v[PVIDX_BT_NUKTA] == 1\n\n def is_dependent_vowel(self, v):\n \"\"\"Checks the property of the character (if it is a dependent\n vowel) selected against its phonetic vector.\n \"\"\"\n return self.is_vowel(v) and v[PVIDX_VSTAT_DEP] == 1\n\n def orthographic_syllabify(self, word):\n \"\"\"Main syllablic function.\"\"\"\n p_vectors = [self.get_phonetic_feature_vector(c, self.lang) for c in word]\n\n syllables = []\n\n for i in range(len(word)):\n v = p_vectors[i]\n\n syllables.append(word[i])\n\n if i + 1 < len(word) and (not self.is_valid(p_vectors[i + 1]) or self.is_misc(p_vectors[i + 1])):\n syllables.append(u' ')\n\n elif not self.is_valid(v) or self.is_misc(v):\n syllables.append(u' ')\n\n elif self.is_vowel(v):\n\n anu_nonplos = (i + 2 < len(word) and\n self.is_anusvaar(p_vectors[i + 1]) and\n not self.is_plosive(p_vectors[i + 2])\n )\n\n anu_eow = (i + 2 == len(word) and\n self.is_anusvaar(p_vectors[i + 1]))\n\n if not (anu_nonplos or anu_eow):\n syllables.append(u' ')\n\n elif i + 1 < len(word) and (self.is_consonant(v) or self.is_nukta(v)):\n if self.is_consonant(p_vectors[i + 1]):\n syllables.append(u' ')\n elif self.is_vowel(p_vectors[i + 1]) and not self.is_dependent_vowel(p_vectors[i + 1]):\n syllables.append(u' ')\n elif self.is_anusvaar(p_vectors[i + 1]):\n anu_nonplos = (i + 2 < len(word) and not self.is_plosive(p_vectors[i + 2]))\n\n anu_eow = i + 2 == len(word)\n\n if not (anu_nonplos or anu_eow):\n syllables.append(u' ')\n\n return u''.join(syllables).strip().split(u' ')\n\n\nif __name__ == '__main__':\n syllabifier = Syllabifier('hindi')\n current = syllabifier.orthographic_syllabify('\u0928\u092e\u0938\u094d\u0924\u0947')\n print(current)\n", "path": "cltk/stem/sanskrit/indian_syllabifier.py"}], "after_files": [{"content": "\"\"\"Every phonetic of every language is given similar positions in the vectors. Therefore transliterations\nhappen when each offset is calculated relative to the ranges of the languages specified.\nEvery phonetic has a dedicated phonetic vector which describes all the facets of the character, whether it is\na vowel or a consonant whe ther it has a halanta, etc.\n\nSource: https://github.com/anoopkunchukuttan/indic_nlp_library/blob/master/src/indicnlp/script/indic_scripts.py\n\"\"\"\n\nimport os\nimport csv\n\ntry:\n import numpy as np\nexcept ImportError:\n print('\"numpy\" is not installed.')\n raise\n\n__author__ = ['Anoop Kunchukuttan']\n__license__ = 'GPLv3'\n\n\n# Indexes into the phonetic vector\nPVIDX_BT_VOWEL = 0\nPVIDX_BT_CONSONANT = 1\nPVIDX_BT_NUKTA = 2\nPVIDX_BT_HALANT = 3\nPVIDX_BT_ANUSVAAR = 4\nPVIDX_BT_MISC = 5\nPVIDX_BT_S = PVIDX_BT_VOWEL\nPVIDX_BT_E = PVIDX_BT_MISC + 1\n\nPVIDX_VSTAT_DEP = 12\n\nLC_TA = 'ta'\n\nLANGUAGE_NAME_TO_CODE = {'hindi': 'hi', 'sanskrit': 'sa', 'punjabi': 'pa', 'gujarati': 'gu', 'oriya': 'or',\n 'tamil': 'ta', 'telegu': 'te', 'kannada': 'kn', 'malayalam': 'ml', 'sinhalese': 'si',\n 'marathi': 'mr', 'konkan': 'kk', 'nepali': 'ne', 'sindhi': 'sd', 'bengali': 'bn',\n 'assamese': 'as'}\n\n\n# The phonetics of every script exist in the ranges of the dictionary mentioned below\nSCRIPT_RANGES = {\n 'pa': [0x0a00, 0x0a7f],\n 'gu': [0x0a80, 0x0aff],\n 'or': [0x0b00, 0x0b7f],\n 'ta': [0x0b80, 0x0bff],\n 'te': [0x0c00, 0x0c7f],\n 'kn': [0x0c80, 0x0cff],\n 'ml': [0x0d00, 0x0d7f],\n 'si': [0x0d80, 0x0dff],\n 'hi': [0x0900, 0x097f],\n 'mr': [0x0900, 0x097f],\n 'kk': [0x0900, 0x097f],\n 'sa': [0x0900, 0x097f],\n 'ne': [0x0900, 0x097f],\n 'sd': [0x0900, 0x097f],\n 'bn': [0x0980, 0x09ff],\n 'as': [0x0980, 0x09ff],\n}\n\nCOORDINATED_RANGE_START_INCLUSIVE = 0\nCOORDINATED_RANGE_END_INCLUSIVE = 0x6f\n\nPV_PROP_RANGES = dict(basic_type=[0, 6], vowel_length=[6, 8], vowel_strength=[8, 11], vowel_status=[11, 13],\n consonant_type=[13, 18], articulation_place=[18, 23], aspiration=[23, 25], voicing=[25, 27],\n nasalization=[27, 29], vowel_horizontal=[29, 32], vowel_vertical=[32, 36],\n vowel_roundness=[36, 38])\n\nPHONETIC_VECTOR_START_OFFSET = 6\n\n\nclass Syllabifier:\n \"\"\"Class for syllabalizing Indian language words.\"\"\"\n\n def __init__(self, lang_name):\n \"\"\"Setup values.\"\"\"\n\n self.lang_name = lang_name\n assert self.lang_name in LANGUAGE_NAME_TO_CODE.keys(), 'Language not available'\n self.lang = LANGUAGE_NAME_TO_CODE[lang_name]\n\n assert self.lang in SCRIPT_RANGES.keys()\n\n self.all_phonetic_data, self.tamil_phonetic_data, self.all_phonetic_vectors, self.tamil_phonetic_vectors, self.phonetic_vector_length = self.get_lang_data()\n\n def get_lang_data(self):\n \"\"\"Define and call data for future use. Initializes and defines all\n variables which define the phonetic vectors.\n \"\"\"\n\n root = os.path.expanduser('~')\n csv_dir_path = os.path.join(root, 'cltk_data/sanskrit/model/sanskrit_models_cltk/phonetics')\n\n all_phonetic_csv = os.path.join(csv_dir_path, 'all_script_phonetic_data.csv')\n tamil_csv = os.path.join(csv_dir_path, 'tamil_script_phonetic_data.csv')\n\n # Make helper function for this\n with open(all_phonetic_csv,'r') as f:\n reader = csv.reader(f, delimiter = ',', quotechar = '\"')\n next(reader, None) # Skip headers\n all_phonetic_data = [row for row in reader]\n\n with open(tamil_csv,'r') as f:\n reader = csv.reader(f, delimiter = ',', quotechar = '\"')\n next(reader, None) # Skip headers\n # tamil_phonetic_data = [row[PHONETIC_VECTOR_START_OFFSET:] for row in reader]\n tamil_phonetic_data = [row for row in reader]\n\n # Handle better?\n all_phonetic_data = [[int(cell) if cell=='0' or cell=='1' else cell for cell in row] for row in all_phonetic_data]\n tamil_phonetic_data = [[int(cell) if cell=='0' or cell=='1' else cell for cell in row] for row in tamil_phonetic_data]\n\n all_phonetic_vectors = np.array([row[PHONETIC_VECTOR_START_OFFSET:] for row in all_phonetic_data])\n tamil_phonetic_vectors = np.array([row[PHONETIC_VECTOR_START_OFFSET:] for row in tamil_phonetic_data])\n\n phonetic_vector_length = all_phonetic_vectors.shape[1]\n\n return all_phonetic_data, tamil_phonetic_data, all_phonetic_vectors, tamil_phonetic_vectors, phonetic_vector_length\n\n @staticmethod\n def in_coordinated_range_offset(c_offset):\n \"\"\"Applicable to Brahmi derived Indic scripts. Used to determine\n whether offset is of a alphabetic character or not.\n \"\"\"\n return COORDINATED_RANGE_START_INCLUSIVE <= c_offset <= COORDINATED_RANGE_END_INCLUSIVE\n\n def get_offset(self, c, lang):\n \"\"\"Gets the offset; that is the relative position in the range of the\n specified language.\n \"\"\"\n return ord(c) - SCRIPT_RANGES[lang][0]\n\n def invalid_vector(self):\n \"\"\"Returns an zero array of length 38\"\"\"\n return np.array([0] * self.phonetic_vector_length)\n\n def get_phonetic_info(self, lang):\n \"\"\"For a specified language (lang), it returns the matrix and the vecto\n containing specifications of the characters.\n \"\"\"\n phonetic_data = self.all_phonetic_data if lang != LC_TA else self.tamil_phonetic_data\n phonetic_vectors = self.all_phonetic_vectors if lang != LC_TA else self.tamil_phonetic_vectors\n\n return phonetic_data, phonetic_vectors\n\n def get_phonetic_feature_vector(self, c, lang):\n \"\"\"For a given character in a language, it gathers all the information related to it\n (eg: whether fricative, plosive,etc)\"\"\"\n\n offset = self.get_offset(c, lang)\n if not self.in_coordinated_range_offset(offset):\n return self.invalid_vector()\n\n phonetic_data, phonetic_vectors = self.get_phonetic_info(lang)\n\n # 'Valid Vector Representation' is the [5] column\n if phonetic_data[offset][5] == 0:\n return self.invalid_vector()\n\n return phonetic_vectors[offset]\n\n def get_property_vector(self, v, prop_name):\n \"\"\"Returns the part of the vector corresponding to the required property\"\"\"\n return v[PV_PROP_RANGES[prop_name][0]:PV_PROP_RANGES[prop_name][1]]\n\n\n def is_consonant(self, v):\n \"\"\"Checks the property of the character (of being a consonant)\n selected against its phonetic vector.\n \"\"\"\n return v[PVIDX_BT_CONSONANT] == 1\n\n\n def is_misc(self,v):\n \"\"\"Checks the property of the character (of being miscellenous)\n selected against its phonetic vector.\n \"\"\"\n return v[PVIDX_BT_MISC] == 1\n\n def is_valid(self, v):\n \"\"\"Checks if the character entered is valid, by checking against the\n phonetic vector. At least 1 of the 38 properties have to be\n satisfied for a valid vector.\n \"\"\"\n return np.sum(v) > 0\n\n def is_vowel(self, v):\n \"\"\"Checks the property of the character (of being a vowel) selected against its phonetic vector\n \"\"\"\n return v[PVIDX_BT_VOWEL] == 1\n\n def is_anusvaar(self, v):\n \"\"\"Checks the property of the character (of having an anusvaar)\n selected against its phonetic vector.\n \"\"\"\n return v[PVIDX_BT_ANUSVAAR] == 1\n\n def is_plosive(self, v):\n \"\"\"Checks the property of the character (of being a plosive\n character) selected against its phonetic vector.\n \"\"\"\n return self.is_consonant(v) and self.get_property_vector(v, 'consonant_type')[0] == 1\n\n def is_nukta(self,v):\n \"\"\"Checks the property of the character (of having a nukta) selected\n against its phonetic vector.\n \"\"\"\n return v[PVIDX_BT_NUKTA] == 1\n\n def is_dependent_vowel(self, v):\n \"\"\"Checks the property of the character (if it is a dependent\n vowel) selected against its phonetic vector.\n \"\"\"\n return self.is_vowel(v) and v[PVIDX_VSTAT_DEP] == 1\n\n def orthographic_syllabify(self, word):\n \"\"\"Main syllablic function.\"\"\"\n p_vectors = [self.get_phonetic_feature_vector(c, self.lang) for c in word]\n\n syllables = []\n\n for i in range(len(word)):\n v = p_vectors[i]\n\n syllables.append(word[i])\n\n if i + 1 < len(word) and (not self.is_valid(p_vectors[i + 1]) or self.is_misc(p_vectors[i + 1])):\n syllables.append(u' ')\n\n elif not self.is_valid(v) or self.is_misc(v):\n syllables.append(u' ')\n\n elif self.is_vowel(v):\n\n anu_nonplos = (i + 2 < len(word) and\n self.is_anusvaar(p_vectors[i + 1]) and\n not self.is_plosive(p_vectors[i + 2])\n )\n\n anu_eow = (i + 2 == len(word) and\n self.is_anusvaar(p_vectors[i + 1]))\n\n if not (anu_nonplos or anu_eow):\n syllables.append(u' ')\n\n elif i + 1 < len(word) and (self.is_consonant(v) or self.is_nukta(v)):\n if self.is_consonant(p_vectors[i + 1]):\n syllables.append(u' ')\n elif self.is_vowel(p_vectors[i + 1]) and not self.is_dependent_vowel(p_vectors[i + 1]):\n syllables.append(u' ')\n elif self.is_anusvaar(p_vectors[i + 1]):\n anu_nonplos = (i + 2 < len(word) and not self.is_plosive(p_vectors[i + 2]))\n\n anu_eow = i + 2 == len(word)\n\n if not (anu_nonplos or anu_eow):\n syllables.append(u' ')\n\n return u''.join(syllables).strip().split(u' ')\n\n\nif __name__ == '__main__':\n syllabifier = Syllabifier('hindi')\n current = syllabifier.orthographic_syllabify('\u0928\u092e\u0938\u094d\u0924\u0947')\n print(current)\n", "path": "cltk/stem/sanskrit/indian_syllabifier.py"}]}
3,734
837
gh_patches_debug_26165
rasdani/github-patches
git_diff
dask__dask-463
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Invalid magics in dask.imperitive.Value ``` python __inv__ __concat__ __not__ ``` You are setting these to `operator.inv`, `operator.concat` and `operator.not_`; however, `inv` is an alias for `invert` and the magic is already defined. `concat` is an alias for `add` that is more readable for lists and collections. Also, `not_` cannot be overridden with a function, `__not__` is not called. If you are using these names internally then I apologize; however, I would urge you not to "invent" your own magic methods: from pep8: > `__double_leading_and_trailing_underscore__` : "magic" objects or attributes that live in user-controlled namespaces. E.g. `__init__`, `__import__` or `__file__` . Never invent such names; only use them as documented. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `dask/imperative.py` Content: ``` 1 import operator 2 from functools import partial, wraps 3 from itertools import chain, count 4 from collections import Iterator 5 6 from toolz import merge, unique, curry 7 8 from .optimize import cull, fuse 9 from .context import _globals 10 from .compatibility import apply 11 from . import threaded 12 13 __all__ = ['do', 'value', 'Value'] 14 15 16 def flat_unique(ls): 17 """Flatten ``ls``, filter by unique id, and return a list""" 18 return list(unique(chain.from_iterable(ls), key=id)) 19 20 21 def unzip(ls, nout): 22 """Unzip a list of lists into ``nout`` outputs.""" 23 out = list(zip(*ls)) 24 if not out: 25 out = [()] * nout 26 return out 27 28 29 def to_task_dasks(expr): 30 """Normalize a python object and extract all sub-dasks. 31 32 - Replace ``Values`` with their keys 33 - Convert literals to things the schedulers can handle 34 - Extract dasks from all enclosed values 35 36 Parameters 37 ---------- 38 expr : object 39 The object to be normalized. This function knows how to handle 40 ``Value``s, as well as most builtin python types. 41 42 Returns 43 ------- 44 task : normalized task to be run 45 dasks : list of dasks that form the dag for this task 46 47 Examples 48 -------- 49 50 >>> a = value(1, 'a') 51 >>> b = value(2, 'b') 52 >>> task, dasks = to_task_dasks([a, b, 3]) 53 >>> task # doctest: +SKIP 54 (list, ['a', 'b', 3]) 55 >>> dasks # doctest: +SKIP 56 [{'a': 1}, {'b': 2}] 57 58 >>> task, dasks = to_task_dasks({a: 1, b: 2}) 59 >>> task # doctest: +SKIP 60 (dict, (list, [(list, ['a', 1]), (list, ['b', 2])])) 61 >>> dasks # doctest: +SKIP 62 [{'a': 1}, {'b': 2}] 63 """ 64 if isinstance(expr, Value): 65 return expr.key, expr._dasks 66 elif isinstance(expr, (Iterator, list, tuple, set)): 67 args, dasks = unzip(map(to_task_dasks, expr), 2) 68 args = list(args) 69 dasks = flat_unique(dasks) 70 # Ensure output type matches input type 71 if isinstance(expr, (list, tuple, set)): 72 return (type(expr), args), dasks 73 else: 74 return args, dasks 75 elif isinstance(expr, dict): 76 args, dasks = to_task_dasks(list([k, v] for k, v in expr.items())) 77 return (dict, args), dasks 78 else: 79 return expr, [] 80 81 82 tokens = ('_{0}'.format(i) for i in count(1)) 83 84 85 def tokenize(v, pure=False): 86 """Mapping function from task -> consistent name. 87 88 Parameters 89 ---------- 90 v : object 91 Any python object (or tuple of objects) that summarize the task. 92 pure : boolean, optional 93 If True, a consistent hash function is tried on the input. If this 94 fails, then a unique identifier is used. If False (default), then a 95 unique identifier is always used. 96 """ 97 # TODO: May have hash collisions... 98 if pure: 99 try: 100 return str(hash(v)) 101 except TypeError: 102 pass 103 return next(tokens) 104 105 106 def applyfunc(func, args, kwargs, pure=False): 107 """Create a Value by applying a function to args. 108 109 Given a function and arguments, return a Value that represents the result 110 of that computation.""" 111 112 args, dasks = unzip(map(to_task_dasks, args), 2) 113 dasks = flat_unique(dasks) 114 name = tokenize((func, args, frozenset(kwargs.items())), pure) 115 if kwargs: 116 func = partial(func, **kwargs) 117 dasks.append({name: (func,) + args}) 118 return Value(name, dasks) 119 120 121 @curry 122 def do(func, pure=False): 123 """Wraps a function so that it outputs a ``Value``. 124 125 Examples 126 -------- 127 Can be used as a decorator: 128 129 >>> @do 130 ... def add(a, b): 131 ... return a + b 132 >>> res = add(1, 2) 133 >>> type(res) == Value 134 True 135 >>> res.compute() 136 3 137 138 For other cases, it may be cleaner to call ``do`` on a function at call 139 time: 140 141 >>> res2 = do(sum)([res, 2, 3]) 142 >>> res2.compute() 143 8 144 145 ``do`` also accepts an optional keyword ``pure``. If False (default), then 146 subsequent calls will always produce a different ``Value``. This is useful 147 for non-pure functions (such as ``time`` or ``random``). 148 149 >>> from random import random 150 >>> out1 = do(random)() 151 >>> out2 = do(random)() 152 >>> out1.key == out2.key 153 False 154 155 If you know a function is pure (output only depends on the input, with no 156 global state), then you can set ``pure=True``. This will attempt to apply a 157 consistent name to the output, but will fallback on the same behavior of 158 ``pure=False`` if this fails. 159 160 >>> @do(pure=True) 161 ... def add(a, b): 162 ... return a + b 163 >>> out1 = add(1, 2) 164 >>> out2 = add(1, 2) 165 >>> out1.key == out2.key 166 True 167 """ 168 @wraps(func) 169 def _dfunc(*args, **kwargs): 170 return applyfunc(func, args, kwargs, pure=pure) 171 return _dfunc 172 173 174 def optimize(dsk, keys): 175 dsk2 = cull(dsk, keys) 176 return fuse(dsk2) 177 178 179 def get(dsk, keys, get=None, **kwargs): 180 """Specialized get function""" 181 get = get or _globals['get'] or threaded.get 182 dsk2 = optimize(dsk, keys) 183 return get(dsk2, keys, **kwargs) 184 185 186 def right(method): 187 """Wrapper to create 'right' version of operator given left version""" 188 def _inner(self, other): 189 return method(other, self) 190 return _inner 191 192 193 class Value(object): 194 """Represents a value to be computed by dask. 195 196 Equivalent to the output from a single key in a dask graph. 197 """ 198 __slots__ = ('_key', '_dasks') 199 200 def __init__(self, name, dasks): 201 object.__setattr__(self, '_key', name) 202 object.__setattr__(self, '_dasks', dasks) 203 204 def compute(self, **kwargs): 205 """Compute the result.""" 206 dask1 = cull(self.dask, self.key) 207 return get(dask1, self.key, **kwargs) 208 209 @property 210 def dask(self): 211 return merge(*self._dasks) 212 213 @property 214 def key(self): 215 return self._key 216 217 def visualize(self, optimize_graph=False, **kwargs): 218 """Visualize the dask as a graph""" 219 from dask.dot import dot_graph 220 if optimize_graph: 221 return dot_graph(optimize(self.dask, self.key), **kwargs) 222 else: 223 return dot_graph(self.dask, **kwargs) 224 225 def __repr__(self): 226 return "Value({0})".format(repr(self.key)) 227 228 def __hash__(self): 229 return hash(self.key) 230 231 def __dir__(self): 232 return list(self.__dict__.keys()) 233 234 def __getattr__(self, attr): 235 if not attr.startswith('_'): 236 return do(getattr, True)(self, attr) 237 else: 238 raise AttributeError("Attribute {0} not found".format(attr)) 239 240 def __setattr__(self, attr, val): 241 raise TypeError("Value objects are immutable") 242 243 def __setitem__(self, index, val): 244 raise TypeError("Value objects are immutable") 245 246 def __iter__(self): 247 raise TypeError("Value objects are not iterable") 248 249 def __call__(self, *args, **kwargs): 250 return do(apply)(self, args, kwargs) 251 252 def __bool__(self): 253 raise TypeError("Truth of Value objects is not supported") 254 255 __nonzero__ = __bool__ 256 257 __abs__ = do(operator.abs, True) 258 __add__ = do(operator.add, True) 259 __and__ = do(operator.and_, True) 260 __concat__ = do(operator.concat, True) 261 __delitem__ = do(operator.delitem, True) 262 __div__ = do(operator.floordiv, True) 263 __eq__ = do(operator.eq, True) 264 __floordiv__ = do(operator.floordiv, True) 265 __ge__ = do(operator.ge, True) 266 __getitem__ = do(operator.getitem, True) 267 __gt__ = do(operator.gt, True) 268 __index__ = do(operator.index, True) 269 __inv__ = do(operator.inv, True) 270 __invert__ = do(operator.invert, True) 271 __le__ = do(operator.le, True) 272 __lshift__ = do(operator.lshift, True) 273 __lt__ = do(operator.lt, True) 274 __mod__ = do(operator.mod, True) 275 __mul__ = do(operator.mul, True) 276 __ne__ = do(operator.ne, True) 277 __neg__ = do(operator.neg, True) 278 __not__ = do(operator.not_, True) 279 __or__ = do(operator.or_, True) 280 __pos__ = do(operator.pos, True) 281 __pow__ = do(operator.pow, True) 282 __radd__ = do(right(operator.add), True) 283 __rand__ = do(right(operator.and_), True) 284 __rdiv__ = do(right(operator.floordiv), True) 285 __rfloordiv__ = do(right(operator.floordiv), True) 286 __rlshift__ = do(right(operator.lshift), True) 287 __rmod__ = do(right(operator.mod), True) 288 __rmul__ = do(right(operator.mul), True) 289 __ror__ = do(right(operator.or_), True) 290 __rpow__ = do(right(operator.pow), True) 291 __rrshift__ = do(right(operator.rshift), True) 292 __rshift__ = do(operator.rshift, True) 293 __rsub__ = do(right(operator.sub), True) 294 __rtruediv__ = do(right(operator.truediv), True) 295 __rxor__ = do(right(operator.xor), True) 296 __sub__ = do(operator.sub, True) 297 __truediv__ = do(operator.truediv, True) 298 __xor__ = do(operator.xor, True) 299 300 301 def value(val, name=None): 302 """Create a ``Value`` from a python object. 303 304 Parameters 305 ---------- 306 val : object 307 Object to be wrapped. 308 name : string, optional 309 Name to be used in the resulting dask. 310 311 Examples 312 -------- 313 >>> a = value([1, 2, 3]) 314 >>> a.compute() 315 [1, 2, 3] 316 317 Values can act as a proxy to the underlying object. Many operators are 318 supported: 319 320 >>> (a + [1, 2]).compute() 321 [1, 2, 3, 1, 2] 322 >>> a[1].compute() 323 2 324 325 Method and attribute access also works: 326 327 >>> a.count(2).compute() 328 1 329 330 Note that if a method doesn't exist, no error will be thrown until runtime: 331 332 >>> res = a.not_a_real_method() 333 >>> res.compute() # doctest: +SKIP 334 AttributeError("'list' object has no attribute 'not_a_real_method'") 335 """ 336 337 name = name or tokenize(val, True) 338 task, dasks = to_task_dasks(val) 339 dasks.append({name: task}) 340 return Value(name, dasks) 341 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/dask/imperative.py b/dask/imperative.py --- a/dask/imperative.py +++ b/dask/imperative.py @@ -257,8 +257,6 @@ __abs__ = do(operator.abs, True) __add__ = do(operator.add, True) __and__ = do(operator.and_, True) - __concat__ = do(operator.concat, True) - __delitem__ = do(operator.delitem, True) __div__ = do(operator.floordiv, True) __eq__ = do(operator.eq, True) __floordiv__ = do(operator.floordiv, True) @@ -266,7 +264,6 @@ __getitem__ = do(operator.getitem, True) __gt__ = do(operator.gt, True) __index__ = do(operator.index, True) - __inv__ = do(operator.inv, True) __invert__ = do(operator.invert, True) __le__ = do(operator.le, True) __lshift__ = do(operator.lshift, True) @@ -275,7 +272,6 @@ __mul__ = do(operator.mul, True) __ne__ = do(operator.ne, True) __neg__ = do(operator.neg, True) - __not__ = do(operator.not_, True) __or__ = do(operator.or_, True) __pos__ = do(operator.pos, True) __pow__ = do(operator.pow, True)
{"golden_diff": "diff --git a/dask/imperative.py b/dask/imperative.py\n--- a/dask/imperative.py\n+++ b/dask/imperative.py\n@@ -257,8 +257,6 @@\n __abs__ = do(operator.abs, True)\n __add__ = do(operator.add, True)\n __and__ = do(operator.and_, True)\n- __concat__ = do(operator.concat, True)\n- __delitem__ = do(operator.delitem, True)\n __div__ = do(operator.floordiv, True)\n __eq__ = do(operator.eq, True)\n __floordiv__ = do(operator.floordiv, True)\n@@ -266,7 +264,6 @@\n __getitem__ = do(operator.getitem, True)\n __gt__ = do(operator.gt, True)\n __index__ = do(operator.index, True)\n- __inv__ = do(operator.inv, True)\n __invert__ = do(operator.invert, True)\n __le__ = do(operator.le, True)\n __lshift__ = do(operator.lshift, True)\n@@ -275,7 +272,6 @@\n __mul__ = do(operator.mul, True)\n __ne__ = do(operator.ne, True)\n __neg__ = do(operator.neg, True)\n- __not__ = do(operator.not_, True)\n __or__ = do(operator.or_, True)\n __pos__ = do(operator.pos, True)\n __pow__ = do(operator.pow, True)\n", "issue": "Invalid magics in dask.imperitive.Value\n``` python\n__inv__\n__concat__\n__not__\n```\n\nYou are setting these to `operator.inv`, `operator.concat` and `operator.not_`; however, `inv` is an alias for `invert` and the magic is already defined. `concat` is an alias for `add` that is more readable for lists and collections. Also, `not_` cannot be overridden with a function, `__not__` is not called.\n\nIf you are using these names internally then I apologize; however, I would urge you not to \"invent\" your own magic methods:\n\nfrom pep8:\n\n> `__double_leading_and_trailing_underscore__` : \"magic\" objects or attributes that live in user-controlled namespaces. E.g. `__init__`, `__import__` or `__file__` . Never invent such names; only use them as documented. \n\n", "before_files": [{"content": "import operator\nfrom functools import partial, wraps\nfrom itertools import chain, count\nfrom collections import Iterator\n\nfrom toolz import merge, unique, curry\n\nfrom .optimize import cull, fuse\nfrom .context import _globals\nfrom .compatibility import apply\nfrom . import threaded\n\n__all__ = ['do', 'value', 'Value']\n\n\ndef flat_unique(ls):\n \"\"\"Flatten ``ls``, filter by unique id, and return a list\"\"\"\n return list(unique(chain.from_iterable(ls), key=id))\n\n\ndef unzip(ls, nout):\n \"\"\"Unzip a list of lists into ``nout`` outputs.\"\"\"\n out = list(zip(*ls))\n if not out:\n out = [()] * nout\n return out\n\n\ndef to_task_dasks(expr):\n \"\"\"Normalize a python object and extract all sub-dasks.\n\n - Replace ``Values`` with their keys\n - Convert literals to things the schedulers can handle\n - Extract dasks from all enclosed values\n\n Parameters\n ----------\n expr : object\n The object to be normalized. This function knows how to handle\n ``Value``s, as well as most builtin python types.\n\n Returns\n -------\n task : normalized task to be run\n dasks : list of dasks that form the dag for this task\n\n Examples\n --------\n\n >>> a = value(1, 'a')\n >>> b = value(2, 'b')\n >>> task, dasks = to_task_dasks([a, b, 3])\n >>> task # doctest: +SKIP\n (list, ['a', 'b', 3])\n >>> dasks # doctest: +SKIP\n [{'a': 1}, {'b': 2}]\n\n >>> task, dasks = to_task_dasks({a: 1, b: 2})\n >>> task # doctest: +SKIP\n (dict, (list, [(list, ['a', 1]), (list, ['b', 2])]))\n >>> dasks # doctest: +SKIP\n [{'a': 1}, {'b': 2}]\n \"\"\"\n if isinstance(expr, Value):\n return expr.key, expr._dasks\n elif isinstance(expr, (Iterator, list, tuple, set)):\n args, dasks = unzip(map(to_task_dasks, expr), 2)\n args = list(args)\n dasks = flat_unique(dasks)\n # Ensure output type matches input type\n if isinstance(expr, (list, tuple, set)):\n return (type(expr), args), dasks\n else:\n return args, dasks\n elif isinstance(expr, dict):\n args, dasks = to_task_dasks(list([k, v] for k, v in expr.items()))\n return (dict, args), dasks\n else:\n return expr, []\n\n\ntokens = ('_{0}'.format(i) for i in count(1))\n\n\ndef tokenize(v, pure=False):\n \"\"\"Mapping function from task -> consistent name.\n\n Parameters\n ----------\n v : object\n Any python object (or tuple of objects) that summarize the task.\n pure : boolean, optional\n If True, a consistent hash function is tried on the input. If this\n fails, then a unique identifier is used. If False (default), then a\n unique identifier is always used.\n \"\"\"\n # TODO: May have hash collisions...\n if pure:\n try:\n return str(hash(v))\n except TypeError:\n pass\n return next(tokens)\n\n\ndef applyfunc(func, args, kwargs, pure=False):\n \"\"\"Create a Value by applying a function to args.\n\n Given a function and arguments, return a Value that represents the result\n of that computation.\"\"\"\n\n args, dasks = unzip(map(to_task_dasks, args), 2)\n dasks = flat_unique(dasks)\n name = tokenize((func, args, frozenset(kwargs.items())), pure)\n if kwargs:\n func = partial(func, **kwargs)\n dasks.append({name: (func,) + args})\n return Value(name, dasks)\n\n\n@curry\ndef do(func, pure=False):\n \"\"\"Wraps a function so that it outputs a ``Value``.\n\n Examples\n --------\n Can be used as a decorator:\n\n >>> @do\n ... def add(a, b):\n ... return a + b\n >>> res = add(1, 2)\n >>> type(res) == Value\n True\n >>> res.compute()\n 3\n\n For other cases, it may be cleaner to call ``do`` on a function at call\n time:\n\n >>> res2 = do(sum)([res, 2, 3])\n >>> res2.compute()\n 8\n\n ``do`` also accepts an optional keyword ``pure``. If False (default), then\n subsequent calls will always produce a different ``Value``. This is useful\n for non-pure functions (such as ``time`` or ``random``).\n\n >>> from random import random\n >>> out1 = do(random)()\n >>> out2 = do(random)()\n >>> out1.key == out2.key\n False\n\n If you know a function is pure (output only depends on the input, with no\n global state), then you can set ``pure=True``. This will attempt to apply a\n consistent name to the output, but will fallback on the same behavior of\n ``pure=False`` if this fails.\n\n >>> @do(pure=True)\n ... def add(a, b):\n ... return a + b\n >>> out1 = add(1, 2)\n >>> out2 = add(1, 2)\n >>> out1.key == out2.key\n True\n \"\"\"\n @wraps(func)\n def _dfunc(*args, **kwargs):\n return applyfunc(func, args, kwargs, pure=pure)\n return _dfunc\n\n\ndef optimize(dsk, keys):\n dsk2 = cull(dsk, keys)\n return fuse(dsk2)\n\n\ndef get(dsk, keys, get=None, **kwargs):\n \"\"\"Specialized get function\"\"\"\n get = get or _globals['get'] or threaded.get\n dsk2 = optimize(dsk, keys)\n return get(dsk2, keys, **kwargs)\n\n\ndef right(method):\n \"\"\"Wrapper to create 'right' version of operator given left version\"\"\"\n def _inner(self, other):\n return method(other, self)\n return _inner\n\n\nclass Value(object):\n \"\"\"Represents a value to be computed by dask.\n\n Equivalent to the output from a single key in a dask graph.\n \"\"\"\n __slots__ = ('_key', '_dasks')\n\n def __init__(self, name, dasks):\n object.__setattr__(self, '_key', name)\n object.__setattr__(self, '_dasks', dasks)\n\n def compute(self, **kwargs):\n \"\"\"Compute the result.\"\"\"\n dask1 = cull(self.dask, self.key)\n return get(dask1, self.key, **kwargs)\n\n @property\n def dask(self):\n return merge(*self._dasks)\n\n @property\n def key(self):\n return self._key\n\n def visualize(self, optimize_graph=False, **kwargs):\n \"\"\"Visualize the dask as a graph\"\"\"\n from dask.dot import dot_graph\n if optimize_graph:\n return dot_graph(optimize(self.dask, self.key), **kwargs)\n else:\n return dot_graph(self.dask, **kwargs)\n\n def __repr__(self):\n return \"Value({0})\".format(repr(self.key))\n\n def __hash__(self):\n return hash(self.key)\n\n def __dir__(self):\n return list(self.__dict__.keys())\n\n def __getattr__(self, attr):\n if not attr.startswith('_'):\n return do(getattr, True)(self, attr)\n else:\n raise AttributeError(\"Attribute {0} not found\".format(attr))\n\n def __setattr__(self, attr, val):\n raise TypeError(\"Value objects are immutable\")\n\n def __setitem__(self, index, val):\n raise TypeError(\"Value objects are immutable\")\n\n def __iter__(self):\n raise TypeError(\"Value objects are not iterable\")\n\n def __call__(self, *args, **kwargs):\n return do(apply)(self, args, kwargs)\n\n def __bool__(self):\n raise TypeError(\"Truth of Value objects is not supported\")\n\n __nonzero__ = __bool__\n\n __abs__ = do(operator.abs, True)\n __add__ = do(operator.add, True)\n __and__ = do(operator.and_, True)\n __concat__ = do(operator.concat, True)\n __delitem__ = do(operator.delitem, True)\n __div__ = do(operator.floordiv, True)\n __eq__ = do(operator.eq, True)\n __floordiv__ = do(operator.floordiv, True)\n __ge__ = do(operator.ge, True)\n __getitem__ = do(operator.getitem, True)\n __gt__ = do(operator.gt, True)\n __index__ = do(operator.index, True)\n __inv__ = do(operator.inv, True)\n __invert__ = do(operator.invert, True)\n __le__ = do(operator.le, True)\n __lshift__ = do(operator.lshift, True)\n __lt__ = do(operator.lt, True)\n __mod__ = do(operator.mod, True)\n __mul__ = do(operator.mul, True)\n __ne__ = do(operator.ne, True)\n __neg__ = do(operator.neg, True)\n __not__ = do(operator.not_, True)\n __or__ = do(operator.or_, True)\n __pos__ = do(operator.pos, True)\n __pow__ = do(operator.pow, True)\n __radd__ = do(right(operator.add), True)\n __rand__ = do(right(operator.and_), True)\n __rdiv__ = do(right(operator.floordiv), True)\n __rfloordiv__ = do(right(operator.floordiv), True)\n __rlshift__ = do(right(operator.lshift), True)\n __rmod__ = do(right(operator.mod), True)\n __rmul__ = do(right(operator.mul), True)\n __ror__ = do(right(operator.or_), True)\n __rpow__ = do(right(operator.pow), True)\n __rrshift__ = do(right(operator.rshift), True)\n __rshift__ = do(operator.rshift, True)\n __rsub__ = do(right(operator.sub), True)\n __rtruediv__ = do(right(operator.truediv), True)\n __rxor__ = do(right(operator.xor), True)\n __sub__ = do(operator.sub, True)\n __truediv__ = do(operator.truediv, True)\n __xor__ = do(operator.xor, True)\n\n\ndef value(val, name=None):\n \"\"\"Create a ``Value`` from a python object.\n\n Parameters\n ----------\n val : object\n Object to be wrapped.\n name : string, optional\n Name to be used in the resulting dask.\n\n Examples\n --------\n >>> a = value([1, 2, 3])\n >>> a.compute()\n [1, 2, 3]\n\n Values can act as a proxy to the underlying object. Many operators are\n supported:\n\n >>> (a + [1, 2]).compute()\n [1, 2, 3, 1, 2]\n >>> a[1].compute()\n 2\n\n Method and attribute access also works:\n\n >>> a.count(2).compute()\n 1\n\n Note that if a method doesn't exist, no error will be thrown until runtime:\n\n >>> res = a.not_a_real_method()\n >>> res.compute() # doctest: +SKIP\n AttributeError(\"'list' object has no attribute 'not_a_real_method'\")\n \"\"\"\n\n name = name or tokenize(val, True)\n task, dasks = to_task_dasks(val)\n dasks.append({name: task})\n return Value(name, dasks)\n", "path": "dask/imperative.py"}], "after_files": [{"content": "import operator\nfrom functools import partial, wraps\nfrom itertools import chain, count\nfrom collections import Iterator\n\nfrom toolz import merge, unique, curry\n\nfrom .optimize import cull, fuse\nfrom .context import _globals\nfrom .compatibility import apply\nfrom . import threaded\n\n__all__ = ['do', 'value', 'Value']\n\n\ndef flat_unique(ls):\n \"\"\"Flatten ``ls``, filter by unique id, and return a list\"\"\"\n return list(unique(chain.from_iterable(ls), key=id))\n\n\ndef unzip(ls, nout):\n \"\"\"Unzip a list of lists into ``nout`` outputs.\"\"\"\n out = list(zip(*ls))\n if not out:\n out = [()] * nout\n return out\n\n\ndef to_task_dasks(expr):\n \"\"\"Normalize a python object and extract all sub-dasks.\n\n - Replace ``Values`` with their keys\n - Convert literals to things the schedulers can handle\n - Extract dasks from all enclosed values\n\n Parameters\n ----------\n expr : object\n The object to be normalized. This function knows how to handle\n ``Value``s, as well as most builtin python types.\n\n Returns\n -------\n task : normalized task to be run\n dasks : list of dasks that form the dag for this task\n\n Examples\n --------\n\n >>> a = value(1, 'a')\n >>> b = value(2, 'b')\n >>> task, dasks = to_task_dasks([a, b, 3])\n >>> task # doctest: +SKIP\n (list, ['a', 'b', 3])\n >>> dasks # doctest: +SKIP\n [{'a': 1}, {'b': 2}]\n\n >>> task, dasks = to_task_dasks({a: 1, b: 2})\n >>> task # doctest: +SKIP\n (dict, (list, [(list, ['a', 1]), (list, ['b', 2])]))\n >>> dasks # doctest: +SKIP\n [{'a': 1}, {'b': 2}]\n \"\"\"\n if isinstance(expr, Value):\n return expr.key, expr._dasks\n elif isinstance(expr, (Iterator, list, tuple, set)):\n args, dasks = unzip(map(to_task_dasks, expr), 2)\n args = list(args)\n dasks = flat_unique(dasks)\n # Ensure output type matches input type\n if isinstance(expr, (list, tuple, set)):\n return (type(expr), args), dasks\n else:\n return args, dasks\n elif isinstance(expr, dict):\n args, dasks = to_task_dasks(list([k, v] for k, v in expr.items()))\n return (dict, args), dasks\n else:\n return expr, []\n\n\ntokens = ('_{0}'.format(i) for i in count(1))\n\n\ndef tokenize(v, pure=False):\n \"\"\"Mapping function from task -> consistent name.\n\n Parameters\n ----------\n v : object\n Any python object (or tuple of objects) that summarize the task.\n pure : boolean, optional\n If True, a consistent hash function is tried on the input. If this\n fails, then a unique identifier is used. If False (default), then a\n unique identifier is always used.\n \"\"\"\n # TODO: May have hash collisions...\n if pure:\n try:\n return str(hash(v))\n except TypeError:\n pass\n return next(tokens)\n\n\ndef applyfunc(func, args, kwargs, pure=False):\n \"\"\"Create a Value by applying a function to args.\n\n Given a function and arguments, return a Value that represents the result\n of that computation.\"\"\"\n\n args, dasks = unzip(map(to_task_dasks, args), 2)\n dasks = flat_unique(dasks)\n name = tokenize((func, args, frozenset(kwargs.items())), pure)\n if kwargs:\n func = partial(func, **kwargs)\n dasks.append({name: (func,) + args})\n return Value(name, dasks)\n\n\n@curry\ndef do(func, pure=False):\n \"\"\"Wraps a function so that it outputs a ``Value``.\n\n Examples\n --------\n Can be used as a decorator:\n\n >>> @do\n ... def add(a, b):\n ... return a + b\n >>> res = add(1, 2)\n >>> type(res) == Value\n True\n >>> res.compute()\n 3\n\n For other cases, it may be cleaner to call ``do`` on a function at call\n time:\n\n >>> res2 = do(sum)([res, 2, 3])\n >>> res2.compute()\n 8\n\n ``do`` also accepts an optional keyword ``pure``. If False (default), then\n subsequent calls will always produce a different ``Value``. This is useful\n for non-pure functions (such as ``time`` or ``random``).\n\n >>> from random import random\n >>> out1 = do(random)()\n >>> out2 = do(random)()\n >>> out1.key == out2.key\n False\n\n If you know a function is pure (output only depends on the input, with no\n global state), then you can set ``pure=True``. This will attempt to apply a\n consistent name to the output, but will fallback on the same behavior of\n ``pure=False`` if this fails.\n\n >>> @do(pure=True)\n ... def add(a, b):\n ... return a + b\n >>> out1 = add(1, 2)\n >>> out2 = add(1, 2)\n >>> out1.key == out2.key\n True\n \"\"\"\n @wraps(func)\n def _dfunc(*args, **kwargs):\n return applyfunc(func, args, kwargs, pure=pure)\n return _dfunc\n\n\ndef optimize(dsk, keys):\n dsk2 = cull(dsk, keys)\n return fuse(dsk2)\n\n\ndef get(dsk, keys, get=None, **kwargs):\n \"\"\"Specialized get function\"\"\"\n get = get or _globals['get'] or threaded.get\n dsk2 = optimize(dsk, keys)\n return get(dsk2, keys, **kwargs)\n\n\ndef right(method):\n \"\"\"Wrapper to create 'right' version of operator given left version\"\"\"\n def _inner(self, other):\n return method(other, self)\n return _inner\n\n\nclass Value(object):\n \"\"\"Represents a value to be computed by dask.\n\n Equivalent to the output from a single key in a dask graph.\n \"\"\"\n __slots__ = ('_key', '_dasks')\n\n def __init__(self, name, dasks):\n object.__setattr__(self, '_key', name)\n object.__setattr__(self, '_dasks', dasks)\n\n def compute(self, **kwargs):\n \"\"\"Compute the result.\"\"\"\n dask1 = cull(self.dask, self.key)\n return get(dask1, self.key, **kwargs)\n\n @property\n def dask(self):\n return merge(*self._dasks)\n\n @property\n def key(self):\n return self._key\n\n def visualize(self, optimize_graph=False, **kwargs):\n \"\"\"Visualize the dask as a graph\"\"\"\n from dask.dot import dot_graph\n if optimize_graph:\n return dot_graph(optimize(self.dask, self.key), **kwargs)\n else:\n return dot_graph(self.dask, **kwargs)\n\n def __repr__(self):\n return \"Value({0})\".format(repr(self.key))\n\n def __hash__(self):\n return hash(self.key)\n\n def __dir__(self):\n return list(self.__dict__.keys())\n\n def __getattr__(self, attr):\n if not attr.startswith('_'):\n return do(getattr, True)(self, attr)\n else:\n raise AttributeError(\"Attribute {0} not found\".format(attr))\n\n def __setattr__(self, attr, val):\n raise TypeError(\"Value objects are immutable\")\n\n def __setitem__(self, index, val):\n raise TypeError(\"Value objects are immutable\")\n\n def __iter__(self):\n raise TypeError(\"Value objects are not iterable\")\n\n def __call__(self, *args, **kwargs):\n return do(apply)(self, args, kwargs)\n\n def __bool__(self):\n raise TypeError(\"Truth of Value objects is not supported\")\n\n __nonzero__ = __bool__\n\n __abs__ = do(operator.abs, True)\n __add__ = do(operator.add, True)\n __and__ = do(operator.and_, True)\n __div__ = do(operator.floordiv, True)\n __eq__ = do(operator.eq, True)\n __floordiv__ = do(operator.floordiv, True)\n __ge__ = do(operator.ge, True)\n __getitem__ = do(operator.getitem, True)\n __gt__ = do(operator.gt, True)\n __index__ = do(operator.index, True)\n __invert__ = do(operator.invert, True)\n __le__ = do(operator.le, True)\n __lshift__ = do(operator.lshift, True)\n __lt__ = do(operator.lt, True)\n __mod__ = do(operator.mod, True)\n __mul__ = do(operator.mul, True)\n __ne__ = do(operator.ne, True)\n __neg__ = do(operator.neg, True)\n __or__ = do(operator.or_, True)\n __pos__ = do(operator.pos, True)\n __pow__ = do(operator.pow, True)\n __radd__ = do(right(operator.add), True)\n __rand__ = do(right(operator.and_), True)\n __rdiv__ = do(right(operator.floordiv), True)\n __rfloordiv__ = do(right(operator.floordiv), True)\n __rlshift__ = do(right(operator.lshift), True)\n __rmod__ = do(right(operator.mod), True)\n __rmul__ = do(right(operator.mul), True)\n __ror__ = do(right(operator.or_), True)\n __rpow__ = do(right(operator.pow), True)\n __rrshift__ = do(right(operator.rshift), True)\n __rshift__ = do(operator.rshift, True)\n __rsub__ = do(right(operator.sub), True)\n __rtruediv__ = do(right(operator.truediv), True)\n __rxor__ = do(right(operator.xor), True)\n __sub__ = do(operator.sub, True)\n __truediv__ = do(operator.truediv, True)\n __xor__ = do(operator.xor, True)\n\n\ndef value(val, name=None):\n \"\"\"Create a ``Value`` from a python object.\n\n Parameters\n ----------\n val : object\n Object to be wrapped.\n name : string, optional\n Name to be used in the resulting dask.\n\n Examples\n --------\n >>> a = value([1, 2, 3])\n >>> a.compute()\n [1, 2, 3]\n\n Values can act as a proxy to the underlying object. Many operators are\n supported:\n\n >>> (a + [1, 2]).compute()\n [1, 2, 3, 1, 2]\n >>> a[1].compute()\n 2\n\n Method and attribute access also works:\n\n >>> a.count(2).compute()\n 1\n\n Note that if a method doesn't exist, no error will be thrown until runtime:\n\n >>> res = a.not_a_real_method()\n >>> res.compute() # doctest: +SKIP\n AttributeError(\"'list' object has no attribute 'not_a_real_method'\")\n \"\"\"\n\n name = name or tokenize(val, True)\n task, dasks = to_task_dasks(val)\n dasks.append({name: task})\n return Value(name, dasks)\n", "path": "dask/imperative.py"}]}
4,051
338
gh_patches_debug_4396
rasdani/github-patches
git_diff
oppia__oppia-1465
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- In the rich-text editor, auto-prepend "https://" to links which don't specify a protocol ``` Currently the non-interactive link widget will only accept links that begin with either "http://" or "https://". I propose that whenever a link does not, e.g. "www.google.com" we automatically prepend "http://www.google.com" to the link string that is stored. ``` Original issue reported on code.google.com by `[email protected]` on 24 Aug 2014 at 9:43 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `extensions/rich_text_components/Link/Link.py` Content: ``` 1 # coding: utf-8 2 # 3 # Copyright 2014 The Oppia Authors. All Rights Reserved. 4 # 5 # Licensed under the Apache License, Version 2.0 (the "License"); 6 # you may not use this file except in compliance with the License. 7 # You may obtain a copy of the License at 8 # 9 # http://www.apache.org/licenses/LICENSE-2.0 10 # 11 # Unless required by applicable law or agreed to in writing, softwar 12 # distributed under the License is distributed on an "AS-IS" BASIS, 13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 # See the License for the specific language governing permissions and 15 # limitations under the License. 16 17 from extensions.rich_text_components import base 18 19 20 class Link(base.BaseRichTextComponent): 21 """A rich-text component for displaying links.""" 22 23 name = 'Link' 24 category = 'Basic Input' 25 description = 'A link to a URL.' 26 frontend_name = 'link' 27 tooltip = 'Insert link' 28 29 _customization_arg_specs = [{ 30 'name': 'url', 31 'description': ( 32 'The link URL. It must start with http:// or https://'), 33 'schema': { 34 'type': 'custom', 35 'obj_type': 'SanitizedUrl', 36 }, 37 'default_value': 'https://www.example.com', 38 }, { 39 'name': 'text', 40 'description': ( 41 'The link text. If left blank, the link URL will be used.'), 42 'schema': { 43 'type': 'unicode', 44 }, 45 'default_value': '', 46 }, { 47 'name': 'open_link_in_same_window', 48 'description': 'Open the link in the same window?', 49 'schema': { 50 'type': 'bool' 51 }, 52 'default_value': False, 53 }] 54 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/extensions/rich_text_components/Link/Link.py b/extensions/rich_text_components/Link/Link.py --- a/extensions/rich_text_components/Link/Link.py +++ b/extensions/rich_text_components/Link/Link.py @@ -29,7 +29,7 @@ _customization_arg_specs = [{ 'name': 'url', 'description': ( - 'The link URL. It must start with http:// or https://'), + 'The link URL. If no protocol is specified, HTTPS will be used.'), 'schema': { 'type': 'custom', 'obj_type': 'SanitizedUrl',
{"golden_diff": "diff --git a/extensions/rich_text_components/Link/Link.py b/extensions/rich_text_components/Link/Link.py\n--- a/extensions/rich_text_components/Link/Link.py\n+++ b/extensions/rich_text_components/Link/Link.py\n@@ -29,7 +29,7 @@\n _customization_arg_specs = [{\n 'name': 'url',\n 'description': (\n- 'The link URL. It must start with http:// or https://'),\n+ 'The link URL. If no protocol is specified, HTTPS will be used.'),\n 'schema': {\n 'type': 'custom',\n 'obj_type': 'SanitizedUrl',\n", "issue": "In the rich-text editor, auto-prepend \"https://\" to links which don't specify a protocol\n```\nCurrently the non-interactive link widget will only accept links that begin \nwith either \"http://\" or \"https://\". I propose that whenever a link does not, \ne.g. \"www.google.com\" we automatically prepend \"http://www.google.com\" to the \nlink string that is stored.\n```\n\nOriginal issue reported on code.google.com by `[email protected]` on 24 Aug 2014 at 9:43\n\n", "before_files": [{"content": "# coding: utf-8\n#\n# Copyright 2014 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, softwar\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom extensions.rich_text_components import base\n\n\nclass Link(base.BaseRichTextComponent):\n \"\"\"A rich-text component for displaying links.\"\"\"\n\n name = 'Link'\n category = 'Basic Input'\n description = 'A link to a URL.'\n frontend_name = 'link'\n tooltip = 'Insert link'\n\n _customization_arg_specs = [{\n 'name': 'url',\n 'description': (\n 'The link URL. It must start with http:// or https://'),\n 'schema': {\n 'type': 'custom',\n 'obj_type': 'SanitizedUrl',\n },\n 'default_value': 'https://www.example.com',\n }, {\n 'name': 'text',\n 'description': (\n 'The link text. If left blank, the link URL will be used.'),\n 'schema': {\n 'type': 'unicode',\n },\n 'default_value': '',\n }, {\n 'name': 'open_link_in_same_window',\n 'description': 'Open the link in the same window?',\n 'schema': {\n 'type': 'bool'\n },\n 'default_value': False,\n }]\n", "path": "extensions/rich_text_components/Link/Link.py"}], "after_files": [{"content": "# coding: utf-8\n#\n# Copyright 2014 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, softwar\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom extensions.rich_text_components import base\n\n\nclass Link(base.BaseRichTextComponent):\n \"\"\"A rich-text component for displaying links.\"\"\"\n\n name = 'Link'\n category = 'Basic Input'\n description = 'A link to a URL.'\n frontend_name = 'link'\n tooltip = 'Insert link'\n\n _customization_arg_specs = [{\n 'name': 'url',\n 'description': (\n 'The link URL. If no protocol is specified, HTTPS will be used.'),\n 'schema': {\n 'type': 'custom',\n 'obj_type': 'SanitizedUrl',\n },\n 'default_value': 'https://www.example.com',\n }, {\n 'name': 'text',\n 'description': (\n 'The link text. If left blank, the link URL will be used.'),\n 'schema': {\n 'type': 'unicode',\n },\n 'default_value': '',\n }, {\n 'name': 'open_link_in_same_window',\n 'description': 'Open the link in the same window?',\n 'schema': {\n 'type': 'bool'\n },\n 'default_value': False,\n }]\n", "path": "extensions/rich_text_components/Link/Link.py"}]}
871
141
gh_patches_debug_26467
rasdani/github-patches
git_diff
liqd__a4-meinberlin-891
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Import of Bezirksregionen stopped working `$ manage.py import_geodata --gdal-legacy` Leads to a `KeyError`, probably the data format has changed. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `meinberlin/apps/maps/management/commands/import_geodata.py` Content: ``` 1 import json 2 import os 3 import subprocess 4 import sys 5 6 from django.core.management.base import BaseCommand 7 8 from meinberlin.apps.maps import models as map_models 9 10 11 class Command(BaseCommand): 12 help = 'Create map presets for berlin GEO-Data' 13 14 def add_arguments(self, parser): 15 parser.add_argument( 16 '--gdal-legacy', 17 action='store_true', 18 dest='gdal_legacy', 19 default=False, 20 help='GDAL version <= 1.10', 21 ) 22 23 def handle(self, *args, **options): 24 self.is_gdal_legacy = options['gdal_legacy'] 25 self._import_districts() 26 self._import_regions() 27 28 def _import_districts(self): 29 category = self._preset_category('Berlin') 30 tmpfile = '/tmp/bezirke.json' 31 url = 'http://fbinter.stadt-berlin.de/fb/' \ 32 'wfs/geometry/senstadt/re_bezirke/' 33 self._download_geodata(tmpfile, url, 'fis:re_bezirke') 34 data = json.load(open(tmpfile, 'r')) 35 for feature in data['features']: 36 district = feature['properties']['spatial_alias'] 37 if not map_models.MapPreset.objects.filter(name=district).exists(): 38 self._create_map_preset(district, feature, category) 39 os.remove(tmpfile) 40 41 def _import_regions(self): 42 url = 'http://fbinter.stadt-berlin.de/fb/' \ 43 'wfs/geometry/senstadt/re_bezirksregion' 44 tmpfile = '/tmp/bezirksregions.json' 45 self._download_geodata(tmpfile, url, 46 'fis:re_bezirksregion') 47 data = json.load(open(tmpfile, 'r')) 48 for feature in data['features']: 49 district = feature['properties']['BEZIRK'] 50 region = feature['properties']['BZR_NAME'] 51 category = self._preset_category(district) 52 if not map_models.MapPreset.objects.filter(name=region).exists(): 53 self._create_map_preset(region, feature, category) 54 os.remove(tmpfile) 55 56 def _preset_category(self, name): 57 category, _ = \ 58 map_models.MapPresetCategory.objects.get_or_create(name=name) 59 return category 60 61 def _create_map_preset(self, name, feature, category): 62 polygon = { 63 'type': 'FeatureCollection', 64 'features': [feature] 65 } 66 map_preset = map_models.MapPreset( 67 name=name, 68 polygon=polygon, 69 category=category 70 ) 71 map_preset.save() 72 73 def _download_geodata(self, filename: str, url: str, layer: str): 74 try: 75 os.remove(filename) 76 except: 77 pass 78 79 src = 'WFS:{}{}'.format( 80 url, 81 '?TYPENAMES=GML2' if self.is_gdal_legacy else '' 82 ) 83 try: 84 print('Trying to download file from {}'.format(url)) 85 subprocess.check_call([ 86 'ogr2ogr', '-s_srs', 'EPSG:25833', '-t_srs', 'WGS84', 87 '-f', 'geoJSON', filename, src, layer 88 ]) 89 except FileNotFoundError as e: 90 print('Make sure ogr2ogr is installed and in user PATH.') 91 sys.exit(e) 92 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/meinberlin/apps/maps/management/commands/import_geodata.py b/meinberlin/apps/maps/management/commands/import_geodata.py --- a/meinberlin/apps/maps/management/commands/import_geodata.py +++ b/meinberlin/apps/maps/management/commands/import_geodata.py @@ -40,13 +40,13 @@ def _import_regions(self): url = 'http://fbinter.stadt-berlin.de/fb/' \ - 'wfs/geometry/senstadt/re_bezirksregion' + 'wfs/geometry/senstadt/re_bezirksregion/' tmpfile = '/tmp/bezirksregions.json' self._download_geodata(tmpfile, url, 'fis:re_bezirksregion') data = json.load(open(tmpfile, 'r')) for feature in data['features']: - district = feature['properties']['BEZIRK'] + district = feature['properties']['BEZNAME'] region = feature['properties']['BZR_NAME'] category = self._preset_category(district) if not map_models.MapPreset.objects.filter(name=region).exists(): @@ -78,7 +78,7 @@ src = 'WFS:{}{}'.format( url, - '?TYPENAMES=GML2' if self.is_gdal_legacy else '' + '?VERSION=1.1.0' if self.is_gdal_legacy else '' ) try: print('Trying to download file from {}'.format(url))
{"golden_diff": "diff --git a/meinberlin/apps/maps/management/commands/import_geodata.py b/meinberlin/apps/maps/management/commands/import_geodata.py\n--- a/meinberlin/apps/maps/management/commands/import_geodata.py\n+++ b/meinberlin/apps/maps/management/commands/import_geodata.py\n@@ -40,13 +40,13 @@\n \n def _import_regions(self):\n url = 'http://fbinter.stadt-berlin.de/fb/' \\\n- 'wfs/geometry/senstadt/re_bezirksregion'\n+ 'wfs/geometry/senstadt/re_bezirksregion/'\n tmpfile = '/tmp/bezirksregions.json'\n self._download_geodata(tmpfile, url,\n 'fis:re_bezirksregion')\n data = json.load(open(tmpfile, 'r'))\n for feature in data['features']:\n- district = feature['properties']['BEZIRK']\n+ district = feature['properties']['BEZNAME']\n region = feature['properties']['BZR_NAME']\n category = self._preset_category(district)\n if not map_models.MapPreset.objects.filter(name=region).exists():\n@@ -78,7 +78,7 @@\n \n src = 'WFS:{}{}'.format(\n url,\n- '?TYPENAMES=GML2' if self.is_gdal_legacy else ''\n+ '?VERSION=1.1.0' if self.is_gdal_legacy else ''\n )\n try:\n print('Trying to download file from {}'.format(url))\n", "issue": "Import of Bezirksregionen stopped working\n`$ manage.py import_geodata --gdal-legacy`\r\n\r\nLeads to a `KeyError`, probably the data format has changed.\r\n\n", "before_files": [{"content": "import json\nimport os\nimport subprocess\nimport sys\n\nfrom django.core.management.base import BaseCommand\n\nfrom meinberlin.apps.maps import models as map_models\n\n\nclass Command(BaseCommand):\n help = 'Create map presets for berlin GEO-Data'\n\n def add_arguments(self, parser):\n parser.add_argument(\n '--gdal-legacy',\n action='store_true',\n dest='gdal_legacy',\n default=False,\n help='GDAL version <= 1.10',\n )\n\n def handle(self, *args, **options):\n self.is_gdal_legacy = options['gdal_legacy']\n self._import_districts()\n self._import_regions()\n\n def _import_districts(self):\n category = self._preset_category('Berlin')\n tmpfile = '/tmp/bezirke.json'\n url = 'http://fbinter.stadt-berlin.de/fb/' \\\n 'wfs/geometry/senstadt/re_bezirke/'\n self._download_geodata(tmpfile, url, 'fis:re_bezirke')\n data = json.load(open(tmpfile, 'r'))\n for feature in data['features']:\n district = feature['properties']['spatial_alias']\n if not map_models.MapPreset.objects.filter(name=district).exists():\n self._create_map_preset(district, feature, category)\n os.remove(tmpfile)\n\n def _import_regions(self):\n url = 'http://fbinter.stadt-berlin.de/fb/' \\\n 'wfs/geometry/senstadt/re_bezirksregion'\n tmpfile = '/tmp/bezirksregions.json'\n self._download_geodata(tmpfile, url,\n 'fis:re_bezirksregion')\n data = json.load(open(tmpfile, 'r'))\n for feature in data['features']:\n district = feature['properties']['BEZIRK']\n region = feature['properties']['BZR_NAME']\n category = self._preset_category(district)\n if not map_models.MapPreset.objects.filter(name=region).exists():\n self._create_map_preset(region, feature, category)\n os.remove(tmpfile)\n\n def _preset_category(self, name):\n category, _ = \\\n map_models.MapPresetCategory.objects.get_or_create(name=name)\n return category\n\n def _create_map_preset(self, name, feature, category):\n polygon = {\n 'type': 'FeatureCollection',\n 'features': [feature]\n }\n map_preset = map_models.MapPreset(\n name=name,\n polygon=polygon,\n category=category\n )\n map_preset.save()\n\n def _download_geodata(self, filename: str, url: str, layer: str):\n try:\n os.remove(filename)\n except:\n pass\n\n src = 'WFS:{}{}'.format(\n url,\n '?TYPENAMES=GML2' if self.is_gdal_legacy else ''\n )\n try:\n print('Trying to download file from {}'.format(url))\n subprocess.check_call([\n 'ogr2ogr', '-s_srs', 'EPSG:25833', '-t_srs', 'WGS84',\n '-f', 'geoJSON', filename, src, layer\n ])\n except FileNotFoundError as e:\n print('Make sure ogr2ogr is installed and in user PATH.')\n sys.exit(e)\n", "path": "meinberlin/apps/maps/management/commands/import_geodata.py"}], "after_files": [{"content": "import json\nimport os\nimport subprocess\nimport sys\n\nfrom django.core.management.base import BaseCommand\n\nfrom meinberlin.apps.maps import models as map_models\n\n\nclass Command(BaseCommand):\n help = 'Create map presets for berlin GEO-Data'\n\n def add_arguments(self, parser):\n parser.add_argument(\n '--gdal-legacy',\n action='store_true',\n dest='gdal_legacy',\n default=False,\n help='GDAL version <= 1.10',\n )\n\n def handle(self, *args, **options):\n self.is_gdal_legacy = options['gdal_legacy']\n self._import_districts()\n self._import_regions()\n\n def _import_districts(self):\n category = self._preset_category('Berlin')\n tmpfile = '/tmp/bezirke.json'\n url = 'http://fbinter.stadt-berlin.de/fb/' \\\n 'wfs/geometry/senstadt/re_bezirke/'\n self._download_geodata(tmpfile, url, 'fis:re_bezirke')\n data = json.load(open(tmpfile, 'r'))\n for feature in data['features']:\n district = feature['properties']['spatial_alias']\n if not map_models.MapPreset.objects.filter(name=district).exists():\n self._create_map_preset(district, feature, category)\n os.remove(tmpfile)\n\n def _import_regions(self):\n url = 'http://fbinter.stadt-berlin.de/fb/' \\\n 'wfs/geometry/senstadt/re_bezirksregion/'\n tmpfile = '/tmp/bezirksregions.json'\n self._download_geodata(tmpfile, url,\n 'fis:re_bezirksregion')\n data = json.load(open(tmpfile, 'r'))\n for feature in data['features']:\n district = feature['properties']['BEZNAME']\n region = feature['properties']['BZR_NAME']\n category = self._preset_category(district)\n if not map_models.MapPreset.objects.filter(name=region).exists():\n self._create_map_preset(region, feature, category)\n os.remove(tmpfile)\n\n def _preset_category(self, name):\n category, _ = \\\n map_models.MapPresetCategory.objects.get_or_create(name=name)\n return category\n\n def _create_map_preset(self, name, feature, category):\n polygon = {\n 'type': 'FeatureCollection',\n 'features': [feature]\n }\n map_preset = map_models.MapPreset(\n name=name,\n polygon=polygon,\n category=category\n )\n map_preset.save()\n\n def _download_geodata(self, filename: str, url: str, layer: str):\n try:\n os.remove(filename)\n except:\n pass\n\n src = 'WFS:{}{}'.format(\n url,\n '?VERSION=1.1.0' if self.is_gdal_legacy else ''\n )\n try:\n print('Trying to download file from {}'.format(url))\n subprocess.check_call([\n 'ogr2ogr', '-s_srs', 'EPSG:25833', '-t_srs', 'WGS84',\n '-f', 'geoJSON', filename, src, layer\n ])\n except FileNotFoundError as e:\n print('Make sure ogr2ogr is installed and in user PATH.')\n sys.exit(e)\n", "path": "meinberlin/apps/maps/management/commands/import_geodata.py"}]}
1,215
341
gh_patches_debug_4003
rasdani/github-patches
git_diff
encode__httpx-1469
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- environ["SERVER_PORT"] can't be "None" ### Checklist <!-- Please make sure you check all these items before submitting your bug report. --> - [x] The bug is reproducible against the latest release and/or `master`. - [x] There are no similar issues or pull requests to fix it yet. ### Describe the bug https://github.com/abersheeran/a2wsgi/issues/8 ```python =================================== FAILURES =================================== ____________ test_convert_asgi_to_wsgi[app1-a2wsgi-ASGIMiddleware] _____________ app = <a2wsgi.asgi.ASGIMiddleware object at 0x7fced147eb80> name = 'a2wsgi-ASGIMiddleware' @pytest.mark.parametrize( "app, name", [(wsgi_echo, "pure-WSGI"), (ASGIMiddleware(asgi_echo), "a2wsgi-ASGIMiddleware")], ) def test_convert_asgi_to_wsgi(app, name): with httpx.Client(app=app, base_url="http://testserver") as client: start_time = time.time_ns() for _ in range(100): > client.post("/", data=b"hello world") a2wsgi/benchmark.py:99: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ /usr/local/lib/python3.8/site-packages/httpx/_client.py:992: in post return self.request( /usr/local/lib/python3.8/site-packages/httpx/_client.py:733: in request return self.send( /usr/local/lib/python3.8/site-packages/httpx/_client.py:767: in send response = self._send_handling_auth( /usr/local/lib/python3.8/site-packages/httpx/_client.py:805: in _send_handling_auth response = self._send_handling_redirects( /usr/local/lib/python3.8/site-packages/httpx/_client.py:837: in _send_handling_redirects response = self._send_single_request(request, timeout) /usr/local/lib/python3.8/site-packages/httpx/_client.py:861: in _send_single_request (status_code, headers, stream, ext) = transport.request( /usr/local/lib/python3.8/site-packages/httpx/_transports/wsgi.py:113: in request result = _skip_leading_empty_chunks(result) /usr/local/lib/python3.8/site-packages/httpx/_transports/wsgi.py:10: in _skip_leading_empty_chunks for chunk in body: a2wsgi/a2wsgi/asgi.py:160: in __call__ self.app(build_scope(environ), self.asgi_receive, self.asgi_send) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ environ = {'CONTENT_LENGTH': '11', 'HTTP_ACCEPT': '*/*', 'HTTP_ACCEPT_ENCODING': 'gzip, deflate', 'HTTP_CONNECTION': 'keep-alive', ...} def build_scope(environ: Environ) -> Scope: headers = [ ( each[5:].lower().replace("_", "-").encode("latin1"), environ[each].encode("latin1"), ) for each in environ.keys() if each.startswith("HTTP_") ] if environ.get("CONTENT_TYPE"): headers.append((b"content-type", environ["CONTENT_TYPE"].encode("latin1"))) if environ.get("CONTENT_LENGTH"): headers.append((b"content-length", environ["CONTENT_LENGTH"].encode("latin1"))) if environ.get("REMOTE_ADDR") and environ.get("REMOTE_PORT"): client = (environ.get("REMOTE_ADDR"), int(environ.get("REMOTE_PORT"))) else: client = None return { "type": "http", "asgi": {"version": "3.0", "spec_version": "3.0"}, "http_version": environ.get("SERVER_PROTOCOL", "http/1.0").split("/")[1], "method": environ["REQUEST_METHOD"], "scheme": environ.get("wsgi.url_scheme", "http"), "path": environ["PATH_INFO"].encode("latin1").decode("utf8"), "query_string": environ["QUERY_STRING"].encode("ascii"), "root_path": environ.get("SCRIPT_NAME", "").encode("latin1").decode("utf8"), "client": client, > "server": (environ["SERVER_NAME"], int(environ["SERVER_PORT"])), "headers": headers, } E ValueError: invalid literal for int() with base 10: 'None' a2wsgi/a2wsgi/asgi.py:94: ValueError =========================== short test summary info ============================ FAILED a2wsgi/benchmark.py::test_convert_asgi_to_wsgi[app1-a2wsgi-ASGIMiddleware] ==================== 1 failed, 5 passed in 95.47s (0:01:35) ==================== ``` ### Expected behavior https://www.python.org/dev/peps/pep-3333/#environ-variables `SERVER_PORT` must be a valid integer value for URL splicing. In the WSGI application test, it may not have a real value, but we should give a default value, such as 80. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `httpx/_transports/wsgi.py` Content: ``` 1 import io 2 import itertools 3 import typing 4 from urllib.parse import unquote 5 6 import httpcore 7 8 9 def _skip_leading_empty_chunks(body: typing.Iterable) -> typing.Iterable: 10 body = iter(body) 11 for chunk in body: 12 if chunk: 13 return itertools.chain([chunk], body) 14 return [] 15 16 17 class WSGITransport(httpcore.SyncHTTPTransport): 18 """ 19 A custom transport that handles sending requests directly to an WSGI app. 20 The simplest way to use this functionality is to use the `app` argument. 21 22 ``` 23 client = httpx.Client(app=app) 24 ``` 25 26 Alternatively, you can setup the transport instance explicitly. 27 This allows you to include any additional configuration arguments specific 28 to the WSGITransport class: 29 30 ``` 31 transport = httpx.WSGITransport( 32 app=app, 33 script_name="/submount", 34 remote_addr="1.2.3.4" 35 ) 36 client = httpx.Client(transport=transport) 37 ``` 38 39 Arguments: 40 41 * `app` - The ASGI application. 42 * `raise_app_exceptions` - Boolean indicating if exceptions in the application 43 should be raised. Default to `True`. Can be set to `False` for use cases 44 such as testing the content of a client 500 response. 45 * `script_name` - The root path on which the ASGI application should be mounted. 46 * `remote_addr` - A string indicating the client IP of incoming requests. 47 ``` 48 """ 49 50 def __init__( 51 self, 52 app: typing.Callable, 53 raise_app_exceptions: bool = True, 54 script_name: str = "", 55 remote_addr: str = "127.0.0.1", 56 ) -> None: 57 self.app = app 58 self.raise_app_exceptions = raise_app_exceptions 59 self.script_name = script_name 60 self.remote_addr = remote_addr 61 62 def request( 63 self, 64 method: bytes, 65 url: typing.Tuple[bytes, bytes, typing.Optional[int], bytes], 66 headers: typing.List[typing.Tuple[bytes, bytes]] = None, 67 stream: httpcore.SyncByteStream = None, 68 ext: dict = None, 69 ) -> typing.Tuple[ 70 int, typing.List[typing.Tuple[bytes, bytes]], httpcore.SyncByteStream, dict 71 ]: 72 headers = [] if headers is None else headers 73 stream = httpcore.PlainByteStream(content=b"") if stream is None else stream 74 75 scheme, host, port, full_path = url 76 path, _, query = full_path.partition(b"?") 77 environ = { 78 "wsgi.version": (1, 0), 79 "wsgi.url_scheme": scheme.decode("ascii"), 80 "wsgi.input": io.BytesIO(b"".join(stream)), 81 "wsgi.errors": io.BytesIO(), 82 "wsgi.multithread": True, 83 "wsgi.multiprocess": False, 84 "wsgi.run_once": False, 85 "REQUEST_METHOD": method.decode(), 86 "SCRIPT_NAME": self.script_name, 87 "PATH_INFO": unquote(path.decode("ascii")), 88 "QUERY_STRING": query.decode("ascii"), 89 "SERVER_NAME": host.decode("ascii"), 90 "SERVER_PORT": str(port), 91 "REMOTE_ADDR": self.remote_addr, 92 } 93 for header_key, header_value in headers: 94 key = header_key.decode("ascii").upper().replace("-", "_") 95 if key not in ("CONTENT_TYPE", "CONTENT_LENGTH"): 96 key = "HTTP_" + key 97 environ[key] = header_value.decode("ascii") 98 99 seen_status = None 100 seen_response_headers = None 101 seen_exc_info = None 102 103 def start_response( 104 status: str, response_headers: list, exc_info: typing.Any = None 105 ) -> None: 106 nonlocal seen_status, seen_response_headers, seen_exc_info 107 seen_status = status 108 seen_response_headers = response_headers 109 seen_exc_info = exc_info 110 111 result = self.app(environ, start_response) 112 # This is needed because the status returned by start_response 113 # shouldn't be used until the first non-empty chunk has been served. 114 result = _skip_leading_empty_chunks(result) 115 116 assert seen_status is not None 117 assert seen_response_headers is not None 118 if seen_exc_info and self.raise_app_exceptions: 119 raise seen_exc_info[1] 120 121 status_code = int(seen_status.split()[0]) 122 headers = [ 123 (key.encode("ascii"), value.encode("ascii")) 124 for key, value in seen_response_headers 125 ] 126 stream = httpcore.IteratorByteStream(iterator=result) 127 ext = {} 128 129 return (status_code, headers, stream, ext) 130 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/httpx/_transports/wsgi.py b/httpx/_transports/wsgi.py --- a/httpx/_transports/wsgi.py +++ b/httpx/_transports/wsgi.py @@ -74,6 +74,9 @@ scheme, host, port, full_path = url path, _, query = full_path.partition(b"?") + if port is None: + port = {b"http": 80, b"https": 443}[scheme] + environ = { "wsgi.version": (1, 0), "wsgi.url_scheme": scheme.decode("ascii"),
{"golden_diff": "diff --git a/httpx/_transports/wsgi.py b/httpx/_transports/wsgi.py\n--- a/httpx/_transports/wsgi.py\n+++ b/httpx/_transports/wsgi.py\n@@ -74,6 +74,9 @@\n \n scheme, host, port, full_path = url\n path, _, query = full_path.partition(b\"?\")\n+ if port is None:\n+ port = {b\"http\": 80, b\"https\": 443}[scheme]\n+\n environ = {\n \"wsgi.version\": (1, 0),\n \"wsgi.url_scheme\": scheme.decode(\"ascii\"),\n", "issue": "environ[\"SERVER_PORT\"] can't be \"None\"\n### Checklist\r\n\r\n<!-- Please make sure you check all these items before submitting your bug report. -->\r\n\r\n- [x] The bug is reproducible against the latest release and/or `master`.\r\n- [x] There are no similar issues or pull requests to fix it yet.\r\n\r\n### Describe the bug\r\n\r\nhttps://github.com/abersheeran/a2wsgi/issues/8\r\n\r\n```python\r\n=================================== FAILURES ===================================\r\n____________ test_convert_asgi_to_wsgi[app1-a2wsgi-ASGIMiddleware] _____________\r\n\r\napp = <a2wsgi.asgi.ASGIMiddleware object at 0x7fced147eb80>\r\nname = 'a2wsgi-ASGIMiddleware'\r\n\r\n @pytest.mark.parametrize(\r\n \"app, name\",\r\n [(wsgi_echo, \"pure-WSGI\"), (ASGIMiddleware(asgi_echo), \"a2wsgi-ASGIMiddleware\")],\r\n )\r\n def test_convert_asgi_to_wsgi(app, name):\r\n with httpx.Client(app=app, base_url=\"http://testserver\") as client:\r\n start_time = time.time_ns()\r\n for _ in range(100):\r\n> client.post(\"/\", data=b\"hello world\")\r\n\r\na2wsgi/benchmark.py:99:\r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _\r\n/usr/local/lib/python3.8/site-packages/httpx/_client.py:992: in post\r\n return self.request(\r\n/usr/local/lib/python3.8/site-packages/httpx/_client.py:733: in request\r\n return self.send(\r\n/usr/local/lib/python3.8/site-packages/httpx/_client.py:767: in send\r\n response = self._send_handling_auth(\r\n/usr/local/lib/python3.8/site-packages/httpx/_client.py:805: in _send_handling_auth\r\n response = self._send_handling_redirects(\r\n/usr/local/lib/python3.8/site-packages/httpx/_client.py:837: in _send_handling_redirects\r\n response = self._send_single_request(request, timeout)\r\n/usr/local/lib/python3.8/site-packages/httpx/_client.py:861: in _send_single_request\r\n (status_code, headers, stream, ext) = transport.request(\r\n/usr/local/lib/python3.8/site-packages/httpx/_transports/wsgi.py:113: in request\r\n result = _skip_leading_empty_chunks(result)\r\n/usr/local/lib/python3.8/site-packages/httpx/_transports/wsgi.py:10: in _skip_leading_empty_chunks\r\n for chunk in body:\r\na2wsgi/a2wsgi/asgi.py:160: in __call__\r\n self.app(build_scope(environ), self.asgi_receive, self.asgi_send)\r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _\r\n\r\nenviron = {'CONTENT_LENGTH': '11', 'HTTP_ACCEPT': '*/*', 'HTTP_ACCEPT_ENCODING': 'gzip, deflate', 'HTTP_CONNECTION': 'keep-alive', ...}\r\n\r\n def build_scope(environ: Environ) -> Scope:\r\n headers = [\r\n (\r\n each[5:].lower().replace(\"_\", \"-\").encode(\"latin1\"),\r\n environ[each].encode(\"latin1\"),\r\n )\r\n for each in environ.keys()\r\n if each.startswith(\"HTTP_\")\r\n ]\r\n if environ.get(\"CONTENT_TYPE\"):\r\n headers.append((b\"content-type\", environ[\"CONTENT_TYPE\"].encode(\"latin1\")))\r\n if environ.get(\"CONTENT_LENGTH\"):\r\n headers.append((b\"content-length\", environ[\"CONTENT_LENGTH\"].encode(\"latin1\")))\r\n\r\n if environ.get(\"REMOTE_ADDR\") and environ.get(\"REMOTE_PORT\"):\r\n client = (environ.get(\"REMOTE_ADDR\"), int(environ.get(\"REMOTE_PORT\")))\r\n else:\r\n client = None\r\n\r\n return {\r\n \"type\": \"http\",\r\n \"asgi\": {\"version\": \"3.0\", \"spec_version\": \"3.0\"},\r\n \"http_version\": environ.get(\"SERVER_PROTOCOL\", \"http/1.0\").split(\"/\")[1],\r\n \"method\": environ[\"REQUEST_METHOD\"],\r\n \"scheme\": environ.get(\"wsgi.url_scheme\", \"http\"),\r\n \"path\": environ[\"PATH_INFO\"].encode(\"latin1\").decode(\"utf8\"),\r\n \"query_string\": environ[\"QUERY_STRING\"].encode(\"ascii\"),\r\n \"root_path\": environ.get(\"SCRIPT_NAME\", \"\").encode(\"latin1\").decode(\"utf8\"),\r\n \"client\": client,\r\n> \"server\": (environ[\"SERVER_NAME\"], int(environ[\"SERVER_PORT\"])),\r\n \"headers\": headers,\r\n }\r\nE ValueError: invalid literal for int() with base 10: 'None'\r\n\r\na2wsgi/a2wsgi/asgi.py:94: ValueError\r\n=========================== short test summary info ============================\r\nFAILED a2wsgi/benchmark.py::test_convert_asgi_to_wsgi[app1-a2wsgi-ASGIMiddleware]\r\n==================== 1 failed, 5 passed in 95.47s (0:01:35) ====================\r\n```\r\n\r\n### Expected behavior\r\n\r\nhttps://www.python.org/dev/peps/pep-3333/#environ-variables\r\n\r\n`SERVER_PORT` must be a valid integer value for URL splicing. In the WSGI application test, it may not have a real value, but we should give a default value, such as 80.\r\n\n", "before_files": [{"content": "import io\nimport itertools\nimport typing\nfrom urllib.parse import unquote\n\nimport httpcore\n\n\ndef _skip_leading_empty_chunks(body: typing.Iterable) -> typing.Iterable:\n body = iter(body)\n for chunk in body:\n if chunk:\n return itertools.chain([chunk], body)\n return []\n\n\nclass WSGITransport(httpcore.SyncHTTPTransport):\n \"\"\"\n A custom transport that handles sending requests directly to an WSGI app.\n The simplest way to use this functionality is to use the `app` argument.\n\n ```\n client = httpx.Client(app=app)\n ```\n\n Alternatively, you can setup the transport instance explicitly.\n This allows you to include any additional configuration arguments specific\n to the WSGITransport class:\n\n ```\n transport = httpx.WSGITransport(\n app=app,\n script_name=\"/submount\",\n remote_addr=\"1.2.3.4\"\n )\n client = httpx.Client(transport=transport)\n ```\n\n Arguments:\n\n * `app` - The ASGI application.\n * `raise_app_exceptions` - Boolean indicating if exceptions in the application\n should be raised. Default to `True`. Can be set to `False` for use cases\n such as testing the content of a client 500 response.\n * `script_name` - The root path on which the ASGI application should be mounted.\n * `remote_addr` - A string indicating the client IP of incoming requests.\n ```\n \"\"\"\n\n def __init__(\n self,\n app: typing.Callable,\n raise_app_exceptions: bool = True,\n script_name: str = \"\",\n remote_addr: str = \"127.0.0.1\",\n ) -> None:\n self.app = app\n self.raise_app_exceptions = raise_app_exceptions\n self.script_name = script_name\n self.remote_addr = remote_addr\n\n def request(\n self,\n method: bytes,\n url: typing.Tuple[bytes, bytes, typing.Optional[int], bytes],\n headers: typing.List[typing.Tuple[bytes, bytes]] = None,\n stream: httpcore.SyncByteStream = None,\n ext: dict = None,\n ) -> typing.Tuple[\n int, typing.List[typing.Tuple[bytes, bytes]], httpcore.SyncByteStream, dict\n ]:\n headers = [] if headers is None else headers\n stream = httpcore.PlainByteStream(content=b\"\") if stream is None else stream\n\n scheme, host, port, full_path = url\n path, _, query = full_path.partition(b\"?\")\n environ = {\n \"wsgi.version\": (1, 0),\n \"wsgi.url_scheme\": scheme.decode(\"ascii\"),\n \"wsgi.input\": io.BytesIO(b\"\".join(stream)),\n \"wsgi.errors\": io.BytesIO(),\n \"wsgi.multithread\": True,\n \"wsgi.multiprocess\": False,\n \"wsgi.run_once\": False,\n \"REQUEST_METHOD\": method.decode(),\n \"SCRIPT_NAME\": self.script_name,\n \"PATH_INFO\": unquote(path.decode(\"ascii\")),\n \"QUERY_STRING\": query.decode(\"ascii\"),\n \"SERVER_NAME\": host.decode(\"ascii\"),\n \"SERVER_PORT\": str(port),\n \"REMOTE_ADDR\": self.remote_addr,\n }\n for header_key, header_value in headers:\n key = header_key.decode(\"ascii\").upper().replace(\"-\", \"_\")\n if key not in (\"CONTENT_TYPE\", \"CONTENT_LENGTH\"):\n key = \"HTTP_\" + key\n environ[key] = header_value.decode(\"ascii\")\n\n seen_status = None\n seen_response_headers = None\n seen_exc_info = None\n\n def start_response(\n status: str, response_headers: list, exc_info: typing.Any = None\n ) -> None:\n nonlocal seen_status, seen_response_headers, seen_exc_info\n seen_status = status\n seen_response_headers = response_headers\n seen_exc_info = exc_info\n\n result = self.app(environ, start_response)\n # This is needed because the status returned by start_response\n # shouldn't be used until the first non-empty chunk has been served.\n result = _skip_leading_empty_chunks(result)\n\n assert seen_status is not None\n assert seen_response_headers is not None\n if seen_exc_info and self.raise_app_exceptions:\n raise seen_exc_info[1]\n\n status_code = int(seen_status.split()[0])\n headers = [\n (key.encode(\"ascii\"), value.encode(\"ascii\"))\n for key, value in seen_response_headers\n ]\n stream = httpcore.IteratorByteStream(iterator=result)\n ext = {}\n\n return (status_code, headers, stream, ext)\n", "path": "httpx/_transports/wsgi.py"}], "after_files": [{"content": "import io\nimport itertools\nimport typing\nfrom urllib.parse import unquote\n\nimport httpcore\n\n\ndef _skip_leading_empty_chunks(body: typing.Iterable) -> typing.Iterable:\n body = iter(body)\n for chunk in body:\n if chunk:\n return itertools.chain([chunk], body)\n return []\n\n\nclass WSGITransport(httpcore.SyncHTTPTransport):\n \"\"\"\n A custom transport that handles sending requests directly to an WSGI app.\n The simplest way to use this functionality is to use the `app` argument.\n\n ```\n client = httpx.Client(app=app)\n ```\n\n Alternatively, you can setup the transport instance explicitly.\n This allows you to include any additional configuration arguments specific\n to the WSGITransport class:\n\n ```\n transport = httpx.WSGITransport(\n app=app,\n script_name=\"/submount\",\n remote_addr=\"1.2.3.4\"\n )\n client = httpx.Client(transport=transport)\n ```\n\n Arguments:\n\n * `app` - The ASGI application.\n * `raise_app_exceptions` - Boolean indicating if exceptions in the application\n should be raised. Default to `True`. Can be set to `False` for use cases\n such as testing the content of a client 500 response.\n * `script_name` - The root path on which the ASGI application should be mounted.\n * `remote_addr` - A string indicating the client IP of incoming requests.\n ```\n \"\"\"\n\n def __init__(\n self,\n app: typing.Callable,\n raise_app_exceptions: bool = True,\n script_name: str = \"\",\n remote_addr: str = \"127.0.0.1\",\n ) -> None:\n self.app = app\n self.raise_app_exceptions = raise_app_exceptions\n self.script_name = script_name\n self.remote_addr = remote_addr\n\n def request(\n self,\n method: bytes,\n url: typing.Tuple[bytes, bytes, typing.Optional[int], bytes],\n headers: typing.List[typing.Tuple[bytes, bytes]] = None,\n stream: httpcore.SyncByteStream = None,\n ext: dict = None,\n ) -> typing.Tuple[\n int, typing.List[typing.Tuple[bytes, bytes]], httpcore.SyncByteStream, dict\n ]:\n headers = [] if headers is None else headers\n stream = httpcore.PlainByteStream(content=b\"\") if stream is None else stream\n\n scheme, host, port, full_path = url\n path, _, query = full_path.partition(b\"?\")\n if port is None:\n port = {b\"http\": 80, b\"https\": 443}[scheme]\n\n environ = {\n \"wsgi.version\": (1, 0),\n \"wsgi.url_scheme\": scheme.decode(\"ascii\"),\n \"wsgi.input\": io.BytesIO(b\"\".join(stream)),\n \"wsgi.errors\": io.BytesIO(),\n \"wsgi.multithread\": True,\n \"wsgi.multiprocess\": False,\n \"wsgi.run_once\": False,\n \"REQUEST_METHOD\": method.decode(),\n \"SCRIPT_NAME\": self.script_name,\n \"PATH_INFO\": unquote(path.decode(\"ascii\")),\n \"QUERY_STRING\": query.decode(\"ascii\"),\n \"SERVER_NAME\": host.decode(\"ascii\"),\n \"SERVER_PORT\": str(port),\n \"REMOTE_ADDR\": self.remote_addr,\n }\n for header_key, header_value in headers:\n key = header_key.decode(\"ascii\").upper().replace(\"-\", \"_\")\n if key not in (\"CONTENT_TYPE\", \"CONTENT_LENGTH\"):\n key = \"HTTP_\" + key\n environ[key] = header_value.decode(\"ascii\")\n\n seen_status = None\n seen_response_headers = None\n seen_exc_info = None\n\n def start_response(\n status: str, response_headers: list, exc_info: typing.Any = None\n ) -> None:\n nonlocal seen_status, seen_response_headers, seen_exc_info\n seen_status = status\n seen_response_headers = response_headers\n seen_exc_info = exc_info\n\n result = self.app(environ, start_response)\n # This is needed because the status returned by start_response\n # shouldn't be used until the first non-empty chunk has been served.\n result = _skip_leading_empty_chunks(result)\n\n assert seen_status is not None\n assert seen_response_headers is not None\n if seen_exc_info and self.raise_app_exceptions:\n raise seen_exc_info[1]\n\n status_code = int(seen_status.split()[0])\n headers = [\n (key.encode(\"ascii\"), value.encode(\"ascii\"))\n for key, value in seen_response_headers\n ]\n stream = httpcore.IteratorByteStream(iterator=result)\n ext = {}\n\n return (status_code, headers, stream, ext)\n", "path": "httpx/_transports/wsgi.py"}]}
2,786
139
gh_patches_debug_23255
rasdani/github-patches
git_diff
pallets__werkzeug-1636
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Enable x_host and x_proto by default through deprecated proxyfix? When #1314 refactored ProxyFix, it aliased num_proxies to x_for only (as that was the only one which previously supported multiple proxies). However in doing so it disabled forwarding of host and scheme, [which were forwarded by default up to 0.14](https://github.com/pallets/werkzeug/blob/f1d15a2c7f35dcde0e52787c9fdf9ea6f6405308/werkzeug/contrib/fixers.py#L146-L151), which breaks systems relying on this. I'd like to set x_host=1 and x_proto=1 in 0.15 IFF the user goes through the deprecated wrapper (`werkzeug.contrib.fixers.ProxyFix`) in order to restore the old behaviour, and maybe add a small warning about this behavioural change to the deprecation message (or possibly in one of the `versionchanged` blocks of the new version). Would that be acceptable / interesting or a waste of time? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/werkzeug/middleware/proxy_fix.py` Content: ``` 1 """ 2 X-Forwarded-For Proxy Fix 3 ========================= 4 5 This module provides a middleware that adjusts the WSGI environ based on 6 ``X-Forwarded-`` headers that proxies in front of an application may 7 set. 8 9 When an application is running behind a proxy server, WSGI may see the 10 request as coming from that server rather than the real client. Proxies 11 set various headers to track where the request actually came from. 12 13 This middleware should only be applied if the application is actually 14 behind such a proxy, and should be configured with the number of proxies 15 that are chained in front of it. Not all proxies set all the headers. 16 Since incoming headers can be faked, you must set how many proxies are 17 setting each header so the middleware knows what to trust. 18 19 .. autoclass:: ProxyFix 20 21 :copyright: 2007 Pallets 22 :license: BSD-3-Clause 23 """ 24 import warnings 25 26 27 class ProxyFix(object): 28 """Adjust the WSGI environ based on ``X-Forwarded-`` that proxies in 29 front of the application may set. 30 31 - ``X-Forwarded-For`` sets ``REMOTE_ADDR``. 32 - ``X-Forwarded-Proto`` sets ``wsgi.url_scheme``. 33 - ``X-Forwarded-Host`` sets ``HTTP_HOST``, ``SERVER_NAME``, and 34 ``SERVER_PORT``. 35 - ``X-Forwarded-Port`` sets ``HTTP_HOST`` and ``SERVER_PORT``. 36 - ``X-Forwarded-Prefix`` sets ``SCRIPT_NAME``. 37 38 You must tell the middleware how many proxies set each header so it 39 knows what values to trust. It is a security issue to trust values 40 that came from the client rather than a proxy. 41 42 The original values of the headers are stored in the WSGI 43 environ as ``werkzeug.proxy_fix.orig``, a dict. 44 45 :param app: The WSGI application to wrap. 46 :param x_for: Number of values to trust for ``X-Forwarded-For``. 47 :param x_proto: Number of values to trust for ``X-Forwarded-Proto``. 48 :param x_host: Number of values to trust for ``X-Forwarded-Host``. 49 :param x_port: Number of values to trust for ``X-Forwarded-Port``. 50 :param x_prefix: Number of values to trust for 51 ``X-Forwarded-Prefix``. 52 :param num_proxies: Deprecated, use ``x_for`` instead. 53 54 .. code-block:: python 55 56 from werkzeug.middleware.proxy_fix import ProxyFix 57 # App is behind one proxy that sets the -For and -Host headers. 58 app = ProxyFix(app, x_for=1, x_host=1) 59 60 .. versionchanged:: 0.15 61 All headers support multiple values. The ``num_proxies`` 62 argument is deprecated. Each header is configured with a 63 separate number of trusted proxies. 64 65 .. versionchanged:: 0.15 66 Original WSGI environ values are stored in the 67 ``werkzeug.proxy_fix.orig`` dict. ``orig_remote_addr``, 68 ``orig_wsgi_url_scheme``, and ``orig_http_host`` are deprecated 69 and will be removed in 1.0. 70 71 .. versionchanged:: 0.15 72 Support ``X-Forwarded-Port`` and ``X-Forwarded-Prefix``. 73 74 .. versionchanged:: 0.15 75 ``X-Fowarded-Host`` and ``X-Forwarded-Port`` modify 76 ``SERVER_NAME`` and ``SERVER_PORT``. 77 """ 78 79 def __init__( 80 self, app, num_proxies=None, x_for=1, x_proto=0, x_host=0, x_port=0, x_prefix=0 81 ): 82 self.app = app 83 self.x_for = x_for 84 self.x_proto = x_proto 85 self.x_host = x_host 86 self.x_port = x_port 87 self.x_prefix = x_prefix 88 self.num_proxies = num_proxies 89 90 @property 91 def num_proxies(self): 92 """The number of proxies setting ``X-Forwarded-For`` in front 93 of the application. 94 95 .. deprecated:: 0.15 96 A separate number of trusted proxies is configured for each 97 header. ``num_proxies`` maps to ``x_for``. This method will 98 be removed in 1.0. 99 100 :internal: 101 """ 102 warnings.warn( 103 "'num_proxies' is deprecated as of version 0.15 and will be" 104 " removed in version 1.0. Use 'x_for' instead.", 105 DeprecationWarning, 106 stacklevel=2, 107 ) 108 return self.x_for 109 110 @num_proxies.setter 111 def num_proxies(self, value): 112 if value is not None: 113 warnings.warn( 114 "'num_proxies' is deprecated as of version 0.15 and" 115 " will be removed in version 1.0. Use 'x_for' instead.", 116 DeprecationWarning, 117 stacklevel=2, 118 ) 119 self.x_for = value 120 121 def get_remote_addr(self, forwarded_for): 122 """Get the real ``remote_addr`` by looking backwards ``x_for`` 123 number of values in the ``X-Forwarded-For`` header. 124 125 :param forwarded_for: List of values parsed from the 126 ``X-Forwarded-For`` header. 127 :return: The real ``remote_addr``, or ``None`` if there were not 128 at least ``x_for`` values. 129 130 .. deprecated:: 0.15 131 This is handled internally for each header. This method will 132 be removed in 1.0. 133 134 .. versionchanged:: 0.9 135 Use ``num_proxies`` instead of always picking the first 136 value. 137 138 .. versionadded:: 0.8 139 """ 140 warnings.warn( 141 "'get_remote_addr' is deprecated as of version 0.15 and" 142 " will be removed in version 1.0. It is now handled" 143 " internally for each header.", 144 DeprecationWarning, 145 ) 146 return self._get_trusted_comma(self.x_for, ",".join(forwarded_for)) 147 148 def _get_trusted_comma(self, trusted, value): 149 """Get the real value from a comma-separated header based on the 150 configured number of trusted proxies. 151 152 :param trusted: Number of values to trust in the header. 153 :param value: Header value to parse. 154 :return: The real value, or ``None`` if there are fewer values 155 than the number of trusted proxies. 156 157 .. versionadded:: 0.15 158 """ 159 if not (trusted and value): 160 return 161 values = [x.strip() for x in value.split(",")] 162 if len(values) >= trusted: 163 return values[-trusted] 164 165 def __call__(self, environ, start_response): 166 """Modify the WSGI environ based on the various ``Forwarded`` 167 headers before calling the wrapped application. Store the 168 original environ values in ``werkzeug.proxy_fix.orig_{key}``. 169 """ 170 environ_get = environ.get 171 orig_remote_addr = environ_get("REMOTE_ADDR") 172 orig_wsgi_url_scheme = environ_get("wsgi.url_scheme") 173 orig_http_host = environ_get("HTTP_HOST") 174 environ.update( 175 { 176 "werkzeug.proxy_fix.orig": { 177 "REMOTE_ADDR": orig_remote_addr, 178 "wsgi.url_scheme": orig_wsgi_url_scheme, 179 "HTTP_HOST": orig_http_host, 180 "SERVER_NAME": environ_get("SERVER_NAME"), 181 "SERVER_PORT": environ_get("SERVER_PORT"), 182 "SCRIPT_NAME": environ_get("SCRIPT_NAME"), 183 }, 184 # todo: remove deprecated keys 185 "werkzeug.proxy_fix.orig_remote_addr": orig_remote_addr, 186 "werkzeug.proxy_fix.orig_wsgi_url_scheme": orig_wsgi_url_scheme, 187 "werkzeug.proxy_fix.orig_http_host": orig_http_host, 188 } 189 ) 190 191 x_for = self._get_trusted_comma(self.x_for, environ_get("HTTP_X_FORWARDED_FOR")) 192 if x_for: 193 environ["REMOTE_ADDR"] = x_for 194 195 x_proto = self._get_trusted_comma( 196 self.x_proto, environ_get("HTTP_X_FORWARDED_PROTO") 197 ) 198 if x_proto: 199 environ["wsgi.url_scheme"] = x_proto 200 201 x_host = self._get_trusted_comma( 202 self.x_host, environ_get("HTTP_X_FORWARDED_HOST") 203 ) 204 if x_host: 205 environ["HTTP_HOST"] = x_host 206 parts = x_host.split(":", 1) 207 environ["SERVER_NAME"] = parts[0] 208 if len(parts) == 2: 209 environ["SERVER_PORT"] = parts[1] 210 211 x_port = self._get_trusted_comma( 212 self.x_port, environ_get("HTTP_X_FORWARDED_PORT") 213 ) 214 if x_port: 215 host = environ.get("HTTP_HOST") 216 if host: 217 parts = host.split(":", 1) 218 host = parts[0] if len(parts) == 2 else host 219 environ["HTTP_HOST"] = "%s:%s" % (host, x_port) 220 environ["SERVER_PORT"] = x_port 221 222 x_prefix = self._get_trusted_comma( 223 self.x_prefix, environ_get("HTTP_X_FORWARDED_PREFIX") 224 ) 225 if x_prefix: 226 environ["SCRIPT_NAME"] = x_prefix 227 228 return self.app(environ, start_response) 229 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/werkzeug/middleware/proxy_fix.py b/src/werkzeug/middleware/proxy_fix.py --- a/src/werkzeug/middleware/proxy_fix.py +++ b/src/werkzeug/middleware/proxy_fix.py @@ -77,7 +77,7 @@ """ def __init__( - self, app, num_proxies=None, x_for=1, x_proto=0, x_host=0, x_port=0, x_prefix=0 + self, app, num_proxies=None, x_for=1, x_proto=1, x_host=0, x_port=0, x_prefix=0 ): self.app = app self.x_for = x_for @@ -112,11 +112,15 @@ if value is not None: warnings.warn( "'num_proxies' is deprecated as of version 0.15 and" - " will be removed in version 1.0. Use 'x_for' instead.", + " will be removed in version 1.0. Use" + " 'x_for={value}, x_proto={value}, x_host={value}'" + " instead.".format(value=value), DeprecationWarning, stacklevel=2, ) self.x_for = value + self.x_proto = value + self.x_host = value def get_remote_addr(self, forwarded_for): """Get the real ``remote_addr`` by looking backwards ``x_for``
{"golden_diff": "diff --git a/src/werkzeug/middleware/proxy_fix.py b/src/werkzeug/middleware/proxy_fix.py\n--- a/src/werkzeug/middleware/proxy_fix.py\n+++ b/src/werkzeug/middleware/proxy_fix.py\n@@ -77,7 +77,7 @@\n \"\"\"\n \n def __init__(\n- self, app, num_proxies=None, x_for=1, x_proto=0, x_host=0, x_port=0, x_prefix=0\n+ self, app, num_proxies=None, x_for=1, x_proto=1, x_host=0, x_port=0, x_prefix=0\n ):\n self.app = app\n self.x_for = x_for\n@@ -112,11 +112,15 @@\n if value is not None:\n warnings.warn(\n \"'num_proxies' is deprecated as of version 0.15 and\"\n- \" will be removed in version 1.0. Use 'x_for' instead.\",\n+ \" will be removed in version 1.0. Use\"\n+ \" 'x_for={value}, x_proto={value}, x_host={value}'\"\n+ \" instead.\".format(value=value),\n DeprecationWarning,\n stacklevel=2,\n )\n self.x_for = value\n+ self.x_proto = value\n+ self.x_host = value\n \n def get_remote_addr(self, forwarded_for):\n \"\"\"Get the real ``remote_addr`` by looking backwards ``x_for``\n", "issue": "Enable x_host and x_proto by default through deprecated proxyfix?\nWhen #1314 refactored ProxyFix, it aliased num_proxies to x_for only (as that was the only one which previously supported multiple proxies).\r\n\r\nHowever in doing so it disabled forwarding of host and scheme, [which were forwarded by default up to 0.14](https://github.com/pallets/werkzeug/blob/f1d15a2c7f35dcde0e52787c9fdf9ea6f6405308/werkzeug/contrib/fixers.py#L146-L151), which breaks systems relying on this.\r\n\r\nI'd like to set x_host=1 and x_proto=1 in 0.15 IFF the user goes through the deprecated wrapper (`werkzeug.contrib.fixers.ProxyFix`) in order to restore the old behaviour, and maybe add a small warning about this behavioural change to the deprecation message (or possibly in one of the `versionchanged` blocks of the new version). Would that be acceptable / interesting or a waste of time?\n", "before_files": [{"content": "\"\"\"\nX-Forwarded-For Proxy Fix\n=========================\n\nThis module provides a middleware that adjusts the WSGI environ based on\n``X-Forwarded-`` headers that proxies in front of an application may\nset.\n\nWhen an application is running behind a proxy server, WSGI may see the\nrequest as coming from that server rather than the real client. Proxies\nset various headers to track where the request actually came from.\n\nThis middleware should only be applied if the application is actually\nbehind such a proxy, and should be configured with the number of proxies\nthat are chained in front of it. Not all proxies set all the headers.\nSince incoming headers can be faked, you must set how many proxies are\nsetting each header so the middleware knows what to trust.\n\n.. autoclass:: ProxyFix\n\n:copyright: 2007 Pallets\n:license: BSD-3-Clause\n\"\"\"\nimport warnings\n\n\nclass ProxyFix(object):\n \"\"\"Adjust the WSGI environ based on ``X-Forwarded-`` that proxies in\n front of the application may set.\n\n - ``X-Forwarded-For`` sets ``REMOTE_ADDR``.\n - ``X-Forwarded-Proto`` sets ``wsgi.url_scheme``.\n - ``X-Forwarded-Host`` sets ``HTTP_HOST``, ``SERVER_NAME``, and\n ``SERVER_PORT``.\n - ``X-Forwarded-Port`` sets ``HTTP_HOST`` and ``SERVER_PORT``.\n - ``X-Forwarded-Prefix`` sets ``SCRIPT_NAME``.\n\n You must tell the middleware how many proxies set each header so it\n knows what values to trust. It is a security issue to trust values\n that came from the client rather than a proxy.\n\n The original values of the headers are stored in the WSGI\n environ as ``werkzeug.proxy_fix.orig``, a dict.\n\n :param app: The WSGI application to wrap.\n :param x_for: Number of values to trust for ``X-Forwarded-For``.\n :param x_proto: Number of values to trust for ``X-Forwarded-Proto``.\n :param x_host: Number of values to trust for ``X-Forwarded-Host``.\n :param x_port: Number of values to trust for ``X-Forwarded-Port``.\n :param x_prefix: Number of values to trust for\n ``X-Forwarded-Prefix``.\n :param num_proxies: Deprecated, use ``x_for`` instead.\n\n .. code-block:: python\n\n from werkzeug.middleware.proxy_fix import ProxyFix\n # App is behind one proxy that sets the -For and -Host headers.\n app = ProxyFix(app, x_for=1, x_host=1)\n\n .. versionchanged:: 0.15\n All headers support multiple values. The ``num_proxies``\n argument is deprecated. Each header is configured with a\n separate number of trusted proxies.\n\n .. versionchanged:: 0.15\n Original WSGI environ values are stored in the\n ``werkzeug.proxy_fix.orig`` dict. ``orig_remote_addr``,\n ``orig_wsgi_url_scheme``, and ``orig_http_host`` are deprecated\n and will be removed in 1.0.\n\n .. versionchanged:: 0.15\n Support ``X-Forwarded-Port`` and ``X-Forwarded-Prefix``.\n\n .. versionchanged:: 0.15\n ``X-Fowarded-Host`` and ``X-Forwarded-Port`` modify\n ``SERVER_NAME`` and ``SERVER_PORT``.\n \"\"\"\n\n def __init__(\n self, app, num_proxies=None, x_for=1, x_proto=0, x_host=0, x_port=0, x_prefix=0\n ):\n self.app = app\n self.x_for = x_for\n self.x_proto = x_proto\n self.x_host = x_host\n self.x_port = x_port\n self.x_prefix = x_prefix\n self.num_proxies = num_proxies\n\n @property\n def num_proxies(self):\n \"\"\"The number of proxies setting ``X-Forwarded-For`` in front\n of the application.\n\n .. deprecated:: 0.15\n A separate number of trusted proxies is configured for each\n header. ``num_proxies`` maps to ``x_for``. This method will\n be removed in 1.0.\n\n :internal:\n \"\"\"\n warnings.warn(\n \"'num_proxies' is deprecated as of version 0.15 and will be\"\n \" removed in version 1.0. Use 'x_for' instead.\",\n DeprecationWarning,\n stacklevel=2,\n )\n return self.x_for\n\n @num_proxies.setter\n def num_proxies(self, value):\n if value is not None:\n warnings.warn(\n \"'num_proxies' is deprecated as of version 0.15 and\"\n \" will be removed in version 1.0. Use 'x_for' instead.\",\n DeprecationWarning,\n stacklevel=2,\n )\n self.x_for = value\n\n def get_remote_addr(self, forwarded_for):\n \"\"\"Get the real ``remote_addr`` by looking backwards ``x_for``\n number of values in the ``X-Forwarded-For`` header.\n\n :param forwarded_for: List of values parsed from the\n ``X-Forwarded-For`` header.\n :return: The real ``remote_addr``, or ``None`` if there were not\n at least ``x_for`` values.\n\n .. deprecated:: 0.15\n This is handled internally for each header. This method will\n be removed in 1.0.\n\n .. versionchanged:: 0.9\n Use ``num_proxies`` instead of always picking the first\n value.\n\n .. versionadded:: 0.8\n \"\"\"\n warnings.warn(\n \"'get_remote_addr' is deprecated as of version 0.15 and\"\n \" will be removed in version 1.0. It is now handled\"\n \" internally for each header.\",\n DeprecationWarning,\n )\n return self._get_trusted_comma(self.x_for, \",\".join(forwarded_for))\n\n def _get_trusted_comma(self, trusted, value):\n \"\"\"Get the real value from a comma-separated header based on the\n configured number of trusted proxies.\n\n :param trusted: Number of values to trust in the header.\n :param value: Header value to parse.\n :return: The real value, or ``None`` if there are fewer values\n than the number of trusted proxies.\n\n .. versionadded:: 0.15\n \"\"\"\n if not (trusted and value):\n return\n values = [x.strip() for x in value.split(\",\")]\n if len(values) >= trusted:\n return values[-trusted]\n\n def __call__(self, environ, start_response):\n \"\"\"Modify the WSGI environ based on the various ``Forwarded``\n headers before calling the wrapped application. Store the\n original environ values in ``werkzeug.proxy_fix.orig_{key}``.\n \"\"\"\n environ_get = environ.get\n orig_remote_addr = environ_get(\"REMOTE_ADDR\")\n orig_wsgi_url_scheme = environ_get(\"wsgi.url_scheme\")\n orig_http_host = environ_get(\"HTTP_HOST\")\n environ.update(\n {\n \"werkzeug.proxy_fix.orig\": {\n \"REMOTE_ADDR\": orig_remote_addr,\n \"wsgi.url_scheme\": orig_wsgi_url_scheme,\n \"HTTP_HOST\": orig_http_host,\n \"SERVER_NAME\": environ_get(\"SERVER_NAME\"),\n \"SERVER_PORT\": environ_get(\"SERVER_PORT\"),\n \"SCRIPT_NAME\": environ_get(\"SCRIPT_NAME\"),\n },\n # todo: remove deprecated keys\n \"werkzeug.proxy_fix.orig_remote_addr\": orig_remote_addr,\n \"werkzeug.proxy_fix.orig_wsgi_url_scheme\": orig_wsgi_url_scheme,\n \"werkzeug.proxy_fix.orig_http_host\": orig_http_host,\n }\n )\n\n x_for = self._get_trusted_comma(self.x_for, environ_get(\"HTTP_X_FORWARDED_FOR\"))\n if x_for:\n environ[\"REMOTE_ADDR\"] = x_for\n\n x_proto = self._get_trusted_comma(\n self.x_proto, environ_get(\"HTTP_X_FORWARDED_PROTO\")\n )\n if x_proto:\n environ[\"wsgi.url_scheme\"] = x_proto\n\n x_host = self._get_trusted_comma(\n self.x_host, environ_get(\"HTTP_X_FORWARDED_HOST\")\n )\n if x_host:\n environ[\"HTTP_HOST\"] = x_host\n parts = x_host.split(\":\", 1)\n environ[\"SERVER_NAME\"] = parts[0]\n if len(parts) == 2:\n environ[\"SERVER_PORT\"] = parts[1]\n\n x_port = self._get_trusted_comma(\n self.x_port, environ_get(\"HTTP_X_FORWARDED_PORT\")\n )\n if x_port:\n host = environ.get(\"HTTP_HOST\")\n if host:\n parts = host.split(\":\", 1)\n host = parts[0] if len(parts) == 2 else host\n environ[\"HTTP_HOST\"] = \"%s:%s\" % (host, x_port)\n environ[\"SERVER_PORT\"] = x_port\n\n x_prefix = self._get_trusted_comma(\n self.x_prefix, environ_get(\"HTTP_X_FORWARDED_PREFIX\")\n )\n if x_prefix:\n environ[\"SCRIPT_NAME\"] = x_prefix\n\n return self.app(environ, start_response)\n", "path": "src/werkzeug/middleware/proxy_fix.py"}], "after_files": [{"content": "\"\"\"\nX-Forwarded-For Proxy Fix\n=========================\n\nThis module provides a middleware that adjusts the WSGI environ based on\n``X-Forwarded-`` headers that proxies in front of an application may\nset.\n\nWhen an application is running behind a proxy server, WSGI may see the\nrequest as coming from that server rather than the real client. Proxies\nset various headers to track where the request actually came from.\n\nThis middleware should only be applied if the application is actually\nbehind such a proxy, and should be configured with the number of proxies\nthat are chained in front of it. Not all proxies set all the headers.\nSince incoming headers can be faked, you must set how many proxies are\nsetting each header so the middleware knows what to trust.\n\n.. autoclass:: ProxyFix\n\n:copyright: 2007 Pallets\n:license: BSD-3-Clause\n\"\"\"\nimport warnings\n\n\nclass ProxyFix(object):\n \"\"\"Adjust the WSGI environ based on ``X-Forwarded-`` that proxies in\n front of the application may set.\n\n - ``X-Forwarded-For`` sets ``REMOTE_ADDR``.\n - ``X-Forwarded-Proto`` sets ``wsgi.url_scheme``.\n - ``X-Forwarded-Host`` sets ``HTTP_HOST``, ``SERVER_NAME``, and\n ``SERVER_PORT``.\n - ``X-Forwarded-Port`` sets ``HTTP_HOST`` and ``SERVER_PORT``.\n - ``X-Forwarded-Prefix`` sets ``SCRIPT_NAME``.\n\n You must tell the middleware how many proxies set each header so it\n knows what values to trust. It is a security issue to trust values\n that came from the client rather than a proxy.\n\n The original values of the headers are stored in the WSGI\n environ as ``werkzeug.proxy_fix.orig``, a dict.\n\n :param app: The WSGI application to wrap.\n :param x_for: Number of values to trust for ``X-Forwarded-For``.\n :param x_proto: Number of values to trust for ``X-Forwarded-Proto``.\n :param x_host: Number of values to trust for ``X-Forwarded-Host``.\n :param x_port: Number of values to trust for ``X-Forwarded-Port``.\n :param x_prefix: Number of values to trust for\n ``X-Forwarded-Prefix``.\n :param num_proxies: Deprecated, use ``x_for`` instead.\n\n .. code-block:: python\n\n from werkzeug.middleware.proxy_fix import ProxyFix\n # App is behind one proxy that sets the -For and -Host headers.\n app = ProxyFix(app, x_for=1, x_host=1)\n\n .. versionchanged:: 0.15\n All headers support multiple values. The ``num_proxies``\n argument is deprecated. Each header is configured with a\n separate number of trusted proxies.\n\n .. versionchanged:: 0.15\n Original WSGI environ values are stored in the\n ``werkzeug.proxy_fix.orig`` dict. ``orig_remote_addr``,\n ``orig_wsgi_url_scheme``, and ``orig_http_host`` are deprecated\n and will be removed in 1.0.\n\n .. versionchanged:: 0.15\n Support ``X-Forwarded-Port`` and ``X-Forwarded-Prefix``.\n\n .. versionchanged:: 0.15\n ``X-Fowarded-Host`` and ``X-Forwarded-Port`` modify\n ``SERVER_NAME`` and ``SERVER_PORT``.\n \"\"\"\n\n def __init__(\n self, app, num_proxies=None, x_for=1, x_proto=1, x_host=0, x_port=0, x_prefix=0\n ):\n self.app = app\n self.x_for = x_for\n self.x_proto = x_proto\n self.x_host = x_host\n self.x_port = x_port\n self.x_prefix = x_prefix\n self.num_proxies = num_proxies\n\n @property\n def num_proxies(self):\n \"\"\"The number of proxies setting ``X-Forwarded-For`` in front\n of the application.\n\n .. deprecated:: 0.15\n A separate number of trusted proxies is configured for each\n header. ``num_proxies`` maps to ``x_for``. This method will\n be removed in 1.0.\n\n :internal:\n \"\"\"\n warnings.warn(\n \"'num_proxies' is deprecated as of version 0.15 and will be\"\n \" removed in version 1.0. Use 'x_for' instead.\",\n DeprecationWarning,\n stacklevel=2,\n )\n return self.x_for\n\n @num_proxies.setter\n def num_proxies(self, value):\n if value is not None:\n warnings.warn(\n \"'num_proxies' is deprecated as of version 0.15 and\"\n \" will be removed in version 1.0. Use\"\n \" 'x_for={value}, x_proto={value}, x_host={value}'\"\n \" instead.\".format(value=value),\n DeprecationWarning,\n stacklevel=2,\n )\n self.x_for = value\n self.x_proto = value\n self.x_host = value\n\n def get_remote_addr(self, forwarded_for):\n \"\"\"Get the real ``remote_addr`` by looking backwards ``x_for``\n number of values in the ``X-Forwarded-For`` header.\n\n :param forwarded_for: List of values parsed from the\n ``X-Forwarded-For`` header.\n :return: The real ``remote_addr``, or ``None`` if there were not\n at least ``x_for`` values.\n\n .. deprecated:: 0.15\n This is handled internally for each header. This method will\n be removed in 1.0.\n\n .. versionchanged:: 0.9\n Use ``num_proxies`` instead of always picking the first\n value.\n\n .. versionadded:: 0.8\n \"\"\"\n warnings.warn(\n \"'get_remote_addr' is deprecated as of version 0.15 and\"\n \" will be removed in version 1.0. It is now handled\"\n \" internally for each header.\",\n DeprecationWarning,\n )\n return self._get_trusted_comma(self.x_for, \",\".join(forwarded_for))\n\n def _get_trusted_comma(self, trusted, value):\n \"\"\"Get the real value from a comma-separated header based on the\n configured number of trusted proxies.\n\n :param trusted: Number of values to trust in the header.\n :param value: Header value to parse.\n :return: The real value, or ``None`` if there are fewer values\n than the number of trusted proxies.\n\n .. versionadded:: 0.15\n \"\"\"\n if not (trusted and value):\n return\n values = [x.strip() for x in value.split(\",\")]\n if len(values) >= trusted:\n return values[-trusted]\n\n def __call__(self, environ, start_response):\n \"\"\"Modify the WSGI environ based on the various ``Forwarded``\n headers before calling the wrapped application. Store the\n original environ values in ``werkzeug.proxy_fix.orig_{key}``.\n \"\"\"\n environ_get = environ.get\n orig_remote_addr = environ_get(\"REMOTE_ADDR\")\n orig_wsgi_url_scheme = environ_get(\"wsgi.url_scheme\")\n orig_http_host = environ_get(\"HTTP_HOST\")\n environ.update(\n {\n \"werkzeug.proxy_fix.orig\": {\n \"REMOTE_ADDR\": orig_remote_addr,\n \"wsgi.url_scheme\": orig_wsgi_url_scheme,\n \"HTTP_HOST\": orig_http_host,\n \"SERVER_NAME\": environ_get(\"SERVER_NAME\"),\n \"SERVER_PORT\": environ_get(\"SERVER_PORT\"),\n \"SCRIPT_NAME\": environ_get(\"SCRIPT_NAME\"),\n },\n # todo: remove deprecated keys\n \"werkzeug.proxy_fix.orig_remote_addr\": orig_remote_addr,\n \"werkzeug.proxy_fix.orig_wsgi_url_scheme\": orig_wsgi_url_scheme,\n \"werkzeug.proxy_fix.orig_http_host\": orig_http_host,\n }\n )\n\n x_for = self._get_trusted_comma(self.x_for, environ_get(\"HTTP_X_FORWARDED_FOR\"))\n if x_for:\n environ[\"REMOTE_ADDR\"] = x_for\n\n x_proto = self._get_trusted_comma(\n self.x_proto, environ_get(\"HTTP_X_FORWARDED_PROTO\")\n )\n if x_proto:\n environ[\"wsgi.url_scheme\"] = x_proto\n\n x_host = self._get_trusted_comma(\n self.x_host, environ_get(\"HTTP_X_FORWARDED_HOST\")\n )\n if x_host:\n environ[\"HTTP_HOST\"] = x_host\n parts = x_host.split(\":\", 1)\n environ[\"SERVER_NAME\"] = parts[0]\n if len(parts) == 2:\n environ[\"SERVER_PORT\"] = parts[1]\n\n x_port = self._get_trusted_comma(\n self.x_port, environ_get(\"HTTP_X_FORWARDED_PORT\")\n )\n if x_port:\n host = environ.get(\"HTTP_HOST\")\n if host:\n parts = host.split(\":\", 1)\n host = parts[0] if len(parts) == 2 else host\n environ[\"HTTP_HOST\"] = \"%s:%s\" % (host, x_port)\n environ[\"SERVER_PORT\"] = x_port\n\n x_prefix = self._get_trusted_comma(\n self.x_prefix, environ_get(\"HTTP_X_FORWARDED_PREFIX\")\n )\n if x_prefix:\n environ[\"SCRIPT_NAME\"] = x_prefix\n\n return self.app(environ, start_response)\n", "path": "src/werkzeug/middleware/proxy_fix.py"}]}
3,163
332
gh_patches_debug_39431
rasdani/github-patches
git_diff
ansible__ansible-13647
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Role search path precedence change in v2 In 1.9 the path precedence appears to be: 1. playbook_dir/roles/ 2. playbook_dir/ In 2.0 the path precedence has changed to what appears to be 1. playbook_dir/ 2. playbook_dir/roles/ To validate this, the file tree looks like: ``` . ├── ansible.cfg ├── roles │   └── test │   └── tasks │   └── main.yml ├── test │   └── tasks │   └── main.yml └── test.yml ``` ansible.cfg in this case is completely empty. #### test.yml ``` --- - hosts: all gather_facts: false roles: - test ``` Each `main.yml` should contain a single debug, each with different message such as: ``` - debug: msg=in_roles_dir ``` ``` - debug: msg=in_playbook_dir ``` This causes a completely non-role related directory in the `playbook_dir` to potentially, override an actual role of the same name in the `roles` directory. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lib/ansible/playbook/role/definition.py` Content: ``` 1 # (c) 2014 Michael DeHaan, <[email protected]> 2 # 3 # This file is part of Ansible 4 # 5 # Ansible is free software: you can redistribute it and/or modify 6 # it under the terms of the GNU General Public License as published by 7 # the Free Software Foundation, either version 3 of the License, or 8 # (at your option) any later version. 9 # 10 # Ansible is distributed in the hope that it will be useful, 11 # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 # GNU General Public License for more details. 14 # 15 # You should have received a copy of the GNU General Public License 16 # along with Ansible. If not, see <http://www.gnu.org/licenses/>. 17 18 # Make coding more python3-ish 19 from __future__ import (absolute_import, division, print_function) 20 __metaclass__ = type 21 22 from ansible.compat.six import iteritems, string_types 23 24 import os 25 26 from ansible import constants as C 27 from ansible.errors import AnsibleError 28 from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping 29 from ansible.playbook.attribute import Attribute, FieldAttribute 30 from ansible.playbook.base import Base 31 from ansible.playbook.become import Become 32 from ansible.playbook.conditional import Conditional 33 from ansible.playbook.taggable import Taggable 34 from ansible.template import Templar 35 from ansible.utils.path import unfrackpath 36 37 38 __all__ = ['RoleDefinition'] 39 40 41 class RoleDefinition(Base, Become, Conditional, Taggable): 42 43 _role = FieldAttribute(isa='string') 44 45 def __init__(self, play=None, role_basedir=None, variable_manager=None, loader=None): 46 self._play = play 47 self._variable_manager = variable_manager 48 self._loader = loader 49 50 self._role_path = None 51 self._role_basedir = role_basedir 52 self._role_params = dict() 53 super(RoleDefinition, self).__init__() 54 55 #def __repr__(self): 56 # return 'ROLEDEF: ' + self._attributes.get('role', '<no name set>') 57 58 @staticmethod 59 def load(data, variable_manager=None, loader=None): 60 raise AnsibleError("not implemented") 61 62 def preprocess_data(self, ds): 63 # role names that are simply numbers can be parsed by PyYAML 64 # as integers even when quoted, so turn it into a string type 65 if isinstance(ds, int): 66 ds = "%s" % ds 67 68 assert isinstance(ds, dict) or isinstance(ds, string_types) or isinstance(ds, AnsibleBaseYAMLObject) 69 70 if isinstance(ds, dict): 71 ds = super(RoleDefinition, self).preprocess_data(ds) 72 73 # save the original ds for use later 74 self._ds = ds 75 76 # we create a new data structure here, using the same 77 # object used internally by the YAML parsing code so we 78 # can preserve file:line:column information if it exists 79 new_ds = AnsibleMapping() 80 if isinstance(ds, AnsibleBaseYAMLObject): 81 new_ds.ansible_pos = ds.ansible_pos 82 83 # first we pull the role name out of the data structure, 84 # and then use that to determine the role path (which may 85 # result in a new role name, if it was a file path) 86 role_name = self._load_role_name(ds) 87 (role_name, role_path) = self._load_role_path(role_name) 88 89 # next, we split the role params out from the valid role 90 # attributes and update the new datastructure with that 91 # result and the role name 92 if isinstance(ds, dict): 93 (new_role_def, role_params) = self._split_role_params(ds) 94 new_ds.update(new_role_def) 95 self._role_params = role_params 96 97 # set the role name in the new ds 98 new_ds['role'] = role_name 99 100 # we store the role path internally 101 self._role_path = role_path 102 103 # and return the cleaned-up data structure 104 return new_ds 105 106 def _load_role_name(self, ds): 107 ''' 108 Returns the role name (either the role: or name: field) from 109 the role definition, or (when the role definition is a simple 110 string), just that string 111 ''' 112 113 if isinstance(ds, string_types): 114 return ds 115 116 role_name = ds.get('role', ds.get('name')) 117 if not role_name or not isinstance(role_name, string_types): 118 raise AnsibleError('role definitions must contain a role name', obj=ds) 119 120 # if we have the required datastructures, and if the role_name 121 # contains a variable, try and template it now 122 if self._variable_manager: 123 all_vars = self._variable_manager.get_vars(loader=self._loader, play=self._play) 124 templar = Templar(loader=self._loader, variables=all_vars) 125 if templar._contains_vars(role_name): 126 role_name = templar.template(role_name) 127 128 return role_name 129 130 def _load_role_path(self, role_name): 131 ''' 132 the 'role', as specified in the ds (or as a bare string), can either 133 be a simple name or a full path. If it is a full path, we use the 134 basename as the role name, otherwise we take the name as-given and 135 append it to the default role path 136 ''' 137 138 role_path = unfrackpath(role_name) 139 140 if self._loader.path_exists(role_path): 141 role_name = os.path.basename(role_name) 142 return (role_name, role_path) 143 else: 144 # we always start the search for roles in the base directory of the playbook 145 role_search_paths = [ 146 os.path.join(self._loader.get_basedir(), u'roles'), 147 u'./roles', 148 self._loader.get_basedir(), 149 u'./' 150 ] 151 152 # also search in the configured roles path 153 if C.DEFAULT_ROLES_PATH: 154 configured_paths = C.DEFAULT_ROLES_PATH.split(os.pathsep) 155 role_search_paths.extend(configured_paths) 156 157 # finally, append the roles basedir, if it was set, so we can 158 # search relative to that directory for dependent roles 159 if self._role_basedir: 160 role_search_paths.append(self._role_basedir) 161 162 # create a templar class to template the dependency names, in 163 # case they contain variables 164 if self._variable_manager is not None: 165 all_vars = self._variable_manager.get_vars(loader=self._loader, play=self._play) 166 else: 167 all_vars = dict() 168 169 templar = Templar(loader=self._loader, variables=all_vars) 170 role_name = templar.template(role_name) 171 172 # now iterate through the possible paths and return the first one we find 173 for path in role_search_paths: 174 path = templar.template(path) 175 role_path = unfrackpath(os.path.join(path, role_name)) 176 if self._loader.path_exists(role_path): 177 return (role_name, role_path) 178 179 raise AnsibleError("the role '%s' was not found in %s" % (role_name, ":".join(role_search_paths)), obj=self._ds) 180 181 def _split_role_params(self, ds): 182 ''' 183 Splits any random role params off from the role spec and store 184 them in a dictionary of params for parsing later 185 ''' 186 187 role_def = dict() 188 role_params = dict() 189 base_attribute_names = frozenset(self._get_base_attributes().keys()) 190 for (key, value) in iteritems(ds): 191 # use the list of FieldAttribute values to determine what is and is not 192 # an extra parameter for this role (or sub-class of this role) 193 if key not in base_attribute_names: 194 # this key does not match a field attribute, so it must be a role param 195 role_params[key] = value 196 else: 197 # this is a field attribute, so copy it over directly 198 role_def[key] = value 199 200 return (role_def, role_params) 201 202 def get_role_params(self): 203 return self._role_params.copy() 204 205 def get_role_path(self): 206 return self._role_path 207 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lib/ansible/playbook/role/definition.py b/lib/ansible/playbook/role/definition.py --- a/lib/ansible/playbook/role/definition.py +++ b/lib/ansible/playbook/role/definition.py @@ -135,46 +135,44 @@ append it to the default role path ''' - role_path = unfrackpath(role_name) + # we always start the search for roles in the base directory of the playbook + role_search_paths = [ + os.path.join(self._loader.get_basedir(), u'roles'), + self._loader.get_basedir(), + ] + + # also search in the configured roles path + if C.DEFAULT_ROLES_PATH: + configured_paths = C.DEFAULT_ROLES_PATH.split(os.pathsep) + role_search_paths.extend(configured_paths) + + # finally, append the roles basedir, if it was set, so we can + # search relative to that directory for dependent roles + if self._role_basedir: + role_search_paths.append(self._role_basedir) + + # create a templar class to template the dependency names, in + # case they contain variables + if self._variable_manager is not None: + all_vars = self._variable_manager.get_vars(loader=self._loader, play=self._play) + else: + all_vars = dict() + + templar = Templar(loader=self._loader, variables=all_vars) + role_name = templar.template(role_name) + + # now iterate through the possible paths and return the first one we find + for path in role_search_paths: + path = templar.template(path) + role_path = unfrackpath(os.path.join(path, role_name)) + if self._loader.path_exists(role_path): + return (role_name, role_path) + # if not found elsewhere try to extract path from name + role_path = unfrackpath(role_name) if self._loader.path_exists(role_path): role_name = os.path.basename(role_name) return (role_name, role_path) - else: - # we always start the search for roles in the base directory of the playbook - role_search_paths = [ - os.path.join(self._loader.get_basedir(), u'roles'), - u'./roles', - self._loader.get_basedir(), - u'./' - ] - - # also search in the configured roles path - if C.DEFAULT_ROLES_PATH: - configured_paths = C.DEFAULT_ROLES_PATH.split(os.pathsep) - role_search_paths.extend(configured_paths) - - # finally, append the roles basedir, if it was set, so we can - # search relative to that directory for dependent roles - if self._role_basedir: - role_search_paths.append(self._role_basedir) - - # create a templar class to template the dependency names, in - # case they contain variables - if self._variable_manager is not None: - all_vars = self._variable_manager.get_vars(loader=self._loader, play=self._play) - else: - all_vars = dict() - - templar = Templar(loader=self._loader, variables=all_vars) - role_name = templar.template(role_name) - - # now iterate through the possible paths and return the first one we find - for path in role_search_paths: - path = templar.template(path) - role_path = unfrackpath(os.path.join(path, role_name)) - if self._loader.path_exists(role_path): - return (role_name, role_path) raise AnsibleError("the role '%s' was not found in %s" % (role_name, ":".join(role_search_paths)), obj=self._ds)
{"golden_diff": "diff --git a/lib/ansible/playbook/role/definition.py b/lib/ansible/playbook/role/definition.py\n--- a/lib/ansible/playbook/role/definition.py\n+++ b/lib/ansible/playbook/role/definition.py\n@@ -135,46 +135,44 @@\n append it to the default role path\n '''\n \n- role_path = unfrackpath(role_name)\n+ # we always start the search for roles in the base directory of the playbook\n+ role_search_paths = [\n+ os.path.join(self._loader.get_basedir(), u'roles'),\n+ self._loader.get_basedir(),\n+ ]\n+\n+ # also search in the configured roles path\n+ if C.DEFAULT_ROLES_PATH:\n+ configured_paths = C.DEFAULT_ROLES_PATH.split(os.pathsep)\n+ role_search_paths.extend(configured_paths)\n+\n+ # finally, append the roles basedir, if it was set, so we can\n+ # search relative to that directory for dependent roles\n+ if self._role_basedir:\n+ role_search_paths.append(self._role_basedir)\n+\n+ # create a templar class to template the dependency names, in\n+ # case they contain variables\n+ if self._variable_manager is not None:\n+ all_vars = self._variable_manager.get_vars(loader=self._loader, play=self._play)\n+ else:\n+ all_vars = dict()\n+\n+ templar = Templar(loader=self._loader, variables=all_vars)\n+ role_name = templar.template(role_name)\n+\n+ # now iterate through the possible paths and return the first one we find\n+ for path in role_search_paths:\n+ path = templar.template(path)\n+ role_path = unfrackpath(os.path.join(path, role_name))\n+ if self._loader.path_exists(role_path):\n+ return (role_name, role_path)\n \n+ # if not found elsewhere try to extract path from name\n+ role_path = unfrackpath(role_name)\n if self._loader.path_exists(role_path):\n role_name = os.path.basename(role_name)\n return (role_name, role_path)\n- else:\n- # we always start the search for roles in the base directory of the playbook\n- role_search_paths = [\n- os.path.join(self._loader.get_basedir(), u'roles'),\n- u'./roles',\n- self._loader.get_basedir(),\n- u'./'\n- ]\n-\n- # also search in the configured roles path\n- if C.DEFAULT_ROLES_PATH:\n- configured_paths = C.DEFAULT_ROLES_PATH.split(os.pathsep)\n- role_search_paths.extend(configured_paths)\n-\n- # finally, append the roles basedir, if it was set, so we can\n- # search relative to that directory for dependent roles\n- if self._role_basedir:\n- role_search_paths.append(self._role_basedir)\n-\n- # create a templar class to template the dependency names, in\n- # case they contain variables\n- if self._variable_manager is not None:\n- all_vars = self._variable_manager.get_vars(loader=self._loader, play=self._play)\n- else:\n- all_vars = dict()\n-\n- templar = Templar(loader=self._loader, variables=all_vars)\n- role_name = templar.template(role_name)\n-\n- # now iterate through the possible paths and return the first one we find\n- for path in role_search_paths:\n- path = templar.template(path)\n- role_path = unfrackpath(os.path.join(path, role_name))\n- if self._loader.path_exists(role_path):\n- return (role_name, role_path)\n \n raise AnsibleError(\"the role '%s' was not found in %s\" % (role_name, \":\".join(role_search_paths)), obj=self._ds)\n", "issue": "Role search path precedence change in v2\nIn 1.9 the path precedence appears to be:\n1. playbook_dir/roles/\n2. playbook_dir/\n\nIn 2.0 the path precedence has changed to what appears to be\n1. playbook_dir/\n2. playbook_dir/roles/\n\nTo validate this, the file tree looks like:\n\n```\n.\n\u251c\u2500\u2500 ansible.cfg\n\u251c\u2500\u2500 roles\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 test\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 tasks\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 main.yml\n\u251c\u2500\u2500 test\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 tasks\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 main.yml\n\u2514\u2500\u2500 test.yml\n```\n\nansible.cfg in this case is completely empty.\n#### test.yml\n\n```\n\n---\n- hosts: all\n gather_facts: false\n roles:\n - test\n```\n\nEach `main.yml` should contain a single debug, each with different message such as:\n\n```\n- debug: msg=in_roles_dir\n```\n\n```\n- debug: msg=in_playbook_dir\n```\n\nThis causes a completely non-role related directory in the `playbook_dir` to potentially, override an actual role of the same name in the `roles` directory.\n\n", "before_files": [{"content": "# (c) 2014 Michael DeHaan, <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\n# Make coding more python3-ish\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nfrom ansible.compat.six import iteritems, string_types\n\nimport os\n\nfrom ansible import constants as C\nfrom ansible.errors import AnsibleError\nfrom ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping\nfrom ansible.playbook.attribute import Attribute, FieldAttribute\nfrom ansible.playbook.base import Base\nfrom ansible.playbook.become import Become\nfrom ansible.playbook.conditional import Conditional\nfrom ansible.playbook.taggable import Taggable\nfrom ansible.template import Templar\nfrom ansible.utils.path import unfrackpath\n\n\n__all__ = ['RoleDefinition']\n\n\nclass RoleDefinition(Base, Become, Conditional, Taggable):\n\n _role = FieldAttribute(isa='string')\n\n def __init__(self, play=None, role_basedir=None, variable_manager=None, loader=None):\n self._play = play\n self._variable_manager = variable_manager\n self._loader = loader\n\n self._role_path = None\n self._role_basedir = role_basedir\n self._role_params = dict()\n super(RoleDefinition, self).__init__()\n\n #def __repr__(self):\n # return 'ROLEDEF: ' + self._attributes.get('role', '<no name set>')\n\n @staticmethod\n def load(data, variable_manager=None, loader=None):\n raise AnsibleError(\"not implemented\")\n\n def preprocess_data(self, ds):\n # role names that are simply numbers can be parsed by PyYAML\n # as integers even when quoted, so turn it into a string type\n if isinstance(ds, int):\n ds = \"%s\" % ds\n\n assert isinstance(ds, dict) or isinstance(ds, string_types) or isinstance(ds, AnsibleBaseYAMLObject)\n\n if isinstance(ds, dict):\n ds = super(RoleDefinition, self).preprocess_data(ds)\n\n # save the original ds for use later\n self._ds = ds\n\n # we create a new data structure here, using the same\n # object used internally by the YAML parsing code so we\n # can preserve file:line:column information if it exists\n new_ds = AnsibleMapping()\n if isinstance(ds, AnsibleBaseYAMLObject):\n new_ds.ansible_pos = ds.ansible_pos\n\n # first we pull the role name out of the data structure,\n # and then use that to determine the role path (which may\n # result in a new role name, if it was a file path)\n role_name = self._load_role_name(ds)\n (role_name, role_path) = self._load_role_path(role_name)\n\n # next, we split the role params out from the valid role\n # attributes and update the new datastructure with that\n # result and the role name\n if isinstance(ds, dict):\n (new_role_def, role_params) = self._split_role_params(ds)\n new_ds.update(new_role_def)\n self._role_params = role_params\n\n # set the role name in the new ds\n new_ds['role'] = role_name\n\n # we store the role path internally\n self._role_path = role_path\n\n # and return the cleaned-up data structure\n return new_ds\n\n def _load_role_name(self, ds):\n '''\n Returns the role name (either the role: or name: field) from\n the role definition, or (when the role definition is a simple\n string), just that string\n '''\n\n if isinstance(ds, string_types):\n return ds\n\n role_name = ds.get('role', ds.get('name'))\n if not role_name or not isinstance(role_name, string_types):\n raise AnsibleError('role definitions must contain a role name', obj=ds)\n\n # if we have the required datastructures, and if the role_name\n # contains a variable, try and template it now\n if self._variable_manager:\n all_vars = self._variable_manager.get_vars(loader=self._loader, play=self._play)\n templar = Templar(loader=self._loader, variables=all_vars)\n if templar._contains_vars(role_name):\n role_name = templar.template(role_name)\n\n return role_name\n\n def _load_role_path(self, role_name):\n '''\n the 'role', as specified in the ds (or as a bare string), can either\n be a simple name or a full path. If it is a full path, we use the\n basename as the role name, otherwise we take the name as-given and\n append it to the default role path\n '''\n\n role_path = unfrackpath(role_name)\n\n if self._loader.path_exists(role_path):\n role_name = os.path.basename(role_name)\n return (role_name, role_path)\n else:\n # we always start the search for roles in the base directory of the playbook\n role_search_paths = [\n os.path.join(self._loader.get_basedir(), u'roles'),\n u'./roles',\n self._loader.get_basedir(),\n u'./'\n ]\n\n # also search in the configured roles path\n if C.DEFAULT_ROLES_PATH:\n configured_paths = C.DEFAULT_ROLES_PATH.split(os.pathsep)\n role_search_paths.extend(configured_paths)\n\n # finally, append the roles basedir, if it was set, so we can\n # search relative to that directory for dependent roles\n if self._role_basedir:\n role_search_paths.append(self._role_basedir)\n\n # create a templar class to template the dependency names, in\n # case they contain variables\n if self._variable_manager is not None:\n all_vars = self._variable_manager.get_vars(loader=self._loader, play=self._play)\n else:\n all_vars = dict()\n\n templar = Templar(loader=self._loader, variables=all_vars)\n role_name = templar.template(role_name)\n\n # now iterate through the possible paths and return the first one we find\n for path in role_search_paths:\n path = templar.template(path)\n role_path = unfrackpath(os.path.join(path, role_name))\n if self._loader.path_exists(role_path):\n return (role_name, role_path)\n\n raise AnsibleError(\"the role '%s' was not found in %s\" % (role_name, \":\".join(role_search_paths)), obj=self._ds)\n\n def _split_role_params(self, ds):\n '''\n Splits any random role params off from the role spec and store\n them in a dictionary of params for parsing later\n '''\n\n role_def = dict()\n role_params = dict()\n base_attribute_names = frozenset(self._get_base_attributes().keys())\n for (key, value) in iteritems(ds):\n # use the list of FieldAttribute values to determine what is and is not\n # an extra parameter for this role (or sub-class of this role)\n if key not in base_attribute_names:\n # this key does not match a field attribute, so it must be a role param\n role_params[key] = value\n else:\n # this is a field attribute, so copy it over directly\n role_def[key] = value\n\n return (role_def, role_params)\n\n def get_role_params(self):\n return self._role_params.copy()\n\n def get_role_path(self):\n return self._role_path\n", "path": "lib/ansible/playbook/role/definition.py"}], "after_files": [{"content": "# (c) 2014 Michael DeHaan, <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\n# Make coding more python3-ish\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nfrom ansible.compat.six import iteritems, string_types\n\nimport os\n\nfrom ansible import constants as C\nfrom ansible.errors import AnsibleError\nfrom ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping\nfrom ansible.playbook.attribute import Attribute, FieldAttribute\nfrom ansible.playbook.base import Base\nfrom ansible.playbook.become import Become\nfrom ansible.playbook.conditional import Conditional\nfrom ansible.playbook.taggable import Taggable\nfrom ansible.template import Templar\nfrom ansible.utils.path import unfrackpath\n\n\n__all__ = ['RoleDefinition']\n\n\nclass RoleDefinition(Base, Become, Conditional, Taggable):\n\n _role = FieldAttribute(isa='string')\n\n def __init__(self, play=None, role_basedir=None, variable_manager=None, loader=None):\n self._play = play\n self._variable_manager = variable_manager\n self._loader = loader\n\n self._role_path = None\n self._role_basedir = role_basedir\n self._role_params = dict()\n super(RoleDefinition, self).__init__()\n\n #def __repr__(self):\n # return 'ROLEDEF: ' + self._attributes.get('role', '<no name set>')\n\n @staticmethod\n def load(data, variable_manager=None, loader=None):\n raise AnsibleError(\"not implemented\")\n\n def preprocess_data(self, ds):\n # role names that are simply numbers can be parsed by PyYAML\n # as integers even when quoted, so turn it into a string type\n if isinstance(ds, int):\n ds = \"%s\" % ds\n\n assert isinstance(ds, dict) or isinstance(ds, string_types) or isinstance(ds, AnsibleBaseYAMLObject)\n\n if isinstance(ds, dict):\n ds = super(RoleDefinition, self).preprocess_data(ds)\n\n # save the original ds for use later\n self._ds = ds\n\n # we create a new data structure here, using the same\n # object used internally by the YAML parsing code so we\n # can preserve file:line:column information if it exists\n new_ds = AnsibleMapping()\n if isinstance(ds, AnsibleBaseYAMLObject):\n new_ds.ansible_pos = ds.ansible_pos\n\n # first we pull the role name out of the data structure,\n # and then use that to determine the role path (which may\n # result in a new role name, if it was a file path)\n role_name = self._load_role_name(ds)\n (role_name, role_path) = self._load_role_path(role_name)\n\n # next, we split the role params out from the valid role\n # attributes and update the new datastructure with that\n # result and the role name\n if isinstance(ds, dict):\n (new_role_def, role_params) = self._split_role_params(ds)\n new_ds.update(new_role_def)\n self._role_params = role_params\n\n # set the role name in the new ds\n new_ds['role'] = role_name\n\n # we store the role path internally\n self._role_path = role_path\n\n # and return the cleaned-up data structure\n return new_ds\n\n def _load_role_name(self, ds):\n '''\n Returns the role name (either the role: or name: field) from\n the role definition, or (when the role definition is a simple\n string), just that string\n '''\n\n if isinstance(ds, string_types):\n return ds\n\n role_name = ds.get('role', ds.get('name'))\n if not role_name or not isinstance(role_name, string_types):\n raise AnsibleError('role definitions must contain a role name', obj=ds)\n\n # if we have the required datastructures, and if the role_name\n # contains a variable, try and template it now\n if self._variable_manager:\n all_vars = self._variable_manager.get_vars(loader=self._loader, play=self._play)\n templar = Templar(loader=self._loader, variables=all_vars)\n if templar._contains_vars(role_name):\n role_name = templar.template(role_name)\n\n return role_name\n\n def _load_role_path(self, role_name):\n '''\n the 'role', as specified in the ds (or as a bare string), can either\n be a simple name or a full path. If it is a full path, we use the\n basename as the role name, otherwise we take the name as-given and\n append it to the default role path\n '''\n\n # we always start the search for roles in the base directory of the playbook\n role_search_paths = [\n os.path.join(self._loader.get_basedir(), u'roles'),\n self._loader.get_basedir(),\n ]\n\n # also search in the configured roles path\n if C.DEFAULT_ROLES_PATH:\n configured_paths = C.DEFAULT_ROLES_PATH.split(os.pathsep)\n role_search_paths.extend(configured_paths)\n\n # finally, append the roles basedir, if it was set, so we can\n # search relative to that directory for dependent roles\n if self._role_basedir:\n role_search_paths.append(self._role_basedir)\n\n # create a templar class to template the dependency names, in\n # case they contain variables\n if self._variable_manager is not None:\n all_vars = self._variable_manager.get_vars(loader=self._loader, play=self._play)\n else:\n all_vars = dict()\n\n templar = Templar(loader=self._loader, variables=all_vars)\n role_name = templar.template(role_name)\n\n # now iterate through the possible paths and return the first one we find\n for path in role_search_paths:\n path = templar.template(path)\n role_path = unfrackpath(os.path.join(path, role_name))\n if self._loader.path_exists(role_path):\n return (role_name, role_path)\n\n # if not found elsewhere try to extract path from name\n role_path = unfrackpath(role_name)\n if self._loader.path_exists(role_path):\n role_name = os.path.basename(role_name)\n return (role_name, role_path)\n\n raise AnsibleError(\"the role '%s' was not found in %s\" % (role_name, \":\".join(role_search_paths)), obj=self._ds)\n\n def _split_role_params(self, ds):\n '''\n Splits any random role params off from the role spec and store\n them in a dictionary of params for parsing later\n '''\n\n role_def = dict()\n role_params = dict()\n base_attribute_names = frozenset(self._get_base_attributes().keys())\n for (key, value) in iteritems(ds):\n # use the list of FieldAttribute values to determine what is and is not\n # an extra parameter for this role (or sub-class of this role)\n if key not in base_attribute_names:\n # this key does not match a field attribute, so it must be a role param\n role_params[key] = value\n else:\n # this is a field attribute, so copy it over directly\n role_def[key] = value\n\n return (role_def, role_params)\n\n def get_role_params(self):\n return self._role_params.copy()\n\n def get_role_path(self):\n return self._role_path\n", "path": "lib/ansible/playbook/role/definition.py"}]}
2,825
847
gh_patches_debug_39559
rasdani/github-patches
git_diff
kserve__kserve-3313
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- inference request fails when sending with less number of features than the total model features for KServe 0.11 lightgbm /kind bug **What steps did you take and what happened:** [A clear and concise description of what the bug is.] In 0.10 LightGBM we are able to send a dictionary of features that is less than the total number of features used. In 0.11 we are getting the following errors ``` {“error”:“The number of features in data (1) is not the same as it was in training data (30).\nYou can set ``predict_disable_shape_check=true` ``` **What did you expect to happen:** One suspicion is that in 0.11 we are passing the feature names and seems predict is able to pad zero for the missing features https://github.com/kserve/kserve/blob/release-0.10/python/lgbserver/lgbserver/model.py#L58 However in 0.11 we are no longer passing the feature names https://github.com/kserve/kserve/blob/release-0.11/python/lgbserver/lgbserver/model.py **What's the InferenceService yaml:** [To help us debug please run `kubectl get isvc $name -n $namespace -oyaml` and paste the output] **Anything else you would like to add:** [Miscellaneous information that will assist in solving the issue.] **Environment:** - Istio Version: - Knative Version: - KServe Version: - Kubeflow version: - Cloud Environment:[k8s_istio/istio_dex/gcp_basic_auth/gcp_iap/aws/aws_cognito/ibm] - Minikube/Kind version: - Kubernetes version: (use `kubectl version`): - OS (e.g. from `/etc/os-release`): --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `python/kserve/kserve/utils/utils.py` Content: ``` 1 # Copyright 2021 The KServe Authors. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import os 16 import sys 17 import uuid 18 from kserve.protocol.grpc.grpc_predict_v2_pb2 import InferParameter 19 from typing import Dict, Union, List 20 21 from kserve.utils.numpy_codec import from_np_dtype 22 import pandas as pd 23 import numpy as np 24 import psutil 25 from cloudevents.conversion import to_binary, to_structured 26 from cloudevents.http import CloudEvent 27 from grpc import ServicerContext 28 from kserve.protocol.infer_type import InferOutput, InferRequest, InferResponse 29 30 31 def is_running_in_k8s(): 32 return os.path.isdir('/var/run/secrets/kubernetes.io/') 33 34 35 def get_current_k8s_namespace(): 36 with open('/var/run/secrets/kubernetes.io/serviceaccount/namespace', 'r') as f: 37 return f.readline() 38 39 40 def get_default_target_namespace(): 41 if not is_running_in_k8s(): 42 return 'default' 43 return get_current_k8s_namespace() 44 45 46 def get_isvc_namespace(inferenceservice): 47 return inferenceservice.metadata.namespace or get_default_target_namespace() 48 49 50 def get_ig_namespace(inferencegraph): 51 return inferencegraph.metadata.namespace or get_default_target_namespace() 52 53 54 def cpu_count(): 55 """Get the available CPU count for this system. 56 Takes the minimum value from the following locations: 57 - Total system cpus available on the host. 58 - CPU Affinity (if set) 59 - Cgroups limit (if set) 60 """ 61 count = os.cpu_count() 62 63 # Check CPU affinity if available 64 try: 65 affinity_count = len(psutil.Process().cpu_affinity()) 66 if affinity_count > 0: 67 count = min(count, affinity_count) 68 except Exception: 69 pass 70 71 # Check cgroups if available 72 if sys.platform == "linux": 73 try: 74 with open("/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_quota_us") as f: 75 quota = int(f.read()) 76 with open("/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_period_us") as f: 77 period = int(f.read()) 78 cgroups_count = int(quota / period) 79 if cgroups_count > 0: 80 count = min(count, cgroups_count) 81 except Exception: 82 pass 83 84 return count 85 86 87 def is_structured_cloudevent(body: Dict) -> bool: 88 """Returns True if the JSON request body resembles a structured CloudEvent""" 89 return "time" in body \ 90 and "type" in body \ 91 and "source" in body \ 92 and "id" in body \ 93 and "specversion" in body \ 94 and "data" in body 95 96 97 def create_response_cloudevent(model_name: str, response: Dict, req_attributes: Dict, 98 binary_event=False) -> tuple: 99 ce_attributes = {} 100 101 if os.getenv("CE_MERGE", "false").lower() == "true": 102 if binary_event: 103 ce_attributes = req_attributes 104 if "datacontenttype" in ce_attributes: # Optional field so must check 105 del ce_attributes["datacontenttype"] 106 else: 107 ce_attributes = req_attributes 108 109 # Remove these fields so we generate new ones 110 del ce_attributes["id"] 111 del ce_attributes["time"] 112 113 ce_attributes["type"] = os.getenv("CE_TYPE", "io.kserve.inference.response") 114 ce_attributes["source"] = os.getenv("CE_SOURCE", f"io.kserve.inference.{model_name}") 115 116 event = CloudEvent(ce_attributes, response) 117 118 if binary_event: 119 event_headers, event_body = to_binary(event) 120 else: 121 event_headers, event_body = to_structured(event) 122 123 return event_headers, event_body 124 125 126 def generate_uuid() -> str: 127 return str(uuid.uuid4()) 128 129 130 def to_headers(context: ServicerContext) -> Dict[str, str]: 131 metadata = context.invocation_metadata() 132 if hasattr(context, "trailing_metadata"): 133 metadata += context.trailing_metadata() 134 headers = {} 135 for metadatum in metadata: 136 headers[metadatum.key] = metadatum.value 137 138 return headers 139 140 141 def get_predict_input(payload: Union[Dict, InferRequest]) -> Union[np.ndarray, pd.DataFrame]: 142 if isinstance(payload, Dict): 143 instances = payload["inputs"] if "inputs" in payload else payload["instances"] 144 if len(instances) == 0: 145 return np.array(instances) 146 if isinstance(instances[0], Dict): 147 dfs = [] 148 for input in instances: 149 dfs.append(pd.DataFrame(input)) 150 inputs = pd.concat(dfs, axis=0) 151 return inputs 152 153 # Handles the following input format 154 # {'inputs': [ 155 # [{'sepal_width_(cm)': 3.5}, 156 # {'petal_length_(cm)': 1.4}, 157 # {'petal_width_(cm)': 0.2}, 158 # {'sepal_length_(cm)': 5.1}] 159 # ]} 160 elif isinstance(instances[0], List) and len(instances[0]) != 0 and isinstance(instances[0][0], Dict): 161 data: Dict[str, List] = {} 162 for instance in instances: 163 for item in instance: 164 for key, val in item.items(): 165 if key in data: 166 data[key].append(val) 167 else: 168 data[key] = [val] 169 return pd.DataFrame(data) 170 else: 171 return np.array(instances) 172 173 elif isinstance(payload, InferRequest): 174 content_type = '' 175 parameters = payload.parameters 176 if parameters: 177 if isinstance(parameters.get("content_type"), InferParameter): 178 # for v2 grpc, we get InferParameter obj eg: {"content_type": string_param: "pd"} 179 content_type = str(parameters.get("content_type").string_param) 180 else: 181 # for v2 http, we get string eg: {"content_type": "pd"} 182 content_type = parameters.get("content_type") 183 184 if content_type == "pd": 185 return payload.as_dataframe() 186 else: 187 input = payload.inputs[0] 188 return input.as_numpy() 189 190 191 def get_predict_response(payload: Union[Dict, InferRequest], result: Union[np.ndarray, pd.DataFrame], 192 model_name: str) -> Union[Dict, InferResponse]: 193 if isinstance(payload, Dict): 194 infer_outputs = result 195 if isinstance(result, pd.DataFrame): 196 infer_outputs = [] 197 for label, row in result.iterrows(): 198 infer_outputs.append(row.to_dict()) 199 elif isinstance(result, np.ndarray): 200 infer_outputs = result.tolist() 201 return {"predictions": infer_outputs} 202 elif isinstance(payload, InferRequest): 203 infer_outputs = [] 204 if isinstance(result, pd.DataFrame): 205 for col in result.columns: 206 infer_output = InferOutput( 207 name=col, 208 shape=list(result[col].shape), 209 datatype=from_np_dtype(result[col].dtype), 210 data=result[col].tolist() 211 ) 212 infer_outputs.append(infer_output) 213 else: 214 infer_output = InferOutput( 215 name="output-0", 216 shape=list(result.shape), 217 datatype=from_np_dtype(result.dtype), 218 data=result.flatten().tolist() 219 ) 220 infer_outputs.append(infer_output) 221 return InferResponse( 222 model_name=model_name, 223 infer_outputs=infer_outputs, 224 response_id=payload.id if payload.id else generate_uuid() 225 ) 226 227 228 def strtobool(val: str) -> bool: 229 """Convert a string representation of truth to True or False. 230 231 True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values 232 are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if 233 'val' is anything else. 234 235 Adapted from deprecated `distutils` 236 https://github.com/python/cpython/blob/3.11/Lib/distutils/util.py 237 """ 238 val = val.lower() 239 if val in ('y', 'yes', 't', 'true', 'on', '1'): 240 return True 241 elif val in ('n', 'no', 'f', 'false', 'off', '0'): 242 return False 243 else: 244 raise ValueError("invalid truth value %r" % (val,)) 245 ``` Path: `python/lgbserver/lgbserver/model.py` Content: ``` 1 # Copyright 2021 The KServe Authors. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 16 import os 17 from typing import Dict, Union 18 19 from lightgbm import Booster 20 21 from kserve import Model 22 from kserve.errors import InferenceError, ModelMissingError 23 from kserve.storage import Storage 24 25 from kserve.protocol.infer_type import InferRequest, InferResponse 26 from kserve.utils.utils import get_predict_input, get_predict_response 27 28 MODEL_EXTENSIONS = (".bst") 29 30 31 class LightGBMModel(Model): 32 def __init__(self, name: str, model_dir: str, nthread: int, 33 booster: Booster = None): 34 super().__init__(name) 35 self.name = name 36 self.model_dir = model_dir 37 self.nthread = nthread 38 if booster is not None: 39 self._booster = booster 40 self.ready = True 41 42 def load(self) -> bool: 43 model_path = Storage.download(self.model_dir) 44 model_files = [] 45 for file in os.listdir(model_path): 46 file_path = os.path.join(model_path, file) 47 if os.path.isfile(file_path) and file.endswith(MODEL_EXTENSIONS): 48 model_files.append(file_path) 49 if len(model_files) == 0: 50 raise ModelMissingError(model_path) 51 elif len(model_files) > 1: 52 raise RuntimeError('More than one model file is detected, ' 53 f'Only one is allowed within model_dir: {model_files}') 54 self._booster = Booster(params={"nthread": self.nthread}, 55 model_file=model_files[0]) 56 self.ready = True 57 return self.ready 58 59 def predict(self, payload: Union[Dict, InferRequest], headers: Dict[str, str] = None) -> Union[Dict, InferResponse]: 60 try: 61 instances = get_predict_input(payload) 62 result = self._booster.predict(instances) 63 return get_predict_response(payload, result, self.name) 64 except Exception as e: 65 raise InferenceError(str(e)) 66 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/python/kserve/kserve/utils/utils.py b/python/kserve/kserve/utils/utils.py --- a/python/kserve/kserve/utils/utils.py +++ b/python/kserve/kserve/utils/utils.py @@ -87,11 +87,11 @@ def is_structured_cloudevent(body: Dict) -> bool: """Returns True if the JSON request body resembles a structured CloudEvent""" return "time" in body \ - and "type" in body \ - and "source" in body \ - and "id" in body \ - and "specversion" in body \ - and "data" in body + and "type" in body \ + and "source" in body \ + and "id" in body \ + and "specversion" in body \ + and "data" in body def create_response_cloudevent(model_name: str, response: Dict, req_attributes: Dict, @@ -138,35 +138,18 @@ return headers -def get_predict_input(payload: Union[Dict, InferRequest]) -> Union[np.ndarray, pd.DataFrame]: +def get_predict_input(payload: Union[Dict, InferRequest], columns: List = None) -> Union[np.ndarray, pd.DataFrame]: if isinstance(payload, Dict): instances = payload["inputs"] if "inputs" in payload else payload["instances"] if len(instances) == 0: return np.array(instances) - if isinstance(instances[0], Dict): + if isinstance(instances[0], Dict) or ( + isinstance(instances[0], List) and len(instances[0]) != 0 and isinstance(instances[0][0], Dict)): dfs = [] for input in instances: - dfs.append(pd.DataFrame(input)) + dfs.append(pd.DataFrame(input, columns=columns)) inputs = pd.concat(dfs, axis=0) return inputs - - # Handles the following input format - # {'inputs': [ - # [{'sepal_width_(cm)': 3.5}, - # {'petal_length_(cm)': 1.4}, - # {'petal_width_(cm)': 0.2}, - # {'sepal_length_(cm)': 5.1}] - # ]} - elif isinstance(instances[0], List) and len(instances[0]) != 0 and isinstance(instances[0][0], Dict): - data: Dict[str, List] = {} - for instance in instances: - for item in instance: - for key, val in item.items(): - if key in data: - data[key].append(val) - else: - data[key] = [val] - return pd.DataFrame(data) else: return np.array(instances) diff --git a/python/lgbserver/lgbserver/model.py b/python/lgbserver/lgbserver/model.py --- a/python/lgbserver/lgbserver/model.py +++ b/python/lgbserver/lgbserver/model.py @@ -58,7 +58,7 @@ def predict(self, payload: Union[Dict, InferRequest], headers: Dict[str, str] = None) -> Union[Dict, InferResponse]: try: - instances = get_predict_input(payload) + instances = get_predict_input(payload, columns=self._booster.feature_name()) result = self._booster.predict(instances) return get_predict_response(payload, result, self.name) except Exception as e:
{"golden_diff": "diff --git a/python/kserve/kserve/utils/utils.py b/python/kserve/kserve/utils/utils.py\n--- a/python/kserve/kserve/utils/utils.py\n+++ b/python/kserve/kserve/utils/utils.py\n@@ -87,11 +87,11 @@\n def is_structured_cloudevent(body: Dict) -> bool:\n \"\"\"Returns True if the JSON request body resembles a structured CloudEvent\"\"\"\n return \"time\" in body \\\n- and \"type\" in body \\\n- and \"source\" in body \\\n- and \"id\" in body \\\n- and \"specversion\" in body \\\n- and \"data\" in body\n+ and \"type\" in body \\\n+ and \"source\" in body \\\n+ and \"id\" in body \\\n+ and \"specversion\" in body \\\n+ and \"data\" in body\n \n \n def create_response_cloudevent(model_name: str, response: Dict, req_attributes: Dict,\n@@ -138,35 +138,18 @@\n return headers\n \n \n-def get_predict_input(payload: Union[Dict, InferRequest]) -> Union[np.ndarray, pd.DataFrame]:\n+def get_predict_input(payload: Union[Dict, InferRequest], columns: List = None) -> Union[np.ndarray, pd.DataFrame]:\n if isinstance(payload, Dict):\n instances = payload[\"inputs\"] if \"inputs\" in payload else payload[\"instances\"]\n if len(instances) == 0:\n return np.array(instances)\n- if isinstance(instances[0], Dict):\n+ if isinstance(instances[0], Dict) or (\n+ isinstance(instances[0], List) and len(instances[0]) != 0 and isinstance(instances[0][0], Dict)):\n dfs = []\n for input in instances:\n- dfs.append(pd.DataFrame(input))\n+ dfs.append(pd.DataFrame(input, columns=columns))\n inputs = pd.concat(dfs, axis=0)\n return inputs\n-\n- # Handles the following input format\n- # {'inputs': [\n- # [{'sepal_width_(cm)': 3.5},\n- # {'petal_length_(cm)': 1.4},\n- # {'petal_width_(cm)': 0.2},\n- # {'sepal_length_(cm)': 5.1}]\n- # ]}\n- elif isinstance(instances[0], List) and len(instances[0]) != 0 and isinstance(instances[0][0], Dict):\n- data: Dict[str, List] = {}\n- for instance in instances:\n- for item in instance:\n- for key, val in item.items():\n- if key in data:\n- data[key].append(val)\n- else:\n- data[key] = [val]\n- return pd.DataFrame(data)\n else:\n return np.array(instances)\n \ndiff --git a/python/lgbserver/lgbserver/model.py b/python/lgbserver/lgbserver/model.py\n--- a/python/lgbserver/lgbserver/model.py\n+++ b/python/lgbserver/lgbserver/model.py\n@@ -58,7 +58,7 @@\n \n def predict(self, payload: Union[Dict, InferRequest], headers: Dict[str, str] = None) -> Union[Dict, InferResponse]:\n try:\n- instances = get_predict_input(payload)\n+ instances = get_predict_input(payload, columns=self._booster.feature_name())\n result = self._booster.predict(instances)\n return get_predict_response(payload, result, self.name)\n except Exception as e:\n", "issue": " inference request fails when sending with less number of features than the total model features for KServe 0.11 lightgbm\n/kind bug\r\n\r\n**What steps did you take and what happened:**\r\n[A clear and concise description of what the bug is.]\r\n\r\nIn 0.10 LightGBM we are able to send a dictionary of features that is less than the total number of features used. \r\n\r\nIn 0.11 we are getting the following errors\r\n```\r\n {\u201cerror\u201d:\u201cThe number of features in data (1) is not the same as it was in training data (30).\\nYou can set ``predict_disable_shape_check=true`\r\n```\r\n\r\n\r\n\r\n**What did you expect to happen:**\r\nOne suspicion is that in 0.11 we are passing the feature names and seems predict is able to pad zero for the missing features\r\n\r\nhttps://github.com/kserve/kserve/blob/release-0.10/python/lgbserver/lgbserver/model.py#L58\r\n\r\n\r\nHowever in 0.11 we are no longer passing the feature names\r\nhttps://github.com/kserve/kserve/blob/release-0.11/python/lgbserver/lgbserver/model.py\r\n\r\n\r\n**What's the InferenceService yaml:**\r\n[To help us debug please run `kubectl get isvc $name -n $namespace -oyaml` and paste the output]\r\n\r\n**Anything else you would like to add:**\r\n[Miscellaneous information that will assist in solving the issue.]\r\n\r\n\r\n**Environment:**\r\n\r\n- Istio Version:\r\n- Knative Version:\r\n- KServe Version:\r\n- Kubeflow version:\r\n- Cloud Environment:[k8s_istio/istio_dex/gcp_basic_auth/gcp_iap/aws/aws_cognito/ibm]\r\n- Minikube/Kind version:\r\n- Kubernetes version: (use `kubectl version`):\r\n- OS (e.g. from `/etc/os-release`):\r\n\n", "before_files": [{"content": "# Copyright 2021 The KServe Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nimport uuid\nfrom kserve.protocol.grpc.grpc_predict_v2_pb2 import InferParameter\nfrom typing import Dict, Union, List\n\nfrom kserve.utils.numpy_codec import from_np_dtype\nimport pandas as pd\nimport numpy as np\nimport psutil\nfrom cloudevents.conversion import to_binary, to_structured\nfrom cloudevents.http import CloudEvent\nfrom grpc import ServicerContext\nfrom kserve.protocol.infer_type import InferOutput, InferRequest, InferResponse\n\n\ndef is_running_in_k8s():\n return os.path.isdir('/var/run/secrets/kubernetes.io/')\n\n\ndef get_current_k8s_namespace():\n with open('/var/run/secrets/kubernetes.io/serviceaccount/namespace', 'r') as f:\n return f.readline()\n\n\ndef get_default_target_namespace():\n if not is_running_in_k8s():\n return 'default'\n return get_current_k8s_namespace()\n\n\ndef get_isvc_namespace(inferenceservice):\n return inferenceservice.metadata.namespace or get_default_target_namespace()\n\n\ndef get_ig_namespace(inferencegraph):\n return inferencegraph.metadata.namespace or get_default_target_namespace()\n\n\ndef cpu_count():\n \"\"\"Get the available CPU count for this system.\n Takes the minimum value from the following locations:\n - Total system cpus available on the host.\n - CPU Affinity (if set)\n - Cgroups limit (if set)\n \"\"\"\n count = os.cpu_count()\n\n # Check CPU affinity if available\n try:\n affinity_count = len(psutil.Process().cpu_affinity())\n if affinity_count > 0:\n count = min(count, affinity_count)\n except Exception:\n pass\n\n # Check cgroups if available\n if sys.platform == \"linux\":\n try:\n with open(\"/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_quota_us\") as f:\n quota = int(f.read())\n with open(\"/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_period_us\") as f:\n period = int(f.read())\n cgroups_count = int(quota / period)\n if cgroups_count > 0:\n count = min(count, cgroups_count)\n except Exception:\n pass\n\n return count\n\n\ndef is_structured_cloudevent(body: Dict) -> bool:\n \"\"\"Returns True if the JSON request body resembles a structured CloudEvent\"\"\"\n return \"time\" in body \\\n and \"type\" in body \\\n and \"source\" in body \\\n and \"id\" in body \\\n and \"specversion\" in body \\\n and \"data\" in body\n\n\ndef create_response_cloudevent(model_name: str, response: Dict, req_attributes: Dict,\n binary_event=False) -> tuple:\n ce_attributes = {}\n\n if os.getenv(\"CE_MERGE\", \"false\").lower() == \"true\":\n if binary_event:\n ce_attributes = req_attributes\n if \"datacontenttype\" in ce_attributes: # Optional field so must check\n del ce_attributes[\"datacontenttype\"]\n else:\n ce_attributes = req_attributes\n\n # Remove these fields so we generate new ones\n del ce_attributes[\"id\"]\n del ce_attributes[\"time\"]\n\n ce_attributes[\"type\"] = os.getenv(\"CE_TYPE\", \"io.kserve.inference.response\")\n ce_attributes[\"source\"] = os.getenv(\"CE_SOURCE\", f\"io.kserve.inference.{model_name}\")\n\n event = CloudEvent(ce_attributes, response)\n\n if binary_event:\n event_headers, event_body = to_binary(event)\n else:\n event_headers, event_body = to_structured(event)\n\n return event_headers, event_body\n\n\ndef generate_uuid() -> str:\n return str(uuid.uuid4())\n\n\ndef to_headers(context: ServicerContext) -> Dict[str, str]:\n metadata = context.invocation_metadata()\n if hasattr(context, \"trailing_metadata\"):\n metadata += context.trailing_metadata()\n headers = {}\n for metadatum in metadata:\n headers[metadatum.key] = metadatum.value\n\n return headers\n\n\ndef get_predict_input(payload: Union[Dict, InferRequest]) -> Union[np.ndarray, pd.DataFrame]:\n if isinstance(payload, Dict):\n instances = payload[\"inputs\"] if \"inputs\" in payload else payload[\"instances\"]\n if len(instances) == 0:\n return np.array(instances)\n if isinstance(instances[0], Dict):\n dfs = []\n for input in instances:\n dfs.append(pd.DataFrame(input))\n inputs = pd.concat(dfs, axis=0)\n return inputs\n\n # Handles the following input format\n # {'inputs': [\n # [{'sepal_width_(cm)': 3.5},\n # {'petal_length_(cm)': 1.4},\n # {'petal_width_(cm)': 0.2},\n # {'sepal_length_(cm)': 5.1}]\n # ]}\n elif isinstance(instances[0], List) and len(instances[0]) != 0 and isinstance(instances[0][0], Dict):\n data: Dict[str, List] = {}\n for instance in instances:\n for item in instance:\n for key, val in item.items():\n if key in data:\n data[key].append(val)\n else:\n data[key] = [val]\n return pd.DataFrame(data)\n else:\n return np.array(instances)\n\n elif isinstance(payload, InferRequest):\n content_type = ''\n parameters = payload.parameters\n if parameters:\n if isinstance(parameters.get(\"content_type\"), InferParameter):\n # for v2 grpc, we get InferParameter obj eg: {\"content_type\": string_param: \"pd\"}\n content_type = str(parameters.get(\"content_type\").string_param)\n else:\n # for v2 http, we get string eg: {\"content_type\": \"pd\"}\n content_type = parameters.get(\"content_type\")\n\n if content_type == \"pd\":\n return payload.as_dataframe()\n else:\n input = payload.inputs[0]\n return input.as_numpy()\n\n\ndef get_predict_response(payload: Union[Dict, InferRequest], result: Union[np.ndarray, pd.DataFrame],\n model_name: str) -> Union[Dict, InferResponse]:\n if isinstance(payload, Dict):\n infer_outputs = result\n if isinstance(result, pd.DataFrame):\n infer_outputs = []\n for label, row in result.iterrows():\n infer_outputs.append(row.to_dict())\n elif isinstance(result, np.ndarray):\n infer_outputs = result.tolist()\n return {\"predictions\": infer_outputs}\n elif isinstance(payload, InferRequest):\n infer_outputs = []\n if isinstance(result, pd.DataFrame):\n for col in result.columns:\n infer_output = InferOutput(\n name=col,\n shape=list(result[col].shape),\n datatype=from_np_dtype(result[col].dtype),\n data=result[col].tolist()\n )\n infer_outputs.append(infer_output)\n else:\n infer_output = InferOutput(\n name=\"output-0\",\n shape=list(result.shape),\n datatype=from_np_dtype(result.dtype),\n data=result.flatten().tolist()\n )\n infer_outputs.append(infer_output)\n return InferResponse(\n model_name=model_name,\n infer_outputs=infer_outputs,\n response_id=payload.id if payload.id else generate_uuid()\n )\n\n\ndef strtobool(val: str) -> bool:\n \"\"\"Convert a string representation of truth to True or False.\n\n True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values\n are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if\n 'val' is anything else.\n\n Adapted from deprecated `distutils`\n https://github.com/python/cpython/blob/3.11/Lib/distutils/util.py\n \"\"\"\n val = val.lower()\n if val in ('y', 'yes', 't', 'true', 'on', '1'):\n return True\n elif val in ('n', 'no', 'f', 'false', 'off', '0'):\n return False\n else:\n raise ValueError(\"invalid truth value %r\" % (val,))\n", "path": "python/kserve/kserve/utils/utils.py"}, {"content": "# Copyright 2021 The KServe Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport os\nfrom typing import Dict, Union\n\nfrom lightgbm import Booster\n\nfrom kserve import Model\nfrom kserve.errors import InferenceError, ModelMissingError\nfrom kserve.storage import Storage\n\nfrom kserve.protocol.infer_type import InferRequest, InferResponse\nfrom kserve.utils.utils import get_predict_input, get_predict_response\n\nMODEL_EXTENSIONS = (\".bst\")\n\n\nclass LightGBMModel(Model):\n def __init__(self, name: str, model_dir: str, nthread: int,\n booster: Booster = None):\n super().__init__(name)\n self.name = name\n self.model_dir = model_dir\n self.nthread = nthread\n if booster is not None:\n self._booster = booster\n self.ready = True\n\n def load(self) -> bool:\n model_path = Storage.download(self.model_dir)\n model_files = []\n for file in os.listdir(model_path):\n file_path = os.path.join(model_path, file)\n if os.path.isfile(file_path) and file.endswith(MODEL_EXTENSIONS):\n model_files.append(file_path)\n if len(model_files) == 0:\n raise ModelMissingError(model_path)\n elif len(model_files) > 1:\n raise RuntimeError('More than one model file is detected, '\n f'Only one is allowed within model_dir: {model_files}')\n self._booster = Booster(params={\"nthread\": self.nthread},\n model_file=model_files[0])\n self.ready = True\n return self.ready\n\n def predict(self, payload: Union[Dict, InferRequest], headers: Dict[str, str] = None) -> Union[Dict, InferResponse]:\n try:\n instances = get_predict_input(payload)\n result = self._booster.predict(instances)\n return get_predict_response(payload, result, self.name)\n except Exception as e:\n raise InferenceError(str(e))\n", "path": "python/lgbserver/lgbserver/model.py"}], "after_files": [{"content": "# Copyright 2021 The KServe Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nimport uuid\nfrom kserve.protocol.grpc.grpc_predict_v2_pb2 import InferParameter\nfrom typing import Dict, Union, List\n\nfrom kserve.utils.numpy_codec import from_np_dtype\nimport pandas as pd\nimport numpy as np\nimport psutil\nfrom cloudevents.conversion import to_binary, to_structured\nfrom cloudevents.http import CloudEvent\nfrom grpc import ServicerContext\nfrom kserve.protocol.infer_type import InferOutput, InferRequest, InferResponse\n\n\ndef is_running_in_k8s():\n return os.path.isdir('/var/run/secrets/kubernetes.io/')\n\n\ndef get_current_k8s_namespace():\n with open('/var/run/secrets/kubernetes.io/serviceaccount/namespace', 'r') as f:\n return f.readline()\n\n\ndef get_default_target_namespace():\n if not is_running_in_k8s():\n return 'default'\n return get_current_k8s_namespace()\n\n\ndef get_isvc_namespace(inferenceservice):\n return inferenceservice.metadata.namespace or get_default_target_namespace()\n\n\ndef get_ig_namespace(inferencegraph):\n return inferencegraph.metadata.namespace or get_default_target_namespace()\n\n\ndef cpu_count():\n \"\"\"Get the available CPU count for this system.\n Takes the minimum value from the following locations:\n - Total system cpus available on the host.\n - CPU Affinity (if set)\n - Cgroups limit (if set)\n \"\"\"\n count = os.cpu_count()\n\n # Check CPU affinity if available\n try:\n affinity_count = len(psutil.Process().cpu_affinity())\n if affinity_count > 0:\n count = min(count, affinity_count)\n except Exception:\n pass\n\n # Check cgroups if available\n if sys.platform == \"linux\":\n try:\n with open(\"/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_quota_us\") as f:\n quota = int(f.read())\n with open(\"/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_period_us\") as f:\n period = int(f.read())\n cgroups_count = int(quota / period)\n if cgroups_count > 0:\n count = min(count, cgroups_count)\n except Exception:\n pass\n\n return count\n\n\ndef is_structured_cloudevent(body: Dict) -> bool:\n \"\"\"Returns True if the JSON request body resembles a structured CloudEvent\"\"\"\n return \"time\" in body \\\n and \"type\" in body \\\n and \"source\" in body \\\n and \"id\" in body \\\n and \"specversion\" in body \\\n and \"data\" in body\n\n\ndef create_response_cloudevent(model_name: str, response: Dict, req_attributes: Dict,\n binary_event=False) -> tuple:\n ce_attributes = {}\n\n if os.getenv(\"CE_MERGE\", \"false\").lower() == \"true\":\n if binary_event:\n ce_attributes = req_attributes\n if \"datacontenttype\" in ce_attributes: # Optional field so must check\n del ce_attributes[\"datacontenttype\"]\n else:\n ce_attributes = req_attributes\n\n # Remove these fields so we generate new ones\n del ce_attributes[\"id\"]\n del ce_attributes[\"time\"]\n\n ce_attributes[\"type\"] = os.getenv(\"CE_TYPE\", \"io.kserve.inference.response\")\n ce_attributes[\"source\"] = os.getenv(\"CE_SOURCE\", f\"io.kserve.inference.{model_name}\")\n\n event = CloudEvent(ce_attributes, response)\n\n if binary_event:\n event_headers, event_body = to_binary(event)\n else:\n event_headers, event_body = to_structured(event)\n\n return event_headers, event_body\n\n\ndef generate_uuid() -> str:\n return str(uuid.uuid4())\n\n\ndef to_headers(context: ServicerContext) -> Dict[str, str]:\n metadata = context.invocation_metadata()\n if hasattr(context, \"trailing_metadata\"):\n metadata += context.trailing_metadata()\n headers = {}\n for metadatum in metadata:\n headers[metadatum.key] = metadatum.value\n\n return headers\n\n\ndef get_predict_input(payload: Union[Dict, InferRequest], columns: List = None) -> Union[np.ndarray, pd.DataFrame]:\n if isinstance(payload, Dict):\n instances = payload[\"inputs\"] if \"inputs\" in payload else payload[\"instances\"]\n if len(instances) == 0:\n return np.array(instances)\n if isinstance(instances[0], Dict) or (\n isinstance(instances[0], List) and len(instances[0]) != 0 and isinstance(instances[0][0], Dict)):\n dfs = []\n for input in instances:\n dfs.append(pd.DataFrame(input, columns=columns))\n inputs = pd.concat(dfs, axis=0)\n return inputs\n else:\n return np.array(instances)\n\n elif isinstance(payload, InferRequest):\n content_type = ''\n parameters = payload.parameters\n if parameters:\n if isinstance(parameters.get(\"content_type\"), InferParameter):\n # for v2 grpc, we get InferParameter obj eg: {\"content_type\": string_param: \"pd\"}\n content_type = str(parameters.get(\"content_type\").string_param)\n else:\n # for v2 http, we get string eg: {\"content_type\": \"pd\"}\n content_type = parameters.get(\"content_type\")\n\n if content_type == \"pd\":\n return payload.as_dataframe()\n else:\n input = payload.inputs[0]\n return input.as_numpy()\n\n\ndef get_predict_response(payload: Union[Dict, InferRequest], result: Union[np.ndarray, pd.DataFrame],\n model_name: str) -> Union[Dict, InferResponse]:\n if isinstance(payload, Dict):\n infer_outputs = result\n if isinstance(result, pd.DataFrame):\n infer_outputs = []\n for label, row in result.iterrows():\n infer_outputs.append(row.to_dict())\n elif isinstance(result, np.ndarray):\n infer_outputs = result.tolist()\n return {\"predictions\": infer_outputs}\n elif isinstance(payload, InferRequest):\n infer_outputs = []\n if isinstance(result, pd.DataFrame):\n for col in result.columns:\n infer_output = InferOutput(\n name=col,\n shape=list(result[col].shape),\n datatype=from_np_dtype(result[col].dtype),\n data=result[col].tolist()\n )\n infer_outputs.append(infer_output)\n else:\n infer_output = InferOutput(\n name=\"output-0\",\n shape=list(result.shape),\n datatype=from_np_dtype(result.dtype),\n data=result.flatten().tolist()\n )\n infer_outputs.append(infer_output)\n return InferResponse(\n model_name=model_name,\n infer_outputs=infer_outputs,\n response_id=payload.id if payload.id else generate_uuid()\n )\n\n\ndef strtobool(val: str) -> bool:\n \"\"\"Convert a string representation of truth to True or False.\n\n True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values\n are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if\n 'val' is anything else.\n\n Adapted from deprecated `distutils`\n https://github.com/python/cpython/blob/3.11/Lib/distutils/util.py\n \"\"\"\n val = val.lower()\n if val in ('y', 'yes', 't', 'true', 'on', '1'):\n return True\n elif val in ('n', 'no', 'f', 'false', 'off', '0'):\n return False\n else:\n raise ValueError(\"invalid truth value %r\" % (val,))\n", "path": "python/kserve/kserve/utils/utils.py"}, {"content": "# Copyright 2021 The KServe Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport os\nfrom typing import Dict, Union\n\nfrom lightgbm import Booster\n\nfrom kserve import Model\nfrom kserve.errors import InferenceError, ModelMissingError\nfrom kserve.storage import Storage\n\nfrom kserve.protocol.infer_type import InferRequest, InferResponse\nfrom kserve.utils.utils import get_predict_input, get_predict_response\n\nMODEL_EXTENSIONS = (\".bst\")\n\n\nclass LightGBMModel(Model):\n def __init__(self, name: str, model_dir: str, nthread: int,\n booster: Booster = None):\n super().__init__(name)\n self.name = name\n self.model_dir = model_dir\n self.nthread = nthread\n if booster is not None:\n self._booster = booster\n self.ready = True\n\n def load(self) -> bool:\n model_path = Storage.download(self.model_dir)\n model_files = []\n for file in os.listdir(model_path):\n file_path = os.path.join(model_path, file)\n if os.path.isfile(file_path) and file.endswith(MODEL_EXTENSIONS):\n model_files.append(file_path)\n if len(model_files) == 0:\n raise ModelMissingError(model_path)\n elif len(model_files) > 1:\n raise RuntimeError('More than one model file is detected, '\n f'Only one is allowed within model_dir: {model_files}')\n self._booster = Booster(params={\"nthread\": self.nthread},\n model_file=model_files[0])\n self.ready = True\n return self.ready\n\n def predict(self, payload: Union[Dict, InferRequest], headers: Dict[str, str] = None) -> Union[Dict, InferResponse]:\n try:\n instances = get_predict_input(payload, columns=self._booster.feature_name())\n result = self._booster.predict(instances)\n return get_predict_response(payload, result, self.name)\n except Exception as e:\n raise InferenceError(str(e))\n", "path": "python/lgbserver/lgbserver/model.py"}]}
3,867
772
gh_patches_debug_4242
rasdani/github-patches
git_diff
kivy__python-for-android-1995
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- TestGetSystemPythonExecutable.test_virtualenv test fail The `TestGetSystemPythonExecutable.test_virtualenv` and `TestGetSystemPythonExecutable.test_venv` tests started failing all of a sudden. Error was: ``` ModuleNotFoundError: No module named \'pytoml\'\n' ``` This ca be reproduced in local via: ```sh pytest tests/test_pythonpackage_basic.py::TestGetSystemPythonExecutable::test_virtualenv ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 2 import glob 3 from io import open # for open(..,encoding=...) parameter in python 2 4 from os import walk 5 from os.path import join, dirname, sep 6 import os 7 import re 8 from setuptools import setup, find_packages 9 10 # NOTE: All package data should also be set in MANIFEST.in 11 12 packages = find_packages() 13 14 package_data = {'': ['*.tmpl', 15 '*.patch', ], } 16 17 data_files = [] 18 19 20 21 # must be a single statement since buildozer is currently parsing it, refs: 22 # https://github.com/kivy/buildozer/issues/722 23 install_reqs = [ 24 'appdirs', 'colorama>=0.3.3', 'jinja2', 'six', 25 'enum34; python_version<"3.4"', 'sh>=1.10; sys_platform!="nt"', 26 'pep517', 'pytoml', 'virtualenv' 27 ] 28 # (pep517, pytoml and virtualenv are used by pythonpackage.py) 29 30 # By specifying every file manually, package_data will be able to 31 # include them in binary distributions. Note that we have to add 32 # everything as a 'pythonforandroid' rule, using '' apparently doesn't 33 # work. 34 def recursively_include(results, directory, patterns): 35 for root, subfolders, files in walk(directory): 36 for fn in files: 37 if not any([glob.fnmatch.fnmatch(fn, pattern) for pattern in patterns]): 38 continue 39 filename = join(root, fn) 40 directory = 'pythonforandroid' 41 if directory not in results: 42 results[directory] = [] 43 results[directory].append(join(*filename.split(sep)[1:])) 44 45 recursively_include(package_data, 'pythonforandroid/recipes', 46 ['*.patch', 'Setup*', '*.pyx', '*.py', '*.c', '*.h', 47 '*.mk', '*.jam', ]) 48 recursively_include(package_data, 'pythonforandroid/bootstraps', 49 ['*.properties', '*.xml', '*.java', '*.tmpl', '*.txt', '*.png', 50 '*.mk', '*.c', '*.h', '*.py', '*.sh', '*.jpg', '*.aidl', 51 '*.gradle', '.gitkeep', 'gradlew*', '*.jar', "*.patch", ]) 52 recursively_include(package_data, 'pythonforandroid/bootstraps', 53 ['sdl-config', ]) 54 recursively_include(package_data, 'pythonforandroid/bootstraps/webview', 55 ['*.html', ]) 56 recursively_include(package_data, 'pythonforandroid', 57 ['liblink', 'biglink', 'liblink.sh']) 58 59 with open(join(dirname(__file__), 'README.md'), 60 encoding="utf-8", 61 errors="replace", 62 ) as fileh: 63 long_description = fileh.read() 64 65 init_filen = join(dirname(__file__), 'pythonforandroid', '__init__.py') 66 version = None 67 try: 68 with open(init_filen, 69 encoding="utf-8", 70 errors="replace" 71 ) as fileh: 72 lines = fileh.readlines() 73 except IOError: 74 pass 75 else: 76 for line in lines: 77 line = line.strip() 78 if line.startswith('__version__ = '): 79 matches = re.findall(r'["\'].+["\']', line) 80 if matches: 81 version = matches[0].strip("'").strip('"') 82 break 83 if version is None: 84 raise Exception('Error: version could not be loaded from {}'.format(init_filen)) 85 86 setup(name='python-for-android', 87 version=version, 88 description='Android APK packager for Python scripts and apps', 89 long_description=long_description, 90 long_description_content_type='text/markdown', 91 author='The Kivy team', 92 author_email='[email protected]', 93 url='https://github.com/kivy/python-for-android', 94 license='MIT', 95 install_requires=install_reqs, 96 entry_points={ 97 'console_scripts': [ 98 'python-for-android = pythonforandroid.entrypoints:main', 99 'p4a = pythonforandroid.entrypoints:main', 100 ], 101 'distutils.commands': [ 102 'apk = pythonforandroid.bdistapk:BdistAPK', 103 ], 104 }, 105 classifiers = [ 106 'Development Status :: 5 - Production/Stable', 107 'Intended Audience :: Developers', 108 'License :: OSI Approved :: MIT License', 109 'Operating System :: Microsoft :: Windows', 110 'Operating System :: OS Independent', 111 'Operating System :: POSIX :: Linux', 112 'Operating System :: MacOS :: MacOS X', 113 'Operating System :: Android', 114 'Programming Language :: C', 115 'Programming Language :: Python :: 3', 116 'Topic :: Software Development', 117 'Topic :: Utilities', 118 ], 119 packages=packages, 120 package_data=package_data, 121 ) 122 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -23,7 +23,7 @@ install_reqs = [ 'appdirs', 'colorama>=0.3.3', 'jinja2', 'six', 'enum34; python_version<"3.4"', 'sh>=1.10; sys_platform!="nt"', - 'pep517', 'pytoml', 'virtualenv' + 'pep517<0.7.0"', 'pytoml', 'virtualenv' ] # (pep517, pytoml and virtualenv are used by pythonpackage.py)
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -23,7 +23,7 @@\n install_reqs = [\n 'appdirs', 'colorama>=0.3.3', 'jinja2', 'six',\n 'enum34; python_version<\"3.4\"', 'sh>=1.10; sys_platform!=\"nt\"',\n- 'pep517', 'pytoml', 'virtualenv'\n+ 'pep517<0.7.0\"', 'pytoml', 'virtualenv'\n ]\n # (pep517, pytoml and virtualenv are used by pythonpackage.py)\n", "issue": "TestGetSystemPythonExecutable.test_virtualenv test fail\nThe `TestGetSystemPythonExecutable.test_virtualenv` and `TestGetSystemPythonExecutable.test_venv` tests started failing all of a sudden.\r\nError was:\r\n```\r\nModuleNotFoundError: No module named \\'pytoml\\'\\n'\r\n```\r\nThis ca be reproduced in local via:\r\n```sh\r\npytest tests/test_pythonpackage_basic.py::TestGetSystemPythonExecutable::test_virtualenv\r\n```\r\n\r\n\n", "before_files": [{"content": "\nimport glob\nfrom io import open # for open(..,encoding=...) parameter in python 2\nfrom os import walk\nfrom os.path import join, dirname, sep\nimport os\nimport re\nfrom setuptools import setup, find_packages\n\n# NOTE: All package data should also be set in MANIFEST.in\n\npackages = find_packages()\n\npackage_data = {'': ['*.tmpl',\n '*.patch', ], }\n\ndata_files = []\n\n\n\n# must be a single statement since buildozer is currently parsing it, refs:\n# https://github.com/kivy/buildozer/issues/722\ninstall_reqs = [\n 'appdirs', 'colorama>=0.3.3', 'jinja2', 'six',\n 'enum34; python_version<\"3.4\"', 'sh>=1.10; sys_platform!=\"nt\"',\n 'pep517', 'pytoml', 'virtualenv'\n]\n# (pep517, pytoml and virtualenv are used by pythonpackage.py)\n\n# By specifying every file manually, package_data will be able to\n# include them in binary distributions. Note that we have to add\n# everything as a 'pythonforandroid' rule, using '' apparently doesn't\n# work.\ndef recursively_include(results, directory, patterns):\n for root, subfolders, files in walk(directory):\n for fn in files:\n if not any([glob.fnmatch.fnmatch(fn, pattern) for pattern in patterns]):\n continue\n filename = join(root, fn)\n directory = 'pythonforandroid'\n if directory not in results:\n results[directory] = []\n results[directory].append(join(*filename.split(sep)[1:]))\n\nrecursively_include(package_data, 'pythonforandroid/recipes',\n ['*.patch', 'Setup*', '*.pyx', '*.py', '*.c', '*.h',\n '*.mk', '*.jam', ])\nrecursively_include(package_data, 'pythonforandroid/bootstraps',\n ['*.properties', '*.xml', '*.java', '*.tmpl', '*.txt', '*.png',\n '*.mk', '*.c', '*.h', '*.py', '*.sh', '*.jpg', '*.aidl',\n '*.gradle', '.gitkeep', 'gradlew*', '*.jar', \"*.patch\", ])\nrecursively_include(package_data, 'pythonforandroid/bootstraps',\n ['sdl-config', ])\nrecursively_include(package_data, 'pythonforandroid/bootstraps/webview',\n ['*.html', ])\nrecursively_include(package_data, 'pythonforandroid',\n ['liblink', 'biglink', 'liblink.sh'])\n\nwith open(join(dirname(__file__), 'README.md'),\n encoding=\"utf-8\",\n errors=\"replace\",\n ) as fileh:\n long_description = fileh.read()\n\ninit_filen = join(dirname(__file__), 'pythonforandroid', '__init__.py')\nversion = None\ntry:\n with open(init_filen,\n encoding=\"utf-8\",\n errors=\"replace\"\n ) as fileh:\n lines = fileh.readlines()\nexcept IOError:\n pass\nelse:\n for line in lines:\n line = line.strip()\n if line.startswith('__version__ = '):\n matches = re.findall(r'[\"\\'].+[\"\\']', line)\n if matches:\n version = matches[0].strip(\"'\").strip('\"')\n break\nif version is None:\n raise Exception('Error: version could not be loaded from {}'.format(init_filen))\n\nsetup(name='python-for-android',\n version=version,\n description='Android APK packager for Python scripts and apps',\n long_description=long_description,\n long_description_content_type='text/markdown',\n author='The Kivy team',\n author_email='[email protected]',\n url='https://github.com/kivy/python-for-android',\n license='MIT',\n install_requires=install_reqs,\n entry_points={\n 'console_scripts': [\n 'python-for-android = pythonforandroid.entrypoints:main',\n 'p4a = pythonforandroid.entrypoints:main',\n ],\n 'distutils.commands': [\n 'apk = pythonforandroid.bdistapk:BdistAPK',\n ],\n },\n classifiers = [\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: OS Independent',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Android',\n 'Programming Language :: C',\n 'Programming Language :: Python :: 3',\n 'Topic :: Software Development',\n 'Topic :: Utilities',\n ],\n packages=packages,\n package_data=package_data,\n )\n", "path": "setup.py"}], "after_files": [{"content": "\nimport glob\nfrom io import open # for open(..,encoding=...) parameter in python 2\nfrom os import walk\nfrom os.path import join, dirname, sep\nimport os\nimport re\nfrom setuptools import setup, find_packages\n\n# NOTE: All package data should also be set in MANIFEST.in\n\npackages = find_packages()\n\npackage_data = {'': ['*.tmpl',\n '*.patch', ], }\n\ndata_files = []\n\n\n\n# must be a single statement since buildozer is currently parsing it, refs:\n# https://github.com/kivy/buildozer/issues/722\ninstall_reqs = [\n 'appdirs', 'colorama>=0.3.3', 'jinja2', 'six',\n 'enum34; python_version<\"3.4\"', 'sh>=1.10; sys_platform!=\"nt\"',\n 'pep517<0.7.0\"', 'pytoml', 'virtualenv'\n]\n# (pep517, pytoml and virtualenv are used by pythonpackage.py)\n\n# By specifying every file manually, package_data will be able to\n# include them in binary distributions. Note that we have to add\n# everything as a 'pythonforandroid' rule, using '' apparently doesn't\n# work.\ndef recursively_include(results, directory, patterns):\n for root, subfolders, files in walk(directory):\n for fn in files:\n if not any([glob.fnmatch.fnmatch(fn, pattern) for pattern in patterns]):\n continue\n filename = join(root, fn)\n directory = 'pythonforandroid'\n if directory not in results:\n results[directory] = []\n results[directory].append(join(*filename.split(sep)[1:]))\n\nrecursively_include(package_data, 'pythonforandroid/recipes',\n ['*.patch', 'Setup*', '*.pyx', '*.py', '*.c', '*.h',\n '*.mk', '*.jam', ])\nrecursively_include(package_data, 'pythonforandroid/bootstraps',\n ['*.properties', '*.xml', '*.java', '*.tmpl', '*.txt', '*.png',\n '*.mk', '*.c', '*.h', '*.py', '*.sh', '*.jpg', '*.aidl',\n '*.gradle', '.gitkeep', 'gradlew*', '*.jar', \"*.patch\", ])\nrecursively_include(package_data, 'pythonforandroid/bootstraps',\n ['sdl-config', ])\nrecursively_include(package_data, 'pythonforandroid/bootstraps/webview',\n ['*.html', ])\nrecursively_include(package_data, 'pythonforandroid',\n ['liblink', 'biglink', 'liblink.sh'])\n\nwith open(join(dirname(__file__), 'README.md'),\n encoding=\"utf-8\",\n errors=\"replace\",\n ) as fileh:\n long_description = fileh.read()\n\ninit_filen = join(dirname(__file__), 'pythonforandroid', '__init__.py')\nversion = None\ntry:\n with open(init_filen,\n encoding=\"utf-8\",\n errors=\"replace\"\n ) as fileh:\n lines = fileh.readlines()\nexcept IOError:\n pass\nelse:\n for line in lines:\n line = line.strip()\n if line.startswith('__version__ = '):\n matches = re.findall(r'[\"\\'].+[\"\\']', line)\n if matches:\n version = matches[0].strip(\"'\").strip('\"')\n break\nif version is None:\n raise Exception('Error: version could not be loaded from {}'.format(init_filen))\n\nsetup(name='python-for-android',\n version=version,\n description='Android APK packager for Python scripts and apps',\n long_description=long_description,\n long_description_content_type='text/markdown',\n author='The Kivy team',\n author_email='[email protected]',\n url='https://github.com/kivy/python-for-android',\n license='MIT',\n install_requires=install_reqs,\n entry_points={\n 'console_scripts': [\n 'python-for-android = pythonforandroid.entrypoints:main',\n 'p4a = pythonforandroid.entrypoints:main',\n ],\n 'distutils.commands': [\n 'apk = pythonforandroid.bdistapk:BdistAPK',\n ],\n },\n classifiers = [\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: OS Independent',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Android',\n 'Programming Language :: C',\n 'Programming Language :: Python :: 3',\n 'Topic :: Software Development',\n 'Topic :: Utilities',\n ],\n packages=packages,\n package_data=package_data,\n )\n", "path": "setup.py"}]}
1,625
150
gh_patches_debug_23758
rasdani/github-patches
git_diff
holoviz__panel-705
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Broken image link in Django user guide The link https://panel.pyviz.org/apps/django2/sliders.png in https://panel.pyviz.org/user_guide/Django_Apps.html doesn't seem to go anywhere; was that meant to point to https://parambokeh.pyviz.org/assets/sliders.png ? Broken image link in Django user guide The link https://panel.pyviz.org/apps/django2/sliders.png in https://panel.pyviz.org/user_guide/Django_Apps.html doesn't seem to go anywhere; was that meant to point to https://parambokeh.pyviz.org/assets/sliders.png ? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `panel/util.py` Content: ``` 1 """ 2 Various general utilities used in the panel codebase. 3 """ 4 from __future__ import absolute_import, division, unicode_literals 5 6 import re 7 import sys 8 import inspect 9 import numbers 10 import datetime as dt 11 12 from datetime import datetime 13 from six import string_types 14 from collections import defaultdict, OrderedDict 15 try: 16 from collections.abc import MutableSequence, MutableMapping 17 except ImportError: # support for python>3.8 18 from collections import MutableSequence, MutableMapping 19 20 import param 21 import numpy as np 22 23 datetime_types = (np.datetime64, dt.datetime, dt.date) 24 25 if sys.version_info.major > 2: 26 unicode = str 27 28 29 def hashable(x): 30 if isinstance(x, MutableSequence): 31 return tuple(x) 32 elif isinstance(x, MutableMapping): 33 return tuple([(k,v) for k,v in x.items()]) 34 else: 35 return x 36 37 38 def isIn(obj, objs): 39 """ 40 Checks if the object is in the list of objects safely. 41 """ 42 for o in objs: 43 if o is obj: 44 return True 45 try: 46 if o == obj: 47 return True 48 except: 49 pass 50 return False 51 52 53 def indexOf(obj, objs): 54 """ 55 Returns the index of an object in a list of objects. Unlike the 56 list.index method this function only checks for identity not 57 equality. 58 """ 59 for i, o in enumerate(objs): 60 if o is obj: 61 return i 62 try: 63 if o == obj: 64 return i 65 except: 66 pass 67 raise ValueError('%s not in list' % obj) 68 69 70 def as_unicode(obj): 71 """ 72 Safely casts any object to unicode including regular string 73 (i.e. bytes) types in python 2. 74 """ 75 if sys.version_info.major < 3 and isinstance(obj, str): 76 obj = obj.decode('utf-8') 77 return unicode(obj) 78 79 80 def param_name(name): 81 """ 82 Removes the integer id from a Parameterized class name. 83 """ 84 match = re.match(r'(.)+(\d){5}', name) 85 return name[:-5] if match else name 86 87 88 def unicode_repr(obj): 89 """ 90 Returns a repr without the unicode prefix. 91 """ 92 if sys.version_info.major == 2 and isinstance(obj, unicode): 93 return repr(obj)[1:] 94 return repr(obj) 95 96 97 def abbreviated_repr(value, max_length=25, natural_breaks=(',', ' ')): 98 """ 99 Returns an abbreviated repr for the supplied object. Attempts to 100 find a natural break point while adhering to the maximum length. 101 """ 102 vrepr = repr(value) 103 if len(vrepr) > max_length: 104 # Attempt to find natural cutoff point 105 abbrev = vrepr[max_length//2:] 106 natural_break = None 107 for brk in natural_breaks: 108 if brk in abbrev: 109 natural_break = abbrev.index(brk) + max_length//2 110 break 111 if natural_break and natural_break < max_length: 112 max_length = natural_break + 1 113 114 end_char = '' 115 if isinstance(value, list): 116 end_char = ']' 117 elif isinstance(value, OrderedDict): 118 end_char = '])' 119 elif isinstance(value, (dict, set)): 120 end_char = '}' 121 return vrepr[:max_length+1] + '...' + end_char 122 return vrepr 123 124 125 def param_reprs(parameterized, skip=None): 126 """ 127 Returns a list of reprs for parameters on the parameterized object. 128 Skips default and empty values. 129 """ 130 cls = type(parameterized).__name__ 131 param_reprs = [] 132 for p, v in sorted(parameterized.get_param_values()): 133 if v is parameterized.param[p].default: continue 134 elif v is None: continue 135 elif isinstance(v, string_types) and v == '': continue 136 elif isinstance(v, list) and v == []: continue 137 elif isinstance(v, dict) and v == {}: continue 138 elif (skip and p in skip) or (p == 'name' and v.startswith(cls)): continue 139 param_reprs.append('%s=%s' % (p, abbreviated_repr(v))) 140 return param_reprs 141 142 143 def full_groupby(l, key=lambda x: x): 144 """ 145 Groupby implementation which does not require a prior sort 146 """ 147 d = defaultdict(list) 148 for item in l: 149 d[key(item)].append(item) 150 return d.items() 151 152 153 def get_method_owner(meth): 154 """ 155 Returns the instance owning the supplied instancemethod or 156 the class owning the supplied classmethod. 157 """ 158 if inspect.ismethod(meth): 159 if sys.version_info < (3,0): 160 return meth.im_class if meth.im_self is None else meth.im_self 161 else: 162 return meth.__self__ 163 164 165 def is_parameterized(obj): 166 """ 167 Whether an object is a Parameterized class or instance. 168 """ 169 return (isinstance(obj, param.Parameterized) or 170 (isinstance(obj, type) and issubclass(obj, param.Parameterized))) 171 172 173 def isdatetime(value): 174 """ 175 Whether the array or scalar is recognized datetime type. 176 """ 177 if isinstance(value, np.ndarray): 178 return (value.dtype.kind == "M" or 179 (value.dtype.kind == "O" and len(value) and 180 isinstance(value[0], datetime_types))) 181 elif isinstance(value, list): 182 return all(isinstance(d, datetime_types) for d in value) 183 else: 184 return isinstance(value, datetime_types) 185 186 def value_as_datetime(value): 187 """ 188 Retrieve the value tuple as a tuple of datetime objects. 189 """ 190 if isinstance(value, numbers.Number): 191 value = datetime.utcfromtimestamp(value / 1000) 192 return value 193 194 195 def value_as_date(value): 196 if isinstance(value, numbers.Number): 197 value = datetime.utcfromtimestamp(value / 1000).date() 198 elif isinstance(value, datetime): 199 value = value.date() 200 return value 201 ``` Path: `doc/conf.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 from nbsite.shared_conf import * 4 5 project = u'Panel' 6 authors = u'Panel contributors' 7 copyright = u'2019 ' + authors 8 description = 'High-level dashboarding for python visualization libraries' 9 10 import panel 11 version = release = str(panel.__version__) 12 13 html_static_path += ['_static'] 14 html_theme = 'sphinx_ioam_theme' 15 html_theme_options = { 16 'logo': 'logo_horizontal.png', 17 'favicon': 'favicon.ico', 18 'css': 'site.css' 19 } 20 21 extensions += ['nbsite.gallery'] 22 23 nbsite_gallery_conf = { 24 'github_org': 'pyviz', 25 'github_project': 'panel', 26 'galleries': { 27 'gallery': { 28 'title': 'Gallery', 29 'sections': [ 30 {'path': 'demos', 31 'title': 'Demos', 32 'description': 'A set of sophisticated apps built to demonstrate the features of Panel.'}, 33 {'path': 'simple', 34 'title': 'Simple Apps', 35 'description': 'Simple example apps meant to provide a quick introduction to Panel.'}, 36 {'path': 'apis', 37 'title': 'APIs', 38 'description': ('Examples meant to demonstrate the usage of different Panel APIs ' 39 'such as interact and reactive functions.')}, 40 {'path': 'layout', 41 'title': 'Layouts', 42 'description': 'How to leverage Panel layout components to achieve complex layouts.'}, 43 {'path': 'dynamic', 44 'title': 'Dynamic UIs', 45 'description': ('Examples demonstrating how to build dynamic UIs with components that' 46 'are added or removed interactively.')}, 47 {'path': 'param', 48 'title': 'Param based apps', 49 'description': 'Using the Param library to express UIs independently of Panel.'}, 50 {'path': 'links', 51 'title': 'Linking', 52 'description': ('Using Javascript based links to define interactivity without ' 53 'without requiring a live kernel.')}, 54 {'path': 'external', 55 'title': 'External libraries', 56 'description': 'Wrapping external libraries with Panel.'} 57 ] 58 }, 59 'reference': { 60 'title': 'Reference Gallery', 61 'sections': [ 62 'panes', 63 'layouts', 64 'widgets' 65 ] 66 } 67 }, 68 'thumbnail_url': 'https://assets.holoviews.org/panel/thumbnails', 69 'deployment_url': 'https://panel-gallery.pyviz.demo.anaconda.com/' 70 } 71 72 _NAV = ( 73 ('Getting started', 'getting_started/index'), 74 ('User Guide', 'user_guide/index'), 75 ('Gallery', 'gallery/index'), 76 ('Reference Gallery', 'reference/index'), 77 ('Developer Guide', 'developer_guide/index'), 78 ('FAQ', 'FAQ'), 79 ('About', 'about') 80 ) 81 82 templates_path = ['_templates'] 83 84 html_context.update({ 85 'PROJECT': project, 86 'DESCRIPTION': description, 87 'AUTHOR': authors, 88 'VERSION': version, 89 'WEBSITE_URL': 'https://panel.pyviz.org', 90 'WEBSITE_SERVER': 'https://panel.pyviz.org', 91 'VERSION': version, 92 'NAV': _NAV, 93 'LINKS': _NAV, 94 'SOCIAL': ( 95 ('Gitter', '//gitter.im/pyviz/pyviz'), 96 ('Github', '//github.com/pyviz/panel'), 97 ) 98 }) 99 100 nbbuild_patterns_to_take_along = ["simple.html"] 101 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/doc/conf.py b/doc/conf.py --- a/doc/conf.py +++ b/doc/conf.py @@ -69,7 +69,7 @@ 'deployment_url': 'https://panel-gallery.pyviz.demo.anaconda.com/' } -_NAV = ( +_NAV = ( ('Getting started', 'getting_started/index'), ('User Guide', 'user_guide/index'), ('Gallery', 'gallery/index'), @@ -88,7 +88,6 @@ 'VERSION': version, 'WEBSITE_URL': 'https://panel.pyviz.org', 'WEBSITE_SERVER': 'https://panel.pyviz.org', - 'VERSION': version, 'NAV': _NAV, 'LINKS': _NAV, 'SOCIAL': ( diff --git a/panel/util.py b/panel/util.py --- a/panel/util.py +++ b/panel/util.py @@ -12,9 +12,9 @@ from datetime import datetime from six import string_types from collections import defaultdict, OrderedDict -try: +try: # python >= 3.3 from collections.abc import MutableSequence, MutableMapping -except ImportError: # support for python>3.8 +except ImportError: from collections import MutableSequence, MutableMapping import param
{"golden_diff": "diff --git a/doc/conf.py b/doc/conf.py\n--- a/doc/conf.py\n+++ b/doc/conf.py\n@@ -69,7 +69,7 @@\n 'deployment_url': 'https://panel-gallery.pyviz.demo.anaconda.com/'\n }\n \n-_NAV = (\n+_NAV = (\n ('Getting started', 'getting_started/index'),\n ('User Guide', 'user_guide/index'),\n ('Gallery', 'gallery/index'),\n@@ -88,7 +88,6 @@\n 'VERSION': version,\n 'WEBSITE_URL': 'https://panel.pyviz.org',\n 'WEBSITE_SERVER': 'https://panel.pyviz.org',\n- 'VERSION': version,\n 'NAV': _NAV,\n 'LINKS': _NAV,\n 'SOCIAL': (\ndiff --git a/panel/util.py b/panel/util.py\n--- a/panel/util.py\n+++ b/panel/util.py\n@@ -12,9 +12,9 @@\n from datetime import datetime\n from six import string_types\n from collections import defaultdict, OrderedDict\n-try:\n+try: # python >= 3.3\n from collections.abc import MutableSequence, MutableMapping\n-except ImportError: # support for python>3.8\n+except ImportError:\n from collections import MutableSequence, MutableMapping\n \n import param\n", "issue": "Broken image link in Django user guide\nThe link https://panel.pyviz.org/apps/django2/sliders.png in https://panel.pyviz.org/user_guide/Django_Apps.html doesn't seem to go anywhere; was that meant to point to https://parambokeh.pyviz.org/assets/sliders.png ?\nBroken image link in Django user guide\nThe link https://panel.pyviz.org/apps/django2/sliders.png in https://panel.pyviz.org/user_guide/Django_Apps.html doesn't seem to go anywhere; was that meant to point to https://parambokeh.pyviz.org/assets/sliders.png ?\n", "before_files": [{"content": "\"\"\"\nVarious general utilities used in the panel codebase.\n\"\"\"\nfrom __future__ import absolute_import, division, unicode_literals\n\nimport re\nimport sys\nimport inspect\nimport numbers\nimport datetime as dt\n\nfrom datetime import datetime\nfrom six import string_types\nfrom collections import defaultdict, OrderedDict\ntry:\n from collections.abc import MutableSequence, MutableMapping\nexcept ImportError: # support for python>3.8\n from collections import MutableSequence, MutableMapping\n\nimport param\nimport numpy as np\n\ndatetime_types = (np.datetime64, dt.datetime, dt.date)\n\nif sys.version_info.major > 2:\n unicode = str\n\n\ndef hashable(x):\n if isinstance(x, MutableSequence):\n return tuple(x)\n elif isinstance(x, MutableMapping):\n return tuple([(k,v) for k,v in x.items()])\n else:\n return x\n\n\ndef isIn(obj, objs):\n \"\"\"\n Checks if the object is in the list of objects safely.\n \"\"\"\n for o in objs:\n if o is obj:\n return True\n try:\n if o == obj:\n return True\n except:\n pass\n return False\n\n\ndef indexOf(obj, objs):\n \"\"\"\n Returns the index of an object in a list of objects. Unlike the\n list.index method this function only checks for identity not\n equality.\n \"\"\"\n for i, o in enumerate(objs):\n if o is obj:\n return i\n try:\n if o == obj:\n return i\n except:\n pass\n raise ValueError('%s not in list' % obj)\n\n\ndef as_unicode(obj):\n \"\"\"\n Safely casts any object to unicode including regular string\n (i.e. bytes) types in python 2.\n \"\"\"\n if sys.version_info.major < 3 and isinstance(obj, str):\n obj = obj.decode('utf-8')\n return unicode(obj)\n\n\ndef param_name(name):\n \"\"\"\n Removes the integer id from a Parameterized class name.\n \"\"\"\n match = re.match(r'(.)+(\\d){5}', name)\n return name[:-5] if match else name\n\n\ndef unicode_repr(obj):\n \"\"\"\n Returns a repr without the unicode prefix.\n \"\"\"\n if sys.version_info.major == 2 and isinstance(obj, unicode):\n return repr(obj)[1:]\n return repr(obj)\n\n\ndef abbreviated_repr(value, max_length=25, natural_breaks=(',', ' ')):\n \"\"\"\n Returns an abbreviated repr for the supplied object. Attempts to\n find a natural break point while adhering to the maximum length.\n \"\"\"\n vrepr = repr(value)\n if len(vrepr) > max_length:\n # Attempt to find natural cutoff point\n abbrev = vrepr[max_length//2:]\n natural_break = None\n for brk in natural_breaks:\n if brk in abbrev:\n natural_break = abbrev.index(brk) + max_length//2\n break\n if natural_break and natural_break < max_length:\n max_length = natural_break + 1\n\n end_char = ''\n if isinstance(value, list):\n end_char = ']'\n elif isinstance(value, OrderedDict):\n end_char = '])'\n elif isinstance(value, (dict, set)):\n end_char = '}'\n return vrepr[:max_length+1] + '...' + end_char\n return vrepr\n\n\ndef param_reprs(parameterized, skip=None):\n \"\"\"\n Returns a list of reprs for parameters on the parameterized object.\n Skips default and empty values.\n \"\"\"\n cls = type(parameterized).__name__\n param_reprs = []\n for p, v in sorted(parameterized.get_param_values()):\n if v is parameterized.param[p].default: continue\n elif v is None: continue\n elif isinstance(v, string_types) and v == '': continue\n elif isinstance(v, list) and v == []: continue\n elif isinstance(v, dict) and v == {}: continue\n elif (skip and p in skip) or (p == 'name' and v.startswith(cls)): continue\n param_reprs.append('%s=%s' % (p, abbreviated_repr(v)))\n return param_reprs\n\n\ndef full_groupby(l, key=lambda x: x):\n \"\"\"\n Groupby implementation which does not require a prior sort\n \"\"\"\n d = defaultdict(list)\n for item in l:\n d[key(item)].append(item)\n return d.items()\n\n\ndef get_method_owner(meth):\n \"\"\"\n Returns the instance owning the supplied instancemethod or\n the class owning the supplied classmethod.\n \"\"\"\n if inspect.ismethod(meth):\n if sys.version_info < (3,0):\n return meth.im_class if meth.im_self is None else meth.im_self\n else:\n return meth.__self__\n\n\ndef is_parameterized(obj):\n \"\"\"\n Whether an object is a Parameterized class or instance.\n \"\"\"\n return (isinstance(obj, param.Parameterized) or\n (isinstance(obj, type) and issubclass(obj, param.Parameterized)))\n\n\ndef isdatetime(value):\n \"\"\"\n Whether the array or scalar is recognized datetime type.\n \"\"\"\n if isinstance(value, np.ndarray):\n return (value.dtype.kind == \"M\" or\n (value.dtype.kind == \"O\" and len(value) and\n isinstance(value[0], datetime_types)))\n elif isinstance(value, list):\n return all(isinstance(d, datetime_types) for d in value)\n else:\n return isinstance(value, datetime_types)\n\ndef value_as_datetime(value):\n \"\"\"\n Retrieve the value tuple as a tuple of datetime objects.\n \"\"\"\n if isinstance(value, numbers.Number):\n value = datetime.utcfromtimestamp(value / 1000)\n return value\n\n\ndef value_as_date(value):\n if isinstance(value, numbers.Number):\n value = datetime.utcfromtimestamp(value / 1000).date()\n elif isinstance(value, datetime):\n value = value.date()\n return value\n", "path": "panel/util.py"}, {"content": "# -*- coding: utf-8 -*-\n\nfrom nbsite.shared_conf import *\n\nproject = u'Panel'\nauthors = u'Panel contributors'\ncopyright = u'2019 ' + authors\ndescription = 'High-level dashboarding for python visualization libraries'\n\nimport panel\nversion = release = str(panel.__version__)\n\nhtml_static_path += ['_static']\nhtml_theme = 'sphinx_ioam_theme'\nhtml_theme_options = {\n 'logo': 'logo_horizontal.png',\n 'favicon': 'favicon.ico',\n 'css': 'site.css' \n}\n\nextensions += ['nbsite.gallery']\n\nnbsite_gallery_conf = {\n 'github_org': 'pyviz',\n 'github_project': 'panel',\n 'galleries': {\n 'gallery': {\n 'title': 'Gallery',\n 'sections': [\n {'path': 'demos',\n 'title': 'Demos',\n 'description': 'A set of sophisticated apps built to demonstrate the features of Panel.'},\n {'path': 'simple',\n 'title': 'Simple Apps',\n 'description': 'Simple example apps meant to provide a quick introduction to Panel.'},\n {'path': 'apis',\n 'title': 'APIs',\n 'description': ('Examples meant to demonstrate the usage of different Panel APIs '\n 'such as interact and reactive functions.')},\n {'path': 'layout',\n 'title': 'Layouts',\n 'description': 'How to leverage Panel layout components to achieve complex layouts.'},\n {'path': 'dynamic',\n 'title': 'Dynamic UIs',\n 'description': ('Examples demonstrating how to build dynamic UIs with components that'\n 'are added or removed interactively.')},\n {'path': 'param',\n 'title': 'Param based apps',\n 'description': 'Using the Param library to express UIs independently of Panel.'},\n {'path': 'links',\n 'title': 'Linking',\n 'description': ('Using Javascript based links to define interactivity without '\n 'without requiring a live kernel.')},\n {'path': 'external',\n 'title': 'External libraries',\n 'description': 'Wrapping external libraries with Panel.'}\n ]\n },\n 'reference': {\n 'title': 'Reference Gallery',\n 'sections': [\n 'panes',\n 'layouts',\n 'widgets'\n ]\n }\n },\n 'thumbnail_url': 'https://assets.holoviews.org/panel/thumbnails',\n 'deployment_url': 'https://panel-gallery.pyviz.demo.anaconda.com/'\n}\n\n_NAV = (\n ('Getting started', 'getting_started/index'),\n ('User Guide', 'user_guide/index'),\n ('Gallery', 'gallery/index'),\n ('Reference Gallery', 'reference/index'),\n ('Developer Guide', 'developer_guide/index'),\n ('FAQ', 'FAQ'),\n ('About', 'about')\n)\n\ntemplates_path = ['_templates']\n\nhtml_context.update({\n 'PROJECT': project,\n 'DESCRIPTION': description,\n 'AUTHOR': authors,\n 'VERSION': version,\n 'WEBSITE_URL': 'https://panel.pyviz.org',\n 'WEBSITE_SERVER': 'https://panel.pyviz.org',\n 'VERSION': version,\n 'NAV': _NAV,\n 'LINKS': _NAV,\n 'SOCIAL': (\n ('Gitter', '//gitter.im/pyviz/pyviz'),\n ('Github', '//github.com/pyviz/panel'),\n )\n})\n\nnbbuild_patterns_to_take_along = [\"simple.html\"]\n", "path": "doc/conf.py"}], "after_files": [{"content": "\"\"\"\nVarious general utilities used in the panel codebase.\n\"\"\"\nfrom __future__ import absolute_import, division, unicode_literals\n\nimport re\nimport sys\nimport inspect\nimport numbers\nimport datetime as dt\n\nfrom datetime import datetime\nfrom six import string_types\nfrom collections import defaultdict, OrderedDict\ntry: # python >= 3.3\n from collections.abc import MutableSequence, MutableMapping\nexcept ImportError:\n from collections import MutableSequence, MutableMapping\n\nimport param\nimport numpy as np\n\ndatetime_types = (np.datetime64, dt.datetime, dt.date)\n\nif sys.version_info.major > 2:\n unicode = str\n\n\ndef hashable(x):\n if isinstance(x, MutableSequence):\n return tuple(x)\n elif isinstance(x, MutableMapping):\n return tuple([(k,v) for k,v in x.items()])\n else:\n return x\n\n\ndef isIn(obj, objs):\n \"\"\"\n Checks if the object is in the list of objects safely.\n \"\"\"\n for o in objs:\n if o is obj:\n return True\n try:\n if o == obj:\n return True\n except:\n pass\n return False\n\n\ndef indexOf(obj, objs):\n \"\"\"\n Returns the index of an object in a list of objects. Unlike the\n list.index method this function only checks for identity not\n equality.\n \"\"\"\n for i, o in enumerate(objs):\n if o is obj:\n return i\n try:\n if o == obj:\n return i\n except:\n pass\n raise ValueError('%s not in list' % obj)\n\n\ndef as_unicode(obj):\n \"\"\"\n Safely casts any object to unicode including regular string\n (i.e. bytes) types in python 2.\n \"\"\"\n if sys.version_info.major < 3 and isinstance(obj, str):\n obj = obj.decode('utf-8')\n return unicode(obj)\n\n\ndef param_name(name):\n \"\"\"\n Removes the integer id from a Parameterized class name.\n \"\"\"\n match = re.match(r'(.)+(\\d){5}', name)\n return name[:-5] if match else name\n\n\ndef unicode_repr(obj):\n \"\"\"\n Returns a repr without the unicode prefix.\n \"\"\"\n if sys.version_info.major == 2 and isinstance(obj, unicode):\n return repr(obj)[1:]\n return repr(obj)\n\n\ndef abbreviated_repr(value, max_length=25, natural_breaks=(',', ' ')):\n \"\"\"\n Returns an abbreviated repr for the supplied object. Attempts to\n find a natural break point while adhering to the maximum length.\n \"\"\"\n vrepr = repr(value)\n if len(vrepr) > max_length:\n # Attempt to find natural cutoff point\n abbrev = vrepr[max_length//2:]\n natural_break = None\n for brk in natural_breaks:\n if brk in abbrev:\n natural_break = abbrev.index(brk) + max_length//2\n break\n if natural_break and natural_break < max_length:\n max_length = natural_break + 1\n\n end_char = ''\n if isinstance(value, list):\n end_char = ']'\n elif isinstance(value, OrderedDict):\n end_char = '])'\n elif isinstance(value, (dict, set)):\n end_char = '}'\n return vrepr[:max_length+1] + '...' + end_char\n return vrepr\n\n\ndef param_reprs(parameterized, skip=None):\n \"\"\"\n Returns a list of reprs for parameters on the parameterized object.\n Skips default and empty values.\n \"\"\"\n cls = type(parameterized).__name__\n param_reprs = []\n for p, v in sorted(parameterized.get_param_values()):\n if v is parameterized.param[p].default: continue\n elif v is None: continue\n elif isinstance(v, string_types) and v == '': continue\n elif isinstance(v, list) and v == []: continue\n elif isinstance(v, dict) and v == {}: continue\n elif (skip and p in skip) or (p == 'name' and v.startswith(cls)): continue\n param_reprs.append('%s=%s' % (p, abbreviated_repr(v)))\n return param_reprs\n\n\ndef full_groupby(l, key=lambda x: x):\n \"\"\"\n Groupby implementation which does not require a prior sort\n \"\"\"\n d = defaultdict(list)\n for item in l:\n d[key(item)].append(item)\n return d.items()\n\n\ndef get_method_owner(meth):\n \"\"\"\n Returns the instance owning the supplied instancemethod or\n the class owning the supplied classmethod.\n \"\"\"\n if inspect.ismethod(meth):\n if sys.version_info < (3,0):\n return meth.im_class if meth.im_self is None else meth.im_self\n else:\n return meth.__self__\n\n\ndef is_parameterized(obj):\n \"\"\"\n Whether an object is a Parameterized class or instance.\n \"\"\"\n return (isinstance(obj, param.Parameterized) or\n (isinstance(obj, type) and issubclass(obj, param.Parameterized)))\n\n\ndef isdatetime(value):\n \"\"\"\n Whether the array or scalar is recognized datetime type.\n \"\"\"\n if isinstance(value, np.ndarray):\n return (value.dtype.kind == \"M\" or\n (value.dtype.kind == \"O\" and len(value) and\n isinstance(value[0], datetime_types)))\n elif isinstance(value, list):\n return all(isinstance(d, datetime_types) for d in value)\n else:\n return isinstance(value, datetime_types)\n\ndef value_as_datetime(value):\n \"\"\"\n Retrieve the value tuple as a tuple of datetime objects.\n \"\"\"\n if isinstance(value, numbers.Number):\n value = datetime.utcfromtimestamp(value / 1000)\n return value\n\n\ndef value_as_date(value):\n if isinstance(value, numbers.Number):\n value = datetime.utcfromtimestamp(value / 1000).date()\n elif isinstance(value, datetime):\n value = value.date()\n return value\n", "path": "panel/util.py"}, {"content": "# -*- coding: utf-8 -*-\n\nfrom nbsite.shared_conf import *\n\nproject = u'Panel'\nauthors = u'Panel contributors'\ncopyright = u'2019 ' + authors\ndescription = 'High-level dashboarding for python visualization libraries'\n\nimport panel\nversion = release = str(panel.__version__)\n\nhtml_static_path += ['_static']\nhtml_theme = 'sphinx_ioam_theme'\nhtml_theme_options = {\n 'logo': 'logo_horizontal.png',\n 'favicon': 'favicon.ico',\n 'css': 'site.css' \n}\n\nextensions += ['nbsite.gallery']\n\nnbsite_gallery_conf = {\n 'github_org': 'pyviz',\n 'github_project': 'panel',\n 'galleries': {\n 'gallery': {\n 'title': 'Gallery',\n 'sections': [\n {'path': 'demos',\n 'title': 'Demos',\n 'description': 'A set of sophisticated apps built to demonstrate the features of Panel.'},\n {'path': 'simple',\n 'title': 'Simple Apps',\n 'description': 'Simple example apps meant to provide a quick introduction to Panel.'},\n {'path': 'apis',\n 'title': 'APIs',\n 'description': ('Examples meant to demonstrate the usage of different Panel APIs '\n 'such as interact and reactive functions.')},\n {'path': 'layout',\n 'title': 'Layouts',\n 'description': 'How to leverage Panel layout components to achieve complex layouts.'},\n {'path': 'dynamic',\n 'title': 'Dynamic UIs',\n 'description': ('Examples demonstrating how to build dynamic UIs with components that'\n 'are added or removed interactively.')},\n {'path': 'param',\n 'title': 'Param based apps',\n 'description': 'Using the Param library to express UIs independently of Panel.'},\n {'path': 'links',\n 'title': 'Linking',\n 'description': ('Using Javascript based links to define interactivity without '\n 'without requiring a live kernel.')},\n {'path': 'external',\n 'title': 'External libraries',\n 'description': 'Wrapping external libraries with Panel.'}\n ]\n },\n 'reference': {\n 'title': 'Reference Gallery',\n 'sections': [\n 'panes',\n 'layouts',\n 'widgets'\n ]\n }\n },\n 'thumbnail_url': 'https://assets.holoviews.org/panel/thumbnails',\n 'deployment_url': 'https://panel-gallery.pyviz.demo.anaconda.com/'\n}\n\n_NAV = (\n ('Getting started', 'getting_started/index'),\n ('User Guide', 'user_guide/index'),\n ('Gallery', 'gallery/index'),\n ('Reference Gallery', 'reference/index'),\n ('Developer Guide', 'developer_guide/index'),\n ('FAQ', 'FAQ'),\n ('About', 'about')\n)\n\ntemplates_path = ['_templates']\n\nhtml_context.update({\n 'PROJECT': project,\n 'DESCRIPTION': description,\n 'AUTHOR': authors,\n 'VERSION': version,\n 'WEBSITE_URL': 'https://panel.pyviz.org',\n 'WEBSITE_SERVER': 'https://panel.pyviz.org',\n 'NAV': _NAV,\n 'LINKS': _NAV,\n 'SOCIAL': (\n ('Gitter', '//gitter.im/pyviz/pyviz'),\n ('Github', '//github.com/pyviz/panel'),\n )\n})\n\nnbbuild_patterns_to_take_along = [\"simple.html\"]\n", "path": "doc/conf.py"}]}
3,119
283
gh_patches_debug_11585
rasdani/github-patches
git_diff
ibis-project__ibis-4602
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- bug: `.visualize(label_edges=True)` case ops.NodeList we get ValueError tuple.index(x): x not in tuple Hi, There's still one small thing that needs fixing in the great `.visualize(label_edges=True)` feature 😄 : When running... ```py import ibis t = ibis.table((("a", "int32"), ("b", "string"))) expr = t[(t["a"] == 1) & (t["b"] == "x")] expr.visualize(label_edges=True) ``` ...I get: ``` Exception has occurred: ValueError - tuple.index(x): x not in tuple ``` at the following line: https://github.com/ibis-project/ibis/blob/2c9cfea15fc4d5f61e9099c3b270ea61498b5e45/ibis/expr/visualize.py#L117 This is happening when `v` is an `ops.NodeList` and thus its `v.args` is a tuple `of tuples` and NOT a tuple of nodes. Given that on the next line we have special logic for `ops.NodeList`, maybe one quick fix could be to use the `.values`/`.args[0]` in such cases: ![image](https://user-images.githubusercontent.com/10154357/192848219-48560533-9cbc-4621-8759-145dcbe95462.png) Thank you. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ibis/expr/visualize.py` Content: ``` 1 import sys 2 import tempfile 3 from html import escape 4 5 import graphviz as gv 6 7 import ibis 8 import ibis.common.exceptions as com 9 import ibis.expr.operations as ops 10 from ibis.common.graph import Graph 11 12 13 def get_type(node): 14 try: 15 return str(node.output_dtype) 16 except (AttributeError, NotImplementedError): 17 pass 18 19 try: 20 schema = node.schema 21 except (AttributeError, NotImplementedError): 22 # TODO(kszucs): this branch should be removed 23 try: 24 # As a last resort try get the name of the output_type class 25 return node.output_type.__name__ 26 except (AttributeError, NotImplementedError): 27 return '\u2205' # empty set character 28 except com.IbisError: 29 assert isinstance(node, ops.Join) 30 left_table_name = getattr(node.left, 'name', None) or ops.genname() 31 left_schema = node.left.schema 32 right_table_name = getattr(node.right, 'name', None) or ops.genname() 33 right_schema = node.right.schema 34 pairs = [ 35 (f'{left_table_name}.{left_column}', type) 36 for left_column, type in left_schema.items() 37 ] + [ 38 (f'{right_table_name}.{right_column}', type) 39 for right_column, type in right_schema.items() 40 ] 41 schema = ibis.schema(pairs) 42 43 return ( 44 ''.join( 45 '<BR ALIGN="LEFT" /> <I>{}</I>: {}'.format( 46 escape(name), escape(str(type)) 47 ) 48 for name, type in zip(schema.names, schema.types) 49 ) 50 + '<BR ALIGN="LEFT" />' 51 ) 52 53 54 def get_label(node): 55 typename = get_type(node) # Already an escaped string 56 name = type(node).__name__ 57 nodename = ( 58 node.name 59 if isinstance( 60 node, (ops.Literal, ops.TableColumn, ops.Alias, ops.PhysicalTable) 61 ) 62 else None 63 ) 64 if nodename is not None: 65 if isinstance(node, ops.TableNode): 66 label_fmt = '<<I>{}</I>: <B>{}</B>{}>' 67 else: 68 label_fmt = '<<I>{}</I>: <B>{}</B><BR ALIGN="LEFT" />:: {}>' 69 label = label_fmt.format(escape(nodename), escape(name), typename) 70 else: 71 if isinstance(node, ops.TableNode): 72 label_fmt = '<<B>{}</B>{}>' 73 else: 74 label_fmt = '<<B>{}</B><BR ALIGN="LEFT" />:: {}>' 75 label = label_fmt.format(escape(name), typename) 76 return label 77 78 79 DEFAULT_NODE_ATTRS = {'shape': 'box', 'fontname': 'Deja Vu Sans Mono'} 80 DEFAULT_EDGE_ATTRS = {'fontname': 'Deja Vu Sans Mono'} 81 82 83 def to_graph(expr, node_attr=None, edge_attr=None, label_edges: bool = False): 84 graph = Graph.from_bfs(expr.op()) 85 86 g = gv.Digraph( 87 node_attr=node_attr or DEFAULT_NODE_ATTRS, 88 edge_attr=edge_attr or DEFAULT_EDGE_ATTRS, 89 ) 90 91 g.attr(rankdir='BT') 92 93 seen = set() 94 edges = set() 95 96 for v, us in graph.items(): 97 if isinstance(v, ops.NodeList) and not v: 98 continue 99 100 vhash = str(hash(v)) 101 if v not in seen: 102 g.node(vhash, label=get_label(v)) 103 seen.add(v) 104 105 for u in us: 106 if isinstance(u, ops.NodeList) and not u: 107 continue 108 109 uhash = str(hash(u)) 110 if u not in seen: 111 g.node(uhash, label=get_label(u)) 112 seen.add(u) 113 if (edge := (u, v)) not in edges: 114 if not label_edges: 115 label = None 116 else: 117 index = v.args.index(u) 118 if isinstance(v, ops.NodeList): 119 arg_name = f"values[{index}]" 120 else: 121 arg_name = v.argnames[index] 122 label = f"<.{arg_name}>" 123 124 g.edge(uhash, vhash, label=label) 125 edges.add(edge) 126 return g 127 128 129 def draw(graph, path=None, format='png', verbose: bool = False): 130 if verbose: 131 print(graph.source, file=sys.stderr) 132 133 piped_source = graph.pipe(format=format) 134 135 if path is None: 136 with tempfile.NamedTemporaryFile( 137 delete=False, suffix=f'.{format}', mode='wb' 138 ) as f: 139 f.write(piped_source) 140 return f.name 141 else: 142 with open(path, mode='wb') as f: 143 f.write(piped_source) 144 return path 145 146 147 if __name__ == '__main__': 148 from argparse import ArgumentParser 149 150 from ibis import _ 151 152 p = ArgumentParser( 153 description="Render a GraphViz SVG of an example ibis expression." 154 ) 155 156 p.add_argument( 157 "-v", 158 "--verbose", 159 action="count", 160 default=0, 161 help="Print GraphViz DOT code to stderr.", 162 ) 163 p.add_argument( 164 "-l", 165 "--label-edges", 166 action="store_true", 167 help="Show operation inputs as edge labels.", 168 ) 169 170 args = p.parse_args() 171 172 left = ibis.table(dict(a="int64", b="string"), name="left") 173 right = ibis.table(dict(b="string", c="int64", d="string"), name="right") 174 expr = ( 175 left.inner_join(right, "b") 176 .select(left.a, b=right.c, c=right.d) 177 .filter((_.a + _.b * 2 * _.b / _.b**3 > 4) & (_.b > 5)) 178 .groupby(_.c) 179 .having(_.a.mean() > 0.0) 180 .aggregate(a_mean=_.a.mean(), b_sum=_.b.sum()) 181 ) 182 expr.visualize(verbose=args.verbose > 0, label_edges=args.label_edges) 183 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ibis/expr/visualize.py b/ibis/expr/visualize.py --- a/ibis/expr/visualize.py +++ b/ibis/expr/visualize.py @@ -114,10 +114,11 @@ if not label_edges: label = None else: - index = v.args.index(u) if isinstance(v, ops.NodeList): + index = v.values.index(u) arg_name = f"values[{index}]" else: + index = v.args.index(u) arg_name = v.argnames[index] label = f"<.{arg_name}>"
{"golden_diff": "diff --git a/ibis/expr/visualize.py b/ibis/expr/visualize.py\n--- a/ibis/expr/visualize.py\n+++ b/ibis/expr/visualize.py\n@@ -114,10 +114,11 @@\n if not label_edges:\n label = None\n else:\n- index = v.args.index(u)\n if isinstance(v, ops.NodeList):\n+ index = v.values.index(u)\n arg_name = f\"values[{index}]\"\n else:\n+ index = v.args.index(u)\n arg_name = v.argnames[index]\n label = f\"<.{arg_name}>\"\n", "issue": "bug: `.visualize(label_edges=True)` case ops.NodeList we get ValueError tuple.index(x): x not in tuple\nHi,\r\n\r\nThere's still one small thing that needs fixing in the great `.visualize(label_edges=True)` feature \ud83d\ude04 :\r\n\r\nWhen running...\r\n```py\r\nimport ibis\r\nt = ibis.table(((\"a\", \"int32\"), (\"b\", \"string\")))\r\nexpr = t[(t[\"a\"] == 1) & (t[\"b\"] == \"x\")]\r\n\r\nexpr.visualize(label_edges=True)\r\n```\r\n...I get:\r\n```\r\nException has occurred: ValueError - tuple.index(x): x not in tuple\r\n```\r\nat the following line:\r\nhttps://github.com/ibis-project/ibis/blob/2c9cfea15fc4d5f61e9099c3b270ea61498b5e45/ibis/expr/visualize.py#L117\r\n\r\nThis is happening when `v` is an `ops.NodeList` and thus its `v.args` is a tuple `of tuples` and NOT a tuple of nodes.\r\n\r\nGiven that on the next line we have special logic for `ops.NodeList`, maybe one quick fix could be to use the `.values`/`.args[0]` in such cases:\r\n\r\n![image](https://user-images.githubusercontent.com/10154357/192848219-48560533-9cbc-4621-8759-145dcbe95462.png)\r\n\r\nThank you.\n", "before_files": [{"content": "import sys\nimport tempfile\nfrom html import escape\n\nimport graphviz as gv\n\nimport ibis\nimport ibis.common.exceptions as com\nimport ibis.expr.operations as ops\nfrom ibis.common.graph import Graph\n\n\ndef get_type(node):\n try:\n return str(node.output_dtype)\n except (AttributeError, NotImplementedError):\n pass\n\n try:\n schema = node.schema\n except (AttributeError, NotImplementedError):\n # TODO(kszucs): this branch should be removed\n try:\n # As a last resort try get the name of the output_type class\n return node.output_type.__name__\n except (AttributeError, NotImplementedError):\n return '\\u2205' # empty set character\n except com.IbisError:\n assert isinstance(node, ops.Join)\n left_table_name = getattr(node.left, 'name', None) or ops.genname()\n left_schema = node.left.schema\n right_table_name = getattr(node.right, 'name', None) or ops.genname()\n right_schema = node.right.schema\n pairs = [\n (f'{left_table_name}.{left_column}', type)\n for left_column, type in left_schema.items()\n ] + [\n (f'{right_table_name}.{right_column}', type)\n for right_column, type in right_schema.items()\n ]\n schema = ibis.schema(pairs)\n\n return (\n ''.join(\n '<BR ALIGN=\"LEFT\" /> <I>{}</I>: {}'.format(\n escape(name), escape(str(type))\n )\n for name, type in zip(schema.names, schema.types)\n )\n + '<BR ALIGN=\"LEFT\" />'\n )\n\n\ndef get_label(node):\n typename = get_type(node) # Already an escaped string\n name = type(node).__name__\n nodename = (\n node.name\n if isinstance(\n node, (ops.Literal, ops.TableColumn, ops.Alias, ops.PhysicalTable)\n )\n else None\n )\n if nodename is not None:\n if isinstance(node, ops.TableNode):\n label_fmt = '<<I>{}</I>: <B>{}</B>{}>'\n else:\n label_fmt = '<<I>{}</I>: <B>{}</B><BR ALIGN=\"LEFT\" />:: {}>'\n label = label_fmt.format(escape(nodename), escape(name), typename)\n else:\n if isinstance(node, ops.TableNode):\n label_fmt = '<<B>{}</B>{}>'\n else:\n label_fmt = '<<B>{}</B><BR ALIGN=\"LEFT\" />:: {}>'\n label = label_fmt.format(escape(name), typename)\n return label\n\n\nDEFAULT_NODE_ATTRS = {'shape': 'box', 'fontname': 'Deja Vu Sans Mono'}\nDEFAULT_EDGE_ATTRS = {'fontname': 'Deja Vu Sans Mono'}\n\n\ndef to_graph(expr, node_attr=None, edge_attr=None, label_edges: bool = False):\n graph = Graph.from_bfs(expr.op())\n\n g = gv.Digraph(\n node_attr=node_attr or DEFAULT_NODE_ATTRS,\n edge_attr=edge_attr or DEFAULT_EDGE_ATTRS,\n )\n\n g.attr(rankdir='BT')\n\n seen = set()\n edges = set()\n\n for v, us in graph.items():\n if isinstance(v, ops.NodeList) and not v:\n continue\n\n vhash = str(hash(v))\n if v not in seen:\n g.node(vhash, label=get_label(v))\n seen.add(v)\n\n for u in us:\n if isinstance(u, ops.NodeList) and not u:\n continue\n\n uhash = str(hash(u))\n if u not in seen:\n g.node(uhash, label=get_label(u))\n seen.add(u)\n if (edge := (u, v)) not in edges:\n if not label_edges:\n label = None\n else:\n index = v.args.index(u)\n if isinstance(v, ops.NodeList):\n arg_name = f\"values[{index}]\"\n else:\n arg_name = v.argnames[index]\n label = f\"<.{arg_name}>\"\n\n g.edge(uhash, vhash, label=label)\n edges.add(edge)\n return g\n\n\ndef draw(graph, path=None, format='png', verbose: bool = False):\n if verbose:\n print(graph.source, file=sys.stderr)\n\n piped_source = graph.pipe(format=format)\n\n if path is None:\n with tempfile.NamedTemporaryFile(\n delete=False, suffix=f'.{format}', mode='wb'\n ) as f:\n f.write(piped_source)\n return f.name\n else:\n with open(path, mode='wb') as f:\n f.write(piped_source)\n return path\n\n\nif __name__ == '__main__':\n from argparse import ArgumentParser\n\n from ibis import _\n\n p = ArgumentParser(\n description=\"Render a GraphViz SVG of an example ibis expression.\"\n )\n\n p.add_argument(\n \"-v\",\n \"--verbose\",\n action=\"count\",\n default=0,\n help=\"Print GraphViz DOT code to stderr.\",\n )\n p.add_argument(\n \"-l\",\n \"--label-edges\",\n action=\"store_true\",\n help=\"Show operation inputs as edge labels.\",\n )\n\n args = p.parse_args()\n\n left = ibis.table(dict(a=\"int64\", b=\"string\"), name=\"left\")\n right = ibis.table(dict(b=\"string\", c=\"int64\", d=\"string\"), name=\"right\")\n expr = (\n left.inner_join(right, \"b\")\n .select(left.a, b=right.c, c=right.d)\n .filter((_.a + _.b * 2 * _.b / _.b**3 > 4) & (_.b > 5))\n .groupby(_.c)\n .having(_.a.mean() > 0.0)\n .aggregate(a_mean=_.a.mean(), b_sum=_.b.sum())\n )\n expr.visualize(verbose=args.verbose > 0, label_edges=args.label_edges)\n", "path": "ibis/expr/visualize.py"}], "after_files": [{"content": "import sys\nimport tempfile\nfrom html import escape\n\nimport graphviz as gv\n\nimport ibis\nimport ibis.common.exceptions as com\nimport ibis.expr.operations as ops\nfrom ibis.common.graph import Graph\n\n\ndef get_type(node):\n try:\n return str(node.output_dtype)\n except (AttributeError, NotImplementedError):\n pass\n\n try:\n schema = node.schema\n except (AttributeError, NotImplementedError):\n # TODO(kszucs): this branch should be removed\n try:\n # As a last resort try get the name of the output_type class\n return node.output_type.__name__\n except (AttributeError, NotImplementedError):\n return '\\u2205' # empty set character\n except com.IbisError:\n assert isinstance(node, ops.Join)\n left_table_name = getattr(node.left, 'name', None) or ops.genname()\n left_schema = node.left.schema\n right_table_name = getattr(node.right, 'name', None) or ops.genname()\n right_schema = node.right.schema\n pairs = [\n (f'{left_table_name}.{left_column}', type)\n for left_column, type in left_schema.items()\n ] + [\n (f'{right_table_name}.{right_column}', type)\n for right_column, type in right_schema.items()\n ]\n schema = ibis.schema(pairs)\n\n return (\n ''.join(\n '<BR ALIGN=\"LEFT\" /> <I>{}</I>: {}'.format(\n escape(name), escape(str(type))\n )\n for name, type in zip(schema.names, schema.types)\n )\n + '<BR ALIGN=\"LEFT\" />'\n )\n\n\ndef get_label(node):\n typename = get_type(node) # Already an escaped string\n name = type(node).__name__\n nodename = (\n node.name\n if isinstance(\n node, (ops.Literal, ops.TableColumn, ops.Alias, ops.PhysicalTable)\n )\n else None\n )\n if nodename is not None:\n if isinstance(node, ops.TableNode):\n label_fmt = '<<I>{}</I>: <B>{}</B>{}>'\n else:\n label_fmt = '<<I>{}</I>: <B>{}</B><BR ALIGN=\"LEFT\" />:: {}>'\n label = label_fmt.format(escape(nodename), escape(name), typename)\n else:\n if isinstance(node, ops.TableNode):\n label_fmt = '<<B>{}</B>{}>'\n else:\n label_fmt = '<<B>{}</B><BR ALIGN=\"LEFT\" />:: {}>'\n label = label_fmt.format(escape(name), typename)\n return label\n\n\nDEFAULT_NODE_ATTRS = {'shape': 'box', 'fontname': 'Deja Vu Sans Mono'}\nDEFAULT_EDGE_ATTRS = {'fontname': 'Deja Vu Sans Mono'}\n\n\ndef to_graph(expr, node_attr=None, edge_attr=None, label_edges: bool = False):\n graph = Graph.from_bfs(expr.op())\n\n g = gv.Digraph(\n node_attr=node_attr or DEFAULT_NODE_ATTRS,\n edge_attr=edge_attr or DEFAULT_EDGE_ATTRS,\n )\n\n g.attr(rankdir='BT')\n\n seen = set()\n edges = set()\n\n for v, us in graph.items():\n if isinstance(v, ops.NodeList) and not v:\n continue\n\n vhash = str(hash(v))\n if v not in seen:\n g.node(vhash, label=get_label(v))\n seen.add(v)\n\n for u in us:\n if isinstance(u, ops.NodeList) and not u:\n continue\n\n uhash = str(hash(u))\n if u not in seen:\n g.node(uhash, label=get_label(u))\n seen.add(u)\n if (edge := (u, v)) not in edges:\n if not label_edges:\n label = None\n else:\n if isinstance(v, ops.NodeList):\n index = v.values.index(u)\n arg_name = f\"values[{index}]\"\n else:\n index = v.args.index(u)\n arg_name = v.argnames[index]\n label = f\"<.{arg_name}>\"\n\n g.edge(uhash, vhash, label=label)\n edges.add(edge)\n return g\n\n\ndef draw(graph, path=None, format='png', verbose: bool = False):\n if verbose:\n print(graph.source, file=sys.stderr)\n\n piped_source = graph.pipe(format=format)\n\n if path is None:\n with tempfile.NamedTemporaryFile(\n delete=False, suffix=f'.{format}', mode='wb'\n ) as f:\n f.write(piped_source)\n return f.name\n else:\n with open(path, mode='wb') as f:\n f.write(piped_source)\n return path\n\n\nif __name__ == '__main__':\n from argparse import ArgumentParser\n\n from ibis import _\n\n p = ArgumentParser(\n description=\"Render a GraphViz SVG of an example ibis expression.\"\n )\n\n p.add_argument(\n \"-v\",\n \"--verbose\",\n action=\"count\",\n default=0,\n help=\"Print GraphViz DOT code to stderr.\",\n )\n p.add_argument(\n \"-l\",\n \"--label-edges\",\n action=\"store_true\",\n help=\"Show operation inputs as edge labels.\",\n )\n\n args = p.parse_args()\n\n left = ibis.table(dict(a=\"int64\", b=\"string\"), name=\"left\")\n right = ibis.table(dict(b=\"string\", c=\"int64\", d=\"string\"), name=\"right\")\n expr = (\n left.inner_join(right, \"b\")\n .select(left.a, b=right.c, c=right.d)\n .filter((_.a + _.b * 2 * _.b / _.b**3 > 4) & (_.b > 5))\n .groupby(_.c)\n .having(_.a.mean() > 0.0)\n .aggregate(a_mean=_.a.mean(), b_sum=_.b.sum())\n )\n expr.visualize(verbose=args.verbose > 0, label_edges=args.label_edges)\n", "path": "ibis/expr/visualize.py"}]}
2,362
147
gh_patches_debug_26844
rasdani/github-patches
git_diff
ESMCI__cime-1712
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- PIO_NUMTASKS is getting reset Setting env_run.xml's PIO_STRIDE to default -99 and PIO_NUMTASKS to 64 resets both PIO_STRIDE and PIO_NUMTASKS to different default values at run-time. XML comments: ``` <entry id="PIO_STRIDE"> <desc> stride in compute comm of io tasks for each component, if this value is -99 it will be computed based on PIO_NUMTASKS and number of compute tasks </desc> ``` but instead of 64, PIO_NUMTASKS is getting reset to 8 or 16 depending on the number of component's MPI tasks. Note that setting PIO_STRIDE to a specific value and leaving PIO_NUMTASKS at default -99 works as expected: PIO_NUMTASKS is set to NTASKS / PIO_STRIDE. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scripts/lib/CIME/case_run.py` Content: ``` 1 from CIME.XML.standard_module_setup import * 2 from CIME.case_submit import submit 3 from CIME.utils import gzip_existing_file, new_lid, run_and_log_case_status 4 from CIME.check_lockedfiles import check_lockedfiles 5 from CIME.get_timing import get_timing 6 from CIME.provenance import save_prerun_provenance, save_postrun_provenance 7 from CIME.preview_namelists import create_namelists 8 from CIME.case_st_archive import case_st_archive, restore_from_archive 9 10 import shutil, time, sys, os, glob 11 12 logger = logging.getLogger(__name__) 13 14 ############################################################################### 15 def pre_run_check(case, lid, skip_pnl=False): 16 ############################################################################### 17 18 # Pre run initialization code.. 19 caseroot = case.get_value("CASEROOT") 20 din_loc_root = case.get_value("DIN_LOC_ROOT") 21 batchsubmit = case.get_value("BATCHSUBMIT") 22 mpilib = case.get_value("MPILIB") 23 rundir = case.get_value("RUNDIR") 24 build_complete = case.get_value("BUILD_COMPLETE") 25 26 if case.get_value("TESTCASE") == "PFS": 27 env_mach_pes = os.path.join(caseroot,"env_mach_pes.xml") 28 shutil.copy(env_mach_pes,"{}.{}".format(env_mach_pes, lid)) 29 30 # check for locked files. 31 check_lockedfiles(case.get_value("CASEROOT")) 32 logger.debug("check_lockedfiles OK") 33 34 # check that build is done 35 expect(build_complete, 36 "BUILD_COMPLETE is not true\nPlease rebuild the model interactively") 37 logger.debug("build complete is {} ".format(build_complete)) 38 39 # load the module environment... 40 case.load_env() 41 42 # set environment variables 43 # This is a requirement for yellowstone only 44 if mpilib == "mpi-serial" and "MP_MPILIB" in os.environ: 45 del os.environ["MP_MPILIB"] 46 else: 47 os.environ["MPILIB"] = mpilib 48 49 if batchsubmit is None or len(batchsubmit) == 0: 50 os.environ["LBQUERY"] = "FALSE" 51 os.environ["BATCHQUERY"] = "undefined" 52 elif batchsubmit == 'UNSET': 53 os.environ["LBQUERY"] = "FALSE" 54 os.environ["BATCHQUERY"] = "undefined" 55 else: 56 os.environ["LBQUERY"] = "TRUE" 57 58 # create the timing directories, optionally cleaning them if needed. 59 if not os.path.isdir(rundir): 60 os.mkdir(rundir) 61 62 if os.path.isdir(os.path.join(rundir, "timing")): 63 shutil.rmtree(os.path.join(rundir, "timing")) 64 65 os.makedirs(os.path.join(rundir, "timing", "checkpoints")) 66 67 # This needs to be done everytime the LID changes in order for log files to be set up correctly 68 # The following also needs to be called in case a user changes a user_nl_xxx file OR an env_run.xml 69 # variable while the job is in the queue 70 if not skip_pnl: 71 create_namelists(case) 72 73 logger.info("-------------------------------------------------------------------------") 74 logger.info(" - Prestage required restarts into {}".format(rundir)) 75 logger.info(" - Case input data directory (DIN_LOC_ROOT) is {} ".format(din_loc_root)) 76 logger.info(" - Checking for required input datasets in DIN_LOC_ROOT") 77 logger.info("-------------------------------------------------------------------------") 78 79 ############################################################################### 80 def _run_model_impl(case, lid, skip_pnl=False): 81 ############################################################################### 82 83 pre_run_check(case, lid, skip_pnl=skip_pnl) 84 85 model = case.get_value("MODEL") 86 87 # Set OMP_NUM_THREADS 88 env_mach_pes = case.get_env("mach_pes") 89 comp_classes = case.get_values("COMP_CLASSES") 90 thread_count = env_mach_pes.get_max_thread_count(comp_classes) 91 os.environ["OMP_NUM_THREADS"] = str(thread_count) 92 93 # Run the model 94 logger.info("{} MODEL EXECUTION BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S"))) 95 96 cmd = case.get_mpirun_cmd(job="case.run") 97 cmd = case.get_resolved_value(cmd) 98 logger.info("run command is {} ".format(cmd)) 99 100 rundir = case.get_value("RUNDIR") 101 loop = True 102 103 while loop: 104 loop = False 105 stat = run_cmd(cmd, from_dir=rundir)[0] 106 model_logfile = os.path.join(rundir, model + ".log." + lid) 107 # Determine if failure was due to a failed node, if so, try to restart 108 if stat != 0: 109 node_fail_re = case.get_value("NODE_FAIL_REGEX") 110 if node_fail_re: 111 node_fail_regex = re.compile(node_fail_re) 112 model_logfile = os.path.join(rundir, model + ".log." + lid) 113 if os.path.exists(model_logfile): 114 num_fails = len(node_fail_regex.findall(open(model_logfile, 'r').read())) 115 if num_fails > 0 and case.spare_nodes >= num_fails: 116 # We failed due to node failure! 117 logger.warning("Detected model run failed due to node failure, restarting") 118 119 # Archive the last consistent set of restart files and restore them 120 case_st_archive(case, no_resubmit=True) 121 restore_from_archive(case) 122 123 orig_cont = case.get_value("CONTINUE_RUN") 124 if not orig_cont: 125 case.set_value("CONTINUE_RUN", True) 126 create_namelists(case) 127 128 lid = new_lid() 129 loop = True 130 131 case.spare_nodes -= num_fails 132 133 if not loop: 134 # We failed and we're not restarting 135 expect(False, "RUN FAIL: Command '{}' failed\nSee log file for details: {}".format(cmd, model_logfile)) 136 137 logger.info("{} MODEL EXECUTION HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S"))) 138 139 post_run_check(case, lid) 140 141 return lid 142 143 ############################################################################### 144 def run_model(case, lid, skip_pnl=False): 145 ############################################################################### 146 functor = lambda: _run_model_impl(case, lid, skip_pnl=skip_pnl) 147 return run_and_log_case_status(functor, "case.run", caseroot=case.get_value("CASEROOT")) 148 149 ############################################################################### 150 def post_run_check(case, lid): 151 ############################################################################### 152 153 rundir = case.get_value("RUNDIR") 154 model = case.get_value("MODEL") 155 156 # find the last model.log and cpl.log 157 model_logfile = os.path.join(rundir, model + ".log." + lid) 158 cpl_logfile = os.path.join(rundir, "cpl" + ".log." + lid) 159 160 if not os.path.isfile(model_logfile): 161 expect(False, "Model did not complete, no {} log file ".format(model_logfile)) 162 elif not os.path.isfile(cpl_logfile): 163 expect(False, "Model did not complete, no cpl log file '{}'".format(cpl_logfile)) 164 elif os.stat(model_logfile).st_size == 0: 165 expect(False, "Run FAILED") 166 else: 167 with open(cpl_logfile, 'r') as fd: 168 if 'SUCCESSFUL TERMINATION' not in fd.read(): 169 expect(False, "Model did not complete - see {} \n ".format(cpl_logfile)) 170 171 ############################################################################### 172 def save_logs(case, lid): 173 ############################################################################### 174 logdir = case.get_value("LOGDIR") 175 if logdir is not None and len(logdir) > 0: 176 if not os.path.isdir(logdir): 177 os.makedirs(logdir) 178 179 caseroot = case.get_value("CASEROOT") 180 rundir = case.get_value("RUNDIR") 181 logfiles = glob.glob(os.path.join(rundir, "*.log.{}".format(lid))) 182 for logfile in logfiles: 183 if os.path.isfile(logfile): 184 logfile_gz = gzip_existing_file(logfile) 185 shutil.copy(logfile_gz, 186 os.path.join(caseroot, logdir, os.path.basename(logfile_gz))) 187 188 ############################################################################### 189 def resubmit_check(case): 190 ############################################################################### 191 192 # check to see if we need to do resubmission from this particular job, 193 # Note that Mira requires special logic 194 195 dout_s = case.get_value("DOUT_S") 196 logger.warn("dout_s {} ".format(dout_s)) 197 mach = case.get_value("MACH") 198 logger.warn("mach {} ".format(mach)) 199 testcase = case.get_value("TESTCASE") 200 resubmit_num = case.get_value("RESUBMIT") 201 logger.warn("resubmit_num {}".format(resubmit_num)) 202 # If dout_s is True than short-term archiving handles the resubmit 203 # If dout_s is True and machine is mira submit the st_archive script 204 resubmit = False 205 if not dout_s and resubmit_num > 0: 206 resubmit = True 207 elif dout_s and mach == 'mira': 208 caseroot = case.get_value("CASEROOT") 209 cimeroot = case.get_value("CIMEROOT") 210 cmd = "ssh cooleylogin1 'cd {}; CIMEROOT={} ./case.submit {} --job case.st_archive'".format(caseroot, cimeroot, caseroot) 211 run_cmd(cmd, verbose=True) 212 213 if resubmit: 214 if testcase is not None and testcase in ['ERR']: 215 job = "case.test" 216 else: 217 job = "case.run" 218 submit(case, job=job, resubmit=True) 219 220 ############################################################################### 221 def do_external(script_name, caseroot, rundir, lid, prefix): 222 ############################################################################### 223 filename = "{}.external.log.{}".format(prefix, lid) 224 outfile = os.path.join(rundir, filename) 225 cmd = script_name + " 1> {} {} 2>&1".format(outfile, caseroot) 226 logger.info("running {}".format(script_name)) 227 run_cmd_no_fail(cmd) 228 229 ############################################################################### 230 def do_data_assimilation(da_script, caseroot, cycle, lid, rundir): 231 ############################################################################### 232 filename = "da.log.{}".format(lid) 233 outfile = os.path.join(rundir, filename) 234 cmd = da_script + " 1> {} {} {:d} 2>&1".format(outfile, caseroot, cycle) 235 logger.info("running {}".format(da_script)) 236 run_cmd_no_fail(cmd) 237 238 ############################################################################### 239 def case_run(case, skip_pnl=False): 240 ############################################################################### 241 # Set up the run, run the model, do the postrun steps 242 run_with_submit = case.get_value("RUN_WITH_SUBMIT") 243 expect(run_with_submit, 244 "You are not calling the run script via the submit script. " 245 "As a result, short-term archiving will not be called automatically." 246 "Please submit your run using the submit script like so:" 247 " ./case.submit") 248 249 # Forces user to use case.submit if they re-submit 250 if case.get_value("TESTCASE") is None: 251 case.set_value("RUN_WITH_SUBMIT", False) 252 253 prerun_script = case.get_value("PRERUN_SCRIPT") 254 postrun_script = case.get_value("POSTRUN_SCRIPT") 255 256 data_assimilation = case.get_value("DATA_ASSIMILATION") 257 data_assimilation_cycles = case.get_value("DATA_ASSIMILATION_CYCLES") 258 data_assimilation_script = case.get_value("DATA_ASSIMILATION_SCRIPT") 259 260 # set up the LID 261 lid = new_lid() 262 263 save_prerun_provenance(case) 264 265 for cycle in range(data_assimilation_cycles): 266 # After the first DA cycle, runs are restart runs 267 if cycle > 0: 268 case.set_value("CONTINUE_RUN", "TRUE") 269 lid = new_lid() 270 271 if prerun_script: 272 do_external(prerun_script, case.get_value("CASEROOT"), case.get_value("RUNDIR"), 273 lid, prefix="prerun") 274 275 lid = run_model(case, lid, skip_pnl) 276 save_logs(case, lid) # Copy log files back to caseroot 277 if case.get_value("CHECK_TIMING") or case.get_value("SAVE_TIMING"): 278 get_timing(case, lid) # Run the getTiming script 279 280 if data_assimilation: 281 do_data_assimilation(data_assimilation_script, case.get_value("CASEROOT"), cycle, lid, 282 case.get_value("RUNDIR")) 283 284 if postrun_script: 285 do_external(postrun_script, case.get_value("CASEROOT"), case.get_value("RUNDIR"), 286 lid, prefix="postrun") 287 288 save_postrun_provenance(case) 289 290 logger.warn("check for resubmit") 291 resubmit_check(case) 292 293 return True 294 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scripts/lib/CIME/case_run.py b/scripts/lib/CIME/case_run.py --- a/scripts/lib/CIME/case_run.py +++ b/scripts/lib/CIME/case_run.py @@ -269,8 +269,10 @@ lid = new_lid() if prerun_script: + case.flush() do_external(prerun_script, case.get_value("CASEROOT"), case.get_value("RUNDIR"), lid, prefix="prerun") + case.read_xml() lid = run_model(case, lid, skip_pnl) save_logs(case, lid) # Copy log files back to caseroot @@ -278,12 +280,16 @@ get_timing(case, lid) # Run the getTiming script if data_assimilation: + case.flush() do_data_assimilation(data_assimilation_script, case.get_value("CASEROOT"), cycle, lid, case.get_value("RUNDIR")) + case.read_xml() if postrun_script: + case.flush() do_external(postrun_script, case.get_value("CASEROOT"), case.get_value("RUNDIR"), lid, prefix="postrun") + case.read_xml() save_postrun_provenance(case)
{"golden_diff": "diff --git a/scripts/lib/CIME/case_run.py b/scripts/lib/CIME/case_run.py\n--- a/scripts/lib/CIME/case_run.py\n+++ b/scripts/lib/CIME/case_run.py\n@@ -269,8 +269,10 @@\n lid = new_lid()\n \n if prerun_script:\n+ case.flush()\n do_external(prerun_script, case.get_value(\"CASEROOT\"), case.get_value(\"RUNDIR\"),\n lid, prefix=\"prerun\")\n+ case.read_xml()\n \n lid = run_model(case, lid, skip_pnl)\n save_logs(case, lid) # Copy log files back to caseroot\n@@ -278,12 +280,16 @@\n get_timing(case, lid) # Run the getTiming script\n \n if data_assimilation:\n+ case.flush()\n do_data_assimilation(data_assimilation_script, case.get_value(\"CASEROOT\"), cycle, lid,\n case.get_value(\"RUNDIR\"))\n+ case.read_xml()\n \n if postrun_script:\n+ case.flush()\n do_external(postrun_script, case.get_value(\"CASEROOT\"), case.get_value(\"RUNDIR\"),\n lid, prefix=\"postrun\")\n+ case.read_xml()\n \n save_postrun_provenance(case)\n", "issue": "PIO_NUMTASKS is getting reset\nSetting env_run.xml's PIO_STRIDE to default -99 and PIO_NUMTASKS to 64 resets both PIO_STRIDE and PIO_NUMTASKS to different default values at run-time.\r\n\r\nXML comments:\r\n```\r\n <entry id=\"PIO_STRIDE\">\r\n <desc>\r\n stride in compute comm of io tasks for each component, if this value is -99 it will \r\n be computed based on PIO_NUMTASKS and number of compute tasks\r\n </desc>\r\n```\r\n\r\nbut instead of 64, PIO_NUMTASKS is getting reset to 8 or 16 depending on the number of component's MPI tasks.\r\n\r\nNote that setting PIO_STRIDE to a specific value and leaving PIO_NUMTASKS at default -99 works as expected: PIO_NUMTASKS is set to NTASKS / PIO_STRIDE.\n", "before_files": [{"content": "from CIME.XML.standard_module_setup import *\nfrom CIME.case_submit import submit\nfrom CIME.utils import gzip_existing_file, new_lid, run_and_log_case_status\nfrom CIME.check_lockedfiles import check_lockedfiles\nfrom CIME.get_timing import get_timing\nfrom CIME.provenance import save_prerun_provenance, save_postrun_provenance\nfrom CIME.preview_namelists import create_namelists\nfrom CIME.case_st_archive import case_st_archive, restore_from_archive\n\nimport shutil, time, sys, os, glob\n\nlogger = logging.getLogger(__name__)\n\n###############################################################################\ndef pre_run_check(case, lid, skip_pnl=False):\n###############################################################################\n\n # Pre run initialization code..\n caseroot = case.get_value(\"CASEROOT\")\n din_loc_root = case.get_value(\"DIN_LOC_ROOT\")\n batchsubmit = case.get_value(\"BATCHSUBMIT\")\n mpilib = case.get_value(\"MPILIB\")\n rundir = case.get_value(\"RUNDIR\")\n build_complete = case.get_value(\"BUILD_COMPLETE\")\n\n if case.get_value(\"TESTCASE\") == \"PFS\":\n env_mach_pes = os.path.join(caseroot,\"env_mach_pes.xml\")\n shutil.copy(env_mach_pes,\"{}.{}\".format(env_mach_pes, lid))\n\n # check for locked files.\n check_lockedfiles(case.get_value(\"CASEROOT\"))\n logger.debug(\"check_lockedfiles OK\")\n\n # check that build is done\n expect(build_complete,\n \"BUILD_COMPLETE is not true\\nPlease rebuild the model interactively\")\n logger.debug(\"build complete is {} \".format(build_complete))\n\n # load the module environment...\n case.load_env()\n\n # set environment variables\n # This is a requirement for yellowstone only\n if mpilib == \"mpi-serial\" and \"MP_MPILIB\" in os.environ:\n del os.environ[\"MP_MPILIB\"]\n else:\n os.environ[\"MPILIB\"] = mpilib\n\n if batchsubmit is None or len(batchsubmit) == 0:\n os.environ[\"LBQUERY\"] = \"FALSE\"\n os.environ[\"BATCHQUERY\"] = \"undefined\"\n elif batchsubmit == 'UNSET':\n os.environ[\"LBQUERY\"] = \"FALSE\"\n os.environ[\"BATCHQUERY\"] = \"undefined\"\n else:\n os.environ[\"LBQUERY\"] = \"TRUE\"\n\n # create the timing directories, optionally cleaning them if needed.\n if not os.path.isdir(rundir):\n os.mkdir(rundir)\n\n if os.path.isdir(os.path.join(rundir, \"timing\")):\n shutil.rmtree(os.path.join(rundir, \"timing\"))\n\n os.makedirs(os.path.join(rundir, \"timing\", \"checkpoints\"))\n\n # This needs to be done everytime the LID changes in order for log files to be set up correctly\n # The following also needs to be called in case a user changes a user_nl_xxx file OR an env_run.xml\n # variable while the job is in the queue\n if not skip_pnl:\n create_namelists(case)\n\n logger.info(\"-------------------------------------------------------------------------\")\n logger.info(\" - Prestage required restarts into {}\".format(rundir))\n logger.info(\" - Case input data directory (DIN_LOC_ROOT) is {} \".format(din_loc_root))\n logger.info(\" - Checking for required input datasets in DIN_LOC_ROOT\")\n logger.info(\"-------------------------------------------------------------------------\")\n\n###############################################################################\ndef _run_model_impl(case, lid, skip_pnl=False):\n###############################################################################\n\n pre_run_check(case, lid, skip_pnl=skip_pnl)\n\n model = case.get_value(\"MODEL\")\n\n # Set OMP_NUM_THREADS\n env_mach_pes = case.get_env(\"mach_pes\")\n comp_classes = case.get_values(\"COMP_CLASSES\")\n thread_count = env_mach_pes.get_max_thread_count(comp_classes)\n os.environ[\"OMP_NUM_THREADS\"] = str(thread_count)\n\n # Run the model\n logger.info(\"{} MODEL EXECUTION BEGINS HERE\".format(time.strftime(\"%Y-%m-%d %H:%M:%S\")))\n\n cmd = case.get_mpirun_cmd(job=\"case.run\")\n cmd = case.get_resolved_value(cmd)\n logger.info(\"run command is {} \".format(cmd))\n\n rundir = case.get_value(\"RUNDIR\")\n loop = True\n\n while loop:\n loop = False\n stat = run_cmd(cmd, from_dir=rundir)[0]\n model_logfile = os.path.join(rundir, model + \".log.\" + lid)\n # Determine if failure was due to a failed node, if so, try to restart\n if stat != 0:\n node_fail_re = case.get_value(\"NODE_FAIL_REGEX\")\n if node_fail_re:\n node_fail_regex = re.compile(node_fail_re)\n model_logfile = os.path.join(rundir, model + \".log.\" + lid)\n if os.path.exists(model_logfile):\n num_fails = len(node_fail_regex.findall(open(model_logfile, 'r').read()))\n if num_fails > 0 and case.spare_nodes >= num_fails:\n # We failed due to node failure!\n logger.warning(\"Detected model run failed due to node failure, restarting\")\n\n # Archive the last consistent set of restart files and restore them\n case_st_archive(case, no_resubmit=True)\n restore_from_archive(case)\n\n orig_cont = case.get_value(\"CONTINUE_RUN\")\n if not orig_cont:\n case.set_value(\"CONTINUE_RUN\", True)\n create_namelists(case)\n\n lid = new_lid()\n loop = True\n\n case.spare_nodes -= num_fails\n\n if not loop:\n # We failed and we're not restarting\n expect(False, \"RUN FAIL: Command '{}' failed\\nSee log file for details: {}\".format(cmd, model_logfile))\n\n logger.info(\"{} MODEL EXECUTION HAS FINISHED\".format(time.strftime(\"%Y-%m-%d %H:%M:%S\")))\n\n post_run_check(case, lid)\n\n return lid\n\n###############################################################################\ndef run_model(case, lid, skip_pnl=False):\n###############################################################################\n functor = lambda: _run_model_impl(case, lid, skip_pnl=skip_pnl)\n return run_and_log_case_status(functor, \"case.run\", caseroot=case.get_value(\"CASEROOT\"))\n\n###############################################################################\ndef post_run_check(case, lid):\n###############################################################################\n\n rundir = case.get_value(\"RUNDIR\")\n model = case.get_value(\"MODEL\")\n\n # find the last model.log and cpl.log\n model_logfile = os.path.join(rundir, model + \".log.\" + lid)\n cpl_logfile = os.path.join(rundir, \"cpl\" + \".log.\" + lid)\n\n if not os.path.isfile(model_logfile):\n expect(False, \"Model did not complete, no {} log file \".format(model_logfile))\n elif not os.path.isfile(cpl_logfile):\n expect(False, \"Model did not complete, no cpl log file '{}'\".format(cpl_logfile))\n elif os.stat(model_logfile).st_size == 0:\n expect(False, \"Run FAILED\")\n else:\n with open(cpl_logfile, 'r') as fd:\n if 'SUCCESSFUL TERMINATION' not in fd.read():\n expect(False, \"Model did not complete - see {} \\n \".format(cpl_logfile))\n\n###############################################################################\ndef save_logs(case, lid):\n###############################################################################\n logdir = case.get_value(\"LOGDIR\")\n if logdir is not None and len(logdir) > 0:\n if not os.path.isdir(logdir):\n os.makedirs(logdir)\n\n caseroot = case.get_value(\"CASEROOT\")\n rundir = case.get_value(\"RUNDIR\")\n logfiles = glob.glob(os.path.join(rundir, \"*.log.{}\".format(lid)))\n for logfile in logfiles:\n if os.path.isfile(logfile):\n logfile_gz = gzip_existing_file(logfile)\n shutil.copy(logfile_gz,\n os.path.join(caseroot, logdir, os.path.basename(logfile_gz)))\n\n###############################################################################\ndef resubmit_check(case):\n###############################################################################\n\n # check to see if we need to do resubmission from this particular job,\n # Note that Mira requires special logic\n\n dout_s = case.get_value(\"DOUT_S\")\n logger.warn(\"dout_s {} \".format(dout_s))\n mach = case.get_value(\"MACH\")\n logger.warn(\"mach {} \".format(mach))\n testcase = case.get_value(\"TESTCASE\")\n resubmit_num = case.get_value(\"RESUBMIT\")\n logger.warn(\"resubmit_num {}\".format(resubmit_num))\n # If dout_s is True than short-term archiving handles the resubmit\n # If dout_s is True and machine is mira submit the st_archive script\n resubmit = False\n if not dout_s and resubmit_num > 0:\n resubmit = True\n elif dout_s and mach == 'mira':\n caseroot = case.get_value(\"CASEROOT\")\n cimeroot = case.get_value(\"CIMEROOT\")\n cmd = \"ssh cooleylogin1 'cd {}; CIMEROOT={} ./case.submit {} --job case.st_archive'\".format(caseroot, cimeroot, caseroot)\n run_cmd(cmd, verbose=True)\n\n if resubmit:\n if testcase is not None and testcase in ['ERR']:\n job = \"case.test\"\n else:\n job = \"case.run\"\n submit(case, job=job, resubmit=True)\n\n###############################################################################\ndef do_external(script_name, caseroot, rundir, lid, prefix):\n###############################################################################\n filename = \"{}.external.log.{}\".format(prefix, lid)\n outfile = os.path.join(rundir, filename)\n cmd = script_name + \" 1> {} {} 2>&1\".format(outfile, caseroot)\n logger.info(\"running {}\".format(script_name))\n run_cmd_no_fail(cmd)\n\n###############################################################################\ndef do_data_assimilation(da_script, caseroot, cycle, lid, rundir):\n###############################################################################\n filename = \"da.log.{}\".format(lid)\n outfile = os.path.join(rundir, filename)\n cmd = da_script + \" 1> {} {} {:d} 2>&1\".format(outfile, caseroot, cycle)\n logger.info(\"running {}\".format(da_script))\n run_cmd_no_fail(cmd)\n\n###############################################################################\ndef case_run(case, skip_pnl=False):\n###############################################################################\n # Set up the run, run the model, do the postrun steps\n run_with_submit = case.get_value(\"RUN_WITH_SUBMIT\")\n expect(run_with_submit,\n \"You are not calling the run script via the submit script. \"\n \"As a result, short-term archiving will not be called automatically.\"\n \"Please submit your run using the submit script like so:\"\n \" ./case.submit\")\n\n # Forces user to use case.submit if they re-submit\n if case.get_value(\"TESTCASE\") is None:\n case.set_value(\"RUN_WITH_SUBMIT\", False)\n\n prerun_script = case.get_value(\"PRERUN_SCRIPT\")\n postrun_script = case.get_value(\"POSTRUN_SCRIPT\")\n\n data_assimilation = case.get_value(\"DATA_ASSIMILATION\")\n data_assimilation_cycles = case.get_value(\"DATA_ASSIMILATION_CYCLES\")\n data_assimilation_script = case.get_value(\"DATA_ASSIMILATION_SCRIPT\")\n\n # set up the LID\n lid = new_lid()\n\n save_prerun_provenance(case)\n\n for cycle in range(data_assimilation_cycles):\n # After the first DA cycle, runs are restart runs\n if cycle > 0:\n case.set_value(\"CONTINUE_RUN\", \"TRUE\")\n lid = new_lid()\n\n if prerun_script:\n do_external(prerun_script, case.get_value(\"CASEROOT\"), case.get_value(\"RUNDIR\"),\n lid, prefix=\"prerun\")\n\n lid = run_model(case, lid, skip_pnl)\n save_logs(case, lid) # Copy log files back to caseroot\n if case.get_value(\"CHECK_TIMING\") or case.get_value(\"SAVE_TIMING\"):\n get_timing(case, lid) # Run the getTiming script\n\n if data_assimilation:\n do_data_assimilation(data_assimilation_script, case.get_value(\"CASEROOT\"), cycle, lid,\n case.get_value(\"RUNDIR\"))\n\n if postrun_script:\n do_external(postrun_script, case.get_value(\"CASEROOT\"), case.get_value(\"RUNDIR\"),\n lid, prefix=\"postrun\")\n\n save_postrun_provenance(case)\n\n logger.warn(\"check for resubmit\")\n resubmit_check(case)\n\n return True\n", "path": "scripts/lib/CIME/case_run.py"}], "after_files": [{"content": "from CIME.XML.standard_module_setup import *\nfrom CIME.case_submit import submit\nfrom CIME.utils import gzip_existing_file, new_lid, run_and_log_case_status\nfrom CIME.check_lockedfiles import check_lockedfiles\nfrom CIME.get_timing import get_timing\nfrom CIME.provenance import save_prerun_provenance, save_postrun_provenance\nfrom CIME.preview_namelists import create_namelists\nfrom CIME.case_st_archive import case_st_archive, restore_from_archive\n\nimport shutil, time, sys, os, glob\n\nlogger = logging.getLogger(__name__)\n\n###############################################################################\ndef pre_run_check(case, lid, skip_pnl=False):\n###############################################################################\n\n # Pre run initialization code..\n caseroot = case.get_value(\"CASEROOT\")\n din_loc_root = case.get_value(\"DIN_LOC_ROOT\")\n batchsubmit = case.get_value(\"BATCHSUBMIT\")\n mpilib = case.get_value(\"MPILIB\")\n rundir = case.get_value(\"RUNDIR\")\n build_complete = case.get_value(\"BUILD_COMPLETE\")\n\n if case.get_value(\"TESTCASE\") == \"PFS\":\n env_mach_pes = os.path.join(caseroot,\"env_mach_pes.xml\")\n shutil.copy(env_mach_pes,\"{}.{}\".format(env_mach_pes, lid))\n\n # check for locked files.\n check_lockedfiles(case.get_value(\"CASEROOT\"))\n logger.debug(\"check_lockedfiles OK\")\n\n # check that build is done\n expect(build_complete,\n \"BUILD_COMPLETE is not true\\nPlease rebuild the model interactively\")\n logger.debug(\"build complete is {} \".format(build_complete))\n\n # load the module environment...\n case.load_env()\n\n # set environment variables\n # This is a requirement for yellowstone only\n if mpilib == \"mpi-serial\" and \"MP_MPILIB\" in os.environ:\n del os.environ[\"MP_MPILIB\"]\n else:\n os.environ[\"MPILIB\"] = mpilib\n\n if batchsubmit is None or len(batchsubmit) == 0:\n os.environ[\"LBQUERY\"] = \"FALSE\"\n os.environ[\"BATCHQUERY\"] = \"undefined\"\n elif batchsubmit == 'UNSET':\n os.environ[\"LBQUERY\"] = \"FALSE\"\n os.environ[\"BATCHQUERY\"] = \"undefined\"\n else:\n os.environ[\"LBQUERY\"] = \"TRUE\"\n\n # create the timing directories, optionally cleaning them if needed.\n if not os.path.isdir(rundir):\n os.mkdir(rundir)\n\n if os.path.isdir(os.path.join(rundir, \"timing\")):\n shutil.rmtree(os.path.join(rundir, \"timing\"))\n\n os.makedirs(os.path.join(rundir, \"timing\", \"checkpoints\"))\n\n # This needs to be done everytime the LID changes in order for log files to be set up correctly\n # The following also needs to be called in case a user changes a user_nl_xxx file OR an env_run.xml\n # variable while the job is in the queue\n if not skip_pnl:\n create_namelists(case)\n\n logger.info(\"-------------------------------------------------------------------------\")\n logger.info(\" - Prestage required restarts into {}\".format(rundir))\n logger.info(\" - Case input data directory (DIN_LOC_ROOT) is {} \".format(din_loc_root))\n logger.info(\" - Checking for required input datasets in DIN_LOC_ROOT\")\n logger.info(\"-------------------------------------------------------------------------\")\n\n###############################################################################\ndef _run_model_impl(case, lid, skip_pnl=False):\n###############################################################################\n\n pre_run_check(case, lid, skip_pnl=skip_pnl)\n\n model = case.get_value(\"MODEL\")\n\n # Set OMP_NUM_THREADS\n env_mach_pes = case.get_env(\"mach_pes\")\n comp_classes = case.get_values(\"COMP_CLASSES\")\n thread_count = env_mach_pes.get_max_thread_count(comp_classes)\n os.environ[\"OMP_NUM_THREADS\"] = str(thread_count)\n\n # Run the model\n logger.info(\"{} MODEL EXECUTION BEGINS HERE\".format(time.strftime(\"%Y-%m-%d %H:%M:%S\")))\n\n cmd = case.get_mpirun_cmd(job=\"case.run\")\n cmd = case.get_resolved_value(cmd)\n logger.info(\"run command is {} \".format(cmd))\n\n rundir = case.get_value(\"RUNDIR\")\n loop = True\n\n while loop:\n loop = False\n stat = run_cmd(cmd, from_dir=rundir)[0]\n model_logfile = os.path.join(rundir, model + \".log.\" + lid)\n # Determine if failure was due to a failed node, if so, try to restart\n if stat != 0:\n node_fail_re = case.get_value(\"NODE_FAIL_REGEX\")\n if node_fail_re:\n node_fail_regex = re.compile(node_fail_re)\n model_logfile = os.path.join(rundir, model + \".log.\" + lid)\n if os.path.exists(model_logfile):\n num_fails = len(node_fail_regex.findall(open(model_logfile, 'r').read()))\n if num_fails > 0 and case.spare_nodes >= num_fails:\n # We failed due to node failure!\n logger.warning(\"Detected model run failed due to node failure, restarting\")\n\n # Archive the last consistent set of restart files and restore them\n case_st_archive(case, no_resubmit=True)\n restore_from_archive(case)\n\n orig_cont = case.get_value(\"CONTINUE_RUN\")\n if not orig_cont:\n case.set_value(\"CONTINUE_RUN\", True)\n create_namelists(case)\n\n lid = new_lid()\n loop = True\n\n case.spare_nodes -= num_fails\n\n if not loop:\n # We failed and we're not restarting\n expect(False, \"RUN FAIL: Command '{}' failed\\nSee log file for details: {}\".format(cmd, model_logfile))\n\n logger.info(\"{} MODEL EXECUTION HAS FINISHED\".format(time.strftime(\"%Y-%m-%d %H:%M:%S\")))\n\n post_run_check(case, lid)\n\n return lid\n\n###############################################################################\ndef run_model(case, lid, skip_pnl=False):\n###############################################################################\n functor = lambda: _run_model_impl(case, lid, skip_pnl=skip_pnl)\n return run_and_log_case_status(functor, \"case.run\", caseroot=case.get_value(\"CASEROOT\"))\n\n###############################################################################\ndef post_run_check(case, lid):\n###############################################################################\n\n rundir = case.get_value(\"RUNDIR\")\n model = case.get_value(\"MODEL\")\n\n # find the last model.log and cpl.log\n model_logfile = os.path.join(rundir, model + \".log.\" + lid)\n cpl_logfile = os.path.join(rundir, \"cpl\" + \".log.\" + lid)\n\n if not os.path.isfile(model_logfile):\n expect(False, \"Model did not complete, no {} log file \".format(model_logfile))\n elif not os.path.isfile(cpl_logfile):\n expect(False, \"Model did not complete, no cpl log file '{}'\".format(cpl_logfile))\n elif os.stat(model_logfile).st_size == 0:\n expect(False, \"Run FAILED\")\n else:\n with open(cpl_logfile, 'r') as fd:\n if 'SUCCESSFUL TERMINATION' not in fd.read():\n expect(False, \"Model did not complete - see {} \\n \".format(cpl_logfile))\n\n###############################################################################\ndef save_logs(case, lid):\n###############################################################################\n logdir = case.get_value(\"LOGDIR\")\n if logdir is not None and len(logdir) > 0:\n if not os.path.isdir(logdir):\n os.makedirs(logdir)\n\n caseroot = case.get_value(\"CASEROOT\")\n rundir = case.get_value(\"RUNDIR\")\n logfiles = glob.glob(os.path.join(rundir, \"*.log.{}\".format(lid)))\n for logfile in logfiles:\n if os.path.isfile(logfile):\n logfile_gz = gzip_existing_file(logfile)\n shutil.copy(logfile_gz,\n os.path.join(caseroot, logdir, os.path.basename(logfile_gz)))\n\n###############################################################################\ndef resubmit_check(case):\n###############################################################################\n\n # check to see if we need to do resubmission from this particular job,\n # Note that Mira requires special logic\n\n dout_s = case.get_value(\"DOUT_S\")\n logger.warn(\"dout_s {} \".format(dout_s))\n mach = case.get_value(\"MACH\")\n logger.warn(\"mach {} \".format(mach))\n testcase = case.get_value(\"TESTCASE\")\n resubmit_num = case.get_value(\"RESUBMIT\")\n logger.warn(\"resubmit_num {}\".format(resubmit_num))\n # If dout_s is True than short-term archiving handles the resubmit\n # If dout_s is True and machine is mira submit the st_archive script\n resubmit = False\n if not dout_s and resubmit_num > 0:\n resubmit = True\n elif dout_s and mach == 'mira':\n caseroot = case.get_value(\"CASEROOT\")\n cimeroot = case.get_value(\"CIMEROOT\")\n cmd = \"ssh cooleylogin1 'cd {}; CIMEROOT={} ./case.submit {} --job case.st_archive'\".format(caseroot, cimeroot, caseroot)\n run_cmd(cmd, verbose=True)\n\n if resubmit:\n if testcase is not None and testcase in ['ERR']:\n job = \"case.test\"\n else:\n job = \"case.run\"\n submit(case, job=job, resubmit=True)\n\n###############################################################################\ndef do_external(script_name, caseroot, rundir, lid, prefix):\n###############################################################################\n filename = \"{}.external.log.{}\".format(prefix, lid)\n outfile = os.path.join(rundir, filename)\n cmd = script_name + \" 1> {} {} 2>&1\".format(outfile, caseroot)\n logger.info(\"running {}\".format(script_name))\n run_cmd_no_fail(cmd)\n\n###############################################################################\ndef do_data_assimilation(da_script, caseroot, cycle, lid, rundir):\n###############################################################################\n filename = \"da.log.{}\".format(lid)\n outfile = os.path.join(rundir, filename)\n cmd = da_script + \" 1> {} {} {:d} 2>&1\".format(outfile, caseroot, cycle)\n logger.info(\"running {}\".format(da_script))\n run_cmd_no_fail(cmd)\n\n###############################################################################\ndef case_run(case, skip_pnl=False):\n###############################################################################\n # Set up the run, run the model, do the postrun steps\n run_with_submit = case.get_value(\"RUN_WITH_SUBMIT\")\n expect(run_with_submit,\n \"You are not calling the run script via the submit script. \"\n \"As a result, short-term archiving will not be called automatically.\"\n \"Please submit your run using the submit script like so:\"\n \" ./case.submit\")\n\n # Forces user to use case.submit if they re-submit\n if case.get_value(\"TESTCASE\") is None:\n case.set_value(\"RUN_WITH_SUBMIT\", False)\n\n prerun_script = case.get_value(\"PRERUN_SCRIPT\")\n postrun_script = case.get_value(\"POSTRUN_SCRIPT\")\n\n data_assimilation = case.get_value(\"DATA_ASSIMILATION\")\n data_assimilation_cycles = case.get_value(\"DATA_ASSIMILATION_CYCLES\")\n data_assimilation_script = case.get_value(\"DATA_ASSIMILATION_SCRIPT\")\n\n # set up the LID\n lid = new_lid()\n\n save_prerun_provenance(case)\n\n for cycle in range(data_assimilation_cycles):\n # After the first DA cycle, runs are restart runs\n if cycle > 0:\n case.set_value(\"CONTINUE_RUN\", \"TRUE\")\n lid = new_lid()\n\n if prerun_script:\n case.flush()\n do_external(prerun_script, case.get_value(\"CASEROOT\"), case.get_value(\"RUNDIR\"),\n lid, prefix=\"prerun\")\n case.read_xml()\n\n lid = run_model(case, lid, skip_pnl)\n save_logs(case, lid) # Copy log files back to caseroot\n if case.get_value(\"CHECK_TIMING\") or case.get_value(\"SAVE_TIMING\"):\n get_timing(case, lid) # Run the getTiming script\n\n if data_assimilation:\n case.flush()\n do_data_assimilation(data_assimilation_script, case.get_value(\"CASEROOT\"), cycle, lid,\n case.get_value(\"RUNDIR\"))\n case.read_xml()\n\n if postrun_script:\n case.flush()\n do_external(postrun_script, case.get_value(\"CASEROOT\"), case.get_value(\"RUNDIR\"),\n lid, prefix=\"postrun\")\n case.read_xml()\n\n save_postrun_provenance(case)\n\n logger.warn(\"check for resubmit\")\n resubmit_check(case)\n\n return True\n", "path": "scripts/lib/CIME/case_run.py"}]}
4,006
291
gh_patches_debug_37107
rasdani/github-patches
git_diff
opsdroid__opsdroid-1355
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Telegram connector needs update for event message # Description The telegram connector uses message.target['id'] instead of message.target. This leads to problems when trying to use default_target per normal configuration and usage of the event flow logic. ## Steps to Reproduce Setup telegram as the default connector... call the telegram connector.send with a target... ``` await self.opsdroid.send(Message(text='hello', target='<useridhere>') ``` The default_target is also always None. ``` await self.opsdroid.send(Message(text='hello') ``` You can hack around it with.. ``` sillytarget = { 'id': <useridhere> } await self.opsdroid.send(Message(text='hello', target=sillytarget) ``` ## Expected Functionality message.target should work like the other core connectors ## Experienced Functionality Errors out. ``` opsdroid | ERROR opsdroid.connector.telegram.send_message(): Unable to respond. ``` ## Versions - opsdroid: latest/stable - python 3.7.6 - docker image: opsdroid/opsdroid:latest ## Configuration File ```yaml connectors: ## Telegram (core) telegram: token: "......" # optional update-interval: 0.5 # Interval between checking for messages whitelisted-users: # List of users who can speak to the bot, if not set anyone can speak - ...... ``` ## Additional Details None <!-- Love opsdroid? Please consider supporting our collective: +👉 https://opencollective.com/opsdroid/donate --> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `opsdroid/connector/telegram/__init__.py` Content: ``` 1 """A connector for Telegram.""" 2 import asyncio 3 import logging 4 import aiohttp 5 from voluptuous import Required 6 7 from opsdroid.connector import Connector, register_event 8 from opsdroid.events import Message, Image 9 10 11 _LOGGER = logging.getLogger(__name__) 12 CONFIG_SCHEMA = { 13 Required("token"): str, 14 "update-interval": float, 15 "default-user": str, 16 "whitelisted-users": list, 17 } 18 19 20 class ConnectorTelegram(Connector): 21 """A connector the the char service Telegram.""" 22 23 def __init__(self, config, opsdroid=None): 24 """Create the connector. 25 26 Args: 27 config (dict): configuration settings from the 28 file config.yaml. 29 opsdroid (OpsDroid): An instance of opsdroid.core. 30 31 """ 32 _LOGGER.debug(_("Loaded Telegram Connector")) 33 super().__init__(config, opsdroid=opsdroid) 34 self.name = "telegram" 35 self.opsdroid = opsdroid 36 self.latest_update = None 37 self.default_target = None 38 self.listening = True 39 self.default_user = config.get("default-user", None) 40 self.whitelisted_users = config.get("whitelisted-users", None) 41 self.update_interval = config.get("update-interval", 1) 42 self.session = None 43 self._closing = asyncio.Event() 44 self.loop = asyncio.get_event_loop() 45 46 try: 47 self.token = config["token"] 48 except (KeyError, AttributeError): 49 _LOGGER.error( 50 _( 51 "Unable to login: Access token is missing. Telegram connector will be unavailable." 52 ) 53 ) 54 55 @staticmethod 56 def get_user(response): 57 """Get user from response. 58 59 The API response is different depending on how 60 the bot is set up and where the message is coming 61 from. This method was created to keep if/else 62 statements to a minium on _parse_message. 63 64 Args: 65 response (dict): Response returned by aiohttp.ClientSession. 66 67 """ 68 user = None 69 user_id = None 70 71 if "username" in response["message"]["from"]: 72 user = response["message"]["from"]["username"] 73 74 elif "first_name" in response["message"]["from"]: 75 user = response["message"]["from"]["first_name"] 76 user_id = response["message"]["from"]["id"] 77 78 return user, user_id 79 80 def handle_user_permission(self, response, user): 81 """Handle user permissions. 82 83 This will check if the user that tried to talk with 84 the bot is allowed to do so. It will also work with 85 userid to improve security. 86 87 """ 88 user_id = response["message"]["from"]["id"] 89 90 if ( 91 not self.whitelisted_users 92 or user in self.whitelisted_users 93 or user_id in self.whitelisted_users 94 ): 95 return True 96 97 return False 98 99 def build_url(self, method): 100 """Build the url to connect to the API. 101 102 Args: 103 method (string): API call end point. 104 105 Return: 106 String that represents the full API url. 107 108 """ 109 return "https://api.telegram.org/bot{}/{}".format(self.token, method) 110 111 async def delete_webhook(self): 112 """Delete Telegram webhook. 113 114 The Telegram api will thrown an 409 error when an webhook is 115 active and a call to getUpdates is made. This method will 116 try to request the deletion of the webhook to make the getUpdate 117 request possible. 118 119 """ 120 _LOGGER.debug(_("Sending deleteWebhook request to Telegram...")) 121 resp = await self.session.get(self.build_url("deleteWebhook")) 122 123 if resp.status == 200: 124 _LOGGER.debug(_("Telegram webhook deleted successfully.")) 125 else: 126 _LOGGER.debug(_("Unable to delete webhook.")) 127 128 async def connect(self): 129 """Connect to Telegram. 130 131 This method is not an authorization call. It basically 132 checks if the API token was provided and makes an API 133 call to Telegram and evaluates the status of the call. 134 135 """ 136 137 _LOGGER.debug(_("Connecting to Telegram.")) 138 self.session = aiohttp.ClientSession() 139 140 resp = await self.session.get(self.build_url("getMe")) 141 142 if resp.status != 200: 143 _LOGGER.error(_("Unable to connect.")) 144 _LOGGER.error(_("Telegram error %s, %s."), resp.status, resp.text) 145 else: 146 json = await resp.json() 147 _LOGGER.debug(json) 148 _LOGGER.debug(_("Connected to Telegram as %s."), json["result"]["username"]) 149 150 async def _parse_message(self, response): 151 """Handle logic to parse a received message. 152 153 Since everyone can send a private message to any user/bot 154 in Telegram, this method allows to set a list of whitelisted 155 users that can interact with the bot. If any other user tries 156 to interact with the bot the command is not parsed and instead 157 the bot will inform that user that he is not allowed to talk 158 with the bot. 159 160 We also set self.latest_update to +1 in order to get the next 161 available message (or an empty {} if no message has been received 162 yet) with the method self._get_messages(). 163 164 Args: 165 response (dict): Response returned by aiohttp.ClientSession. 166 167 """ 168 for result in response["result"]: 169 _LOGGER.debug(result) 170 if result.get("edited_message", None): 171 result["message"] = result.pop("edited_message") 172 if "channel" in result["message"]["chat"]["type"]: 173 _LOGGER.debug( 174 _("Channel message parsing not supported " "- Ignoring message.") 175 ) 176 elif "message" in result and "text" in result["message"]: 177 user, user_id = self.get_user(result) 178 message = Message( 179 text=result["message"]["text"], 180 user=user, 181 user_id=user_id, 182 target=result["message"]["chat"], 183 connector=self, 184 ) 185 186 if self.handle_user_permission(result, user): 187 await self.opsdroid.parse(message) 188 else: 189 message.text = ( 190 "Sorry, you're not allowed " "to speak with this bot." 191 ) 192 await self.send(message) 193 self.latest_update = result["update_id"] + 1 194 elif ( 195 "message" in result 196 and "sticker" in result["message"] 197 and "emoji" in result["message"]["sticker"] 198 ): 199 self.latest_update = result["update_id"] + 1 200 _LOGGER.debug( 201 _("Emoji message parsing not supported - Ignoring message.") 202 ) 203 else: 204 _LOGGER.error(_("Unable to parse the message.")) 205 206 async def _get_messages(self): 207 """Connect to the Telegram API. 208 209 Uses an aiohttp ClientSession to connect to Telegram API 210 and get the latest messages from the chat service. 211 212 The data["offset"] is used to consume every new message, the API 213 returns an int - "update_id" value. In order to get the next 214 message this value needs to be increased by 1 the next time 215 the API is called. If no new messages exists the API will just 216 return an empty {}. 217 218 """ 219 data = {} 220 if self.latest_update is not None: 221 data["offset"] = self.latest_update 222 223 await asyncio.sleep(self.update_interval) 224 resp = await self.session.get(self.build_url("getUpdates"), params=data) 225 226 if resp.status == 409: 227 _LOGGER.info( 228 _( 229 "Can't get updates because previous webhook is still active. Will try to delete webhook." 230 ) 231 ) 232 await self.delete_webhook() 233 234 if resp.status != 200: 235 _LOGGER.error(_("Telegram error %s, %s."), resp.status, resp.text) 236 self.listening = False 237 else: 238 json = await resp.json() 239 240 await self._parse_message(json) 241 242 async def get_messages_loop(self): 243 """Listen for and parse new messages. 244 245 The bot will always listen to all opened chat windows, 246 as long as opsdroid is running. Since anyone can start 247 a new chat with the bot is recommended that a list of 248 users to be whitelisted be provided in config.yaml. 249 250 The method will sleep asynchronously at the end of 251 every loop. The time can either be specified in the 252 config.yaml with the param update-interval - this 253 defaults to 1 second. 254 255 """ 256 while self.listening: 257 await self._get_messages() 258 259 async def listen(self): 260 """Listen method of the connector. 261 262 Every connector has to implement the listen method. When an 263 infinite loop is running, it becomes hard to cancel this task. 264 So we are creating a task and set it on a variable so we can 265 cancel the task. 266 267 """ 268 message_getter = self.loop.create_task(await self.get_messages_loop()) 269 await self._closing.wait() 270 message_getter.cancel() 271 272 @register_event(Message) 273 async def send_message(self, message): 274 """Respond with a message. 275 276 Args: 277 message (object): An instance of Message. 278 279 """ 280 _LOGGER.debug(_("Responding with: %s."), message.text) 281 282 data = dict() 283 data["chat_id"] = message.target["id"] 284 data["text"] = message.text 285 resp = await self.session.post(self.build_url("sendMessage"), data=data) 286 if resp.status == 200: 287 _LOGGER.debug(_("Successfully responded.")) 288 else: 289 _LOGGER.error(_("Unable to respond.")) 290 291 @register_event(Image) 292 async def send_image(self, file_event): 293 """Send Image to Telegram. 294 295 Gets the chat id from the channel and then 296 sends the bytes of the image as multipart/form-data. 297 298 """ 299 data = aiohttp.FormData() 300 data.add_field( 301 "chat_id", str(file_event.target["id"]), content_type="multipart/form-data" 302 ) 303 data.add_field( 304 "photo", 305 await file_event.get_file_bytes(), 306 content_type="multipart/form-data", 307 ) 308 309 resp = await self.session.post(self.build_url("sendPhoto"), data=data) 310 if resp.status == 200: 311 _LOGGER.debug(_("Sent %s image successfully."), file_event.name) 312 else: 313 _LOGGER.debug(_("Unable to send image - Status Code %s."), resp.status) 314 315 async def disconnect(self): 316 """Disconnect from Telegram. 317 318 Stops the infinite loop found in self._listen(), closes 319 aiohttp session. 320 321 """ 322 self.listening = False 323 self._closing.set() 324 await self.session.close() 325 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/opsdroid/connector/telegram/__init__.py b/opsdroid/connector/telegram/__init__.py --- a/opsdroid/connector/telegram/__init__.py +++ b/opsdroid/connector/telegram/__init__.py @@ -34,9 +34,9 @@ self.name = "telegram" self.opsdroid = opsdroid self.latest_update = None - self.default_target = None self.listening = True self.default_user = config.get("default-user", None) + self.default_target = self.default_user self.whitelisted_users = config.get("whitelisted-users", None) self.update_interval = config.get("update-interval", 1) self.session = None @@ -167,9 +167,13 @@ """ for result in response["result"]: _LOGGER.debug(result) + if result.get("edited_message", None): result["message"] = result.pop("edited_message") - if "channel" in result["message"]["chat"]["type"]: + if result.get("channel_post", None) or result.get( + "edited_channel_post", None + ): + self.latest_update = result["update_id"] + 1 _LOGGER.debug( _("Channel message parsing not supported " "- Ignoring message.") ) @@ -179,7 +183,7 @@ text=result["message"]["text"], user=user, user_id=user_id, - target=result["message"]["chat"], + target=result["message"]["chat"]["id"], connector=self, ) @@ -277,10 +281,12 @@ message (object): An instance of Message. """ - _LOGGER.debug(_("Responding with: %s."), message.text) + _LOGGER.debug( + _("Responding with: '%s' at target: '%s'"), message.text, message.target + ) data = dict() - data["chat_id"] = message.target["id"] + data["chat_id"] = message.target data["text"] = message.text resp = await self.session.post(self.build_url("sendMessage"), data=data) if resp.status == 200:
{"golden_diff": "diff --git a/opsdroid/connector/telegram/__init__.py b/opsdroid/connector/telegram/__init__.py\n--- a/opsdroid/connector/telegram/__init__.py\n+++ b/opsdroid/connector/telegram/__init__.py\n@@ -34,9 +34,9 @@\n self.name = \"telegram\"\n self.opsdroid = opsdroid\n self.latest_update = None\n- self.default_target = None\n self.listening = True\n self.default_user = config.get(\"default-user\", None)\n+ self.default_target = self.default_user\n self.whitelisted_users = config.get(\"whitelisted-users\", None)\n self.update_interval = config.get(\"update-interval\", 1)\n self.session = None\n@@ -167,9 +167,13 @@\n \"\"\"\n for result in response[\"result\"]:\n _LOGGER.debug(result)\n+\n if result.get(\"edited_message\", None):\n result[\"message\"] = result.pop(\"edited_message\")\n- if \"channel\" in result[\"message\"][\"chat\"][\"type\"]:\n+ if result.get(\"channel_post\", None) or result.get(\n+ \"edited_channel_post\", None\n+ ):\n+ self.latest_update = result[\"update_id\"] + 1\n _LOGGER.debug(\n _(\"Channel message parsing not supported \" \"- Ignoring message.\")\n )\n@@ -179,7 +183,7 @@\n text=result[\"message\"][\"text\"],\n user=user,\n user_id=user_id,\n- target=result[\"message\"][\"chat\"],\n+ target=result[\"message\"][\"chat\"][\"id\"],\n connector=self,\n )\n \n@@ -277,10 +281,12 @@\n message (object): An instance of Message.\n \n \"\"\"\n- _LOGGER.debug(_(\"Responding with: %s.\"), message.text)\n+ _LOGGER.debug(\n+ _(\"Responding with: '%s' at target: '%s'\"), message.text, message.target\n+ )\n \n data = dict()\n- data[\"chat_id\"] = message.target[\"id\"]\n+ data[\"chat_id\"] = message.target\n data[\"text\"] = message.text\n resp = await self.session.post(self.build_url(\"sendMessage\"), data=data)\n if resp.status == 200:\n", "issue": "Telegram connector needs update for event message\n# Description\r\nThe telegram connector uses message.target['id'] instead of message.target. This leads to problems when trying to use default_target per normal configuration and usage of the event flow logic.\r\n\r\n## Steps to Reproduce\r\nSetup telegram as the default connector... call the telegram connector.send with a target... \r\n```\r\nawait self.opsdroid.send(Message(text='hello', target='<useridhere>') \r\n```\r\nThe default_target is also always None.\r\n```\r\nawait self.opsdroid.send(Message(text='hello') \r\n```\r\nYou can hack around it with..\r\n```\r\nsillytarget = { 'id': <useridhere> }\r\nawait self.opsdroid.send(Message(text='hello', target=sillytarget) \r\n```\r\n## Expected Functionality\r\nmessage.target should work like the other core connectors\r\n\r\n## Experienced Functionality\r\nErrors out.\r\n```\r\nopsdroid | ERROR opsdroid.connector.telegram.send_message(): Unable to respond. \r\n```\r\n## Versions\r\n\r\n- opsdroid: latest/stable\r\n- python 3.7.6\r\n- docker image: opsdroid/opsdroid:latest\r\n\r\n## Configuration File\r\n```yaml\r\nconnectors:\r\n## Telegram (core)\r\n telegram:\r\n token: \"......\"\r\n # optional\r\n update-interval: 0.5 # Interval between checking for messages\r\n whitelisted-users: # List of users who can speak to the bot, if not set anyone can speak\r\n - ......\r\n```\r\n## Additional Details\r\nNone\r\n\r\n<!-- Love opsdroid? Please consider supporting our collective:\r\n +\ud83d\udc49 https://opencollective.com/opsdroid/donate -->\r\n\n", "before_files": [{"content": "\"\"\"A connector for Telegram.\"\"\"\nimport asyncio\nimport logging\nimport aiohttp\nfrom voluptuous import Required\n\nfrom opsdroid.connector import Connector, register_event\nfrom opsdroid.events import Message, Image\n\n\n_LOGGER = logging.getLogger(__name__)\nCONFIG_SCHEMA = {\n Required(\"token\"): str,\n \"update-interval\": float,\n \"default-user\": str,\n \"whitelisted-users\": list,\n}\n\n\nclass ConnectorTelegram(Connector):\n \"\"\"A connector the the char service Telegram.\"\"\"\n\n def __init__(self, config, opsdroid=None):\n \"\"\"Create the connector.\n\n Args:\n config (dict): configuration settings from the\n file config.yaml.\n opsdroid (OpsDroid): An instance of opsdroid.core.\n\n \"\"\"\n _LOGGER.debug(_(\"Loaded Telegram Connector\"))\n super().__init__(config, opsdroid=opsdroid)\n self.name = \"telegram\"\n self.opsdroid = opsdroid\n self.latest_update = None\n self.default_target = None\n self.listening = True\n self.default_user = config.get(\"default-user\", None)\n self.whitelisted_users = config.get(\"whitelisted-users\", None)\n self.update_interval = config.get(\"update-interval\", 1)\n self.session = None\n self._closing = asyncio.Event()\n self.loop = asyncio.get_event_loop()\n\n try:\n self.token = config[\"token\"]\n except (KeyError, AttributeError):\n _LOGGER.error(\n _(\n \"Unable to login: Access token is missing. Telegram connector will be unavailable.\"\n )\n )\n\n @staticmethod\n def get_user(response):\n \"\"\"Get user from response.\n\n The API response is different depending on how\n the bot is set up and where the message is coming\n from. This method was created to keep if/else\n statements to a minium on _parse_message.\n\n Args:\n response (dict): Response returned by aiohttp.ClientSession.\n\n \"\"\"\n user = None\n user_id = None\n\n if \"username\" in response[\"message\"][\"from\"]:\n user = response[\"message\"][\"from\"][\"username\"]\n\n elif \"first_name\" in response[\"message\"][\"from\"]:\n user = response[\"message\"][\"from\"][\"first_name\"]\n user_id = response[\"message\"][\"from\"][\"id\"]\n\n return user, user_id\n\n def handle_user_permission(self, response, user):\n \"\"\"Handle user permissions.\n\n This will check if the user that tried to talk with\n the bot is allowed to do so. It will also work with\n userid to improve security.\n\n \"\"\"\n user_id = response[\"message\"][\"from\"][\"id\"]\n\n if (\n not self.whitelisted_users\n or user in self.whitelisted_users\n or user_id in self.whitelisted_users\n ):\n return True\n\n return False\n\n def build_url(self, method):\n \"\"\"Build the url to connect to the API.\n\n Args:\n method (string): API call end point.\n\n Return:\n String that represents the full API url.\n\n \"\"\"\n return \"https://api.telegram.org/bot{}/{}\".format(self.token, method)\n\n async def delete_webhook(self):\n \"\"\"Delete Telegram webhook.\n\n The Telegram api will thrown an 409 error when an webhook is\n active and a call to getUpdates is made. This method will\n try to request the deletion of the webhook to make the getUpdate\n request possible.\n\n \"\"\"\n _LOGGER.debug(_(\"Sending deleteWebhook request to Telegram...\"))\n resp = await self.session.get(self.build_url(\"deleteWebhook\"))\n\n if resp.status == 200:\n _LOGGER.debug(_(\"Telegram webhook deleted successfully.\"))\n else:\n _LOGGER.debug(_(\"Unable to delete webhook.\"))\n\n async def connect(self):\n \"\"\"Connect to Telegram.\n\n This method is not an authorization call. It basically\n checks if the API token was provided and makes an API\n call to Telegram and evaluates the status of the call.\n\n \"\"\"\n\n _LOGGER.debug(_(\"Connecting to Telegram.\"))\n self.session = aiohttp.ClientSession()\n\n resp = await self.session.get(self.build_url(\"getMe\"))\n\n if resp.status != 200:\n _LOGGER.error(_(\"Unable to connect.\"))\n _LOGGER.error(_(\"Telegram error %s, %s.\"), resp.status, resp.text)\n else:\n json = await resp.json()\n _LOGGER.debug(json)\n _LOGGER.debug(_(\"Connected to Telegram as %s.\"), json[\"result\"][\"username\"])\n\n async def _parse_message(self, response):\n \"\"\"Handle logic to parse a received message.\n\n Since everyone can send a private message to any user/bot\n in Telegram, this method allows to set a list of whitelisted\n users that can interact with the bot. If any other user tries\n to interact with the bot the command is not parsed and instead\n the bot will inform that user that he is not allowed to talk\n with the bot.\n\n We also set self.latest_update to +1 in order to get the next\n available message (or an empty {} if no message has been received\n yet) with the method self._get_messages().\n\n Args:\n response (dict): Response returned by aiohttp.ClientSession.\n\n \"\"\"\n for result in response[\"result\"]:\n _LOGGER.debug(result)\n if result.get(\"edited_message\", None):\n result[\"message\"] = result.pop(\"edited_message\")\n if \"channel\" in result[\"message\"][\"chat\"][\"type\"]:\n _LOGGER.debug(\n _(\"Channel message parsing not supported \" \"- Ignoring message.\")\n )\n elif \"message\" in result and \"text\" in result[\"message\"]:\n user, user_id = self.get_user(result)\n message = Message(\n text=result[\"message\"][\"text\"],\n user=user,\n user_id=user_id,\n target=result[\"message\"][\"chat\"],\n connector=self,\n )\n\n if self.handle_user_permission(result, user):\n await self.opsdroid.parse(message)\n else:\n message.text = (\n \"Sorry, you're not allowed \" \"to speak with this bot.\"\n )\n await self.send(message)\n self.latest_update = result[\"update_id\"] + 1\n elif (\n \"message\" in result\n and \"sticker\" in result[\"message\"]\n and \"emoji\" in result[\"message\"][\"sticker\"]\n ):\n self.latest_update = result[\"update_id\"] + 1\n _LOGGER.debug(\n _(\"Emoji message parsing not supported - Ignoring message.\")\n )\n else:\n _LOGGER.error(_(\"Unable to parse the message.\"))\n\n async def _get_messages(self):\n \"\"\"Connect to the Telegram API.\n\n Uses an aiohttp ClientSession to connect to Telegram API\n and get the latest messages from the chat service.\n\n The data[\"offset\"] is used to consume every new message, the API\n returns an int - \"update_id\" value. In order to get the next\n message this value needs to be increased by 1 the next time\n the API is called. If no new messages exists the API will just\n return an empty {}.\n\n \"\"\"\n data = {}\n if self.latest_update is not None:\n data[\"offset\"] = self.latest_update\n\n await asyncio.sleep(self.update_interval)\n resp = await self.session.get(self.build_url(\"getUpdates\"), params=data)\n\n if resp.status == 409:\n _LOGGER.info(\n _(\n \"Can't get updates because previous webhook is still active. Will try to delete webhook.\"\n )\n )\n await self.delete_webhook()\n\n if resp.status != 200:\n _LOGGER.error(_(\"Telegram error %s, %s.\"), resp.status, resp.text)\n self.listening = False\n else:\n json = await resp.json()\n\n await self._parse_message(json)\n\n async def get_messages_loop(self):\n \"\"\"Listen for and parse new messages.\n\n The bot will always listen to all opened chat windows,\n as long as opsdroid is running. Since anyone can start\n a new chat with the bot is recommended that a list of\n users to be whitelisted be provided in config.yaml.\n\n The method will sleep asynchronously at the end of\n every loop. The time can either be specified in the\n config.yaml with the param update-interval - this\n defaults to 1 second.\n\n \"\"\"\n while self.listening:\n await self._get_messages()\n\n async def listen(self):\n \"\"\"Listen method of the connector.\n\n Every connector has to implement the listen method. When an\n infinite loop is running, it becomes hard to cancel this task.\n So we are creating a task and set it on a variable so we can\n cancel the task.\n\n \"\"\"\n message_getter = self.loop.create_task(await self.get_messages_loop())\n await self._closing.wait()\n message_getter.cancel()\n\n @register_event(Message)\n async def send_message(self, message):\n \"\"\"Respond with a message.\n\n Args:\n message (object): An instance of Message.\n\n \"\"\"\n _LOGGER.debug(_(\"Responding with: %s.\"), message.text)\n\n data = dict()\n data[\"chat_id\"] = message.target[\"id\"]\n data[\"text\"] = message.text\n resp = await self.session.post(self.build_url(\"sendMessage\"), data=data)\n if resp.status == 200:\n _LOGGER.debug(_(\"Successfully responded.\"))\n else:\n _LOGGER.error(_(\"Unable to respond.\"))\n\n @register_event(Image)\n async def send_image(self, file_event):\n \"\"\"Send Image to Telegram.\n\n Gets the chat id from the channel and then\n sends the bytes of the image as multipart/form-data.\n\n \"\"\"\n data = aiohttp.FormData()\n data.add_field(\n \"chat_id\", str(file_event.target[\"id\"]), content_type=\"multipart/form-data\"\n )\n data.add_field(\n \"photo\",\n await file_event.get_file_bytes(),\n content_type=\"multipart/form-data\",\n )\n\n resp = await self.session.post(self.build_url(\"sendPhoto\"), data=data)\n if resp.status == 200:\n _LOGGER.debug(_(\"Sent %s image successfully.\"), file_event.name)\n else:\n _LOGGER.debug(_(\"Unable to send image - Status Code %s.\"), resp.status)\n\n async def disconnect(self):\n \"\"\"Disconnect from Telegram.\n\n Stops the infinite loop found in self._listen(), closes\n aiohttp session.\n\n \"\"\"\n self.listening = False\n self._closing.set()\n await self.session.close()\n", "path": "opsdroid/connector/telegram/__init__.py"}], "after_files": [{"content": "\"\"\"A connector for Telegram.\"\"\"\nimport asyncio\nimport logging\nimport aiohttp\nfrom voluptuous import Required\n\nfrom opsdroid.connector import Connector, register_event\nfrom opsdroid.events import Message, Image\n\n\n_LOGGER = logging.getLogger(__name__)\nCONFIG_SCHEMA = {\n Required(\"token\"): str,\n \"update-interval\": float,\n \"default-user\": str,\n \"whitelisted-users\": list,\n}\n\n\nclass ConnectorTelegram(Connector):\n \"\"\"A connector the the char service Telegram.\"\"\"\n\n def __init__(self, config, opsdroid=None):\n \"\"\"Create the connector.\n\n Args:\n config (dict): configuration settings from the\n file config.yaml.\n opsdroid (OpsDroid): An instance of opsdroid.core.\n\n \"\"\"\n _LOGGER.debug(_(\"Loaded Telegram Connector\"))\n super().__init__(config, opsdroid=opsdroid)\n self.name = \"telegram\"\n self.opsdroid = opsdroid\n self.latest_update = None\n self.listening = True\n self.default_user = config.get(\"default-user\", None)\n self.default_target = self.default_user\n self.whitelisted_users = config.get(\"whitelisted-users\", None)\n self.update_interval = config.get(\"update-interval\", 1)\n self.session = None\n self._closing = asyncio.Event()\n self.loop = asyncio.get_event_loop()\n\n try:\n self.token = config[\"token\"]\n except (KeyError, AttributeError):\n _LOGGER.error(\n _(\n \"Unable to login: Access token is missing. Telegram connector will be unavailable.\"\n )\n )\n\n @staticmethod\n def get_user(response):\n \"\"\"Get user from response.\n\n The API response is different depending on how\n the bot is set up and where the message is coming\n from. This method was created to keep if/else\n statements to a minium on _parse_message.\n\n Args:\n response (dict): Response returned by aiohttp.ClientSession.\n\n \"\"\"\n user = None\n user_id = None\n\n if \"username\" in response[\"message\"][\"from\"]:\n user = response[\"message\"][\"from\"][\"username\"]\n\n elif \"first_name\" in response[\"message\"][\"from\"]:\n user = response[\"message\"][\"from\"][\"first_name\"]\n user_id = response[\"message\"][\"from\"][\"id\"]\n\n return user, user_id\n\n def handle_user_permission(self, response, user):\n \"\"\"Handle user permissions.\n\n This will check if the user that tried to talk with\n the bot is allowed to do so. It will also work with\n userid to improve security.\n\n \"\"\"\n user_id = response[\"message\"][\"from\"][\"id\"]\n\n if (\n not self.whitelisted_users\n or user in self.whitelisted_users\n or user_id in self.whitelisted_users\n ):\n return True\n\n return False\n\n def build_url(self, method):\n \"\"\"Build the url to connect to the API.\n\n Args:\n method (string): API call end point.\n\n Return:\n String that represents the full API url.\n\n \"\"\"\n return \"https://api.telegram.org/bot{}/{}\".format(self.token, method)\n\n async def delete_webhook(self):\n \"\"\"Delete Telegram webhook.\n\n The Telegram api will thrown an 409 error when an webhook is\n active and a call to getUpdates is made. This method will\n try to request the deletion of the webhook to make the getUpdate\n request possible.\n\n \"\"\"\n _LOGGER.debug(_(\"Sending deleteWebhook request to Telegram...\"))\n resp = await self.session.get(self.build_url(\"deleteWebhook\"))\n\n if resp.status == 200:\n _LOGGER.debug(_(\"Telegram webhook deleted successfully.\"))\n else:\n _LOGGER.debug(_(\"Unable to delete webhook.\"))\n\n async def connect(self):\n \"\"\"Connect to Telegram.\n\n This method is not an authorization call. It basically\n checks if the API token was provided and makes an API\n call to Telegram and evaluates the status of the call.\n\n \"\"\"\n\n _LOGGER.debug(_(\"Connecting to Telegram.\"))\n self.session = aiohttp.ClientSession()\n\n resp = await self.session.get(self.build_url(\"getMe\"))\n\n if resp.status != 200:\n _LOGGER.error(_(\"Unable to connect.\"))\n _LOGGER.error(_(\"Telegram error %s, %s.\"), resp.status, resp.text)\n else:\n json = await resp.json()\n _LOGGER.debug(json)\n _LOGGER.debug(_(\"Connected to Telegram as %s.\"), json[\"result\"][\"username\"])\n\n async def _parse_message(self, response):\n \"\"\"Handle logic to parse a received message.\n\n Since everyone can send a private message to any user/bot\n in Telegram, this method allows to set a list of whitelisted\n users that can interact with the bot. If any other user tries\n to interact with the bot the command is not parsed and instead\n the bot will inform that user that he is not allowed to talk\n with the bot.\n\n We also set self.latest_update to +1 in order to get the next\n available message (or an empty {} if no message has been received\n yet) with the method self._get_messages().\n\n Args:\n response (dict): Response returned by aiohttp.ClientSession.\n\n \"\"\"\n for result in response[\"result\"]:\n _LOGGER.debug(result)\n\n if result.get(\"edited_message\", None):\n result[\"message\"] = result.pop(\"edited_message\")\n if result.get(\"channel_post\", None) or result.get(\n \"edited_channel_post\", None\n ):\n self.latest_update = result[\"update_id\"] + 1\n _LOGGER.debug(\n _(\"Channel message parsing not supported \" \"- Ignoring message.\")\n )\n elif \"message\" in result and \"text\" in result[\"message\"]:\n user, user_id = self.get_user(result)\n message = Message(\n text=result[\"message\"][\"text\"],\n user=user,\n user_id=user_id,\n target=result[\"message\"][\"chat\"][\"id\"],\n connector=self,\n )\n\n if self.handle_user_permission(result, user):\n await self.opsdroid.parse(message)\n else:\n message.text = (\n \"Sorry, you're not allowed \" \"to speak with this bot.\"\n )\n await self.send(message)\n self.latest_update = result[\"update_id\"] + 1\n elif (\n \"message\" in result\n and \"sticker\" in result[\"message\"]\n and \"emoji\" in result[\"message\"][\"sticker\"]\n ):\n self.latest_update = result[\"update_id\"] + 1\n _LOGGER.debug(\n _(\"Emoji message parsing not supported - Ignoring message.\")\n )\n else:\n _LOGGER.error(_(\"Unable to parse the message.\"))\n\n async def _get_messages(self):\n \"\"\"Connect to the Telegram API.\n\n Uses an aiohttp ClientSession to connect to Telegram API\n and get the latest messages from the chat service.\n\n The data[\"offset\"] is used to consume every new message, the API\n returns an int - \"update_id\" value. In order to get the next\n message this value needs to be increased by 1 the next time\n the API is called. If no new messages exists the API will just\n return an empty {}.\n\n \"\"\"\n data = {}\n if self.latest_update is not None:\n data[\"offset\"] = self.latest_update\n\n await asyncio.sleep(self.update_interval)\n resp = await self.session.get(self.build_url(\"getUpdates\"), params=data)\n\n if resp.status == 409:\n _LOGGER.info(\n _(\n \"Can't get updates because previous webhook is still active. Will try to delete webhook.\"\n )\n )\n await self.delete_webhook()\n\n if resp.status != 200:\n _LOGGER.error(_(\"Telegram error %s, %s.\"), resp.status, resp.text)\n self.listening = False\n else:\n json = await resp.json()\n\n await self._parse_message(json)\n\n async def get_messages_loop(self):\n \"\"\"Listen for and parse new messages.\n\n The bot will always listen to all opened chat windows,\n as long as opsdroid is running. Since anyone can start\n a new chat with the bot is recommended that a list of\n users to be whitelisted be provided in config.yaml.\n\n The method will sleep asynchronously at the end of\n every loop. The time can either be specified in the\n config.yaml with the param update-interval - this\n defaults to 1 second.\n\n \"\"\"\n while self.listening:\n await self._get_messages()\n\n async def listen(self):\n \"\"\"Listen method of the connector.\n\n Every connector has to implement the listen method. When an\n infinite loop is running, it becomes hard to cancel this task.\n So we are creating a task and set it on a variable so we can\n cancel the task.\n\n \"\"\"\n message_getter = self.loop.create_task(await self.get_messages_loop())\n await self._closing.wait()\n message_getter.cancel()\n\n @register_event(Message)\n async def send_message(self, message):\n \"\"\"Respond with a message.\n\n Args:\n message (object): An instance of Message.\n\n \"\"\"\n _LOGGER.debug(\n _(\"Responding with: '%s' at target: '%s'\"), message.text, message.target\n )\n\n data = dict()\n data[\"chat_id\"] = message.target\n data[\"text\"] = message.text\n resp = await self.session.post(self.build_url(\"sendMessage\"), data=data)\n if resp.status == 200:\n _LOGGER.debug(_(\"Successfully responded.\"))\n else:\n _LOGGER.error(_(\"Unable to respond.\"))\n\n @register_event(Image)\n async def send_image(self, file_event):\n \"\"\"Send Image to Telegram.\n\n Gets the chat id from the channel and then\n sends the bytes of the image as multipart/form-data.\n\n \"\"\"\n data = aiohttp.FormData()\n data.add_field(\n \"chat_id\", str(file_event.target[\"id\"]), content_type=\"multipart/form-data\"\n )\n data.add_field(\n \"photo\",\n await file_event.get_file_bytes(),\n content_type=\"multipart/form-data\",\n )\n\n resp = await self.session.post(self.build_url(\"sendPhoto\"), data=data)\n if resp.status == 200:\n _LOGGER.debug(_(\"Sent %s image successfully.\"), file_event.name)\n else:\n _LOGGER.debug(_(\"Unable to send image - Status Code %s.\"), resp.status)\n\n async def disconnect(self):\n \"\"\"Disconnect from Telegram.\n\n Stops the infinite loop found in self._listen(), closes\n aiohttp session.\n\n \"\"\"\n self.listening = False\n self._closing.set()\n await self.session.close()\n", "path": "opsdroid/connector/telegram/__init__.py"}]}
3,789
504
gh_patches_debug_19267
rasdani/github-patches
git_diff
pyodide__pyodide-2935
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Release 0.21 I went through issues and tagged things for the 0.21 release https://github.com/pyodide/pyodide/milestone/11 Mostly naming related subjects need to be discussed/addressed before the release IMO, since once we release they will be harder to change. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pyodide-build/pyodide_build/__init__.py` Content: ``` 1 __version__ = "0.21.0a3" 2 ``` Path: `src/py/pyodide/__init__.py` Content: ``` 1 # When the pyodide package is imported, both the js and the pyodide_js modules 2 # will be available to import from. Not all functions in pyodide_js will work 3 # until after pyodide is first imported, imported functions from pyodide_js 4 # should not be used at import time. It is fine to use js functions at import 5 # time. 6 # 7 # All pure Python code that does not require js or pyodide_js should go in 8 # the _pyodide package. 9 # 10 # This package is imported by the test suite as well, and currently we don't use 11 # pytest mocks for js or pyodide_js, so make sure to test "if IN_BROWSER" before 12 # importing from these. 13 __version__ = "0.21.0a3" 14 15 __all__ = ["__version__"] 16 17 from typing import Any 18 19 from . import _state # noqa: F401 20 from .code import CodeRunner # noqa: F401 21 from .code import eval_code # noqa: F401 22 from .code import eval_code_async # noqa: F401 23 from .code import find_imports # noqa: F401 24 from .code import should_quiet # noqa: F401 25 from .ffi import ConversionError # noqa: F401 26 from .ffi import JsException # noqa: F401 27 from .ffi import JsProxy # noqa: F401 28 from .ffi import create_once_callable # noqa: F401 29 from .ffi import create_proxy # noqa: F401 30 from .ffi import destroy_proxies # noqa: F401 31 from .ffi import register_js_module # noqa: F401 32 from .ffi import to_js # noqa: F401 33 from .ffi import unregister_js_module # noqa: F401 34 from .http import open_url # noqa: F401 35 36 DEPRECATED_LIST = { 37 "CodeRunner": "code", 38 "eval_code": "code", 39 "eval_code_async": "code", 40 "find_imports": "code", 41 "should_quiet": "code", 42 "open_url": "http", 43 "ConversionError": "ffi", 44 "JsException": "ffi", 45 "JsProxy": "ffi", 46 "create_once_callable": "ffi", 47 "create_proxy": "ffi", 48 "destroy_proxies": "ffi", 49 "to_js": "ffi", 50 "register_js_module": "ffi", 51 "unregister_js_module": "ffi", 52 } 53 54 55 from .webloop import _initialize_event_loop 56 57 _initialize_event_loop() 58 del _initialize_event_loop 59 60 61 def __dir__() -> list[str]: 62 return __all__ 63 64 65 for name in DEPRECATED_LIST: 66 globals()[f"_deprecated_{name}"] = globals()[name] 67 del globals()[name] 68 69 70 def __getattr__(name: str) -> Any: 71 if name in DEPRECATED_LIST: 72 from warnings import warn 73 74 warn( 75 f"pyodide.{name} has been moved to pyodide.{DEPRECATED_LIST[name]}.{name} " 76 "Accessing it through the pyodide module is deprecated.", 77 FutureWarning, 78 ) 79 # Put the name back so we won't warn next time this name is accessed 80 globals()[name] = globals()[f"_deprecated_{name}"] 81 return globals()[name] 82 raise AttributeError(f"module {__name__!r} has no attribute {name!r}") 83 ``` Path: `docs/conf.py` Content: ``` 1 # Configuration file for the Sphinx documentation builder. 2 3 # -- Path setup -------------------------------------------------------------- 4 5 import atexit 6 import os 7 import shutil 8 import subprocess 9 import sys 10 from pathlib import Path 11 from typing import Any 12 from unittest import mock 13 14 # -- Project information ----------------------------------------------------- 15 16 project = "Pyodide" 17 copyright = "2019-2022, Pyodide contributors and Mozilla" 18 pyodide_version = "0.21.0a3" 19 20 if ".dev" in pyodide_version or os.environ.get("READTHEDOCS_VERSION") == "latest": 21 CDN_URL = "https://cdn.jsdelivr.net/pyodide/dev/full/" 22 else: 23 CDN_URL = f"https://cdn.jsdelivr.net/pyodide/v{pyodide_version}/full/" 24 25 # -- General configuration --------------------------------------------------- 26 27 # If your documentation needs a minimal Sphinx version, state it here. 28 # 29 # needs_sphinx = '1.0' 30 31 extensions = [ 32 "sphinx.ext.autodoc", 33 "sphinx.ext.autosummary", 34 "sphinxcontrib.napoleon", 35 "myst_parser", 36 "sphinx_js", 37 "autodocsumm", 38 "sphinx_panels", 39 "sphinx_pyodide", 40 "sphinx_argparse_cli", 41 "versionwarning.extension", 42 "sphinx_issues", 43 ] 44 45 myst_enable_extensions = ["substitution"] 46 47 js_language = "typescript" 48 jsdoc_config_path = "../src/js/tsconfig.json" 49 root_for_relative_js_paths = "../src/" 50 issues_github_path = "pyodide/pyodide" 51 52 versionwarning_messages = { 53 "latest": ( 54 "This is the development version of the documentation. " 55 'See <a href="https://pyodide.org/">here</a> for latest stable ' 56 "documentation. Please do not use Pyodide with non " 57 "versioned (`dev`) URLs from the CDN for deployed applications!" 58 ) 59 } 60 versionwarning_body_selector = "#main-content > div" 61 62 autosummary_generate = True 63 autodoc_default_flags = ["members", "inherited-members"] 64 65 # Add modules to be mocked. 66 mock_modules = ["ruamel.yaml", "tomli"] 67 68 # Add any paths that contain templates here, relative to this directory. 69 templates_path = ["_templates"] 70 71 # The suffix(es) of source filenames. 72 source_suffix = [".rst", ".md"] 73 74 # The master toctree document. 75 master_doc = "index" 76 77 # The language for content autogenerated by Sphinx. 78 language = None 79 80 # List of patterns, relative to source directory, that match files and 81 # directories to ignore when looking for source files. 82 exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "README.md"] 83 84 # The name of the Pygments (syntax highlighting) style to use. 85 pygments_style = None 86 87 # -- Options for HTML output ------------------------------------------------- 88 89 # The theme to use for HTML and HTML Help pages. See the documentation for 90 # a list of builtin themes. 91 # 92 html_theme = "sphinx_book_theme" 93 html_logo = "_static/img/pyodide-logo.png" 94 95 # theme-specific options 96 html_theme_options: dict[str, Any] = {} 97 98 # paths that contain custom static files (such as style sheets) 99 html_static_path = ["_static"] 100 101 102 html_css_files = [ 103 "css/pyodide.css", 104 ] 105 106 # Custom sidebar templates, must be a dictionary that maps document names 107 # to template names. 108 # html_sidebars = {} 109 110 # -- Options for HTMLHelp output --------------------------------------------- 111 112 # Output file base name for HTML help builder. 113 htmlhelp_basename = "Pyodidedoc" 114 115 # A list of files that should not be packed into the epub file. 116 epub_exclude_files = ["search.html"] 117 118 119 def delete_attrs(cls): 120 for name in dir(cls): 121 if not name.startswith("_"): 122 try: 123 delattr(cls, name) 124 except Exception: 125 pass 126 127 128 # Try not to cause side effects if we are imported incidentally. 129 130 try: 131 import sphinx 132 133 IN_SPHINX = hasattr(sphinx, "application") 134 except ImportError: 135 IN_SPHINX = False 136 137 IN_READTHEDOCS = "READTHEDOCS" in os.environ 138 139 if IN_READTHEDOCS: 140 env = {"PYODIDE_BASE_URL": CDN_URL} 141 os.makedirs("_build/html", exist_ok=True) 142 res = subprocess.check_output( 143 ["make", "-C", "..", "docs/_build/html/console.html"], 144 env=env, 145 stderr=subprocess.STDOUT, 146 encoding="utf-8", 147 ) 148 print(res) 149 150 if IN_SPHINX: 151 # Compatibility shims. sphinx-js and sphinxcontrib-napoleon have not been updated for Python 3.10 152 import collections 153 from typing import Callable, Mapping 154 155 collections.Mapping = Mapping # type: ignore[attr-defined] 156 collections.Callable = Callable # type: ignore[attr-defined] 157 158 base_dir = Path(__file__).resolve().parent.parent 159 path_dirs = [ 160 str(base_dir), 161 str(base_dir / "pyodide-build"), 162 str(base_dir / "docs/sphinx_pyodide"), 163 str(base_dir / "src/py"), 164 str(base_dir / "packages/micropip/src"), 165 ] 166 sys.path = path_dirs + sys.path 167 168 import micropip # noqa: F401 169 import pyodide 170 171 # We hacked it so that autodoc will look for submodules, but only if we import 172 # them here. TODO: look these up in the source directory? 173 import pyodide.code 174 import pyodide.console 175 import pyodide.ffi.wrappers 176 import pyodide.http 177 import pyodide.webloop 178 179 # The full version, including alpha/beta/rc tags. 180 release = version = pyodide.__version__ 181 html_title = f"Version {version}" 182 183 shutil.copy("../src/core/pyproxy.ts", "../src/js/pyproxy.gen.ts") 184 shutil.copy("../src/core/error_handling.ts", "../src/js/error_handling.gen.ts") 185 js_source_path = [str(x) for x in Path("../src/js").glob("*.ts")] 186 187 def remove_pyproxy_gen_ts(): 188 Path("../src/js/pyproxy.gen.ts").unlink(missing_ok=True) 189 190 atexit.register(remove_pyproxy_gen_ts) 191 192 os.environ["PATH"] += f':{str(Path("../src/js/node_modules/.bin").resolve())}' 193 print(os.environ["PATH"]) 194 if IN_READTHEDOCS: 195 subprocess.run(["npm", "ci"], cwd="../src/js") 196 elif not shutil.which("typedoc"): 197 raise Exception( 198 "Before building the Pyodide docs you must run 'npm install' in 'src/js'." 199 ) 200 201 # Prevent API docs for webloop methods: they are the same as for base event loop 202 # and it clutters api docs too much 203 delete_attrs(pyodide.webloop.WebLoop) 204 delete_attrs(pyodide.webloop.WebLoopPolicy) 205 delete_attrs(pyodide.console.PyodideConsole) 206 207 for module in mock_modules: 208 sys.modules[module] = mock.Mock() 209 210 211 # https://github.com/sphinx-doc/sphinx/issues/4054 212 def globalReplace(app, docname, source): 213 result = source[0] 214 for key in app.config.global_replacements: 215 result = result.replace(key, app.config.global_replacements[key]) 216 source[0] = result 217 218 219 global_replacements = {"{{PYODIDE_CDN_URL}}": CDN_URL} 220 221 222 def setup(app): 223 app.add_config_value("global_replacements", {}, True) 224 app.connect("source-read", globalReplace) 225 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -15,7 +15,7 @@ project = "Pyodide" copyright = "2019-2022, Pyodide contributors and Mozilla" -pyodide_version = "0.21.0a3" +pyodide_version = "0.21.0" if ".dev" in pyodide_version or os.environ.get("READTHEDOCS_VERSION") == "latest": CDN_URL = "https://cdn.jsdelivr.net/pyodide/dev/full/" diff --git a/pyodide-build/pyodide_build/__init__.py b/pyodide-build/pyodide_build/__init__.py --- a/pyodide-build/pyodide_build/__init__.py +++ b/pyodide-build/pyodide_build/__init__.py @@ -1 +1 @@ -__version__ = "0.21.0a3" +__version__ = "0.21.0" diff --git a/src/py/pyodide/__init__.py b/src/py/pyodide/__init__.py --- a/src/py/pyodide/__init__.py +++ b/src/py/pyodide/__init__.py @@ -10,7 +10,7 @@ # This package is imported by the test suite as well, and currently we don't use # pytest mocks for js or pyodide_js, so make sure to test "if IN_BROWSER" before # importing from these. -__version__ = "0.21.0a3" +__version__ = "0.21.0" __all__ = ["__version__"]
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -15,7 +15,7 @@\n \n project = \"Pyodide\"\n copyright = \"2019-2022, Pyodide contributors and Mozilla\"\n-pyodide_version = \"0.21.0a3\"\n+pyodide_version = \"0.21.0\"\n \n if \".dev\" in pyodide_version or os.environ.get(\"READTHEDOCS_VERSION\") == \"latest\":\n CDN_URL = \"https://cdn.jsdelivr.net/pyodide/dev/full/\"\ndiff --git a/pyodide-build/pyodide_build/__init__.py b/pyodide-build/pyodide_build/__init__.py\n--- a/pyodide-build/pyodide_build/__init__.py\n+++ b/pyodide-build/pyodide_build/__init__.py\n@@ -1 +1 @@\n-__version__ = \"0.21.0a3\"\n+__version__ = \"0.21.0\"\ndiff --git a/src/py/pyodide/__init__.py b/src/py/pyodide/__init__.py\n--- a/src/py/pyodide/__init__.py\n+++ b/src/py/pyodide/__init__.py\n@@ -10,7 +10,7 @@\n # This package is imported by the test suite as well, and currently we don't use\n # pytest mocks for js or pyodide_js, so make sure to test \"if IN_BROWSER\" before\n # importing from these.\n-__version__ = \"0.21.0a3\"\n+__version__ = \"0.21.0\"\n \n __all__ = [\"__version__\"]\n", "issue": "Release 0.21\nI went through issues and tagged things for the 0.21 release https://github.com/pyodide/pyodide/milestone/11 \r\n\r\nMostly naming related subjects need to be discussed/addressed before the release IMO, since once we release they will be harder to change.\n", "before_files": [{"content": "__version__ = \"0.21.0a3\"\n", "path": "pyodide-build/pyodide_build/__init__.py"}, {"content": "# When the pyodide package is imported, both the js and the pyodide_js modules\n# will be available to import from. Not all functions in pyodide_js will work\n# until after pyodide is first imported, imported functions from pyodide_js\n# should not be used at import time. It is fine to use js functions at import\n# time.\n#\n# All pure Python code that does not require js or pyodide_js should go in\n# the _pyodide package.\n#\n# This package is imported by the test suite as well, and currently we don't use\n# pytest mocks for js or pyodide_js, so make sure to test \"if IN_BROWSER\" before\n# importing from these.\n__version__ = \"0.21.0a3\"\n\n__all__ = [\"__version__\"]\n\nfrom typing import Any\n\nfrom . import _state # noqa: F401\nfrom .code import CodeRunner # noqa: F401\nfrom .code import eval_code # noqa: F401\nfrom .code import eval_code_async # noqa: F401\nfrom .code import find_imports # noqa: F401\nfrom .code import should_quiet # noqa: F401\nfrom .ffi import ConversionError # noqa: F401\nfrom .ffi import JsException # noqa: F401\nfrom .ffi import JsProxy # noqa: F401\nfrom .ffi import create_once_callable # noqa: F401\nfrom .ffi import create_proxy # noqa: F401\nfrom .ffi import destroy_proxies # noqa: F401\nfrom .ffi import register_js_module # noqa: F401\nfrom .ffi import to_js # noqa: F401\nfrom .ffi import unregister_js_module # noqa: F401\nfrom .http import open_url # noqa: F401\n\nDEPRECATED_LIST = {\n \"CodeRunner\": \"code\",\n \"eval_code\": \"code\",\n \"eval_code_async\": \"code\",\n \"find_imports\": \"code\",\n \"should_quiet\": \"code\",\n \"open_url\": \"http\",\n \"ConversionError\": \"ffi\",\n \"JsException\": \"ffi\",\n \"JsProxy\": \"ffi\",\n \"create_once_callable\": \"ffi\",\n \"create_proxy\": \"ffi\",\n \"destroy_proxies\": \"ffi\",\n \"to_js\": \"ffi\",\n \"register_js_module\": \"ffi\",\n \"unregister_js_module\": \"ffi\",\n}\n\n\nfrom .webloop import _initialize_event_loop\n\n_initialize_event_loop()\ndel _initialize_event_loop\n\n\ndef __dir__() -> list[str]:\n return __all__\n\n\nfor name in DEPRECATED_LIST:\n globals()[f\"_deprecated_{name}\"] = globals()[name]\n del globals()[name]\n\n\ndef __getattr__(name: str) -> Any:\n if name in DEPRECATED_LIST:\n from warnings import warn\n\n warn(\n f\"pyodide.{name} has been moved to pyodide.{DEPRECATED_LIST[name]}.{name} \"\n \"Accessing it through the pyodide module is deprecated.\",\n FutureWarning,\n )\n # Put the name back so we won't warn next time this name is accessed\n globals()[name] = globals()[f\"_deprecated_{name}\"]\n return globals()[name]\n raise AttributeError(f\"module {__name__!r} has no attribute {name!r}\")\n", "path": "src/py/pyodide/__init__.py"}, {"content": "# Configuration file for the Sphinx documentation builder.\n\n# -- Path setup --------------------------------------------------------------\n\nimport atexit\nimport os\nimport shutil\nimport subprocess\nimport sys\nfrom pathlib import Path\nfrom typing import Any\nfrom unittest import mock\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Pyodide\"\ncopyright = \"2019-2022, Pyodide contributors and Mozilla\"\npyodide_version = \"0.21.0a3\"\n\nif \".dev\" in pyodide_version or os.environ.get(\"READTHEDOCS_VERSION\") == \"latest\":\n CDN_URL = \"https://cdn.jsdelivr.net/pyodide/dev/full/\"\nelse:\n CDN_URL = f\"https://cdn.jsdelivr.net/pyodide/v{pyodide_version}/full/\"\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosummary\",\n \"sphinxcontrib.napoleon\",\n \"myst_parser\",\n \"sphinx_js\",\n \"autodocsumm\",\n \"sphinx_panels\",\n \"sphinx_pyodide\",\n \"sphinx_argparse_cli\",\n \"versionwarning.extension\",\n \"sphinx_issues\",\n]\n\nmyst_enable_extensions = [\"substitution\"]\n\njs_language = \"typescript\"\njsdoc_config_path = \"../src/js/tsconfig.json\"\nroot_for_relative_js_paths = \"../src/\"\nissues_github_path = \"pyodide/pyodide\"\n\nversionwarning_messages = {\n \"latest\": (\n \"This is the development version of the documentation. \"\n 'See <a href=\"https://pyodide.org/\">here</a> for latest stable '\n \"documentation. Please do not use Pyodide with non \"\n \"versioned (`dev`) URLs from the CDN for deployed applications!\"\n )\n}\nversionwarning_body_selector = \"#main-content > div\"\n\nautosummary_generate = True\nautodoc_default_flags = [\"members\", \"inherited-members\"]\n\n# Add modules to be mocked.\nmock_modules = [\"ruamel.yaml\", \"tomli\"]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\nsource_suffix = [\".rst\", \".md\"]\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# The language for content autogenerated by Sphinx.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\", \"README.md\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = None\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_book_theme\"\nhtml_logo = \"_static/img/pyodide-logo.png\"\n\n# theme-specific options\nhtml_theme_options: dict[str, Any] = {}\n\n# paths that contain custom static files (such as style sheets)\nhtml_static_path = [\"_static\"]\n\n\nhtml_css_files = [\n \"css/pyodide.css\",\n]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n# html_sidebars = {}\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"Pyodidedoc\"\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = [\"search.html\"]\n\n\ndef delete_attrs(cls):\n for name in dir(cls):\n if not name.startswith(\"_\"):\n try:\n delattr(cls, name)\n except Exception:\n pass\n\n\n# Try not to cause side effects if we are imported incidentally.\n\ntry:\n import sphinx\n\n IN_SPHINX = hasattr(sphinx, \"application\")\nexcept ImportError:\n IN_SPHINX = False\n\nIN_READTHEDOCS = \"READTHEDOCS\" in os.environ\n\nif IN_READTHEDOCS:\n env = {\"PYODIDE_BASE_URL\": CDN_URL}\n os.makedirs(\"_build/html\", exist_ok=True)\n res = subprocess.check_output(\n [\"make\", \"-C\", \"..\", \"docs/_build/html/console.html\"],\n env=env,\n stderr=subprocess.STDOUT,\n encoding=\"utf-8\",\n )\n print(res)\n\nif IN_SPHINX:\n # Compatibility shims. sphinx-js and sphinxcontrib-napoleon have not been updated for Python 3.10\n import collections\n from typing import Callable, Mapping\n\n collections.Mapping = Mapping # type: ignore[attr-defined]\n collections.Callable = Callable # type: ignore[attr-defined]\n\n base_dir = Path(__file__).resolve().parent.parent\n path_dirs = [\n str(base_dir),\n str(base_dir / \"pyodide-build\"),\n str(base_dir / \"docs/sphinx_pyodide\"),\n str(base_dir / \"src/py\"),\n str(base_dir / \"packages/micropip/src\"),\n ]\n sys.path = path_dirs + sys.path\n\n import micropip # noqa: F401\n import pyodide\n\n # We hacked it so that autodoc will look for submodules, but only if we import\n # them here. TODO: look these up in the source directory?\n import pyodide.code\n import pyodide.console\n import pyodide.ffi.wrappers\n import pyodide.http\n import pyodide.webloop\n\n # The full version, including alpha/beta/rc tags.\n release = version = pyodide.__version__\n html_title = f\"Version {version}\"\n\n shutil.copy(\"../src/core/pyproxy.ts\", \"../src/js/pyproxy.gen.ts\")\n shutil.copy(\"../src/core/error_handling.ts\", \"../src/js/error_handling.gen.ts\")\n js_source_path = [str(x) for x in Path(\"../src/js\").glob(\"*.ts\")]\n\n def remove_pyproxy_gen_ts():\n Path(\"../src/js/pyproxy.gen.ts\").unlink(missing_ok=True)\n\n atexit.register(remove_pyproxy_gen_ts)\n\n os.environ[\"PATH\"] += f':{str(Path(\"../src/js/node_modules/.bin\").resolve())}'\n print(os.environ[\"PATH\"])\n if IN_READTHEDOCS:\n subprocess.run([\"npm\", \"ci\"], cwd=\"../src/js\")\n elif not shutil.which(\"typedoc\"):\n raise Exception(\n \"Before building the Pyodide docs you must run 'npm install' in 'src/js'.\"\n )\n\n # Prevent API docs for webloop methods: they are the same as for base event loop\n # and it clutters api docs too much\n delete_attrs(pyodide.webloop.WebLoop)\n delete_attrs(pyodide.webloop.WebLoopPolicy)\n delete_attrs(pyodide.console.PyodideConsole)\n\n for module in mock_modules:\n sys.modules[module] = mock.Mock()\n\n\n# https://github.com/sphinx-doc/sphinx/issues/4054\ndef globalReplace(app, docname, source):\n result = source[0]\n for key in app.config.global_replacements:\n result = result.replace(key, app.config.global_replacements[key])\n source[0] = result\n\n\nglobal_replacements = {\"{{PYODIDE_CDN_URL}}\": CDN_URL}\n\n\ndef setup(app):\n app.add_config_value(\"global_replacements\", {}, True)\n app.connect(\"source-read\", globalReplace)\n", "path": "docs/conf.py"}], "after_files": [{"content": "__version__ = \"0.21.0\"\n", "path": "pyodide-build/pyodide_build/__init__.py"}, {"content": "# When the pyodide package is imported, both the js and the pyodide_js modules\n# will be available to import from. Not all functions in pyodide_js will work\n# until after pyodide is first imported, imported functions from pyodide_js\n# should not be used at import time. It is fine to use js functions at import\n# time.\n#\n# All pure Python code that does not require js or pyodide_js should go in\n# the _pyodide package.\n#\n# This package is imported by the test suite as well, and currently we don't use\n# pytest mocks for js or pyodide_js, so make sure to test \"if IN_BROWSER\" before\n# importing from these.\n__version__ = \"0.21.0\"\n\n__all__ = [\"__version__\"]\n\nfrom typing import Any\n\nfrom . import _state # noqa: F401\nfrom .code import CodeRunner # noqa: F401\nfrom .code import eval_code # noqa: F401\nfrom .code import eval_code_async # noqa: F401\nfrom .code import find_imports # noqa: F401\nfrom .code import should_quiet # noqa: F401\nfrom .ffi import ConversionError # noqa: F401\nfrom .ffi import JsException # noqa: F401\nfrom .ffi import JsProxy # noqa: F401\nfrom .ffi import create_once_callable # noqa: F401\nfrom .ffi import create_proxy # noqa: F401\nfrom .ffi import destroy_proxies # noqa: F401\nfrom .ffi import register_js_module # noqa: F401\nfrom .ffi import to_js # noqa: F401\nfrom .ffi import unregister_js_module # noqa: F401\nfrom .http import open_url # noqa: F401\n\nDEPRECATED_LIST = {\n \"CodeRunner\": \"code\",\n \"eval_code\": \"code\",\n \"eval_code_async\": \"code\",\n \"find_imports\": \"code\",\n \"should_quiet\": \"code\",\n \"open_url\": \"http\",\n \"ConversionError\": \"ffi\",\n \"JsException\": \"ffi\",\n \"JsProxy\": \"ffi\",\n \"create_once_callable\": \"ffi\",\n \"create_proxy\": \"ffi\",\n \"destroy_proxies\": \"ffi\",\n \"to_js\": \"ffi\",\n \"register_js_module\": \"ffi\",\n \"unregister_js_module\": \"ffi\",\n}\n\n\nfrom .webloop import _initialize_event_loop\n\n_initialize_event_loop()\ndel _initialize_event_loop\n\n\ndef __dir__() -> list[str]:\n return __all__\n\n\nfor name in DEPRECATED_LIST:\n globals()[f\"_deprecated_{name}\"] = globals()[name]\n del globals()[name]\n\n\ndef __getattr__(name: str) -> Any:\n if name in DEPRECATED_LIST:\n from warnings import warn\n\n warn(\n f\"pyodide.{name} has been moved to pyodide.{DEPRECATED_LIST[name]}.{name} \"\n \"Accessing it through the pyodide module is deprecated.\",\n FutureWarning,\n )\n # Put the name back so we won't warn next time this name is accessed\n globals()[name] = globals()[f\"_deprecated_{name}\"]\n return globals()[name]\n raise AttributeError(f\"module {__name__!r} has no attribute {name!r}\")\n", "path": "src/py/pyodide/__init__.py"}, {"content": "# Configuration file for the Sphinx documentation builder.\n\n# -- Path setup --------------------------------------------------------------\n\nimport atexit\nimport os\nimport shutil\nimport subprocess\nimport sys\nfrom pathlib import Path\nfrom typing import Any\nfrom unittest import mock\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Pyodide\"\ncopyright = \"2019-2022, Pyodide contributors and Mozilla\"\npyodide_version = \"0.21.0\"\n\nif \".dev\" in pyodide_version or os.environ.get(\"READTHEDOCS_VERSION\") == \"latest\":\n CDN_URL = \"https://cdn.jsdelivr.net/pyodide/dev/full/\"\nelse:\n CDN_URL = f\"https://cdn.jsdelivr.net/pyodide/v{pyodide_version}/full/\"\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosummary\",\n \"sphinxcontrib.napoleon\",\n \"myst_parser\",\n \"sphinx_js\",\n \"autodocsumm\",\n \"sphinx_panels\",\n \"sphinx_pyodide\",\n \"sphinx_argparse_cli\",\n \"versionwarning.extension\",\n \"sphinx_issues\",\n]\n\nmyst_enable_extensions = [\"substitution\"]\n\njs_language = \"typescript\"\njsdoc_config_path = \"../src/js/tsconfig.json\"\nroot_for_relative_js_paths = \"../src/\"\nissues_github_path = \"pyodide/pyodide\"\n\nversionwarning_messages = {\n \"latest\": (\n \"This is the development version of the documentation. \"\n 'See <a href=\"https://pyodide.org/\">here</a> for latest stable '\n \"documentation. Please do not use Pyodide with non \"\n \"versioned (`dev`) URLs from the CDN for deployed applications!\"\n )\n}\nversionwarning_body_selector = \"#main-content > div\"\n\nautosummary_generate = True\nautodoc_default_flags = [\"members\", \"inherited-members\"]\n\n# Add modules to be mocked.\nmock_modules = [\"ruamel.yaml\", \"tomli\"]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\nsource_suffix = [\".rst\", \".md\"]\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# The language for content autogenerated by Sphinx.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\", \"README.md\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = None\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_book_theme\"\nhtml_logo = \"_static/img/pyodide-logo.png\"\n\n# theme-specific options\nhtml_theme_options: dict[str, Any] = {}\n\n# paths that contain custom static files (such as style sheets)\nhtml_static_path = [\"_static\"]\n\n\nhtml_css_files = [\n \"css/pyodide.css\",\n]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n# html_sidebars = {}\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"Pyodidedoc\"\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = [\"search.html\"]\n\n\ndef delete_attrs(cls):\n for name in dir(cls):\n if not name.startswith(\"_\"):\n try:\n delattr(cls, name)\n except Exception:\n pass\n\n\n# Try not to cause side effects if we are imported incidentally.\n\ntry:\n import sphinx\n\n IN_SPHINX = hasattr(sphinx, \"application\")\nexcept ImportError:\n IN_SPHINX = False\n\nIN_READTHEDOCS = \"READTHEDOCS\" in os.environ\n\nif IN_READTHEDOCS:\n env = {\"PYODIDE_BASE_URL\": CDN_URL}\n os.makedirs(\"_build/html\", exist_ok=True)\n res = subprocess.check_output(\n [\"make\", \"-C\", \"..\", \"docs/_build/html/console.html\"],\n env=env,\n stderr=subprocess.STDOUT,\n encoding=\"utf-8\",\n )\n print(res)\n\nif IN_SPHINX:\n # Compatibility shims. sphinx-js and sphinxcontrib-napoleon have not been updated for Python 3.10\n import collections\n from typing import Callable, Mapping\n\n collections.Mapping = Mapping # type: ignore[attr-defined]\n collections.Callable = Callable # type: ignore[attr-defined]\n\n base_dir = Path(__file__).resolve().parent.parent\n path_dirs = [\n str(base_dir),\n str(base_dir / \"pyodide-build\"),\n str(base_dir / \"docs/sphinx_pyodide\"),\n str(base_dir / \"src/py\"),\n str(base_dir / \"packages/micropip/src\"),\n ]\n sys.path = path_dirs + sys.path\n\n import micropip # noqa: F401\n import pyodide\n\n # We hacked it so that autodoc will look for submodules, but only if we import\n # them here. TODO: look these up in the source directory?\n import pyodide.code\n import pyodide.console\n import pyodide.ffi.wrappers\n import pyodide.http\n import pyodide.webloop\n\n # The full version, including alpha/beta/rc tags.\n release = version = pyodide.__version__\n html_title = f\"Version {version}\"\n\n shutil.copy(\"../src/core/pyproxy.ts\", \"../src/js/pyproxy.gen.ts\")\n shutil.copy(\"../src/core/error_handling.ts\", \"../src/js/error_handling.gen.ts\")\n js_source_path = [str(x) for x in Path(\"../src/js\").glob(\"*.ts\")]\n\n def remove_pyproxy_gen_ts():\n Path(\"../src/js/pyproxy.gen.ts\").unlink(missing_ok=True)\n\n atexit.register(remove_pyproxy_gen_ts)\n\n os.environ[\"PATH\"] += f':{str(Path(\"../src/js/node_modules/.bin\").resolve())}'\n print(os.environ[\"PATH\"])\n if IN_READTHEDOCS:\n subprocess.run([\"npm\", \"ci\"], cwd=\"../src/js\")\n elif not shutil.which(\"typedoc\"):\n raise Exception(\n \"Before building the Pyodide docs you must run 'npm install' in 'src/js'.\"\n )\n\n # Prevent API docs for webloop methods: they are the same as for base event loop\n # and it clutters api docs too much\n delete_attrs(pyodide.webloop.WebLoop)\n delete_attrs(pyodide.webloop.WebLoopPolicy)\n delete_attrs(pyodide.console.PyodideConsole)\n\n for module in mock_modules:\n sys.modules[module] = mock.Mock()\n\n\n# https://github.com/sphinx-doc/sphinx/issues/4054\ndef globalReplace(app, docname, source):\n result = source[0]\n for key in app.config.global_replacements:\n result = result.replace(key, app.config.global_replacements[key])\n source[0] = result\n\n\nglobal_replacements = {\"{{PYODIDE_CDN_URL}}\": CDN_URL}\n\n\ndef setup(app):\n app.add_config_value(\"global_replacements\", {}, True)\n app.connect(\"source-read\", globalReplace)\n", "path": "docs/conf.py"}]}
3,544
375
gh_patches_debug_36038
rasdani/github-patches
git_diff
scverse__scanpy-260
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `NameError: name 'logg' is not defined` when using `sc.queries.mitochondrial_genes` I just tried ```python import scanpy.api as sc sc.queries.mitochondrial_genes('www.ensembl.org', 'strange_organism') ``` I would expect scanpy complains that it does not know `'strange_organism'`, but I get the error ```python --------------------------------------------------------------------------- NameError Traceback (most recent call last) <ipython-input-13-6a41b361ab41> in <module>() 1 import scanpy.api as sc ----> 2 sc.queries.mitochondrial_genes('www.ensembl.org', 'drerio') ~/software/scanpy/scanpy/queries/__init__.py in mitochondrial_genes(host, org) 34 s.add_attribute_to_xml('mgi_symbol') 35 else: ---> 36 logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True) 37 return None 38 s.add_attribute_to_xml('chromosome_name') NameError: name 'logg' is not defined ``` It seems to me like `queries/__init__.py` misses an `from .. import logging as logg` statement. Would maybe also make sense to show the the message that an organism is not available at verbosity level 1 instead of 4? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scanpy/queries/__init__.py` Content: ``` 1 import pandas as pd 2 3 4 def mitochondrial_genes(host, org): 5 """Mitochondrial gene symbols for specific organism through BioMart. 6 7 Parameters 8 ---------- 9 host : {{'www.ensembl.org', ...}} 10 A valid BioMart host URL. 11 org : {{'hsapiens', 'mmusculus'}} 12 Organism to query. Currently available are human ('hsapiens') and mouse 13 ('mmusculus'). 14 15 Returns 16 ------- 17 A `pd.Index` containing mitochondrial gene symbols. 18 """ 19 try: 20 from bioservices import biomart 21 except ImportError: 22 raise ImportError( 23 'You need to install the `bioservices` module.') 24 from io import StringIO 25 s = biomart.BioMart(host=host) 26 27 # building query 28 s.new_query() 29 if org == 'hsapiens': 30 s.add_dataset_to_xml('hsapiens_gene_ensembl') 31 s.add_attribute_to_xml('hgnc_symbol') 32 elif org == 'mmusculus': 33 s.add_dataset_to_xml('mmusculus_gene_ensembl') 34 s.add_attribute_to_xml('mgi_symbol') 35 else: 36 logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True) 37 return None 38 s.add_attribute_to_xml('chromosome_name') 39 xml = s.get_xml() 40 41 # parsing mitochondrial gene symbols 42 res = pd.read_csv(StringIO(s.query(xml)), sep='\t', header=None) 43 res.columns = ['symbol', 'chromosome_name'] 44 res = res.dropna() 45 res = res[res['chromosome_name'] == 'MT'] 46 res = res.set_index('symbol') 47 res = res[~res.index.duplicated(keep='first')] 48 49 return res.index 50 51 52 def gene_coordinates(host, org, gene, chr_exclude=[]): 53 """Retrieve gene coordinates for specific organism through BioMart. 54 Parameters 55 ---------- 56 host : {{'www.ensembl.org', ...}} 57 A valid BioMart host URL. Can be used to control genome build. 58 org : {{'hsapiens', 'mmusculus'}} 59 Organism to query. Currently available are human ('hsapiens') and mouse 60 ('mmusculus'). 61 gene : 62 The gene symbol (e.g. 'hgnc_symbol' for human) for which to retrieve 63 coordinates. 64 chr_exclude : 65 A list of chromosomes to exclude from query. 66 Returns 67 ------- 68 A `pd.DataFrame` containing gene coordinates for the specified gene symbol. 69 """ 70 try: 71 from bioservices import biomart 72 except ImportError: 73 raise ImportError( 74 'You need to install the `bioservices` module.') 75 from io import StringIO 76 s = biomart.BioMart(host=host) 77 78 # building query 79 s.new_query() 80 if org == 'hsapiens': 81 s.add_dataset_to_xml('hsapiens_gene_ensembl') 82 s.add_attribute_to_xml('hgnc_symbol') 83 elif org == 'mmusculus': 84 s.add_dataset_to_xml('mmusculus_gene_ensembl') 85 s.add_attribute_to_xml('mgi_symbol') 86 else: 87 logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True) 88 return None 89 s.add_attribute_to_xml('chromosome_name') 90 s.add_attribute_to_xml('start_position') 91 s.add_attribute_to_xml('end_position') 92 xml = s.get_xml() 93 94 # parsing gene coordinates 95 res = pd.read_csv(StringIO(s.query(xml)), sep='\t', header=None) 96 res.columns = ['symbol', 'chromosome_name', 'start', 'end'] 97 res = res.dropna() 98 res = res[~res['chromosome_name'].isin(chr_exclude)] 99 res = res.set_index('symbol') 100 101 return res.loc[[gene], :] 102 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scanpy/queries/__init__.py b/scanpy/queries/__init__.py --- a/scanpy/queries/__init__.py +++ b/scanpy/queries/__init__.py @@ -1,4 +1,5 @@ import pandas as pd +from .. import logging as logg def mitochondrial_genes(host, org): @@ -8,9 +9,9 @@ ---------- host : {{'www.ensembl.org', ...}} A valid BioMart host URL. - org : {{'hsapiens', 'mmusculus'}} - Organism to query. Currently available are human ('hsapiens') and mouse - ('mmusculus'). + org : {{'hsapiens', 'mmusculus', 'drerio'}} + Organism to query. Currently available are human ('hsapiens'), mouse + ('mmusculus') and zebrafish ('drerio'). Returns ------- @@ -32,6 +33,9 @@ elif org == 'mmusculus': s.add_dataset_to_xml('mmusculus_gene_ensembl') s.add_attribute_to_xml('mgi_symbol') + elif org == 'drerio': + s.add_dataset_to_xml('drerio_gene_ensembl') + s.add_attribute_to_xml('zfin_id_symbol') else: logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True) return None @@ -55,9 +59,9 @@ ---------- host : {{'www.ensembl.org', ...}} A valid BioMart host URL. Can be used to control genome build. - org : {{'hsapiens', 'mmusculus'}} - Organism to query. Currently available are human ('hsapiens') and mouse - ('mmusculus'). + org : {{'hsapiens', 'mmusculus', 'drerio'}} + Organism to query. Currently available are human ('hsapiens'), mouse + ('mmusculus') and zebrafish ('drerio'). gene : The gene symbol (e.g. 'hgnc_symbol' for human) for which to retrieve coordinates. @@ -83,6 +87,9 @@ elif org == 'mmusculus': s.add_dataset_to_xml('mmusculus_gene_ensembl') s.add_attribute_to_xml('mgi_symbol') + elif org == 'drerio': + s.add_dataset_to_xml('drerio_gene_ensembl') + s.add_attribute_to_xml('zfin_id_symbol') else: logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True) return None
{"golden_diff": "diff --git a/scanpy/queries/__init__.py b/scanpy/queries/__init__.py\n--- a/scanpy/queries/__init__.py\n+++ b/scanpy/queries/__init__.py\n@@ -1,4 +1,5 @@\n import pandas as pd\n+from .. import logging as logg\n \n \n def mitochondrial_genes(host, org):\n@@ -8,9 +9,9 @@\n ----------\n host : {{'www.ensembl.org', ...}}\n A valid BioMart host URL.\n- org : {{'hsapiens', 'mmusculus'}}\n- Organism to query. Currently available are human ('hsapiens') and mouse\n- ('mmusculus').\n+ org : {{'hsapiens', 'mmusculus', 'drerio'}}\n+ Organism to query. Currently available are human ('hsapiens'), mouse\n+ ('mmusculus') and zebrafish ('drerio').\n \n Returns\n -------\n@@ -32,6 +33,9 @@\n elif org == 'mmusculus':\n s.add_dataset_to_xml('mmusculus_gene_ensembl')\n s.add_attribute_to_xml('mgi_symbol')\n+ elif org == 'drerio':\n+ s.add_dataset_to_xml('drerio_gene_ensembl')\n+ s.add_attribute_to_xml('zfin_id_symbol')\n else:\n logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True)\n return None\n@@ -55,9 +59,9 @@\n ----------\n host : {{'www.ensembl.org', ...}}\n A valid BioMart host URL. Can be used to control genome build.\n- org : {{'hsapiens', 'mmusculus'}}\n- Organism to query. Currently available are human ('hsapiens') and mouse\n- ('mmusculus').\n+ org : {{'hsapiens', 'mmusculus', 'drerio'}}\n+ Organism to query. Currently available are human ('hsapiens'), mouse\n+ ('mmusculus') and zebrafish ('drerio').\n gene :\n The gene symbol (e.g. 'hgnc_symbol' for human) for which to retrieve\n coordinates.\n@@ -83,6 +87,9 @@\n elif org == 'mmusculus':\n s.add_dataset_to_xml('mmusculus_gene_ensembl')\n s.add_attribute_to_xml('mgi_symbol')\n+ elif org == 'drerio':\n+ s.add_dataset_to_xml('drerio_gene_ensembl')\n+ s.add_attribute_to_xml('zfin_id_symbol')\n else:\n logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True)\n return None\n", "issue": "`NameError: name 'logg' is not defined` when using `sc.queries.mitochondrial_genes`\nI just tried\r\n```python\r\nimport scanpy.api as sc\r\nsc.queries.mitochondrial_genes('www.ensembl.org', 'strange_organism')\r\n```\r\nI would expect scanpy complains that it does not know `'strange_organism'`, but I get the error \r\n```python\r\n---------------------------------------------------------------------------\r\nNameError Traceback (most recent call last)\r\n<ipython-input-13-6a41b361ab41> in <module>()\r\n 1 import scanpy.api as sc\r\n----> 2 sc.queries.mitochondrial_genes('www.ensembl.org', 'drerio')\r\n\r\n~/software/scanpy/scanpy/queries/__init__.py in mitochondrial_genes(host, org)\r\n 34 s.add_attribute_to_xml('mgi_symbol')\r\n 35 else:\r\n---> 36 logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True)\r\n 37 return None\r\n 38 s.add_attribute_to_xml('chromosome_name')\r\n\r\nNameError: name 'logg' is not defined\r\n```\r\nIt seems to me like `queries/__init__.py` misses an `from .. import logging as logg` statement.\r\n\r\nWould maybe also make sense to show the the message that an organism is not available at verbosity level 1 instead of 4?\n", "before_files": [{"content": "import pandas as pd\n\n\ndef mitochondrial_genes(host, org):\n \"\"\"Mitochondrial gene symbols for specific organism through BioMart.\n\n Parameters\n ----------\n host : {{'www.ensembl.org', ...}}\n A valid BioMart host URL.\n org : {{'hsapiens', 'mmusculus'}}\n Organism to query. Currently available are human ('hsapiens') and mouse\n ('mmusculus').\n\n Returns\n -------\n A `pd.Index` containing mitochondrial gene symbols.\n \"\"\"\n try:\n from bioservices import biomart\n except ImportError:\n raise ImportError(\n 'You need to install the `bioservices` module.')\n from io import StringIO\n s = biomart.BioMart(host=host)\n\n # building query\n s.new_query()\n if org == 'hsapiens':\n s.add_dataset_to_xml('hsapiens_gene_ensembl')\n s.add_attribute_to_xml('hgnc_symbol')\n elif org == 'mmusculus':\n s.add_dataset_to_xml('mmusculus_gene_ensembl')\n s.add_attribute_to_xml('mgi_symbol')\n else:\n logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True)\n return None\n s.add_attribute_to_xml('chromosome_name')\n xml = s.get_xml()\n\n # parsing mitochondrial gene symbols\n res = pd.read_csv(StringIO(s.query(xml)), sep='\\t', header=None)\n res.columns = ['symbol', 'chromosome_name']\n res = res.dropna()\n res = res[res['chromosome_name'] == 'MT']\n res = res.set_index('symbol')\n res = res[~res.index.duplicated(keep='first')]\n\n return res.index\n\n\ndef gene_coordinates(host, org, gene, chr_exclude=[]):\n \"\"\"Retrieve gene coordinates for specific organism through BioMart.\n Parameters\n ----------\n host : {{'www.ensembl.org', ...}}\n A valid BioMart host URL. Can be used to control genome build.\n org : {{'hsapiens', 'mmusculus'}}\n Organism to query. Currently available are human ('hsapiens') and mouse\n ('mmusculus').\n gene :\n The gene symbol (e.g. 'hgnc_symbol' for human) for which to retrieve\n coordinates.\n chr_exclude :\n A list of chromosomes to exclude from query.\n Returns\n -------\n A `pd.DataFrame` containing gene coordinates for the specified gene symbol.\n \"\"\"\n try:\n from bioservices import biomart\n except ImportError:\n raise ImportError(\n 'You need to install the `bioservices` module.')\n from io import StringIO\n s = biomart.BioMart(host=host)\n\n # building query\n s.new_query()\n if org == 'hsapiens':\n s.add_dataset_to_xml('hsapiens_gene_ensembl')\n s.add_attribute_to_xml('hgnc_symbol')\n elif org == 'mmusculus':\n s.add_dataset_to_xml('mmusculus_gene_ensembl')\n s.add_attribute_to_xml('mgi_symbol')\n else:\n logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True)\n return None\n s.add_attribute_to_xml('chromosome_name')\n s.add_attribute_to_xml('start_position')\n s.add_attribute_to_xml('end_position')\n xml = s.get_xml()\n\n # parsing gene coordinates\n res = pd.read_csv(StringIO(s.query(xml)), sep='\\t', header=None)\n res.columns = ['symbol', 'chromosome_name', 'start', 'end']\n res = res.dropna()\n res = res[~res['chromosome_name'].isin(chr_exclude)]\n res = res.set_index('symbol')\n\n return res.loc[[gene], :]\n", "path": "scanpy/queries/__init__.py"}], "after_files": [{"content": "import pandas as pd\nfrom .. import logging as logg\n\n\ndef mitochondrial_genes(host, org):\n \"\"\"Mitochondrial gene symbols for specific organism through BioMart.\n\n Parameters\n ----------\n host : {{'www.ensembl.org', ...}}\n A valid BioMart host URL.\n org : {{'hsapiens', 'mmusculus', 'drerio'}}\n Organism to query. Currently available are human ('hsapiens'), mouse\n ('mmusculus') and zebrafish ('drerio').\n\n Returns\n -------\n A `pd.Index` containing mitochondrial gene symbols.\n \"\"\"\n try:\n from bioservices import biomart\n except ImportError:\n raise ImportError(\n 'You need to install the `bioservices` module.')\n from io import StringIO\n s = biomart.BioMart(host=host)\n\n # building query\n s.new_query()\n if org == 'hsapiens':\n s.add_dataset_to_xml('hsapiens_gene_ensembl')\n s.add_attribute_to_xml('hgnc_symbol')\n elif org == 'mmusculus':\n s.add_dataset_to_xml('mmusculus_gene_ensembl')\n s.add_attribute_to_xml('mgi_symbol')\n elif org == 'drerio':\n s.add_dataset_to_xml('drerio_gene_ensembl')\n s.add_attribute_to_xml('zfin_id_symbol')\n else:\n logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True)\n return None\n s.add_attribute_to_xml('chromosome_name')\n xml = s.get_xml()\n\n # parsing mitochondrial gene symbols\n res = pd.read_csv(StringIO(s.query(xml)), sep='\\t', header=None)\n res.columns = ['symbol', 'chromosome_name']\n res = res.dropna()\n res = res[res['chromosome_name'] == 'MT']\n res = res.set_index('symbol')\n res = res[~res.index.duplicated(keep='first')]\n\n return res.index\n\n\ndef gene_coordinates(host, org, gene, chr_exclude=[]):\n \"\"\"Retrieve gene coordinates for specific organism through BioMart.\n Parameters\n ----------\n host : {{'www.ensembl.org', ...}}\n A valid BioMart host URL. Can be used to control genome build.\n org : {{'hsapiens', 'mmusculus', 'drerio'}}\n Organism to query. Currently available are human ('hsapiens'), mouse\n ('mmusculus') and zebrafish ('drerio').\n gene :\n The gene symbol (e.g. 'hgnc_symbol' for human) for which to retrieve\n coordinates.\n chr_exclude :\n A list of chromosomes to exclude from query.\n Returns\n -------\n A `pd.DataFrame` containing gene coordinates for the specified gene symbol.\n \"\"\"\n try:\n from bioservices import biomart\n except ImportError:\n raise ImportError(\n 'You need to install the `bioservices` module.')\n from io import StringIO\n s = biomart.BioMart(host=host)\n\n # building query\n s.new_query()\n if org == 'hsapiens':\n s.add_dataset_to_xml('hsapiens_gene_ensembl')\n s.add_attribute_to_xml('hgnc_symbol')\n elif org == 'mmusculus':\n s.add_dataset_to_xml('mmusculus_gene_ensembl')\n s.add_attribute_to_xml('mgi_symbol')\n elif org == 'drerio':\n s.add_dataset_to_xml('drerio_gene_ensembl')\n s.add_attribute_to_xml('zfin_id_symbol')\n else:\n logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True)\n return None\n s.add_attribute_to_xml('chromosome_name')\n s.add_attribute_to_xml('start_position')\n s.add_attribute_to_xml('end_position')\n xml = s.get_xml()\n\n # parsing gene coordinates\n res = pd.read_csv(StringIO(s.query(xml)), sep='\\t', header=None)\n res.columns = ['symbol', 'chromosome_name', 'start', 'end']\n res = res.dropna()\n res = res[~res['chromosome_name'].isin(chr_exclude)]\n res = res.set_index('symbol')\n\n return res.loc[[gene], :]\n", "path": "scanpy/queries/__init__.py"}]}
1,604
613
gh_patches_debug_3479
rasdani/github-patches
git_diff
docker__docker-py-1393
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Exception retrieving untagged images on api >= 1.24 Docker API >= 1.24 will return a null object if image tags DNE instead of not including it in the response. This makes the dict.get fail to catch the null case and the list comprehension to iterate over a non-iterable. ``` File "<stdin>", line 1, in <module> File "docker/models/images.py", line 16, in __repr__ return "<%s: '%s'>" % (self.__class__.__name__, "', '".join(self.tags)) File "docker/models/images.py", line 34, in tags tag for tag in self.attrs.get('RepoTags', []) TypeError: 'NoneType' object is not iterable ``` This is similar to an issue seen in [salt](https://github.com/saltstack/salt/pull/35447/commits/b833b5f9587534d3b843a026ef91abc4ec929d0f) Was able to get things working with a pretty quick change: ``` diff --git a/docker/models/images.py b/docker/models/images.py index 32068e6..39a640d 100644 --- a/docker/models/images.py +++ b/docker/models/images.py @@ -30,9 +30,11 @@ class Image(Model): """ The image's tags. """ + tags = self.attrs.get('RepoTags', []) + if tags is None: + return [] return [ - tag for tag in self.attrs.get('RepoTags', []) - if tag != '<none>:<none>' + tag for tag in tags if tag != '<none>:<none>' ] def history(self): ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docker/models/images.py` Content: ``` 1 import re 2 3 import six 4 5 from ..api import APIClient 6 from ..errors import BuildError 7 from ..utils.json_stream import json_stream 8 from .resource import Collection, Model 9 10 11 class Image(Model): 12 """ 13 An image on the server. 14 """ 15 def __repr__(self): 16 return "<%s: '%s'>" % (self.__class__.__name__, "', '".join(self.tags)) 17 18 @property 19 def short_id(self): 20 """ 21 The ID of the image truncated to 10 characters, plus the ``sha256:`` 22 prefix. 23 """ 24 if self.id.startswith('sha256:'): 25 return self.id[:17] 26 return self.id[:10] 27 28 @property 29 def tags(self): 30 """ 31 The image's tags. 32 """ 33 return [ 34 tag for tag in self.attrs.get('RepoTags', []) 35 if tag != '<none>:<none>' 36 ] 37 38 def history(self): 39 """ 40 Show the history of an image. 41 42 Returns: 43 (str): The history of the image. 44 45 Raises: 46 :py:class:`docker.errors.APIError` 47 If the server returns an error. 48 """ 49 return self.client.api.history(self.id) 50 51 def save(self): 52 """ 53 Get a tarball of an image. Similar to the ``docker save`` command. 54 55 Returns: 56 (urllib3.response.HTTPResponse object): The response from the 57 daemon. 58 59 Raises: 60 :py:class:`docker.errors.APIError` 61 If the server returns an error. 62 63 Example: 64 65 >>> image = cli.get("fedora:latest") 66 >>> resp = image.save() 67 >>> f = open('/tmp/fedora-latest.tar', 'w') 68 >>> f.write(resp.data) 69 >>> f.close() 70 """ 71 return self.client.api.get_image(self.id) 72 73 def tag(self, repository, tag=None, **kwargs): 74 """ 75 Tag this image into a repository. Similar to the ``docker tag`` 76 command. 77 78 Args: 79 repository (str): The repository to set for the tag 80 tag (str): The tag name 81 force (bool): Force 82 83 Raises: 84 :py:class:`docker.errors.APIError` 85 If the server returns an error. 86 87 Returns: 88 (bool): ``True`` if successful 89 """ 90 self.client.api.tag(self.id, repository, tag=tag, **kwargs) 91 92 93 class ImageCollection(Collection): 94 model = Image 95 96 def build(self, **kwargs): 97 """ 98 Build an image and return it. Similar to the ``docker build`` 99 command. Either ``path`` or ``fileobj`` must be set. 100 101 If you have a tar file for the Docker build context (including a 102 Dockerfile) already, pass a readable file-like object to ``fileobj`` 103 and also pass ``custom_context=True``. If the stream is compressed 104 also, set ``encoding`` to the correct value (e.g ``gzip``). 105 106 If you want to get the raw output of the build, use the 107 :py:meth:`~docker.api.build.BuildApiMixin.build` method in the 108 low-level API. 109 110 Args: 111 path (str): Path to the directory containing the Dockerfile 112 fileobj: A file object to use as the Dockerfile. (Or a file-like 113 object) 114 tag (str): A tag to add to the final image 115 quiet (bool): Whether to return the status 116 nocache (bool): Don't use the cache when set to ``True`` 117 rm (bool): Remove intermediate containers. The ``docker build`` 118 command now defaults to ``--rm=true``, but we have kept the old 119 default of `False` to preserve backward compatibility 120 stream (bool): *Deprecated for API version > 1.8 (always True)*. 121 Return a blocking generator you can iterate over to retrieve 122 build output as it happens 123 timeout (int): HTTP timeout 124 custom_context (bool): Optional if using ``fileobj`` 125 encoding (str): The encoding for a stream. Set to ``gzip`` for 126 compressing 127 pull (bool): Downloads any updates to the FROM image in Dockerfiles 128 forcerm (bool): Always remove intermediate containers, even after 129 unsuccessful builds 130 dockerfile (str): path within the build context to the Dockerfile 131 buildargs (dict): A dictionary of build arguments 132 container_limits (dict): A dictionary of limits applied to each 133 container created by the build process. Valid keys: 134 135 - memory (int): set memory limit for build 136 - memswap (int): Total memory (memory + swap), -1 to disable 137 swap 138 - cpushares (int): CPU shares (relative weight) 139 - cpusetcpus (str): CPUs in which to allow execution, e.g., 140 ``"0-3"``, ``"0,1"`` 141 decode (bool): If set to ``True``, the returned stream will be 142 decoded into dicts on the fly. Default ``False``. 143 144 Returns: 145 (:py:class:`Image`): The built image. 146 147 Raises: 148 :py:class:`docker.errors.BuildError` 149 If there is an error during the build. 150 :py:class:`docker.errors.APIError` 151 If the server returns any other error. 152 ``TypeError`` 153 If neither ``path`` nor ``fileobj`` is specified. 154 """ 155 resp = self.client.api.build(**kwargs) 156 if isinstance(resp, six.string_types): 157 return self.get(resp) 158 events = list(json_stream(resp)) 159 if not events: 160 return BuildError('Unknown') 161 event = events[-1] 162 if 'stream' in event: 163 match = re.search(r'Successfully built ([0-9a-f]+)', 164 event.get('stream', '')) 165 if match: 166 image_id = match.group(1) 167 return self.get(image_id) 168 169 raise BuildError(event.get('error') or event) 170 171 def get(self, name): 172 """ 173 Gets an image. 174 175 Args: 176 name (str): The name of the image. 177 178 Returns: 179 (:py:class:`Image`): The image. 180 181 Raises: 182 :py:class:`docker.errors.ImageNotFound` If the image does not 183 exist. 184 :py:class:`docker.errors.APIError` 185 If the server returns an error. 186 """ 187 return self.prepare_model(self.client.api.inspect_image(name)) 188 189 def list(self, name=None, all=False, filters=None): 190 """ 191 List images on the server. 192 193 Args: 194 name (str): Only show images belonging to the repository ``name`` 195 all (bool): Show intermediate image layers. By default, these are 196 filtered out. 197 filters (dict): Filters to be processed on the image list. 198 Available filters: 199 - ``dangling`` (bool) 200 - ``label`` (str): format either ``key`` or ``key=value`` 201 202 Returns: 203 (list of :py:class:`Image`): The images. 204 205 Raises: 206 :py:class:`docker.errors.APIError` 207 If the server returns an error. 208 """ 209 resp = self.client.api.images(name=name, all=all, filters=filters) 210 return [self.prepare_model(r) for r in resp] 211 212 def load(self, data): 213 """ 214 Load an image that was previously saved using 215 :py:meth:`~docker.models.images.Image.save` (or ``docker save``). 216 Similar to ``docker load``. 217 218 Args: 219 data (binary): Image data to be loaded. 220 221 Raises: 222 :py:class:`docker.errors.APIError` 223 If the server returns an error. 224 """ 225 return self.client.api.load_image(data) 226 227 def pull(self, name, **kwargs): 228 """ 229 Pull an image of the given name and return it. Similar to the 230 ``docker pull`` command. 231 232 If you want to get the raw pull output, use the 233 :py:meth:`~docker.api.image.ImageApiMixin.pull` method in the 234 low-level API. 235 236 Args: 237 repository (str): The repository to pull 238 tag (str): The tag to pull 239 insecure_registry (bool): Use an insecure registry 240 auth_config (dict): Override the credentials that 241 :py:meth:`~docker.client.DockerClient.login` has set for 242 this request. ``auth_config`` should contain the ``username`` 243 and ``password`` keys to be valid. 244 245 Returns: 246 (:py:class:`Image`): The image that has been pulled. 247 248 Raises: 249 :py:class:`docker.errors.APIError` 250 If the server returns an error. 251 252 Example: 253 254 >>> image = client.images.pull('busybox') 255 """ 256 self.client.api.pull(name, **kwargs) 257 return self.get(name) 258 259 def push(self, repository, tag=None, **kwargs): 260 return self.client.api.push(repository, tag=tag, **kwargs) 261 push.__doc__ = APIClient.push.__doc__ 262 263 def remove(self, *args, **kwargs): 264 self.client.api.remove_image(*args, **kwargs) 265 remove.__doc__ = APIClient.remove_image.__doc__ 266 267 def search(self, *args, **kwargs): 268 return self.client.api.search(*args, **kwargs) 269 search.__doc__ = APIClient.search.__doc__ 270 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docker/models/images.py b/docker/models/images.py --- a/docker/models/images.py +++ b/docker/models/images.py @@ -30,10 +30,10 @@ """ The image's tags. """ - return [ - tag for tag in self.attrs.get('RepoTags', []) - if tag != '<none>:<none>' - ] + tags = self.attrs.get('RepoTags') + if tags is None: + tags = [] + return [tag for tag in tags if tag != '<none>:<none>'] def history(self): """
{"golden_diff": "diff --git a/docker/models/images.py b/docker/models/images.py\n--- a/docker/models/images.py\n+++ b/docker/models/images.py\n@@ -30,10 +30,10 @@\n \"\"\"\n The image's tags.\n \"\"\"\n- return [\n- tag for tag in self.attrs.get('RepoTags', [])\n- if tag != '<none>:<none>'\n- ]\n+ tags = self.attrs.get('RepoTags')\n+ if tags is None:\n+ tags = []\n+ return [tag for tag in tags if tag != '<none>:<none>']\n \n def history(self):\n \"\"\"\n", "issue": "Exception retrieving untagged images on api >= 1.24\nDocker API >= 1.24 will return a null object if image tags DNE instead of not including it in the response. This makes the dict.get fail to catch the null case and the list comprehension to iterate over a non-iterable.\r\n\r\n```\r\n File \"<stdin>\", line 1, in <module>\r\n File \"docker/models/images.py\", line 16, in __repr__\r\n return \"<%s: '%s'>\" % (self.__class__.__name__, \"', '\".join(self.tags))\r\n File \"docker/models/images.py\", line 34, in tags\r\n tag for tag in self.attrs.get('RepoTags', [])\r\nTypeError: 'NoneType' object is not iterable\r\n```\r\n\r\nThis is similar to an issue seen in [salt](https://github.com/saltstack/salt/pull/35447/commits/b833b5f9587534d3b843a026ef91abc4ec929d0f)\r\n\r\nWas able to get things working with a pretty quick change:\r\n```\r\ndiff --git a/docker/models/images.py b/docker/models/images.py\r\nindex 32068e6..39a640d 100644\r\n--- a/docker/models/images.py\r\n+++ b/docker/models/images.py\r\n@@ -30,9 +30,11 @@ class Image(Model):\r\n \"\"\"\r\n The image's tags.\r\n \"\"\"\r\n+ tags = self.attrs.get('RepoTags', [])\r\n+ if tags is None:\r\n+ return []\r\n return [\r\n- tag for tag in self.attrs.get('RepoTags', [])\r\n- if tag != '<none>:<none>'\r\n+ tag for tag in tags if tag != '<none>:<none>'\r\n ]\r\n \r\n def history(self):\r\n```\n", "before_files": [{"content": "import re\n\nimport six\n\nfrom ..api import APIClient\nfrom ..errors import BuildError\nfrom ..utils.json_stream import json_stream\nfrom .resource import Collection, Model\n\n\nclass Image(Model):\n \"\"\"\n An image on the server.\n \"\"\"\n def __repr__(self):\n return \"<%s: '%s'>\" % (self.__class__.__name__, \"', '\".join(self.tags))\n\n @property\n def short_id(self):\n \"\"\"\n The ID of the image truncated to 10 characters, plus the ``sha256:``\n prefix.\n \"\"\"\n if self.id.startswith('sha256:'):\n return self.id[:17]\n return self.id[:10]\n\n @property\n def tags(self):\n \"\"\"\n The image's tags.\n \"\"\"\n return [\n tag for tag in self.attrs.get('RepoTags', [])\n if tag != '<none>:<none>'\n ]\n\n def history(self):\n \"\"\"\n Show the history of an image.\n\n Returns:\n (str): The history of the image.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n return self.client.api.history(self.id)\n\n def save(self):\n \"\"\"\n Get a tarball of an image. Similar to the ``docker save`` command.\n\n Returns:\n (urllib3.response.HTTPResponse object): The response from the\n daemon.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n Example:\n\n >>> image = cli.get(\"fedora:latest\")\n >>> resp = image.save()\n >>> f = open('/tmp/fedora-latest.tar', 'w')\n >>> f.write(resp.data)\n >>> f.close()\n \"\"\"\n return self.client.api.get_image(self.id)\n\n def tag(self, repository, tag=None, **kwargs):\n \"\"\"\n Tag this image into a repository. Similar to the ``docker tag``\n command.\n\n Args:\n repository (str): The repository to set for the tag\n tag (str): The tag name\n force (bool): Force\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n Returns:\n (bool): ``True`` if successful\n \"\"\"\n self.client.api.tag(self.id, repository, tag=tag, **kwargs)\n\n\nclass ImageCollection(Collection):\n model = Image\n\n def build(self, **kwargs):\n \"\"\"\n Build an image and return it. Similar to the ``docker build``\n command. Either ``path`` or ``fileobj`` must be set.\n\n If you have a tar file for the Docker build context (including a\n Dockerfile) already, pass a readable file-like object to ``fileobj``\n and also pass ``custom_context=True``. If the stream is compressed\n also, set ``encoding`` to the correct value (e.g ``gzip``).\n\n If you want to get the raw output of the build, use the\n :py:meth:`~docker.api.build.BuildApiMixin.build` method in the\n low-level API.\n\n Args:\n path (str): Path to the directory containing the Dockerfile\n fileobj: A file object to use as the Dockerfile. (Or a file-like\n object)\n tag (str): A tag to add to the final image\n quiet (bool): Whether to return the status\n nocache (bool): Don't use the cache when set to ``True``\n rm (bool): Remove intermediate containers. The ``docker build``\n command now defaults to ``--rm=true``, but we have kept the old\n default of `False` to preserve backward compatibility\n stream (bool): *Deprecated for API version > 1.8 (always True)*.\n Return a blocking generator you can iterate over to retrieve\n build output as it happens\n timeout (int): HTTP timeout\n custom_context (bool): Optional if using ``fileobj``\n encoding (str): The encoding for a stream. Set to ``gzip`` for\n compressing\n pull (bool): Downloads any updates to the FROM image in Dockerfiles\n forcerm (bool): Always remove intermediate containers, even after\n unsuccessful builds\n dockerfile (str): path within the build context to the Dockerfile\n buildargs (dict): A dictionary of build arguments\n container_limits (dict): A dictionary of limits applied to each\n container created by the build process. Valid keys:\n\n - memory (int): set memory limit for build\n - memswap (int): Total memory (memory + swap), -1 to disable\n swap\n - cpushares (int): CPU shares (relative weight)\n - cpusetcpus (str): CPUs in which to allow execution, e.g.,\n ``\"0-3\"``, ``\"0,1\"``\n decode (bool): If set to ``True``, the returned stream will be\n decoded into dicts on the fly. Default ``False``.\n\n Returns:\n (:py:class:`Image`): The built image.\n\n Raises:\n :py:class:`docker.errors.BuildError`\n If there is an error during the build.\n :py:class:`docker.errors.APIError`\n If the server returns any other error.\n ``TypeError``\n If neither ``path`` nor ``fileobj`` is specified.\n \"\"\"\n resp = self.client.api.build(**kwargs)\n if isinstance(resp, six.string_types):\n return self.get(resp)\n events = list(json_stream(resp))\n if not events:\n return BuildError('Unknown')\n event = events[-1]\n if 'stream' in event:\n match = re.search(r'Successfully built ([0-9a-f]+)',\n event.get('stream', ''))\n if match:\n image_id = match.group(1)\n return self.get(image_id)\n\n raise BuildError(event.get('error') or event)\n\n def get(self, name):\n \"\"\"\n Gets an image.\n\n Args:\n name (str): The name of the image.\n\n Returns:\n (:py:class:`Image`): The image.\n\n Raises:\n :py:class:`docker.errors.ImageNotFound` If the image does not\n exist.\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n return self.prepare_model(self.client.api.inspect_image(name))\n\n def list(self, name=None, all=False, filters=None):\n \"\"\"\n List images on the server.\n\n Args:\n name (str): Only show images belonging to the repository ``name``\n all (bool): Show intermediate image layers. By default, these are\n filtered out.\n filters (dict): Filters to be processed on the image list.\n Available filters:\n - ``dangling`` (bool)\n - ``label`` (str): format either ``key`` or ``key=value``\n\n Returns:\n (list of :py:class:`Image`): The images.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n resp = self.client.api.images(name=name, all=all, filters=filters)\n return [self.prepare_model(r) for r in resp]\n\n def load(self, data):\n \"\"\"\n Load an image that was previously saved using\n :py:meth:`~docker.models.images.Image.save` (or ``docker save``).\n Similar to ``docker load``.\n\n Args:\n data (binary): Image data to be loaded.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n return self.client.api.load_image(data)\n\n def pull(self, name, **kwargs):\n \"\"\"\n Pull an image of the given name and return it. Similar to the\n ``docker pull`` command.\n\n If you want to get the raw pull output, use the\n :py:meth:`~docker.api.image.ImageApiMixin.pull` method in the\n low-level API.\n\n Args:\n repository (str): The repository to pull\n tag (str): The tag to pull\n insecure_registry (bool): Use an insecure registry\n auth_config (dict): Override the credentials that\n :py:meth:`~docker.client.DockerClient.login` has set for\n this request. ``auth_config`` should contain the ``username``\n and ``password`` keys to be valid.\n\n Returns:\n (:py:class:`Image`): The image that has been pulled.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n Example:\n\n >>> image = client.images.pull('busybox')\n \"\"\"\n self.client.api.pull(name, **kwargs)\n return self.get(name)\n\n def push(self, repository, tag=None, **kwargs):\n return self.client.api.push(repository, tag=tag, **kwargs)\n push.__doc__ = APIClient.push.__doc__\n\n def remove(self, *args, **kwargs):\n self.client.api.remove_image(*args, **kwargs)\n remove.__doc__ = APIClient.remove_image.__doc__\n\n def search(self, *args, **kwargs):\n return self.client.api.search(*args, **kwargs)\n search.__doc__ = APIClient.search.__doc__\n", "path": "docker/models/images.py"}], "after_files": [{"content": "import re\n\nimport six\n\nfrom ..api import APIClient\nfrom ..errors import BuildError\nfrom ..utils.json_stream import json_stream\nfrom .resource import Collection, Model\n\n\nclass Image(Model):\n \"\"\"\n An image on the server.\n \"\"\"\n def __repr__(self):\n return \"<%s: '%s'>\" % (self.__class__.__name__, \"', '\".join(self.tags))\n\n @property\n def short_id(self):\n \"\"\"\n The ID of the image truncated to 10 characters, plus the ``sha256:``\n prefix.\n \"\"\"\n if self.id.startswith('sha256:'):\n return self.id[:17]\n return self.id[:10]\n\n @property\n def tags(self):\n \"\"\"\n The image's tags.\n \"\"\"\n tags = self.attrs.get('RepoTags')\n if tags is None:\n tags = []\n return [tag for tag in tags if tag != '<none>:<none>']\n\n def history(self):\n \"\"\"\n Show the history of an image.\n\n Returns:\n (str): The history of the image.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n return self.client.api.history(self.id)\n\n def save(self):\n \"\"\"\n Get a tarball of an image. Similar to the ``docker save`` command.\n\n Returns:\n (urllib3.response.HTTPResponse object): The response from the\n daemon.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n Example:\n\n >>> image = cli.get(\"fedora:latest\")\n >>> resp = image.save()\n >>> f = open('/tmp/fedora-latest.tar', 'w')\n >>> f.write(resp.data)\n >>> f.close()\n \"\"\"\n return self.client.api.get_image(self.id)\n\n def tag(self, repository, tag=None, **kwargs):\n \"\"\"\n Tag this image into a repository. Similar to the ``docker tag``\n command.\n\n Args:\n repository (str): The repository to set for the tag\n tag (str): The tag name\n force (bool): Force\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n Returns:\n (bool): ``True`` if successful\n \"\"\"\n self.client.api.tag(self.id, repository, tag=tag, **kwargs)\n\n\nclass ImageCollection(Collection):\n model = Image\n\n def build(self, **kwargs):\n \"\"\"\n Build an image and return it. Similar to the ``docker build``\n command. Either ``path`` or ``fileobj`` must be set.\n\n If you have a tar file for the Docker build context (including a\n Dockerfile) already, pass a readable file-like object to ``fileobj``\n and also pass ``custom_context=True``. If the stream is compressed\n also, set ``encoding`` to the correct value (e.g ``gzip``).\n\n If you want to get the raw output of the build, use the\n :py:meth:`~docker.api.build.BuildApiMixin.build` method in the\n low-level API.\n\n Args:\n path (str): Path to the directory containing the Dockerfile\n fileobj: A file object to use as the Dockerfile. (Or a file-like\n object)\n tag (str): A tag to add to the final image\n quiet (bool): Whether to return the status\n nocache (bool): Don't use the cache when set to ``True``\n rm (bool): Remove intermediate containers. The ``docker build``\n command now defaults to ``--rm=true``, but we have kept the old\n default of `False` to preserve backward compatibility\n stream (bool): *Deprecated for API version > 1.8 (always True)*.\n Return a blocking generator you can iterate over to retrieve\n build output as it happens\n timeout (int): HTTP timeout\n custom_context (bool): Optional if using ``fileobj``\n encoding (str): The encoding for a stream. Set to ``gzip`` for\n compressing\n pull (bool): Downloads any updates to the FROM image in Dockerfiles\n forcerm (bool): Always remove intermediate containers, even after\n unsuccessful builds\n dockerfile (str): path within the build context to the Dockerfile\n buildargs (dict): A dictionary of build arguments\n container_limits (dict): A dictionary of limits applied to each\n container created by the build process. Valid keys:\n\n - memory (int): set memory limit for build\n - memswap (int): Total memory (memory + swap), -1 to disable\n swap\n - cpushares (int): CPU shares (relative weight)\n - cpusetcpus (str): CPUs in which to allow execution, e.g.,\n ``\"0-3\"``, ``\"0,1\"``\n decode (bool): If set to ``True``, the returned stream will be\n decoded into dicts on the fly. Default ``False``.\n\n Returns:\n (:py:class:`Image`): The built image.\n\n Raises:\n :py:class:`docker.errors.BuildError`\n If there is an error during the build.\n :py:class:`docker.errors.APIError`\n If the server returns any other error.\n ``TypeError``\n If neither ``path`` nor ``fileobj`` is specified.\n \"\"\"\n resp = self.client.api.build(**kwargs)\n if isinstance(resp, six.string_types):\n return self.get(resp)\n events = list(json_stream(resp))\n if not events:\n return BuildError('Unknown')\n event = events[-1]\n if 'stream' in event:\n match = re.search(r'Successfully built ([0-9a-f]+)',\n event.get('stream', ''))\n if match:\n image_id = match.group(1)\n return self.get(image_id)\n\n raise BuildError(event.get('error') or event)\n\n def get(self, name):\n \"\"\"\n Gets an image.\n\n Args:\n name (str): The name of the image.\n\n Returns:\n (:py:class:`Image`): The image.\n\n Raises:\n :py:class:`docker.errors.ImageNotFound` If the image does not\n exist.\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n return self.prepare_model(self.client.api.inspect_image(name))\n\n def list(self, name=None, all=False, filters=None):\n \"\"\"\n List images on the server.\n\n Args:\n name (str): Only show images belonging to the repository ``name``\n all (bool): Show intermediate image layers. By default, these are\n filtered out.\n filters (dict): Filters to be processed on the image list.\n Available filters:\n - ``dangling`` (bool)\n - ``label`` (str): format either ``key`` or ``key=value``\n\n Returns:\n (list of :py:class:`Image`): The images.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n resp = self.client.api.images(name=name, all=all, filters=filters)\n return [self.prepare_model(r) for r in resp]\n\n def load(self, data):\n \"\"\"\n Load an image that was previously saved using\n :py:meth:`~docker.models.images.Image.save` (or ``docker save``).\n Similar to ``docker load``.\n\n Args:\n data (binary): Image data to be loaded.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n return self.client.api.load_image(data)\n\n def pull(self, name, **kwargs):\n \"\"\"\n Pull an image of the given name and return it. Similar to the\n ``docker pull`` command.\n\n If you want to get the raw pull output, use the\n :py:meth:`~docker.api.image.ImageApiMixin.pull` method in the\n low-level API.\n\n Args:\n repository (str): The repository to pull\n tag (str): The tag to pull\n insecure_registry (bool): Use an insecure registry\n auth_config (dict): Override the credentials that\n :py:meth:`~docker.client.DockerClient.login` has set for\n this request. ``auth_config`` should contain the ``username``\n and ``password`` keys to be valid.\n\n Returns:\n (:py:class:`Image`): The image that has been pulled.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n Example:\n\n >>> image = client.images.pull('busybox')\n \"\"\"\n self.client.api.pull(name, **kwargs)\n return self.get(name)\n\n def push(self, repository, tag=None, **kwargs):\n return self.client.api.push(repository, tag=tag, **kwargs)\n push.__doc__ = APIClient.push.__doc__\n\n def remove(self, *args, **kwargs):\n self.client.api.remove_image(*args, **kwargs)\n remove.__doc__ = APIClient.remove_image.__doc__\n\n def search(self, *args, **kwargs):\n return self.client.api.search(*args, **kwargs)\n search.__doc__ = APIClient.search.__doc__\n", "path": "docker/models/images.py"}]}
3,381
134
gh_patches_debug_23640
rasdani/github-patches
git_diff
ipython__ipython-7454
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- When kernel is missing, notebook fails to load Using master as of 0f92b92748b0dc07dd, I tried loading a notebook with a kernel I don't have configured (Python 3). Instead of getting the message indicating I should install it and at least being able to read the notebook, the popup said ``` Notebook failed to load The error was: TypeError: Cannot read property 'resources' of undefined ``` And the notebook didn't load at all. This is obviously a hard blocker for release. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `IPython/kernel/kernelspec.py` Content: ``` 1 import io 2 import json 3 import os 4 import shutil 5 import sys 6 7 pjoin = os.path.join 8 9 from IPython.utils.path import get_ipython_dir 10 from IPython.utils.py3compat import PY3 11 from IPython.utils.traitlets import HasTraits, List, Unicode, Dict, Any 12 from .launcher import make_ipkernel_cmd 13 14 if os.name == 'nt': 15 programdata = os.environ.get('PROGRAMDATA', None) 16 if programdata: 17 SYSTEM_KERNEL_DIRS = [pjoin(programdata, 'jupyter', 'kernels')] 18 else: # PROGRAMDATA is not defined by default on XP. 19 SYSTEM_KERNEL_DIRS = [] 20 else: 21 SYSTEM_KERNEL_DIRS = ["/usr/share/jupyter/kernels", 22 "/usr/local/share/jupyter/kernels", 23 ] 24 25 NATIVE_KERNEL_NAME = 'python3' if PY3 else 'python2' 26 27 def _pythonfirst(s): 28 "Sort key function that will put strings starting with 'python' first." 29 if s == NATIVE_KERNEL_NAME: 30 return ' ' + s # Two spaces to sort this first of all 31 elif s.startswith('python'): 32 # Space is not valid in kernel names, so this should sort first 33 return ' ' + s 34 return s 35 36 class KernelSpec(HasTraits): 37 argv = List() 38 display_name = Unicode() 39 env = Dict() 40 resource_dir = Unicode() 41 42 @classmethod 43 def from_resource_dir(cls, resource_dir): 44 """Create a KernelSpec object by reading kernel.json 45 46 Pass the path to the *directory* containing kernel.json. 47 """ 48 kernel_file = pjoin(resource_dir, 'kernel.json') 49 with io.open(kernel_file, 'r', encoding='utf-8') as f: 50 kernel_dict = json.load(f) 51 return cls(resource_dir=resource_dir, **kernel_dict) 52 53 def to_dict(self): 54 d = dict(argv=self.argv, 55 env=self.env, 56 display_name=self.display_name, 57 ) 58 59 return d 60 61 def to_json(self): 62 return json.dumps(self.to_dict()) 63 64 def _is_kernel_dir(path): 65 """Is ``path`` a kernel directory?""" 66 return os.path.isdir(path) and os.path.isfile(pjoin(path, 'kernel.json')) 67 68 def _list_kernels_in(dir): 69 """Return a mapping of kernel names to resource directories from dir. 70 71 If dir is None or does not exist, returns an empty dict. 72 """ 73 if dir is None or not os.path.isdir(dir): 74 return {} 75 return {f.lower(): pjoin(dir, f) for f in os.listdir(dir) 76 if _is_kernel_dir(pjoin(dir, f))} 77 78 class NoSuchKernel(KeyError): 79 def __init__(self, name): 80 self.name = name 81 82 class KernelSpecManager(HasTraits): 83 ipython_dir = Unicode() 84 def _ipython_dir_default(self): 85 return get_ipython_dir() 86 87 user_kernel_dir = Unicode() 88 def _user_kernel_dir_default(self): 89 return pjoin(self.ipython_dir, 'kernels') 90 91 @property 92 def env_kernel_dir(self): 93 return pjoin(sys.prefix, 'share', 'jupyter', 'kernels') 94 95 kernel_dirs = List( 96 help="List of kernel directories to search. Later ones take priority over earlier." 97 ) 98 def _kernel_dirs_default(self): 99 dirs = SYSTEM_KERNEL_DIRS[:] 100 if self.env_kernel_dir not in dirs: 101 dirs.append(self.env_kernel_dir) 102 dirs.append(self.user_kernel_dir) 103 return dirs 104 105 @property 106 def _native_kernel_dict(self): 107 """Makes a kernel directory for the native kernel. 108 109 The native kernel is the kernel using the same Python runtime as this 110 process. This will put its information in the user kernels directory. 111 """ 112 return {'argv': make_ipkernel_cmd(), 113 'display_name': 'Python %i' % (3 if PY3 else 2), 114 } 115 116 @property 117 def _native_kernel_resource_dir(self): 118 return pjoin(os.path.dirname(__file__), 'resources') 119 120 def find_kernel_specs(self): 121 """Returns a dict mapping kernel names to resource directories.""" 122 d = {} 123 for kernel_dir in self.kernel_dirs: 124 d.update(_list_kernels_in(kernel_dir)) 125 126 d[NATIVE_KERNEL_NAME] = self._native_kernel_resource_dir 127 return d 128 # TODO: Caching? 129 130 def get_kernel_spec(self, kernel_name): 131 """Returns a :class:`KernelSpec` instance for the given kernel_name. 132 133 Raises :exc:`NoSuchKernel` if the given kernel name is not found. 134 """ 135 if kernel_name in {'python', NATIVE_KERNEL_NAME}: 136 return KernelSpec(resource_dir=self._native_kernel_resource_dir, 137 **self._native_kernel_dict) 138 139 d = self.find_kernel_specs() 140 try: 141 resource_dir = d[kernel_name.lower()] 142 except KeyError: 143 raise NoSuchKernel(kernel_name) 144 return KernelSpec.from_resource_dir(resource_dir) 145 146 def _get_destination_dir(self, kernel_name, user=False): 147 if user: 148 return os.path.join(self.user_kernel_dir, kernel_name) 149 else: 150 if SYSTEM_KERNEL_DIRS: 151 return os.path.join(SYSTEM_KERNEL_DIRS[-1], kernel_name) 152 else: 153 raise EnvironmentError("No system kernel directory is available") 154 155 156 def install_kernel_spec(self, source_dir, kernel_name=None, user=False, 157 replace=False): 158 """Install a kernel spec by copying its directory. 159 160 If ``kernel_name`` is not given, the basename of ``source_dir`` will 161 be used. 162 163 If ``user`` is False, it will attempt to install into the systemwide 164 kernel registry. If the process does not have appropriate permissions, 165 an :exc:`OSError` will be raised. 166 167 If ``replace`` is True, this will replace an existing kernel of the same 168 name. Otherwise, if the destination already exists, an :exc:`OSError` 169 will be raised. 170 """ 171 if not kernel_name: 172 kernel_name = os.path.basename(source_dir) 173 kernel_name = kernel_name.lower() 174 175 destination = self._get_destination_dir(kernel_name, user=user) 176 177 if replace and os.path.isdir(destination): 178 shutil.rmtree(destination) 179 180 shutil.copytree(source_dir, destination) 181 182 def install_native_kernel_spec(self, user=False): 183 """Install the native kernel spec to the filesystem 184 185 This allows a Python 3 frontend to use a Python 2 kernel, or vice versa. 186 The kernelspec will be written pointing to the Python executable on 187 which this is run. 188 189 If ``user`` is False, it will attempt to install into the systemwide 190 kernel registry. If the process does not have appropriate permissions, 191 an :exc:`OSError` will be raised. 192 """ 193 path = self._get_destination_dir(NATIVE_KERNEL_NAME, user=user) 194 os.makedirs(path, mode=0o755) 195 with open(pjoin(path, 'kernel.json'), 'w') as f: 196 json.dump(self._native_kernel_dict, f, indent=1) 197 copy_from = self._native_kernel_resource_dir 198 for file in os.listdir(copy_from): 199 shutil.copy(pjoin(copy_from, file), path) 200 return path 201 202 def find_kernel_specs(): 203 """Returns a dict mapping kernel names to resource directories.""" 204 return KernelSpecManager().find_kernel_specs() 205 206 def get_kernel_spec(kernel_name): 207 """Returns a :class:`KernelSpec` instance for the given kernel_name. 208 209 Raises KeyError if the given kernel name is not found. 210 """ 211 return KernelSpecManager().get_kernel_spec(kernel_name) 212 213 def install_kernel_spec(source_dir, kernel_name=None, user=False, replace=False): 214 return KernelSpecManager().install_kernel_spec(source_dir, kernel_name, 215 user, replace) 216 217 install_kernel_spec.__doc__ = KernelSpecManager.install_kernel_spec.__doc__ 218 219 def install_native_kernel_spec(user=False): 220 return KernelSpecManager().install_native_kernel_spec(user=user) 221 222 install_native_kernel_spec.__doc__ = KernelSpecManager.install_native_kernel_spec.__doc__ 223 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/IPython/kernel/kernelspec.py b/IPython/kernel/kernelspec.py --- a/IPython/kernel/kernelspec.py +++ b/IPython/kernel/kernelspec.py @@ -36,6 +36,7 @@ class KernelSpec(HasTraits): argv = List() display_name = Unicode() + language = Unicode() env = Dict() resource_dir = Unicode() @@ -54,6 +55,7 @@ d = dict(argv=self.argv, env=self.env, display_name=self.display_name, + language=self.language, ) return d @@ -109,8 +111,10 @@ The native kernel is the kernel using the same Python runtime as this process. This will put its information in the user kernels directory. """ - return {'argv': make_ipkernel_cmd(), + return { + 'argv': make_ipkernel_cmd(), 'display_name': 'Python %i' % (3 if PY3 else 2), + 'language': 'python', } @property
{"golden_diff": "diff --git a/IPython/kernel/kernelspec.py b/IPython/kernel/kernelspec.py\n--- a/IPython/kernel/kernelspec.py\n+++ b/IPython/kernel/kernelspec.py\n@@ -36,6 +36,7 @@\n class KernelSpec(HasTraits):\n argv = List()\n display_name = Unicode()\n+ language = Unicode()\n env = Dict()\n resource_dir = Unicode()\n \n@@ -54,6 +55,7 @@\n d = dict(argv=self.argv,\n env=self.env,\n display_name=self.display_name,\n+ language=self.language,\n )\n \n return d\n@@ -109,8 +111,10 @@\n The native kernel is the kernel using the same Python runtime as this\n process. This will put its information in the user kernels directory.\n \"\"\"\n- return {'argv': make_ipkernel_cmd(),\n+ return {\n+ 'argv': make_ipkernel_cmd(),\n 'display_name': 'Python %i' % (3 if PY3 else 2),\n+ 'language': 'python',\n }\n \n @property\n", "issue": "When kernel is missing, notebook fails to load\nUsing master as of 0f92b92748b0dc07dd, I tried loading a notebook with a kernel I don't have configured (Python 3). Instead of getting the message indicating I should install it and at least being able to read the notebook, the popup said\n\n```\nNotebook failed to load\nThe error was:\nTypeError: Cannot read property 'resources' of undefined\n```\n\nAnd the notebook didn't load at all.\n\nThis is obviously a hard blocker for release.\n\n", "before_files": [{"content": "import io\nimport json\nimport os\nimport shutil\nimport sys\n\npjoin = os.path.join\n\nfrom IPython.utils.path import get_ipython_dir\nfrom IPython.utils.py3compat import PY3\nfrom IPython.utils.traitlets import HasTraits, List, Unicode, Dict, Any\nfrom .launcher import make_ipkernel_cmd\n\nif os.name == 'nt':\n programdata = os.environ.get('PROGRAMDATA', None)\n if programdata:\n SYSTEM_KERNEL_DIRS = [pjoin(programdata, 'jupyter', 'kernels')]\n else: # PROGRAMDATA is not defined by default on XP.\n SYSTEM_KERNEL_DIRS = []\nelse:\n SYSTEM_KERNEL_DIRS = [\"/usr/share/jupyter/kernels\",\n \"/usr/local/share/jupyter/kernels\",\n ]\n \nNATIVE_KERNEL_NAME = 'python3' if PY3 else 'python2'\n\ndef _pythonfirst(s):\n \"Sort key function that will put strings starting with 'python' first.\"\n if s == NATIVE_KERNEL_NAME:\n return ' ' + s # Two spaces to sort this first of all\n elif s.startswith('python'):\n # Space is not valid in kernel names, so this should sort first\n return ' ' + s\n return s\n\nclass KernelSpec(HasTraits):\n argv = List()\n display_name = Unicode()\n env = Dict()\n resource_dir = Unicode()\n \n @classmethod\n def from_resource_dir(cls, resource_dir):\n \"\"\"Create a KernelSpec object by reading kernel.json\n \n Pass the path to the *directory* containing kernel.json.\n \"\"\"\n kernel_file = pjoin(resource_dir, 'kernel.json')\n with io.open(kernel_file, 'r', encoding='utf-8') as f:\n kernel_dict = json.load(f)\n return cls(resource_dir=resource_dir, **kernel_dict)\n \n def to_dict(self):\n d = dict(argv=self.argv,\n env=self.env,\n display_name=self.display_name,\n )\n\n return d\n\n def to_json(self):\n return json.dumps(self.to_dict())\n\ndef _is_kernel_dir(path):\n \"\"\"Is ``path`` a kernel directory?\"\"\"\n return os.path.isdir(path) and os.path.isfile(pjoin(path, 'kernel.json'))\n\ndef _list_kernels_in(dir):\n \"\"\"Return a mapping of kernel names to resource directories from dir.\n \n If dir is None or does not exist, returns an empty dict.\n \"\"\"\n if dir is None or not os.path.isdir(dir):\n return {}\n return {f.lower(): pjoin(dir, f) for f in os.listdir(dir)\n if _is_kernel_dir(pjoin(dir, f))}\n\nclass NoSuchKernel(KeyError):\n def __init__(self, name):\n self.name = name\n\nclass KernelSpecManager(HasTraits):\n ipython_dir = Unicode()\n def _ipython_dir_default(self):\n return get_ipython_dir()\n\n user_kernel_dir = Unicode()\n def _user_kernel_dir_default(self):\n return pjoin(self.ipython_dir, 'kernels')\n\n @property\n def env_kernel_dir(self):\n return pjoin(sys.prefix, 'share', 'jupyter', 'kernels')\n \n kernel_dirs = List(\n help=\"List of kernel directories to search. Later ones take priority over earlier.\" \n ) \n def _kernel_dirs_default(self):\n dirs = SYSTEM_KERNEL_DIRS[:]\n if self.env_kernel_dir not in dirs:\n dirs.append(self.env_kernel_dir)\n dirs.append(self.user_kernel_dir)\n return dirs\n\n @property\n def _native_kernel_dict(self):\n \"\"\"Makes a kernel directory for the native kernel.\n \n The native kernel is the kernel using the same Python runtime as this\n process. This will put its information in the user kernels directory.\n \"\"\"\n return {'argv': make_ipkernel_cmd(),\n 'display_name': 'Python %i' % (3 if PY3 else 2),\n }\n\n @property\n def _native_kernel_resource_dir(self):\n return pjoin(os.path.dirname(__file__), 'resources')\n\n def find_kernel_specs(self):\n \"\"\"Returns a dict mapping kernel names to resource directories.\"\"\"\n d = {}\n for kernel_dir in self.kernel_dirs:\n d.update(_list_kernels_in(kernel_dir))\n\n d[NATIVE_KERNEL_NAME] = self._native_kernel_resource_dir\n return d\n # TODO: Caching?\n\n def get_kernel_spec(self, kernel_name):\n \"\"\"Returns a :class:`KernelSpec` instance for the given kernel_name.\n \n Raises :exc:`NoSuchKernel` if the given kernel name is not found.\n \"\"\"\n if kernel_name in {'python', NATIVE_KERNEL_NAME}:\n return KernelSpec(resource_dir=self._native_kernel_resource_dir,\n **self._native_kernel_dict)\n\n d = self.find_kernel_specs()\n try:\n resource_dir = d[kernel_name.lower()]\n except KeyError:\n raise NoSuchKernel(kernel_name)\n return KernelSpec.from_resource_dir(resource_dir)\n \n def _get_destination_dir(self, kernel_name, user=False):\n if user:\n return os.path.join(self.user_kernel_dir, kernel_name)\n else:\n if SYSTEM_KERNEL_DIRS:\n return os.path.join(SYSTEM_KERNEL_DIRS[-1], kernel_name)\n else:\n raise EnvironmentError(\"No system kernel directory is available\")\n\n\n def install_kernel_spec(self, source_dir, kernel_name=None, user=False,\n replace=False):\n \"\"\"Install a kernel spec by copying its directory.\n \n If ``kernel_name`` is not given, the basename of ``source_dir`` will\n be used.\n \n If ``user`` is False, it will attempt to install into the systemwide\n kernel registry. If the process does not have appropriate permissions,\n an :exc:`OSError` will be raised.\n \n If ``replace`` is True, this will replace an existing kernel of the same\n name. Otherwise, if the destination already exists, an :exc:`OSError`\n will be raised.\n \"\"\"\n if not kernel_name:\n kernel_name = os.path.basename(source_dir)\n kernel_name = kernel_name.lower()\n \n destination = self._get_destination_dir(kernel_name, user=user)\n\n if replace and os.path.isdir(destination):\n shutil.rmtree(destination)\n\n shutil.copytree(source_dir, destination)\n\n def install_native_kernel_spec(self, user=False):\n \"\"\"Install the native kernel spec to the filesystem\n \n This allows a Python 3 frontend to use a Python 2 kernel, or vice versa.\n The kernelspec will be written pointing to the Python executable on\n which this is run.\n \n If ``user`` is False, it will attempt to install into the systemwide\n kernel registry. If the process does not have appropriate permissions, \n an :exc:`OSError` will be raised.\n \"\"\"\n path = self._get_destination_dir(NATIVE_KERNEL_NAME, user=user)\n os.makedirs(path, mode=0o755)\n with open(pjoin(path, 'kernel.json'), 'w') as f:\n json.dump(self._native_kernel_dict, f, indent=1)\n copy_from = self._native_kernel_resource_dir\n for file in os.listdir(copy_from):\n shutil.copy(pjoin(copy_from, file), path)\n return path\n\ndef find_kernel_specs():\n \"\"\"Returns a dict mapping kernel names to resource directories.\"\"\"\n return KernelSpecManager().find_kernel_specs()\n\ndef get_kernel_spec(kernel_name):\n \"\"\"Returns a :class:`KernelSpec` instance for the given kernel_name.\n \n Raises KeyError if the given kernel name is not found.\n \"\"\"\n return KernelSpecManager().get_kernel_spec(kernel_name)\n\ndef install_kernel_spec(source_dir, kernel_name=None, user=False, replace=False):\n return KernelSpecManager().install_kernel_spec(source_dir, kernel_name,\n user, replace)\n\ninstall_kernel_spec.__doc__ = KernelSpecManager.install_kernel_spec.__doc__\n\ndef install_native_kernel_spec(user=False):\n return KernelSpecManager().install_native_kernel_spec(user=user)\n\ninstall_native_kernel_spec.__doc__ = KernelSpecManager.install_native_kernel_spec.__doc__\n", "path": "IPython/kernel/kernelspec.py"}], "after_files": [{"content": "import io\nimport json\nimport os\nimport shutil\nimport sys\n\npjoin = os.path.join\n\nfrom IPython.utils.path import get_ipython_dir\nfrom IPython.utils.py3compat import PY3\nfrom IPython.utils.traitlets import HasTraits, List, Unicode, Dict, Any\nfrom .launcher import make_ipkernel_cmd\n\nif os.name == 'nt':\n programdata = os.environ.get('PROGRAMDATA', None)\n if programdata:\n SYSTEM_KERNEL_DIRS = [pjoin(programdata, 'jupyter', 'kernels')]\n else: # PROGRAMDATA is not defined by default on XP.\n SYSTEM_KERNEL_DIRS = []\nelse:\n SYSTEM_KERNEL_DIRS = [\"/usr/share/jupyter/kernels\",\n \"/usr/local/share/jupyter/kernels\",\n ]\n \nNATIVE_KERNEL_NAME = 'python3' if PY3 else 'python2'\n\ndef _pythonfirst(s):\n \"Sort key function that will put strings starting with 'python' first.\"\n if s == NATIVE_KERNEL_NAME:\n return ' ' + s # Two spaces to sort this first of all\n elif s.startswith('python'):\n # Space is not valid in kernel names, so this should sort first\n return ' ' + s\n return s\n\nclass KernelSpec(HasTraits):\n argv = List()\n display_name = Unicode()\n language = Unicode()\n env = Dict()\n resource_dir = Unicode()\n \n @classmethod\n def from_resource_dir(cls, resource_dir):\n \"\"\"Create a KernelSpec object by reading kernel.json\n \n Pass the path to the *directory* containing kernel.json.\n \"\"\"\n kernel_file = pjoin(resource_dir, 'kernel.json')\n with io.open(kernel_file, 'r', encoding='utf-8') as f:\n kernel_dict = json.load(f)\n return cls(resource_dir=resource_dir, **kernel_dict)\n \n def to_dict(self):\n d = dict(argv=self.argv,\n env=self.env,\n display_name=self.display_name,\n language=self.language,\n )\n\n return d\n\n def to_json(self):\n return json.dumps(self.to_dict())\n\ndef _is_kernel_dir(path):\n \"\"\"Is ``path`` a kernel directory?\"\"\"\n return os.path.isdir(path) and os.path.isfile(pjoin(path, 'kernel.json'))\n\ndef _list_kernels_in(dir):\n \"\"\"Return a mapping of kernel names to resource directories from dir.\n \n If dir is None or does not exist, returns an empty dict.\n \"\"\"\n if dir is None or not os.path.isdir(dir):\n return {}\n return {f.lower(): pjoin(dir, f) for f in os.listdir(dir)\n if _is_kernel_dir(pjoin(dir, f))}\n\nclass NoSuchKernel(KeyError):\n def __init__(self, name):\n self.name = name\n\nclass KernelSpecManager(HasTraits):\n ipython_dir = Unicode()\n def _ipython_dir_default(self):\n return get_ipython_dir()\n\n user_kernel_dir = Unicode()\n def _user_kernel_dir_default(self):\n return pjoin(self.ipython_dir, 'kernels')\n\n @property\n def env_kernel_dir(self):\n return pjoin(sys.prefix, 'share', 'jupyter', 'kernels')\n \n kernel_dirs = List(\n help=\"List of kernel directories to search. Later ones take priority over earlier.\" \n ) \n def _kernel_dirs_default(self):\n dirs = SYSTEM_KERNEL_DIRS[:]\n if self.env_kernel_dir not in dirs:\n dirs.append(self.env_kernel_dir)\n dirs.append(self.user_kernel_dir)\n return dirs\n\n @property\n def _native_kernel_dict(self):\n \"\"\"Makes a kernel directory for the native kernel.\n \n The native kernel is the kernel using the same Python runtime as this\n process. This will put its information in the user kernels directory.\n \"\"\"\n return {\n 'argv': make_ipkernel_cmd(),\n 'display_name': 'Python %i' % (3 if PY3 else 2),\n 'language': 'python',\n }\n\n @property\n def _native_kernel_resource_dir(self):\n return pjoin(os.path.dirname(__file__), 'resources')\n\n def find_kernel_specs(self):\n \"\"\"Returns a dict mapping kernel names to resource directories.\"\"\"\n d = {}\n for kernel_dir in self.kernel_dirs:\n d.update(_list_kernels_in(kernel_dir))\n\n d[NATIVE_KERNEL_NAME] = self._native_kernel_resource_dir\n return d\n # TODO: Caching?\n\n def get_kernel_spec(self, kernel_name):\n \"\"\"Returns a :class:`KernelSpec` instance for the given kernel_name.\n \n Raises :exc:`NoSuchKernel` if the given kernel name is not found.\n \"\"\"\n if kernel_name in {'python', NATIVE_KERNEL_NAME}:\n return KernelSpec(resource_dir=self._native_kernel_resource_dir,\n **self._native_kernel_dict)\n\n d = self.find_kernel_specs()\n try:\n resource_dir = d[kernel_name.lower()]\n except KeyError:\n raise NoSuchKernel(kernel_name)\n return KernelSpec.from_resource_dir(resource_dir)\n \n def _get_destination_dir(self, kernel_name, user=False):\n if user:\n return os.path.join(self.user_kernel_dir, kernel_name)\n else:\n if SYSTEM_KERNEL_DIRS:\n return os.path.join(SYSTEM_KERNEL_DIRS[-1], kernel_name)\n else:\n raise EnvironmentError(\"No system kernel directory is available\")\n\n\n def install_kernel_spec(self, source_dir, kernel_name=None, user=False,\n replace=False):\n \"\"\"Install a kernel spec by copying its directory.\n \n If ``kernel_name`` is not given, the basename of ``source_dir`` will\n be used.\n \n If ``user`` is False, it will attempt to install into the systemwide\n kernel registry. If the process does not have appropriate permissions,\n an :exc:`OSError` will be raised.\n \n If ``replace`` is True, this will replace an existing kernel of the same\n name. Otherwise, if the destination already exists, an :exc:`OSError`\n will be raised.\n \"\"\"\n if not kernel_name:\n kernel_name = os.path.basename(source_dir)\n kernel_name = kernel_name.lower()\n \n destination = self._get_destination_dir(kernel_name, user=user)\n\n if replace and os.path.isdir(destination):\n shutil.rmtree(destination)\n\n shutil.copytree(source_dir, destination)\n\n def install_native_kernel_spec(self, user=False):\n \"\"\"Install the native kernel spec to the filesystem\n \n This allows a Python 3 frontend to use a Python 2 kernel, or vice versa.\n The kernelspec will be written pointing to the Python executable on\n which this is run.\n \n If ``user`` is False, it will attempt to install into the systemwide\n kernel registry. If the process does not have appropriate permissions, \n an :exc:`OSError` will be raised.\n \"\"\"\n path = self._get_destination_dir(NATIVE_KERNEL_NAME, user=user)\n os.makedirs(path, mode=0o755)\n with open(pjoin(path, 'kernel.json'), 'w') as f:\n json.dump(self._native_kernel_dict, f, indent=1)\n copy_from = self._native_kernel_resource_dir\n for file in os.listdir(copy_from):\n shutil.copy(pjoin(copy_from, file), path)\n return path\n\ndef find_kernel_specs():\n \"\"\"Returns a dict mapping kernel names to resource directories.\"\"\"\n return KernelSpecManager().find_kernel_specs()\n\ndef get_kernel_spec(kernel_name):\n \"\"\"Returns a :class:`KernelSpec` instance for the given kernel_name.\n \n Raises KeyError if the given kernel name is not found.\n \"\"\"\n return KernelSpecManager().get_kernel_spec(kernel_name)\n\ndef install_kernel_spec(source_dir, kernel_name=None, user=False, replace=False):\n return KernelSpecManager().install_kernel_spec(source_dir, kernel_name,\n user, replace)\n\ninstall_kernel_spec.__doc__ = KernelSpecManager.install_kernel_spec.__doc__\n\ndef install_native_kernel_spec(user=False):\n return KernelSpecManager().install_native_kernel_spec(user=user)\n\ninstall_native_kernel_spec.__doc__ = KernelSpecManager.install_native_kernel_spec.__doc__\n", "path": "IPython/kernel/kernelspec.py"}]}
2,675
239
gh_patches_debug_38704
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-3305
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Spider wellstar is broken During the global build at 2021-10-20-14-42-48, spider **wellstar** failed with **0 features** and **1 errors**. Here's [the log](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/logs/wellstar.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/wellstar.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/wellstar.geojson)) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `locations/spiders/wellstar.py` Content: ``` 1 import json 2 import scrapy 3 from locations.items import GeojsonPointItem 4 from locations.hours import OpeningHours 5 6 DAYS_NAME = { 7 'Monday': 'Mo', 8 'Tuesday': 'Tu', 9 'Wednesday': 'We', 10 'Wedsenday': 'We', 11 'Thursday': 'Th', 12 'Friday': 'Fr', 13 'Saturday': 'Sa', 14 'Sunday': 'Su' 15 } 16 17 class WellStarSpider(scrapy.Spider): 18 name = "wellstar" 19 item_attributes = {'brand': "WellStar Health System"} 20 allowed_domains = ["www.wellstar.org/"] 21 start_urls = ('https://www.wellstar.org/locations',) 22 23 def start_requests(self): 24 url = 'https://www.wellstar.org/api/LocationSearchApi/GetLocations' 25 26 headers = { 27 "authority": "www.wellstar.org", 28 "sec-ch-ua": "\"Chromium\";v=\"88\", \"Google Chrome\";v=\"88\", \";Not A Brand\";v=\"99\"", 29 "accept": "application/json, text/javascript, */*; q=0.01", 30 "sec-ch-ua-mobile": "?0", 31 "__requestverificationtoken": "Y0cuJinQRzvtT-kjKKyPZOlZPu0JU48XSyIvEl1p__yzZF4621s-0YUAkImeXkjGgLB7GyZlv8rYby0uII9LChhFlrKiWiDb2Va5skX-TmM1", 32 "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.192 Safari/537.36", 33 "content-type": "application/json", 34 "origin": "https://www.wellstar.org", 35 "sec-fetch-site": "same-origin", 36 "sec-fetch-mode": "cors", 37 "sec-fetch-dest": "empty", 38 "referer": "https://www.wellstar.org/locations", 39 "accept-language": "en-US,en;q=0.9" 40 } 41 42 cookies = { 43 "ASP.NET_SessionId": "24p11pgzrg42p4ghtqwi40gl", 44 "__RequestVerificationToken": "3agF8JY4gLGMg_K1j7eXWtME4IsQmLPf1snFPF0d132SBNJG2JsIHpVgCAJ1cVysztrjgXuRrCmeMYp9kJjHDYNZlqIzc_ZYwq-TmGIeBTk1", 45 "sxa_site": "WellStarOrg", 46 "ARRAffinity": "e29b803b013c7e5c76a254365bc4ef36ddc3a862faece1b8869fa46de635b9c5", 47 "ARRAffinitySameSite": "e29b803b013c7e5c76a254365bc4ef36ddc3a862faece1b8869fa46de635b9c5", 48 "_gid": "GA1.2.1358908497.1614958557", 49 "_fbp": "fb.1.1614958557337.1409275872", 50 "_gcl_au": "1.1.1318529675.1614958557", 51 "SC_ANALYTICS_GLOBAL_COOKIE": "da85aa2fb9ad429eb899c581fcb2376d|True", 52 "fs_uid": "rs.fullstory.com#10J51H#5729266957271040:6498357093580800/1646494557", 53 "isIUnderstand": "true", 54 "searchedlatitude": "33.7489954", 55 "searchedlongitude": "-84.3879824", 56 "latitude": "30.259263699999998", 57 "longitude": "-97.7393472", 58 "_gat_UA-9373927-15": "1", 59 "_gat_UA-9373927-14": "1", 60 "_ga_BM192ND27H": "GS1.1.1614983138.7.1.1614983410.0", 61 "_ga": "GA1.1.688932849.1614958557" 62 } 63 64 body = '{"searchTerm":"","searchFilter":""}' 65 66 yield scrapy.Request( 67 url=url, 68 method='POST', 69 dont_filter=True, 70 cookies=cookies, 71 headers=headers, 72 body=body, 73 callback=self.parse 74 ) 75 76 def parse_hours(self, hours): 77 78 opening_hours = OpeningHours() 79 80 if hours: 81 for dt in hours: 82 try: 83 day = DAYS_NAME[dt.split(':')[0]] 84 time = "".join(dt.split(':')[1:4]) 85 open_time, close_time = time.split('-') 86 opening_hours.add_range(day=day, 87 open_time=open_time.strip(), 88 close_time=close_time.strip(), 89 time_format="%H%M" 90 ) 91 except: 92 continue 93 94 return opening_hours.as_opening_hours() 95 96 def parse(self, response): 97 hdata = json.loads(response.text) 98 99 hdata = hdata["SearchResults"] 100 101 for row in hdata: 102 103 properties = { 104 'ref': row['LocationID'], 105 'name': row['Name'], 106 'addr_full': " ".join([row["Address"].split(",")[0], row.get('Address2',"") or ""]).strip(), 107 'city': row["Address"].split(",")[1].strip(), 108 'state': row["Address"].split(",")[2].strip(), 109 'postcode': row["Address"].split(",")[3].strip(), 110 'lat': row['Latitude'], 111 'lon': row['Longitude'], 112 'phone': row['LocationContactPhone'] 113 } 114 115 hours = self.parse_hours(row['WorkingHours']) 116 properties['opening_hours'] = hours 117 118 yield GeojsonPointItem(**properties) 119 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/locations/spiders/wellstar.py b/locations/spiders/wellstar.py --- a/locations/spiders/wellstar.py +++ b/locations/spiders/wellstar.py @@ -81,11 +81,10 @@ for dt in hours: try: day = DAYS_NAME[dt.split(':')[0]] - time = "".join(dt.split(':')[1:4]) - open_time, close_time = time.split('-') + open_time, close_time = hours.get(dt).split('-') opening_hours.add_range(day=day, - open_time=open_time.strip(), - close_time=close_time.strip(), + open_time=open_time.replace(':', '').strip(), + close_time=close_time.replace(':', '').strip(), time_format="%H%M" ) except: @@ -93,26 +92,38 @@ return opening_hours.as_opening_hours() + def get_address_attributes(self, address): + address_parts = address.split(",") + address_attributes = {} + if len(address_parts) > 1: + address_attributes['city'] = address_parts[1].strip() + if len(address_parts) > 2: + address_attributes['state'] = address_parts[2].strip() + if len(address_parts) > 3: + address_attributes['postcode'] = address_parts[3].strip() + + return address_attributes + def parse(self, response): hdata = json.loads(response.text) - hdata = hdata["SearchResults"] + hdata = hdata["matchingItems"] for row in hdata: - + address_attributes = self.get_address_attributes(row.get("Address")) properties = { - 'ref': row['LocationID'], - 'name': row['Name'], - 'addr_full': " ".join([row["Address"].split(",")[0], row.get('Address2',"") or ""]).strip(), - 'city': row["Address"].split(",")[1].strip(), - 'state': row["Address"].split(",")[2].strip(), - 'postcode': row["Address"].split(",")[3].strip(), - 'lat': row['Latitude'], - 'lon': row['Longitude'], - 'phone': row['LocationContactPhone'] + 'ref': row.get('LocationID'), + 'name': row.get('Name'), + 'addr_full': " ".join([row.get("Address").split(",")[0], row.get('Address2',"") or ""]).strip(), + 'city': address_attributes.get('city'), + 'state': address_attributes.get('state'), + 'postcode': address_attributes.get('postcode'), + 'lat': row.get('Latitude'), + 'lon': row.get('Longitude'), + 'phone': row.get('LocationContactPhone') } - hours = self.parse_hours(row['WorkingHours']) + hours = self.parse_hours(row.get('Hours')) properties['opening_hours'] = hours - yield GeojsonPointItem(**properties) + yield GeojsonPointItem(**properties) \ No newline at end of file
{"golden_diff": "diff --git a/locations/spiders/wellstar.py b/locations/spiders/wellstar.py\n--- a/locations/spiders/wellstar.py\n+++ b/locations/spiders/wellstar.py\n@@ -81,11 +81,10 @@\n for dt in hours:\n try:\n day = DAYS_NAME[dt.split(':')[0]]\n- time = \"\".join(dt.split(':')[1:4])\n- open_time, close_time = time.split('-')\n+ open_time, close_time = hours.get(dt).split('-')\n opening_hours.add_range(day=day,\n- open_time=open_time.strip(),\n- close_time=close_time.strip(),\n+ open_time=open_time.replace(':', '').strip(),\n+ close_time=close_time.replace(':', '').strip(),\n time_format=\"%H%M\"\n )\n except:\n@@ -93,26 +92,38 @@\n \n return opening_hours.as_opening_hours()\n \n+ def get_address_attributes(self, address):\n+ address_parts = address.split(\",\")\n+ address_attributes = {}\n+ if len(address_parts) > 1:\n+ address_attributes['city'] = address_parts[1].strip()\n+ if len(address_parts) > 2:\n+ address_attributes['state'] = address_parts[2].strip()\n+ if len(address_parts) > 3:\n+ address_attributes['postcode'] = address_parts[3].strip()\n+\n+ return address_attributes\n+\n def parse(self, response):\n hdata = json.loads(response.text)\n \n- hdata = hdata[\"SearchResults\"]\n+ hdata = hdata[\"matchingItems\"]\n \n for row in hdata:\n-\n+ address_attributes = self.get_address_attributes(row.get(\"Address\"))\n properties = {\n- 'ref': row['LocationID'],\n- 'name': row['Name'],\n- 'addr_full': \" \".join([row[\"Address\"].split(\",\")[0], row.get('Address2',\"\") or \"\"]).strip(),\n- 'city': row[\"Address\"].split(\",\")[1].strip(),\n- 'state': row[\"Address\"].split(\",\")[2].strip(),\n- 'postcode': row[\"Address\"].split(\",\")[3].strip(),\n- 'lat': row['Latitude'],\n- 'lon': row['Longitude'],\n- 'phone': row['LocationContactPhone']\n+ 'ref': row.get('LocationID'),\n+ 'name': row.get('Name'),\n+ 'addr_full': \" \".join([row.get(\"Address\").split(\",\")[0], row.get('Address2',\"\") or \"\"]).strip(),\n+ 'city': address_attributes.get('city'),\n+ 'state': address_attributes.get('state'),\n+ 'postcode': address_attributes.get('postcode'),\n+ 'lat': row.get('Latitude'),\n+ 'lon': row.get('Longitude'),\n+ 'phone': row.get('LocationContactPhone')\n }\n \n- hours = self.parse_hours(row['WorkingHours'])\n+ hours = self.parse_hours(row.get('Hours'))\n properties['opening_hours'] = hours\n \n- yield GeojsonPointItem(**properties)\n+ yield GeojsonPointItem(**properties)\n\\ No newline at end of file\n", "issue": "Spider wellstar is broken\nDuring the global build at 2021-10-20-14-42-48, spider **wellstar** failed with **0 features** and **1 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/logs/wellstar.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/wellstar.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/wellstar.geojson))\n", "before_files": [{"content": "import json\nimport scrapy\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\nDAYS_NAME = {\n 'Monday': 'Mo',\n 'Tuesday': 'Tu',\n 'Wednesday': 'We',\n 'Wedsenday': 'We',\n 'Thursday': 'Th',\n 'Friday': 'Fr',\n 'Saturday': 'Sa',\n 'Sunday': 'Su'\n}\n\nclass WellStarSpider(scrapy.Spider):\n name = \"wellstar\"\n item_attributes = {'brand': \"WellStar Health System\"}\n allowed_domains = [\"www.wellstar.org/\"]\n start_urls = ('https://www.wellstar.org/locations',)\n\n def start_requests(self):\n url = 'https://www.wellstar.org/api/LocationSearchApi/GetLocations'\n\n headers = {\n \"authority\": \"www.wellstar.org\",\n \"sec-ch-ua\": \"\\\"Chromium\\\";v=\\\"88\\\", \\\"Google Chrome\\\";v=\\\"88\\\", \\\";Not A Brand\\\";v=\\\"99\\\"\",\n \"accept\": \"application/json, text/javascript, */*; q=0.01\",\n \"sec-ch-ua-mobile\": \"?0\",\n \"__requestverificationtoken\": \"Y0cuJinQRzvtT-kjKKyPZOlZPu0JU48XSyIvEl1p__yzZF4621s-0YUAkImeXkjGgLB7GyZlv8rYby0uII9LChhFlrKiWiDb2Va5skX-TmM1\",\n \"user-agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.192 Safari/537.36\",\n \"content-type\": \"application/json\",\n \"origin\": \"https://www.wellstar.org\",\n \"sec-fetch-site\": \"same-origin\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-dest\": \"empty\",\n \"referer\": \"https://www.wellstar.org/locations\",\n \"accept-language\": \"en-US,en;q=0.9\"\n }\n\n cookies = {\n \"ASP.NET_SessionId\": \"24p11pgzrg42p4ghtqwi40gl\",\n \"__RequestVerificationToken\": \"3agF8JY4gLGMg_K1j7eXWtME4IsQmLPf1snFPF0d132SBNJG2JsIHpVgCAJ1cVysztrjgXuRrCmeMYp9kJjHDYNZlqIzc_ZYwq-TmGIeBTk1\",\n \"sxa_site\": \"WellStarOrg\",\n \"ARRAffinity\": \"e29b803b013c7e5c76a254365bc4ef36ddc3a862faece1b8869fa46de635b9c5\",\n \"ARRAffinitySameSite\": \"e29b803b013c7e5c76a254365bc4ef36ddc3a862faece1b8869fa46de635b9c5\",\n \"_gid\": \"GA1.2.1358908497.1614958557\",\n \"_fbp\": \"fb.1.1614958557337.1409275872\",\n \"_gcl_au\": \"1.1.1318529675.1614958557\",\n \"SC_ANALYTICS_GLOBAL_COOKIE\": \"da85aa2fb9ad429eb899c581fcb2376d|True\",\n \"fs_uid\": \"rs.fullstory.com#10J51H#5729266957271040:6498357093580800/1646494557\",\n \"isIUnderstand\": \"true\",\n \"searchedlatitude\": \"33.7489954\",\n \"searchedlongitude\": \"-84.3879824\",\n \"latitude\": \"30.259263699999998\",\n \"longitude\": \"-97.7393472\",\n \"_gat_UA-9373927-15\": \"1\",\n \"_gat_UA-9373927-14\": \"1\",\n \"_ga_BM192ND27H\": \"GS1.1.1614983138.7.1.1614983410.0\",\n \"_ga\": \"GA1.1.688932849.1614958557\"\n }\n\n body = '{\"searchTerm\":\"\",\"searchFilter\":\"\"}'\n\n yield scrapy.Request(\n url=url,\n method='POST',\n dont_filter=True,\n cookies=cookies,\n headers=headers,\n body=body,\n callback=self.parse\n )\n\n def parse_hours(self, hours):\n\n opening_hours = OpeningHours()\n\n if hours:\n for dt in hours:\n try:\n day = DAYS_NAME[dt.split(':')[0]]\n time = \"\".join(dt.split(':')[1:4])\n open_time, close_time = time.split('-')\n opening_hours.add_range(day=day,\n open_time=open_time.strip(),\n close_time=close_time.strip(),\n time_format=\"%H%M\"\n )\n except:\n continue\n\n return opening_hours.as_opening_hours()\n\n def parse(self, response):\n hdata = json.loads(response.text)\n\n hdata = hdata[\"SearchResults\"]\n\n for row in hdata:\n\n properties = {\n 'ref': row['LocationID'],\n 'name': row['Name'],\n 'addr_full': \" \".join([row[\"Address\"].split(\",\")[0], row.get('Address2',\"\") or \"\"]).strip(),\n 'city': row[\"Address\"].split(\",\")[1].strip(),\n 'state': row[\"Address\"].split(\",\")[2].strip(),\n 'postcode': row[\"Address\"].split(\",\")[3].strip(),\n 'lat': row['Latitude'],\n 'lon': row['Longitude'],\n 'phone': row['LocationContactPhone']\n }\n\n hours = self.parse_hours(row['WorkingHours'])\n properties['opening_hours'] = hours\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/wellstar.py"}], "after_files": [{"content": "import json\nimport scrapy\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\nDAYS_NAME = {\n 'Monday': 'Mo',\n 'Tuesday': 'Tu',\n 'Wednesday': 'We',\n 'Wedsenday': 'We',\n 'Thursday': 'Th',\n 'Friday': 'Fr',\n 'Saturday': 'Sa',\n 'Sunday': 'Su'\n}\n\nclass WellStarSpider(scrapy.Spider):\n name = \"wellstar\"\n item_attributes = {'brand': \"WellStar Health System\"}\n allowed_domains = [\"www.wellstar.org/\"]\n start_urls = ('https://www.wellstar.org/locations',)\n\n def start_requests(self):\n url = 'https://www.wellstar.org/api/LocationSearchApi/GetLocations'\n\n headers = {\n \"authority\": \"www.wellstar.org\",\n \"sec-ch-ua\": \"\\\"Chromium\\\";v=\\\"88\\\", \\\"Google Chrome\\\";v=\\\"88\\\", \\\";Not A Brand\\\";v=\\\"99\\\"\",\n \"accept\": \"application/json, text/javascript, */*; q=0.01\",\n \"sec-ch-ua-mobile\": \"?0\",\n \"__requestverificationtoken\": \"Y0cuJinQRzvtT-kjKKyPZOlZPu0JU48XSyIvEl1p__yzZF4621s-0YUAkImeXkjGgLB7GyZlv8rYby0uII9LChhFlrKiWiDb2Va5skX-TmM1\",\n \"user-agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.192 Safari/537.36\",\n \"content-type\": \"application/json\",\n \"origin\": \"https://www.wellstar.org\",\n \"sec-fetch-site\": \"same-origin\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-dest\": \"empty\",\n \"referer\": \"https://www.wellstar.org/locations\",\n \"accept-language\": \"en-US,en;q=0.9\"\n }\n\n cookies = {\n \"ASP.NET_SessionId\": \"24p11pgzrg42p4ghtqwi40gl\",\n \"__RequestVerificationToken\": \"3agF8JY4gLGMg_K1j7eXWtME4IsQmLPf1snFPF0d132SBNJG2JsIHpVgCAJ1cVysztrjgXuRrCmeMYp9kJjHDYNZlqIzc_ZYwq-TmGIeBTk1\",\n \"sxa_site\": \"WellStarOrg\",\n \"ARRAffinity\": \"e29b803b013c7e5c76a254365bc4ef36ddc3a862faece1b8869fa46de635b9c5\",\n \"ARRAffinitySameSite\": \"e29b803b013c7e5c76a254365bc4ef36ddc3a862faece1b8869fa46de635b9c5\",\n \"_gid\": \"GA1.2.1358908497.1614958557\",\n \"_fbp\": \"fb.1.1614958557337.1409275872\",\n \"_gcl_au\": \"1.1.1318529675.1614958557\",\n \"SC_ANALYTICS_GLOBAL_COOKIE\": \"da85aa2fb9ad429eb899c581fcb2376d|True\",\n \"fs_uid\": \"rs.fullstory.com#10J51H#5729266957271040:6498357093580800/1646494557\",\n \"isIUnderstand\": \"true\",\n \"searchedlatitude\": \"33.7489954\",\n \"searchedlongitude\": \"-84.3879824\",\n \"latitude\": \"30.259263699999998\",\n \"longitude\": \"-97.7393472\",\n \"_gat_UA-9373927-15\": \"1\",\n \"_gat_UA-9373927-14\": \"1\",\n \"_ga_BM192ND27H\": \"GS1.1.1614983138.7.1.1614983410.0\",\n \"_ga\": \"GA1.1.688932849.1614958557\"\n }\n\n body = '{\"searchTerm\":\"\",\"searchFilter\":\"\"}'\n\n yield scrapy.Request(\n url=url,\n method='POST',\n dont_filter=True,\n cookies=cookies,\n headers=headers,\n body=body,\n callback=self.parse\n )\n\n def parse_hours(self, hours):\n\n opening_hours = OpeningHours()\n\n if hours:\n for dt in hours:\n try:\n day = DAYS_NAME[dt.split(':')[0]]\n open_time, close_time = hours.get(dt).split('-')\n opening_hours.add_range(day=day,\n open_time=open_time.replace(':', '').strip(),\n close_time=close_time.replace(':', '').strip(),\n time_format=\"%H%M\"\n )\n except:\n continue\n\n return opening_hours.as_opening_hours()\n\n def get_address_attributes(self, address):\n address_parts = address.split(\",\")\n address_attributes = {}\n if len(address_parts) > 1:\n address_attributes['city'] = address_parts[1].strip()\n if len(address_parts) > 2:\n address_attributes['state'] = address_parts[2].strip()\n if len(address_parts) > 3:\n address_attributes['postcode'] = address_parts[3].strip()\n\n return address_attributes\n\n def parse(self, response):\n hdata = json.loads(response.text)\n\n hdata = hdata[\"matchingItems\"]\n\n for row in hdata:\n address_attributes = self.get_address_attributes(row.get(\"Address\"))\n properties = {\n 'ref': row.get('LocationID'),\n 'name': row.get('Name'),\n 'addr_full': \" \".join([row.get(\"Address\").split(\",\")[0], row.get('Address2',\"\") or \"\"]).strip(),\n 'city': address_attributes.get('city'),\n 'state': address_attributes.get('state'),\n 'postcode': address_attributes.get('postcode'),\n 'lat': row.get('Latitude'),\n 'lon': row.get('Longitude'),\n 'phone': row.get('LocationContactPhone')\n }\n\n hours = self.parse_hours(row.get('Hours'))\n properties['opening_hours'] = hours\n\n yield GeojsonPointItem(**properties)", "path": "locations/spiders/wellstar.py"}]}
2,220
689
gh_patches_debug_28469
rasdani/github-patches
git_diff
fossasia__open-event-server-2390
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Show image and square crop option (like in wizard) for speakers and ensure it shows up after import ![screenshot from 2016-08-24 11 30 08](https://cloud.githubusercontent.com/assets/1583873/17925701/48dd03be-69ee-11e6-84e4-c353001ddde1.png) As the above screenshot shows, the image of the speaker does not show up as expected. In the wizard step 1 it is already implemented in that way. Compare: http://open-event-dev.herokuapp.com/events/132/speakers/882/edit/ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `app/views/admin/models_views/speakers.py` Content: ``` 1 import json 2 3 from flask.ext.admin import BaseView 4 from flask.ext.restplus import abort 5 from flask_admin import expose 6 from flask.ext import login 7 from flask import request, url_for, redirect, flash 8 from ....helpers.data import delete_from_db, save_to_db 9 from ....helpers.data_getter import DataGetter 10 from ....helpers.storage import upload, UPLOAD_PATHS 11 12 13 def get_speaker_or_throw(speaker_id): 14 session = DataGetter.get_speaker(speaker_id) 15 if not session: 16 abort(404) 17 return session 18 19 20 class SpeakersView(BaseView): 21 22 def is_accessible(self): 23 return login.current_user.is_authenticated 24 25 def _handle_view(self, name, **kwargs): 26 if not self.is_accessible(): 27 return redirect(url_for('admin.login_view', next=request.url)) 28 event = DataGetter.get_event(kwargs['event_id']) 29 if not event.has_session_speakers: 30 return self.render('/gentelella/admin/event/info/enable_module.html', active_page='speakers', title='Speakers', event=event) 31 32 @expose('/') 33 def index_view(self, event_id): 34 speakers = DataGetter.get_speakers(event_id) 35 event = DataGetter.get_event(event_id) 36 return self.render('/gentelella/admin/event/speakers/base_speaker_table.html', 37 speakers=speakers, event_id=event_id, event=event) 38 39 @expose('/<int:speaker_id>/edit/', methods=('GET', 'POST')) 40 def edit_view(self, event_id, speaker_id): 41 speaker = get_speaker_or_throw(speaker_id) 42 event = DataGetter.get_event(event_id) 43 form_elems = DataGetter.get_custom_form_elements(event_id) 44 if not form_elems: 45 flash("Speaker form has been incorrectly configured for this event. Editing has been disabled", "danger") 46 return redirect(url_for('.index_view', event_id=event_id)) 47 speaker_form = json.loads(form_elems.speaker_form) 48 if request.method == 'GET': 49 return self.render('/gentelella/admin/event/speakers/edit.html', 50 speaker=speaker, event_id=event_id, 51 event=event, speaker_form=speaker_form) 52 if request.method == 'POST': 53 # set photo 54 if 'photo' in request.files and request.files['photo'].filename != '': 55 speaker_img_file = request.files['photo'] 56 speaker_img = upload( 57 speaker_img_file, 58 UPLOAD_PATHS['speakers']['photo'].format( 59 event_id=int(event_id), id=int(speaker.id) 60 )) 61 speaker.photo = speaker_img 62 # set other fields 63 speaker.name = request.form.get('name', None) 64 speaker.short_biography = request.form.get('short_biography', None) 65 speaker.long_biography = request.form.get('long_biography', None) 66 speaker.email = request.form.get('email', None) 67 speaker.mobile = request.form.get('mobile', None) 68 speaker.website = request.form.get('website', None) 69 speaker.twitter = request.form.get('twitter', None) 70 speaker.facebook = request.form.get('facebook', None) 71 speaker.github = request.form.get('github', None) 72 speaker.linkedin = request.form.get('linkedin', None) 73 speaker.organisation = request.form.get('organisation', None) 74 speaker.featured = True if request.form.get('featured', 'false') == 'true' else False 75 speaker.position = request.form.get('position', None) 76 speaker.country = request.form.get('country', None) 77 save_to_db(speaker, "Speaker has been updated") 78 flash("Speaker has been saved", "success") 79 80 return redirect(url_for('.index_view', event_id=event_id)) 81 82 @expose('/<int:speaker_id>/delete', methods=('GET',)) 83 def delete(self, event_id, speaker_id): 84 speaker = get_speaker_or_throw(speaker_id) 85 delete_from_db(speaker, 'Speaker Rejected') 86 flash("The speaker has been deleted", "danger") 87 return redirect(url_for('.index_view', event_id=event_id)) 88 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/app/views/admin/models_views/speakers.py b/app/views/admin/models_views/speakers.py --- a/app/views/admin/models_views/speakers.py +++ b/app/views/admin/models_views/speakers.py @@ -4,10 +4,11 @@ from flask.ext.restplus import abort from flask_admin import expose from flask.ext import login -from flask import request, url_for, redirect, flash +from flask import request, url_for, redirect, flash, jsonify from ....helpers.data import delete_from_db, save_to_db from ....helpers.data_getter import DataGetter from ....helpers.storage import upload, UPLOAD_PATHS +from app.helpers.helpers import uploaded_file def get_speaker_or_throw(speaker_id): @@ -85,3 +86,23 @@ delete_from_db(speaker, 'Speaker Rejected') flash("The speaker has been deleted", "danger") return redirect(url_for('.index_view', event_id=event_id)) + + @expose('/<int:speaker_id>/photo_upload', methods=('POST',)) + def photo_upload(self, event_id, speaker_id): + speaker = get_speaker_or_throw(speaker_id) + event = DataGetter.get_event(event_id) + photo = request.form['photo'] + if photo: + photo_file = uploaded_file(file_content=photo) + photo = upload( + photo_file, + UPLOAD_PATHS['speakers']['photo'].format( + event_id=int(event_id), id=int(speaker.id) + )) + speaker.photo = photo + save_to_db(speaker) + return jsonify({'status': 'ok', 'photo': photo}) + else: + speaker.photo = None + save_to_db(speaker) + return jsonify({'status': 'Removed'})
{"golden_diff": "diff --git a/app/views/admin/models_views/speakers.py b/app/views/admin/models_views/speakers.py\n--- a/app/views/admin/models_views/speakers.py\n+++ b/app/views/admin/models_views/speakers.py\n@@ -4,10 +4,11 @@\n from flask.ext.restplus import abort\n from flask_admin import expose\n from flask.ext import login\n-from flask import request, url_for, redirect, flash\n+from flask import request, url_for, redirect, flash, jsonify\n from ....helpers.data import delete_from_db, save_to_db\n from ....helpers.data_getter import DataGetter\n from ....helpers.storage import upload, UPLOAD_PATHS\n+from app.helpers.helpers import uploaded_file\n \n \n def get_speaker_or_throw(speaker_id):\n@@ -85,3 +86,23 @@\n delete_from_db(speaker, 'Speaker Rejected')\n flash(\"The speaker has been deleted\", \"danger\")\n return redirect(url_for('.index_view', event_id=event_id))\n+\n+ @expose('/<int:speaker_id>/photo_upload', methods=('POST',))\n+ def photo_upload(self, event_id, speaker_id):\n+ speaker = get_speaker_or_throw(speaker_id)\n+ event = DataGetter.get_event(event_id)\n+ photo = request.form['photo']\n+ if photo:\n+ photo_file = uploaded_file(file_content=photo)\n+ photo = upload(\n+ photo_file,\n+ UPLOAD_PATHS['speakers']['photo'].format(\n+ event_id=int(event_id), id=int(speaker.id)\n+ ))\n+ speaker.photo = photo\n+ save_to_db(speaker)\n+ return jsonify({'status': 'ok', 'photo': photo})\n+ else:\n+ speaker.photo = None\n+ save_to_db(speaker)\n+ return jsonify({'status': 'Removed'})\n", "issue": "Show image and square crop option (like in wizard) for speakers and ensure it shows up after import\n![screenshot from 2016-08-24 11 30 08](https://cloud.githubusercontent.com/assets/1583873/17925701/48dd03be-69ee-11e6-84e4-c353001ddde1.png)\n\nAs the above screenshot shows, the image of the speaker does not show up as expected. In the wizard step 1 it is already implemented in that way.\n\nCompare: http://open-event-dev.herokuapp.com/events/132/speakers/882/edit/\n\n", "before_files": [{"content": "import json\n\nfrom flask.ext.admin import BaseView\nfrom flask.ext.restplus import abort\nfrom flask_admin import expose\nfrom flask.ext import login\nfrom flask import request, url_for, redirect, flash\nfrom ....helpers.data import delete_from_db, save_to_db\nfrom ....helpers.data_getter import DataGetter\nfrom ....helpers.storage import upload, UPLOAD_PATHS\n\n\ndef get_speaker_or_throw(speaker_id):\n session = DataGetter.get_speaker(speaker_id)\n if not session:\n abort(404)\n return session\n\n\nclass SpeakersView(BaseView):\n\n def is_accessible(self):\n return login.current_user.is_authenticated\n\n def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n return redirect(url_for('admin.login_view', next=request.url))\n event = DataGetter.get_event(kwargs['event_id'])\n if not event.has_session_speakers:\n return self.render('/gentelella/admin/event/info/enable_module.html', active_page='speakers', title='Speakers', event=event)\n\n @expose('/')\n def index_view(self, event_id):\n speakers = DataGetter.get_speakers(event_id)\n event = DataGetter.get_event(event_id)\n return self.render('/gentelella/admin/event/speakers/base_speaker_table.html',\n speakers=speakers, event_id=event_id, event=event)\n\n @expose('/<int:speaker_id>/edit/', methods=('GET', 'POST'))\n def edit_view(self, event_id, speaker_id):\n speaker = get_speaker_or_throw(speaker_id)\n event = DataGetter.get_event(event_id)\n form_elems = DataGetter.get_custom_form_elements(event_id)\n if not form_elems:\n flash(\"Speaker form has been incorrectly configured for this event. Editing has been disabled\", \"danger\")\n return redirect(url_for('.index_view', event_id=event_id))\n speaker_form = json.loads(form_elems.speaker_form)\n if request.method == 'GET':\n return self.render('/gentelella/admin/event/speakers/edit.html',\n speaker=speaker, event_id=event_id,\n event=event, speaker_form=speaker_form)\n if request.method == 'POST':\n # set photo\n if 'photo' in request.files and request.files['photo'].filename != '':\n speaker_img_file = request.files['photo']\n speaker_img = upload(\n speaker_img_file,\n UPLOAD_PATHS['speakers']['photo'].format(\n event_id=int(event_id), id=int(speaker.id)\n ))\n speaker.photo = speaker_img\n # set other fields\n speaker.name = request.form.get('name', None)\n speaker.short_biography = request.form.get('short_biography', None)\n speaker.long_biography = request.form.get('long_biography', None)\n speaker.email = request.form.get('email', None)\n speaker.mobile = request.form.get('mobile', None)\n speaker.website = request.form.get('website', None)\n speaker.twitter = request.form.get('twitter', None)\n speaker.facebook = request.form.get('facebook', None)\n speaker.github = request.form.get('github', None)\n speaker.linkedin = request.form.get('linkedin', None)\n speaker.organisation = request.form.get('organisation', None)\n speaker.featured = True if request.form.get('featured', 'false') == 'true' else False\n speaker.position = request.form.get('position', None)\n speaker.country = request.form.get('country', None)\n save_to_db(speaker, \"Speaker has been updated\")\n flash(\"Speaker has been saved\", \"success\")\n\n return redirect(url_for('.index_view', event_id=event_id))\n\n @expose('/<int:speaker_id>/delete', methods=('GET',))\n def delete(self, event_id, speaker_id):\n speaker = get_speaker_or_throw(speaker_id)\n delete_from_db(speaker, 'Speaker Rejected')\n flash(\"The speaker has been deleted\", \"danger\")\n return redirect(url_for('.index_view', event_id=event_id))\n", "path": "app/views/admin/models_views/speakers.py"}], "after_files": [{"content": "import json\n\nfrom flask.ext.admin import BaseView\nfrom flask.ext.restplus import abort\nfrom flask_admin import expose\nfrom flask.ext import login\nfrom flask import request, url_for, redirect, flash, jsonify\nfrom ....helpers.data import delete_from_db, save_to_db\nfrom ....helpers.data_getter import DataGetter\nfrom ....helpers.storage import upload, UPLOAD_PATHS\nfrom app.helpers.helpers import uploaded_file\n\n\ndef get_speaker_or_throw(speaker_id):\n session = DataGetter.get_speaker(speaker_id)\n if not session:\n abort(404)\n return session\n\n\nclass SpeakersView(BaseView):\n\n def is_accessible(self):\n return login.current_user.is_authenticated\n\n def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n return redirect(url_for('admin.login_view', next=request.url))\n event = DataGetter.get_event(kwargs['event_id'])\n if not event.has_session_speakers:\n return self.render('/gentelella/admin/event/info/enable_module.html', active_page='speakers', title='Speakers', event=event)\n\n @expose('/')\n def index_view(self, event_id):\n speakers = DataGetter.get_speakers(event_id)\n event = DataGetter.get_event(event_id)\n return self.render('/gentelella/admin/event/speakers/base_speaker_table.html',\n speakers=speakers, event_id=event_id, event=event)\n\n @expose('/<int:speaker_id>/edit/', methods=('GET', 'POST'))\n def edit_view(self, event_id, speaker_id):\n speaker = get_speaker_or_throw(speaker_id)\n event = DataGetter.get_event(event_id)\n form_elems = DataGetter.get_custom_form_elements(event_id)\n if not form_elems:\n flash(\"Speaker form has been incorrectly configured for this event. Editing has been disabled\", \"danger\")\n return redirect(url_for('.index_view', event_id=event_id))\n speaker_form = json.loads(form_elems.speaker_form)\n if request.method == 'GET':\n return self.render('/gentelella/admin/event/speakers/edit.html',\n speaker=speaker, event_id=event_id,\n event=event, speaker_form=speaker_form)\n if request.method == 'POST':\n # set photo\n if 'photo' in request.files and request.files['photo'].filename != '':\n speaker_img_file = request.files['photo']\n speaker_img = upload(\n speaker_img_file,\n UPLOAD_PATHS['speakers']['photo'].format(\n event_id=int(event_id), id=int(speaker.id)\n ))\n speaker.photo = speaker_img\n # set other fields\n speaker.name = request.form.get('name', None)\n speaker.short_biography = request.form.get('short_biography', None)\n speaker.long_biography = request.form.get('long_biography', None)\n speaker.email = request.form.get('email', None)\n speaker.mobile = request.form.get('mobile', None)\n speaker.website = request.form.get('website', None)\n speaker.twitter = request.form.get('twitter', None)\n speaker.facebook = request.form.get('facebook', None)\n speaker.github = request.form.get('github', None)\n speaker.linkedin = request.form.get('linkedin', None)\n speaker.organisation = request.form.get('organisation', None)\n speaker.featured = True if request.form.get('featured', 'false') == 'true' else False\n speaker.position = request.form.get('position', None)\n speaker.country = request.form.get('country', None)\n save_to_db(speaker, \"Speaker has been updated\")\n flash(\"Speaker has been saved\", \"success\")\n\n return redirect(url_for('.index_view', event_id=event_id))\n\n @expose('/<int:speaker_id>/delete', methods=('GET',))\n def delete(self, event_id, speaker_id):\n speaker = get_speaker_or_throw(speaker_id)\n delete_from_db(speaker, 'Speaker Rejected')\n flash(\"The speaker has been deleted\", \"danger\")\n return redirect(url_for('.index_view', event_id=event_id))\n\n @expose('/<int:speaker_id>/photo_upload', methods=('POST',))\n def photo_upload(self, event_id, speaker_id):\n speaker = get_speaker_or_throw(speaker_id)\n event = DataGetter.get_event(event_id)\n photo = request.form['photo']\n if photo:\n photo_file = uploaded_file(file_content=photo)\n photo = upload(\n photo_file,\n UPLOAD_PATHS['speakers']['photo'].format(\n event_id=int(event_id), id=int(speaker.id)\n ))\n speaker.photo = photo\n save_to_db(speaker)\n return jsonify({'status': 'ok', 'photo': photo})\n else:\n speaker.photo = None\n save_to_db(speaker)\n return jsonify({'status': 'Removed'})\n", "path": "app/views/admin/models_views/speakers.py"}]}
1,450
396
gh_patches_debug_26592
rasdani/github-patches
git_diff
ansible__ansible-modules-extras-2902
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Cisco ASA_ACL "NameError: global name 'candidate' is not defined" ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME asa_acl ##### ANSIBLE VERSION ``` root@osboxes:~/asa# ansible-playbook -i inventory acl.yml PLAY [Test ASA] **************************************************************** TASK [Create object to ACL-ANSIBLE] ******************************************** An exception occurred during task execution. To see the full traceback, use -vvv. The error was: NameError: global name 'candidate' is not defined fatal: [asa]: FAILED! => {"changed": false, "failed": true, "module_stderr": "Traceback (most recent call last):\n File \"/tmp/ansible_JUM9ib/ansible_module_asa_acl.py\", line 202, in <module>\n main()\n File \"/tmp/ansible_JUM9ib/ansible_module_asa_acl.py\", line 184, in main\n commands = candidate.difference(config)\nNameError: global name 'candidate' is not defined\n", "module_stdout": "", "msg": "MODULE FAILURE"} PLAY RECAP ********************************************************************* asa : ok=0 changed=0 unreachable=0 failed=1 root@osboxes:~/asa# ``` ##### CONFIGURATION ##### OS / ENVIRONMENT Ubuntu ##### SUMMARY Attempting to run asa_acl using ansible dev version results in above errors. asa_command and asa_config work with no issues. ##### STEPS TO REPRODUCE ``` --- - name: Test ASA hosts: asa connection: local gather_facts: false tasks: - name: Create object to ACL-ANSIBLE asa_acl: lines: - "access-list ACL-ANSIBLE extended permit tcp any any eq 74" username: "ntc" host: "{{ inventory_hostname }}" authorize: true auth_pass: "\n" ``` ``` ansible-playbook -i inventory acl.yml ``` ##### EXPECTED RESULTS ACL to be updated ##### ACTUAL RESULTS ``` root@osboxes:~/asa# ansible-playbook -i inventory acl.yml -vvvv Using /etc/ansible/ansible.cfg as config file Loading callback plugin default of type stdout, v2.0 from /tmp/ansible/lib/ansible/plugins/callback/__init__.pyc PLAYBOOK: acl.yml ************************************************************** 1 plays in acl.yml PLAY [Test ASA] **************************************************************** TASK [Create object to ACL-ANSIBLE] ******************************************** task path: /root/asa/acl.yml:10 Using module file /tmp/ansible/lib/ansible/modules/extras/network/asa/asa_acl.py <asa> ESTABLISH LOCAL CONNECTION FOR USER: root <asa> EXEC /bin/sh -c '( umask 77 && mkdir -p "` echo $HOME/.ansible/tmp/ansible-tmp-1473449999.57-34289925616872 `" && echo ansible-tmp-1473449999.57-34289925616872="` echo $HOME/.ansible/tmp/ansible-tmp-1473449999.57-34289925616872 `" ) && sleep 0' <asa> PUT /tmp/tmpu2hszs TO /root/.ansible/tmp/ansible-tmp-1473449999.57-34289925616872/asa_acl.py <asa> EXEC /bin/sh -c 'chmod u+x /root/.ansible/tmp/ansible-tmp-1473449999.57-34289925616872/ /root/.ansible/tmp/ansible-tmp-1473449999.57-34289925616872/asa_acl.py && sleep 0' <asa> EXEC /bin/sh -c '/usr/bin/python /root/.ansible/tmp/ansible-tmp-1473449999.57-34289925616872/asa_acl.py; rm -rf "/root/.ansible/tmp/ansible-tmp-1473449999.57-34289925616872/" > /dev/null 2>&1 && sleep 0' An exception occurred during task execution. The full traceback is: Traceback (most recent call last): File "/tmp/ansible_o9grpZ/ansible_module_asa_acl.py", line 202, in <module> main() File "/tmp/ansible_o9grpZ/ansible_module_asa_acl.py", line 184, in main commands = candidate.difference(config) NameError: global name 'candidate' is not defined fatal: [asa]: FAILED! => { "changed": false, "failed": true, "invocation": { "module_name": "asa_acl" }, "module_stderr": "Traceback (most recent call last):\n File \"/tmp/ansible_o9grpZ/ansible_module_asa_acl.py\", line 202, in <module>\n main()\n File \"/tmp/ansible_o9grpZ/ansible_module_asa_acl.py\", line 184, in main\n commands = candidate.difference(config)\nNameError: global name 'candidate' is not defined\n", "module_stdout": "", "msg": "MODULE FAILURE" } PLAY RECAP ********************************************************************* asa : ok=0 changed=0 unreachable=0 failed=1 ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `network/asa/asa_acl.py` Content: ``` 1 #!/usr/bin/python 2 # 3 # This file is part of Ansible 4 # 5 # Ansible is free software: you can redistribute it and/or modify 6 # it under the terms of the GNU General Public License as published by 7 # the Free Software Foundation, either version 3 of the License, or 8 # (at your option) any later version. 9 # 10 # Ansible is distributed in the hope that it will be useful, 11 # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 # GNU General Public License for more details. 14 # 15 # You should have received a copy of the GNU General Public License 16 # along with Ansible. If not, see <http://www.gnu.org/licenses/>. 17 # 18 19 DOCUMENTATION = """ 20 --- 21 module: asa_acl 22 version_added: "2.2" 23 author: "Patrick Ogenstad (@ogenstad)" 24 short_description: Manage access-lists on a Cisco ASA 25 description: 26 - This module allows you to work with access-lists on a Cisco ASA device. 27 extends_documentation_fragment: asa 28 options: 29 lines: 30 description: 31 - The ordered set of commands that should be configured in the 32 section. The commands must be the exact same commands as found 33 in the device running-config. Be sure to note the configuration 34 command syntanx as some commands are automatically modified by the 35 device config parser. 36 required: true 37 before: 38 description: 39 - The ordered set of commands to push on to the command stack if 40 a change needs to be made. This allows the playbook designer 41 the opportunity to perform configuration commands prior to pushing 42 any changes without affecting how the set of commands are matched 43 against the system 44 required: false 45 default: null 46 after: 47 description: 48 - The ordered set of commands to append to the end of the command 49 stack if a changed needs to be made. Just like with I(before) this 50 allows the playbook designer to append a set of commands to be 51 executed after the command set. 52 required: false 53 default: null 54 match: 55 description: 56 - Instructs the module on the way to perform the matching of 57 the set of commands against the current device config. If 58 match is set to I(line), commands are matched line by line. If 59 match is set to I(strict), command lines are matched with respect 60 to position. Finally if match is set to I(exact), command lines 61 must be an equal match. 62 required: false 63 default: line 64 choices: ['line', 'strict', 'exact'] 65 replace: 66 description: 67 - Instructs the module on the way to perform the configuration 68 on the device. If the replace argument is set to I(line) then 69 the modified lines are pushed to the device in configuration 70 mode. If the replace argument is set to I(block) then the entire 71 command block is pushed to the device in configuration mode if any 72 line is not correct 73 required: false 74 default: line 75 choices: ['line', 'block'] 76 force: 77 description: 78 - The force argument instructs the module to not consider the 79 current devices running-config. When set to true, this will 80 cause the module to push the contents of I(src) into the device 81 without first checking if already configured. 82 required: false 83 default: false 84 choices: ['yes', 'no'] 85 config: 86 description: 87 - The module, by default, will connect to the remote device and 88 retrieve the current running-config to use as a base for comparing 89 against the contents of source. There are times when it is not 90 desirable to have the task get the current running-config for 91 every task in a playbook. The I(config) argument allows the 92 implementer to pass in the configuruation to use as the base 93 config for comparision. 94 required: false 95 default: null 96 """ 97 98 EXAMPLES = """ 99 100 - asa_acl: 101 lines: 102 - access-list ACL-ANSIBLE extended permit tcp any any eq 82 103 - access-list ACL-ANSIBLE extended permit tcp any any eq www 104 - access-list ACL-ANSIBLE extended permit tcp any any eq 97 105 - access-list ACL-ANSIBLE extended permit tcp any any eq 98 106 - access-list ACL-ANSIBLE extended permit tcp any any eq 99 107 before: clear configure access-list ACL-ANSIBLE 108 match: strict 109 replace: block 110 111 - asa_acl: 112 lines: 113 - access-list ACL-OUTSIDE extended permit tcp any any eq www 114 - access-list ACL-OUTSIDE extended permit tcp any any eq https 115 context: customer_a 116 """ 117 118 RETURN = """ 119 updates: 120 description: The set of commands that will be pushed to the remote device 121 returned: always 122 type: list 123 sample: ['...', '...'] 124 125 responses: 126 description: The set of responses from issuing the commands on the device 127 retured: when not check_mode 128 type: list 129 sample: ['...', '...'] 130 """ 131 from ansible.module_utils.netcfg import NetworkConfig 132 from ansible.module_utils.asa import NetworkModule 133 134 135 def get_config(module): 136 config = module.params['config'] or dict() 137 if not config and not module.params['force']: 138 config = module.config 139 return config 140 141 142 def check_input_acl(lines, module): 143 first_line = True 144 for line in lines: 145 ace = line.split() 146 if ace[0] != 'access-list': 147 module.fail_json(msg='All lines/commands must begin with "access-list" %s is not permitted' % ace[0]) 148 if len(ace) <= 1: 149 module.fail_json(msg='All lines/commainds must contain the name of the access-list') 150 if first_line: 151 acl_name = ace[1] 152 else: 153 if acl_name != ace[1]: 154 module.fail_json(msg='All lines/commands must use the same access-list %s is not %s' % (ace[1], acl_name)) 155 first_line = False 156 157 return 'access-list %s' % acl_name 158 159 def main(): 160 161 argument_spec = dict( 162 lines=dict(aliases=['commands'], required=True, type='list'), 163 before=dict(type='list'), 164 after=dict(type='list'), 165 match=dict(default='line', choices=['line', 'strict', 'exact']), 166 replace=dict(default='line', choices=['line', 'block']), 167 force=dict(default=False, type='bool'), 168 config=dict() 169 ) 170 171 module = NetworkModule(argument_spec=argument_spec, 172 supports_check_mode=True) 173 174 lines = module.params['lines'] 175 176 before = module.params['before'] 177 after = module.params['after'] 178 179 match = module.params['match'] 180 replace = module.params['replace'] 181 182 module.filter = check_input_acl(lines, module) 183 if not module.params['force']: 184 commands = candidate.difference(config) 185 commands = dumps(commands, 'commands').split('\n') 186 commands = [str(c) for c in commands if c] 187 else: 188 commands = str(candidate).split('\n') 189 190 if commands: 191 if not module.check_mode: 192 response = module.config(commands) 193 result['responses'] = response 194 result['changed'] = True 195 196 result['updates'] = commands 197 198 module.exit_json(**result) 199 200 201 if __name__ == '__main__': 202 main() 203 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/network/asa/asa_acl.py b/network/asa/asa_acl.py --- a/network/asa/asa_acl.py +++ b/network/asa/asa_acl.py @@ -128,8 +128,10 @@ type: list sample: ['...', '...'] """ +import ansible.module_utils.asa + from ansible.module_utils.netcfg import NetworkConfig -from ansible.module_utils.asa import NetworkModule +from ansible.module_utils.network import NetworkModule def get_config(module): @@ -179,16 +181,22 @@ match = module.params['match'] replace = module.params['replace'] + candidate = NetworkConfig(indent=1) + candidate.add(lines) + module.filter = check_input_acl(lines, module) + if not module.params['force']: + contents = get_config(module) + config = NetworkConfig(indent=1, contents=contents) commands = candidate.difference(config) commands = dumps(commands, 'commands').split('\n') - commands = [str(c) for c in commands if c] else: commands = str(candidate).split('\n') if commands: if not module.check_mode: + commands = [str(c) for c in commands if c] response = module.config(commands) result['responses'] = response result['changed'] = True
{"golden_diff": "diff --git a/network/asa/asa_acl.py b/network/asa/asa_acl.py\n--- a/network/asa/asa_acl.py\n+++ b/network/asa/asa_acl.py\n@@ -128,8 +128,10 @@\n type: list\n sample: ['...', '...']\n \"\"\"\n+import ansible.module_utils.asa\n+\n from ansible.module_utils.netcfg import NetworkConfig\n-from ansible.module_utils.asa import NetworkModule\n+from ansible.module_utils.network import NetworkModule\n \n \n def get_config(module):\n@@ -179,16 +181,22 @@\n match = module.params['match']\n replace = module.params['replace']\n \n+ candidate = NetworkConfig(indent=1)\n+ candidate.add(lines)\n+\n module.filter = check_input_acl(lines, module)\n+\n if not module.params['force']:\n+ contents = get_config(module)\n+ config = NetworkConfig(indent=1, contents=contents)\n commands = candidate.difference(config)\n commands = dumps(commands, 'commands').split('\\n')\n- commands = [str(c) for c in commands if c]\n else:\n commands = str(candidate).split('\\n')\n \n if commands:\n if not module.check_mode:\n+ commands = [str(c) for c in commands if c]\n response = module.config(commands)\n result['responses'] = response\n result['changed'] = True\n", "issue": "Cisco ASA_ACL \"NameError: global name 'candidate' is not defined\"\n##### ISSUE TYPE\n- Bug Report\n##### COMPONENT NAME\n\nasa_acl\n##### ANSIBLE VERSION\n\n```\nroot@osboxes:~/asa# ansible-playbook -i inventory acl.yml\n\nPLAY [Test ASA] ****************************************************************\n\nTASK [Create object to ACL-ANSIBLE] ********************************************\nAn exception occurred during task execution. To see the full traceback, use -vvv. The error was: NameError: global name 'candidate' is not defined\nfatal: [asa]: FAILED! => {\"changed\": false, \"failed\": true, \"module_stderr\": \"Traceback (most recent call last):\\n File \\\"/tmp/ansible_JUM9ib/ansible_module_asa_acl.py\\\", line 202, in <module>\\n main()\\n File \\\"/tmp/ansible_JUM9ib/ansible_module_asa_acl.py\\\", line 184, in main\\n commands = candidate.difference(config)\\nNameError: global name 'candidate' is not defined\\n\", \"module_stdout\": \"\", \"msg\": \"MODULE FAILURE\"}\n\nPLAY RECAP *********************************************************************\nasa : ok=0 changed=0 unreachable=0 failed=1\n\nroot@osboxes:~/asa#\n\n```\n##### CONFIGURATION\n##### OS / ENVIRONMENT\n\nUbuntu\n##### SUMMARY\n\nAttempting to run asa_acl using ansible dev version results in above errors. asa_command and asa_config work with no issues. \n##### STEPS TO REPRODUCE\n\n```\n\n---\n\n- name: Test ASA\n hosts: asa\n connection: local\n gather_facts: false\n\n tasks:\n\n - name: Create object to ACL-ANSIBLE\n asa_acl:\n lines:\n - \"access-list ACL-ANSIBLE extended permit tcp any any eq 74\"\n username: \"ntc\"\n host: \"{{ inventory_hostname }}\"\n authorize: true\n auth_pass: \"\\n\"\n```\n\n```\nansible-playbook -i inventory acl.yml\n```\n##### EXPECTED RESULTS\n\nACL to be updated \n##### ACTUAL RESULTS\n\n```\nroot@osboxes:~/asa# ansible-playbook -i inventory acl.yml -vvvv\nUsing /etc/ansible/ansible.cfg as config file\nLoading callback plugin default of type stdout, v2.0 from /tmp/ansible/lib/ansible/plugins/callback/__init__.pyc\n\nPLAYBOOK: acl.yml **************************************************************\n1 plays in acl.yml\n\nPLAY [Test ASA] ****************************************************************\n\nTASK [Create object to ACL-ANSIBLE] ********************************************\ntask path: /root/asa/acl.yml:10\nUsing module file /tmp/ansible/lib/ansible/modules/extras/network/asa/asa_acl.py\n<asa> ESTABLISH LOCAL CONNECTION FOR USER: root\n<asa> EXEC /bin/sh -c '( umask 77 && mkdir -p \"` echo $HOME/.ansible/tmp/ansible-tmp-1473449999.57-34289925616872 `\" && echo ansible-tmp-1473449999.57-34289925616872=\"` echo $HOME/.ansible/tmp/ansible-tmp-1473449999.57-34289925616872 `\" ) && sleep 0'\n<asa> PUT /tmp/tmpu2hszs TO /root/.ansible/tmp/ansible-tmp-1473449999.57-34289925616872/asa_acl.py\n<asa> EXEC /bin/sh -c 'chmod u+x /root/.ansible/tmp/ansible-tmp-1473449999.57-34289925616872/ /root/.ansible/tmp/ansible-tmp-1473449999.57-34289925616872/asa_acl.py && sleep 0'\n<asa> EXEC /bin/sh -c '/usr/bin/python /root/.ansible/tmp/ansible-tmp-1473449999.57-34289925616872/asa_acl.py; rm -rf \"/root/.ansible/tmp/ansible-tmp-1473449999.57-34289925616872/\" > /dev/null 2>&1 && sleep 0'\nAn exception occurred during task execution. The full traceback is:\nTraceback (most recent call last):\n File \"/tmp/ansible_o9grpZ/ansible_module_asa_acl.py\", line 202, in <module>\n main()\n File \"/tmp/ansible_o9grpZ/ansible_module_asa_acl.py\", line 184, in main\n commands = candidate.difference(config)\nNameError: global name 'candidate' is not defined\n\nfatal: [asa]: FAILED! => {\n \"changed\": false,\n \"failed\": true,\n \"invocation\": {\n \"module_name\": \"asa_acl\"\n },\n \"module_stderr\": \"Traceback (most recent call last):\\n File \\\"/tmp/ansible_o9grpZ/ansible_module_asa_acl.py\\\", line 202, in <module>\\n main()\\n File \\\"/tmp/ansible_o9grpZ/ansible_module_asa_acl.py\\\", line 184, in main\\n commands = candidate.difference(config)\\nNameError: global name 'candidate' is not defined\\n\",\n \"module_stdout\": \"\",\n \"msg\": \"MODULE FAILURE\"\n}\n\nPLAY RECAP *********************************************************************\nasa : ok=0 changed=0 unreachable=0 failed=1\n\n\n```\n\n", "before_files": [{"content": "#!/usr/bin/python\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n#\n\nDOCUMENTATION = \"\"\"\n---\nmodule: asa_acl\nversion_added: \"2.2\"\nauthor: \"Patrick Ogenstad (@ogenstad)\"\nshort_description: Manage access-lists on a Cisco ASA\ndescription:\n - This module allows you to work with access-lists on a Cisco ASA device.\nextends_documentation_fragment: asa\noptions:\n lines:\n description:\n - The ordered set of commands that should be configured in the\n section. The commands must be the exact same commands as found\n in the device running-config. Be sure to note the configuration\n command syntanx as some commands are automatically modified by the\n device config parser.\n required: true\n before:\n description:\n - The ordered set of commands to push on to the command stack if\n a change needs to be made. This allows the playbook designer\n the opportunity to perform configuration commands prior to pushing\n any changes without affecting how the set of commands are matched\n against the system\n required: false\n default: null\n after:\n description:\n - The ordered set of commands to append to the end of the command\n stack if a changed needs to be made. Just like with I(before) this\n allows the playbook designer to append a set of commands to be\n executed after the command set.\n required: false\n default: null\n match:\n description:\n - Instructs the module on the way to perform the matching of\n the set of commands against the current device config. If\n match is set to I(line), commands are matched line by line. If\n match is set to I(strict), command lines are matched with respect\n to position. Finally if match is set to I(exact), command lines\n must be an equal match.\n required: false\n default: line\n choices: ['line', 'strict', 'exact']\n replace:\n description:\n - Instructs the module on the way to perform the configuration\n on the device. If the replace argument is set to I(line) then\n the modified lines are pushed to the device in configuration\n mode. If the replace argument is set to I(block) then the entire\n command block is pushed to the device in configuration mode if any\n line is not correct\n required: false\n default: line\n choices: ['line', 'block']\n force:\n description:\n - The force argument instructs the module to not consider the\n current devices running-config. When set to true, this will\n cause the module to push the contents of I(src) into the device\n without first checking if already configured.\n required: false\n default: false\n choices: ['yes', 'no']\n config:\n description:\n - The module, by default, will connect to the remote device and\n retrieve the current running-config to use as a base for comparing\n against the contents of source. There are times when it is not\n desirable to have the task get the current running-config for\n every task in a playbook. The I(config) argument allows the\n implementer to pass in the configuruation to use as the base\n config for comparision.\n required: false\n default: null\n\"\"\"\n\nEXAMPLES = \"\"\"\n\n- asa_acl:\n lines:\n - access-list ACL-ANSIBLE extended permit tcp any any eq 82\n - access-list ACL-ANSIBLE extended permit tcp any any eq www\n - access-list ACL-ANSIBLE extended permit tcp any any eq 97\n - access-list ACL-ANSIBLE extended permit tcp any any eq 98\n - access-list ACL-ANSIBLE extended permit tcp any any eq 99\n before: clear configure access-list ACL-ANSIBLE\n match: strict\n replace: block\n\n- asa_acl:\n lines:\n - access-list ACL-OUTSIDE extended permit tcp any any eq www\n - access-list ACL-OUTSIDE extended permit tcp any any eq https\n context: customer_a\n\"\"\"\n\nRETURN = \"\"\"\nupdates:\n description: The set of commands that will be pushed to the remote device\n returned: always\n type: list\n sample: ['...', '...']\n\nresponses:\n description: The set of responses from issuing the commands on the device\n retured: when not check_mode\n type: list\n sample: ['...', '...']\n\"\"\"\nfrom ansible.module_utils.netcfg import NetworkConfig\nfrom ansible.module_utils.asa import NetworkModule\n\n\ndef get_config(module):\n config = module.params['config'] or dict()\n if not config and not module.params['force']:\n config = module.config\n return config\n\n\ndef check_input_acl(lines, module):\n first_line = True\n for line in lines:\n ace = line.split()\n if ace[0] != 'access-list':\n module.fail_json(msg='All lines/commands must begin with \"access-list\" %s is not permitted' % ace[0])\n if len(ace) <= 1:\n module.fail_json(msg='All lines/commainds must contain the name of the access-list')\n if first_line:\n acl_name = ace[1]\n else:\n if acl_name != ace[1]:\n module.fail_json(msg='All lines/commands must use the same access-list %s is not %s' % (ace[1], acl_name))\n first_line = False\n\n return 'access-list %s' % acl_name\n\ndef main():\n\n argument_spec = dict(\n lines=dict(aliases=['commands'], required=True, type='list'),\n before=dict(type='list'),\n after=dict(type='list'),\n match=dict(default='line', choices=['line', 'strict', 'exact']),\n replace=dict(default='line', choices=['line', 'block']),\n force=dict(default=False, type='bool'),\n config=dict()\n )\n\n module = NetworkModule(argument_spec=argument_spec,\n supports_check_mode=True)\n\n lines = module.params['lines']\n\n before = module.params['before']\n after = module.params['after']\n\n match = module.params['match']\n replace = module.params['replace']\n\n module.filter = check_input_acl(lines, module)\n if not module.params['force']:\n commands = candidate.difference(config)\n commands = dumps(commands, 'commands').split('\\n')\n commands = [str(c) for c in commands if c]\n else:\n commands = str(candidate).split('\\n')\n\n if commands:\n if not module.check_mode:\n response = module.config(commands)\n result['responses'] = response\n result['changed'] = True\n\n result['updates'] = commands\n\n module.exit_json(**result)\n\n\nif __name__ == '__main__':\n main()\n", "path": "network/asa/asa_acl.py"}], "after_files": [{"content": "#!/usr/bin/python\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n#\n\nDOCUMENTATION = \"\"\"\n---\nmodule: asa_acl\nversion_added: \"2.2\"\nauthor: \"Patrick Ogenstad (@ogenstad)\"\nshort_description: Manage access-lists on a Cisco ASA\ndescription:\n - This module allows you to work with access-lists on a Cisco ASA device.\nextends_documentation_fragment: asa\noptions:\n lines:\n description:\n - The ordered set of commands that should be configured in the\n section. The commands must be the exact same commands as found\n in the device running-config. Be sure to note the configuration\n command syntanx as some commands are automatically modified by the\n device config parser.\n required: true\n before:\n description:\n - The ordered set of commands to push on to the command stack if\n a change needs to be made. This allows the playbook designer\n the opportunity to perform configuration commands prior to pushing\n any changes without affecting how the set of commands are matched\n against the system\n required: false\n default: null\n after:\n description:\n - The ordered set of commands to append to the end of the command\n stack if a changed needs to be made. Just like with I(before) this\n allows the playbook designer to append a set of commands to be\n executed after the command set.\n required: false\n default: null\n match:\n description:\n - Instructs the module on the way to perform the matching of\n the set of commands against the current device config. If\n match is set to I(line), commands are matched line by line. If\n match is set to I(strict), command lines are matched with respect\n to position. Finally if match is set to I(exact), command lines\n must be an equal match.\n required: false\n default: line\n choices: ['line', 'strict', 'exact']\n replace:\n description:\n - Instructs the module on the way to perform the configuration\n on the device. If the replace argument is set to I(line) then\n the modified lines are pushed to the device in configuration\n mode. If the replace argument is set to I(block) then the entire\n command block is pushed to the device in configuration mode if any\n line is not correct\n required: false\n default: line\n choices: ['line', 'block']\n force:\n description:\n - The force argument instructs the module to not consider the\n current devices running-config. When set to true, this will\n cause the module to push the contents of I(src) into the device\n without first checking if already configured.\n required: false\n default: false\n choices: ['yes', 'no']\n config:\n description:\n - The module, by default, will connect to the remote device and\n retrieve the current running-config to use as a base for comparing\n against the contents of source. There are times when it is not\n desirable to have the task get the current running-config for\n every task in a playbook. The I(config) argument allows the\n implementer to pass in the configuruation to use as the base\n config for comparision.\n required: false\n default: null\n\"\"\"\n\nEXAMPLES = \"\"\"\n\n- asa_acl:\n lines:\n - access-list ACL-ANSIBLE extended permit tcp any any eq 82\n - access-list ACL-ANSIBLE extended permit tcp any any eq www\n - access-list ACL-ANSIBLE extended permit tcp any any eq 97\n - access-list ACL-ANSIBLE extended permit tcp any any eq 98\n - access-list ACL-ANSIBLE extended permit tcp any any eq 99\n before: clear configure access-list ACL-ANSIBLE\n match: strict\n replace: block\n\n- asa_acl:\n lines:\n - access-list ACL-OUTSIDE extended permit tcp any any eq www\n - access-list ACL-OUTSIDE extended permit tcp any any eq https\n context: customer_a\n\"\"\"\n\nRETURN = \"\"\"\nupdates:\n description: The set of commands that will be pushed to the remote device\n returned: always\n type: list\n sample: ['...', '...']\n\nresponses:\n description: The set of responses from issuing the commands on the device\n retured: when not check_mode\n type: list\n sample: ['...', '...']\n\"\"\"\nimport ansible.module_utils.asa\n\nfrom ansible.module_utils.netcfg import NetworkConfig\nfrom ansible.module_utils.network import NetworkModule\n\n\ndef get_config(module):\n config = module.params['config'] or dict()\n if not config and not module.params['force']:\n config = module.config\n return config\n\n\ndef check_input_acl(lines, module):\n first_line = True\n for line in lines:\n ace = line.split()\n if ace[0] != 'access-list':\n module.fail_json(msg='All lines/commands must begin with \"access-list\" %s is not permitted' % ace[0])\n if len(ace) <= 1:\n module.fail_json(msg='All lines/commainds must contain the name of the access-list')\n if first_line:\n acl_name = ace[1]\n else:\n if acl_name != ace[1]:\n module.fail_json(msg='All lines/commands must use the same access-list %s is not %s' % (ace[1], acl_name))\n first_line = False\n\n return 'access-list %s' % acl_name\n\ndef main():\n\n argument_spec = dict(\n lines=dict(aliases=['commands'], required=True, type='list'),\n before=dict(type='list'),\n after=dict(type='list'),\n match=dict(default='line', choices=['line', 'strict', 'exact']),\n replace=dict(default='line', choices=['line', 'block']),\n force=dict(default=False, type='bool'),\n config=dict()\n )\n\n module = NetworkModule(argument_spec=argument_spec,\n supports_check_mode=True)\n\n lines = module.params['lines']\n\n before = module.params['before']\n after = module.params['after']\n\n match = module.params['match']\n replace = module.params['replace']\n\n candidate = NetworkConfig(indent=1)\n candidate.add(lines)\n\n module.filter = check_input_acl(lines, module)\n\n if not module.params['force']:\n contents = get_config(module)\n config = NetworkConfig(indent=1, contents=contents)\n commands = candidate.difference(config)\n commands = dumps(commands, 'commands').split('\\n')\n else:\n commands = str(candidate).split('\\n')\n\n if commands:\n if not module.check_mode:\n commands = [str(c) for c in commands if c]\n response = module.config(commands)\n result['responses'] = response\n result['changed'] = True\n\n result['updates'] = commands\n\n module.exit_json(**result)\n\n\nif __name__ == '__main__':\n main()\n", "path": "network/asa/asa_acl.py"}]}
3,671
305
gh_patches_debug_36013
rasdani/github-patches
git_diff
readthedocs__readthedocs.org-5186
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- UTF8 characters on version slugging -- or slugging in general This ticket came up as part of #1407. We should make sure version slugging is handling UTF8 characters in a sane way. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `readthedocs/builds/version_slug.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 """ 4 Contains logic for handling version slugs. 5 6 Handling slugs for versions is not too straightforward. We need to allow some 7 characters which are uncommon in usual slugs. They are dots and underscores. 8 Usually we want the slug to be the name of the tag or branch corresponding VCS 9 version. However we need to strip url-destroying characters like slashes. 10 11 So the syntax for version slugs should be: 12 13 * Start with a lowercase ascii char or a digit. 14 * All other characters must be lowercase ascii chars, digits or dots. 15 16 If uniqueness is not met for a slug in a project, we append a dash and a letter 17 starting with ``a``. We keep increasing that letter until we have a unique 18 slug. This is used since using numbers in tags is too common and appending 19 another number would be confusing. 20 """ 21 22 import math 23 import re 24 import string 25 from operator import truediv 26 27 from django.db import models 28 from django.utils.encoding import force_text 29 30 31 def get_fields_with_model(cls): 32 """ 33 Replace deprecated function of the same name in Model._meta. 34 35 This replaces deprecated function (as of Django 1.10) in Model._meta as 36 prescrived in the Django docs. 37 https://docs.djangoproject.com/en/1.11/ref/models/meta/#migrating-from-the-old-api 38 """ 39 return [(f, f.model if f.model != cls else None) 40 for f in cls._meta.get_fields() 41 if not f.is_relation or f.one_to_one or 42 (f.many_to_one and f.related_model)] 43 44 45 # Regex breakdown: 46 # [a-z0-9] -- start with alphanumeric value 47 # [-._a-z0-9] -- allow dash, dot, underscore, digit, lowercase ascii 48 # *? -- allow multiple of those, but be not greedy about the matching 49 # (?: ... ) -- wrap everything so that the pattern cannot escape when used in 50 # regexes. 51 VERSION_SLUG_REGEX = '(?:[a-z0-9A-Z][-._a-z0-9A-Z]*?)' 52 53 54 class VersionSlugField(models.CharField): 55 56 """Inspired by ``django_extensions.db.fields.AutoSlugField``.""" 57 58 invalid_chars_re = re.compile('[^-._a-z0-9]') 59 leading_punctuation_re = re.compile('^[-._]+') 60 placeholder = '-' 61 fallback_slug = 'unknown' 62 test_pattern = re.compile('^{pattern}$'.format(pattern=VERSION_SLUG_REGEX)) 63 64 def __init__(self, *args, **kwargs): 65 kwargs.setdefault('db_index', True) 66 67 populate_from = kwargs.pop('populate_from', None) 68 if populate_from is None: 69 raise ValueError("missing 'populate_from' argument") 70 else: 71 self._populate_from = populate_from 72 super().__init__(*args, **kwargs) 73 74 def get_queryset(self, model_cls, slug_field): 75 # pylint: disable=protected-access 76 for field, model in get_fields_with_model(model_cls): 77 if model and field == slug_field: 78 return model._default_manager.all() 79 return model_cls._default_manager.all() 80 81 def slugify(self, content): 82 if not content: 83 return '' 84 85 slugified = content.lower() 86 slugified = self.invalid_chars_re.sub(self.placeholder, slugified) 87 slugified = self.leading_punctuation_re.sub('', slugified) 88 89 if not slugified: 90 return self.fallback_slug 91 return slugified 92 93 def uniquifying_suffix(self, iteration): 94 """ 95 Create a unique suffix. 96 97 This creates a suffix based on the number given as ``iteration``. It 98 will return a value encoded as lowercase ascii letter. So we have an 99 alphabet of 26 letters. The returned suffix will be for example ``_yh`` 100 where ``yh`` is the encoding of ``iteration``. The length of it will be 101 ``math.log(iteration, 26)``. 102 103 Examples:: 104 105 uniquifying_suffix(0) == '_a' 106 uniquifying_suffix(25) == '_z' 107 uniquifying_suffix(26) == '_ba' 108 uniquifying_suffix(52) == '_ca' 109 """ 110 alphabet = string.ascii_lowercase 111 length = len(alphabet) 112 if iteration == 0: 113 power = 0 114 else: 115 power = int(math.log(iteration, length)) 116 current = iteration 117 suffix = '' 118 for exp in reversed(list(range(0, power + 1))): 119 digit = int(truediv(current, length ** exp)) 120 suffix += alphabet[digit] 121 current = current % length ** exp 122 return '_{suffix}'.format(suffix=suffix) 123 124 def create_slug(self, model_instance): 125 """Generate a unique slug for a model instance.""" 126 # pylint: disable=protected-access 127 128 # get fields to populate from and slug field to set 129 slug_field = model_instance._meta.get_field(self.attname) 130 131 slug = self.slugify(getattr(model_instance, self._populate_from)) 132 count = 0 133 134 # strip slug depending on max_length attribute of the slug field 135 # and clean-up 136 slug_len = slug_field.max_length 137 if slug_len: 138 slug = slug[:slug_len] 139 original_slug = slug 140 141 # exclude the current model instance from the queryset used in finding 142 # the next valid slug 143 queryset = self.get_queryset(model_instance.__class__, slug_field) 144 if model_instance.pk: 145 queryset = queryset.exclude(pk=model_instance.pk) 146 147 # form a kwarg dict used to implement any unique_together constraints 148 kwargs = {} 149 for params in model_instance._meta.unique_together: 150 if self.attname in params: 151 for param in params: 152 kwargs[param] = getattr(model_instance, param, None) 153 kwargs[self.attname] = slug 154 155 # increases the number while searching for the next valid slug 156 # depending on the given slug, clean-up 157 while not slug or queryset.filter(**kwargs).exists(): 158 slug = original_slug 159 end = self.uniquifying_suffix(count) 160 end_len = len(end) 161 if slug_len and len(slug) + end_len > slug_len: 162 slug = slug[:slug_len - end_len] 163 slug = slug + end 164 kwargs[self.attname] = slug 165 count += 1 166 167 assert self.test_pattern.match(slug), ( 168 'Invalid generated slug: {slug}'.format(slug=slug) 169 ) 170 return slug 171 172 def pre_save(self, model_instance, add): 173 value = getattr(model_instance, self.attname) 174 # We only create a new slug if none was set yet. 175 if not value and add: 176 value = force_text(self.create_slug(model_instance)) 177 setattr(model_instance, self.attname, value) 178 return value 179 180 def deconstruct(self): 181 name, path, args, kwargs = super().deconstruct() 182 kwargs['populate_from'] = self._populate_from 183 return name, path, args, kwargs 184 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/readthedocs/builds/version_slug.py b/readthedocs/builds/version_slug.py --- a/readthedocs/builds/version_slug.py +++ b/readthedocs/builds/version_slug.py @@ -26,6 +26,7 @@ from django.db import models from django.utils.encoding import force_text +from slugify import slugify as unicode_slugify def get_fields_with_model(cls): @@ -53,13 +54,15 @@ class VersionSlugField(models.CharField): - """Inspired by ``django_extensions.db.fields.AutoSlugField``.""" + """ + Inspired by ``django_extensions.db.fields.AutoSlugField``. - invalid_chars_re = re.compile('[^-._a-z0-9]') - leading_punctuation_re = re.compile('^[-._]+') - placeholder = '-' - fallback_slug = 'unknown' + Uses ``unicode-slugify`` to generate the slug. + """ + + ok_chars = '-._' # dash, dot, underscore test_pattern = re.compile('^{pattern}$'.format(pattern=VERSION_SLUG_REGEX)) + fallback_slug = 'unknown' def __init__(self, *args, **kwargs): kwargs.setdefault('db_index', True) @@ -78,13 +81,42 @@ return model._default_manager.all() return model_cls._default_manager.all() + def _normalize(self, content): + """ + Normalize some invalid characters (/, %, !, ?) to become a dash (``-``). + + .. note:: + + We replace these characters to a dash to keep compatibility with the + old behavior and also because it makes this more readable. + + For example, ``release/1.0`` will become ``release-1.0``. + """ + return re.sub('[/%!?]', '-', content) + def slugify(self, content): + """ + Make ``content`` a valid slug. + + It uses ``unicode-slugify`` behind the scenes which works properly with + Unicode characters. + """ if not content: return '' - slugified = content.lower() - slugified = self.invalid_chars_re.sub(self.placeholder, slugified) - slugified = self.leading_punctuation_re.sub('', slugified) + normalized = self._normalize(content) + slugified = unicode_slugify( + normalized, + only_ascii=True, + spaces=False, + lower=True, + ok=self.ok_chars, + space_replacement='-', + ) + + # Remove first character wile it's an invalid character for the + # beginning of the slug + slugified = slugified.lstrip(self.ok_chars) if not slugified: return self.fallback_slug
{"golden_diff": "diff --git a/readthedocs/builds/version_slug.py b/readthedocs/builds/version_slug.py\n--- a/readthedocs/builds/version_slug.py\n+++ b/readthedocs/builds/version_slug.py\n@@ -26,6 +26,7 @@\n \n from django.db import models\n from django.utils.encoding import force_text\n+from slugify import slugify as unicode_slugify\n \n \n def get_fields_with_model(cls):\n@@ -53,13 +54,15 @@\n \n class VersionSlugField(models.CharField):\n \n- \"\"\"Inspired by ``django_extensions.db.fields.AutoSlugField``.\"\"\"\n+ \"\"\"\n+ Inspired by ``django_extensions.db.fields.AutoSlugField``.\n \n- invalid_chars_re = re.compile('[^-._a-z0-9]')\n- leading_punctuation_re = re.compile('^[-._]+')\n- placeholder = '-'\n- fallback_slug = 'unknown'\n+ Uses ``unicode-slugify`` to generate the slug.\n+ \"\"\"\n+\n+ ok_chars = '-._' # dash, dot, underscore\n test_pattern = re.compile('^{pattern}$'.format(pattern=VERSION_SLUG_REGEX))\n+ fallback_slug = 'unknown'\n \n def __init__(self, *args, **kwargs):\n kwargs.setdefault('db_index', True)\n@@ -78,13 +81,42 @@\n return model._default_manager.all()\n return model_cls._default_manager.all()\n \n+ def _normalize(self, content):\n+ \"\"\"\n+ Normalize some invalid characters (/, %, !, ?) to become a dash (``-``).\n+\n+ .. note::\n+\n+ We replace these characters to a dash to keep compatibility with the\n+ old behavior and also because it makes this more readable.\n+\n+ For example, ``release/1.0`` will become ``release-1.0``.\n+ \"\"\"\n+ return re.sub('[/%!?]', '-', content)\n+\n def slugify(self, content):\n+ \"\"\"\n+ Make ``content`` a valid slug.\n+\n+ It uses ``unicode-slugify`` behind the scenes which works properly with\n+ Unicode characters.\n+ \"\"\"\n if not content:\n return ''\n \n- slugified = content.lower()\n- slugified = self.invalid_chars_re.sub(self.placeholder, slugified)\n- slugified = self.leading_punctuation_re.sub('', slugified)\n+ normalized = self._normalize(content)\n+ slugified = unicode_slugify(\n+ normalized,\n+ only_ascii=True,\n+ spaces=False,\n+ lower=True,\n+ ok=self.ok_chars,\n+ space_replacement='-',\n+ )\n+\n+ # Remove first character wile it's an invalid character for the\n+ # beginning of the slug\n+ slugified = slugified.lstrip(self.ok_chars)\n \n if not slugified:\n return self.fallback_slug\n", "issue": "UTF8 characters on version slugging -- or slugging in general\nThis ticket came up as part of #1407. We should make sure version slugging is handling UTF8 characters in a sane way.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nContains logic for handling version slugs.\n\nHandling slugs for versions is not too straightforward. We need to allow some\ncharacters which are uncommon in usual slugs. They are dots and underscores.\nUsually we want the slug to be the name of the tag or branch corresponding VCS\nversion. However we need to strip url-destroying characters like slashes.\n\nSo the syntax for version slugs should be:\n\n* Start with a lowercase ascii char or a digit.\n* All other characters must be lowercase ascii chars, digits or dots.\n\nIf uniqueness is not met for a slug in a project, we append a dash and a letter\nstarting with ``a``. We keep increasing that letter until we have a unique\nslug. This is used since using numbers in tags is too common and appending\nanother number would be confusing.\n\"\"\"\n\nimport math\nimport re\nimport string\nfrom operator import truediv\n\nfrom django.db import models\nfrom django.utils.encoding import force_text\n\n\ndef get_fields_with_model(cls):\n \"\"\"\n Replace deprecated function of the same name in Model._meta.\n\n This replaces deprecated function (as of Django 1.10) in Model._meta as\n prescrived in the Django docs.\n https://docs.djangoproject.com/en/1.11/ref/models/meta/#migrating-from-the-old-api\n \"\"\"\n return [(f, f.model if f.model != cls else None)\n for f in cls._meta.get_fields()\n if not f.is_relation or f.one_to_one or\n (f.many_to_one and f.related_model)]\n\n\n# Regex breakdown:\n# [a-z0-9] -- start with alphanumeric value\n# [-._a-z0-9] -- allow dash, dot, underscore, digit, lowercase ascii\n# *? -- allow multiple of those, but be not greedy about the matching\n# (?: ... ) -- wrap everything so that the pattern cannot escape when used in\n# regexes.\nVERSION_SLUG_REGEX = '(?:[a-z0-9A-Z][-._a-z0-9A-Z]*?)'\n\n\nclass VersionSlugField(models.CharField):\n\n \"\"\"Inspired by ``django_extensions.db.fields.AutoSlugField``.\"\"\"\n\n invalid_chars_re = re.compile('[^-._a-z0-9]')\n leading_punctuation_re = re.compile('^[-._]+')\n placeholder = '-'\n fallback_slug = 'unknown'\n test_pattern = re.compile('^{pattern}$'.format(pattern=VERSION_SLUG_REGEX))\n\n def __init__(self, *args, **kwargs):\n kwargs.setdefault('db_index', True)\n\n populate_from = kwargs.pop('populate_from', None)\n if populate_from is None:\n raise ValueError(\"missing 'populate_from' argument\")\n else:\n self._populate_from = populate_from\n super().__init__(*args, **kwargs)\n\n def get_queryset(self, model_cls, slug_field):\n # pylint: disable=protected-access\n for field, model in get_fields_with_model(model_cls):\n if model and field == slug_field:\n return model._default_manager.all()\n return model_cls._default_manager.all()\n\n def slugify(self, content):\n if not content:\n return ''\n\n slugified = content.lower()\n slugified = self.invalid_chars_re.sub(self.placeholder, slugified)\n slugified = self.leading_punctuation_re.sub('', slugified)\n\n if not slugified:\n return self.fallback_slug\n return slugified\n\n def uniquifying_suffix(self, iteration):\n \"\"\"\n Create a unique suffix.\n\n This creates a suffix based on the number given as ``iteration``. It\n will return a value encoded as lowercase ascii letter. So we have an\n alphabet of 26 letters. The returned suffix will be for example ``_yh``\n where ``yh`` is the encoding of ``iteration``. The length of it will be\n ``math.log(iteration, 26)``.\n\n Examples::\n\n uniquifying_suffix(0) == '_a'\n uniquifying_suffix(25) == '_z'\n uniquifying_suffix(26) == '_ba'\n uniquifying_suffix(52) == '_ca'\n \"\"\"\n alphabet = string.ascii_lowercase\n length = len(alphabet)\n if iteration == 0:\n power = 0\n else:\n power = int(math.log(iteration, length))\n current = iteration\n suffix = ''\n for exp in reversed(list(range(0, power + 1))):\n digit = int(truediv(current, length ** exp))\n suffix += alphabet[digit]\n current = current % length ** exp\n return '_{suffix}'.format(suffix=suffix)\n\n def create_slug(self, model_instance):\n \"\"\"Generate a unique slug for a model instance.\"\"\"\n # pylint: disable=protected-access\n\n # get fields to populate from and slug field to set\n slug_field = model_instance._meta.get_field(self.attname)\n\n slug = self.slugify(getattr(model_instance, self._populate_from))\n count = 0\n\n # strip slug depending on max_length attribute of the slug field\n # and clean-up\n slug_len = slug_field.max_length\n if slug_len:\n slug = slug[:slug_len]\n original_slug = slug\n\n # exclude the current model instance from the queryset used in finding\n # the next valid slug\n queryset = self.get_queryset(model_instance.__class__, slug_field)\n if model_instance.pk:\n queryset = queryset.exclude(pk=model_instance.pk)\n\n # form a kwarg dict used to implement any unique_together constraints\n kwargs = {}\n for params in model_instance._meta.unique_together:\n if self.attname in params:\n for param in params:\n kwargs[param] = getattr(model_instance, param, None)\n kwargs[self.attname] = slug\n\n # increases the number while searching for the next valid slug\n # depending on the given slug, clean-up\n while not slug or queryset.filter(**kwargs).exists():\n slug = original_slug\n end = self.uniquifying_suffix(count)\n end_len = len(end)\n if slug_len and len(slug) + end_len > slug_len:\n slug = slug[:slug_len - end_len]\n slug = slug + end\n kwargs[self.attname] = slug\n count += 1\n\n assert self.test_pattern.match(slug), (\n 'Invalid generated slug: {slug}'.format(slug=slug)\n )\n return slug\n\n def pre_save(self, model_instance, add):\n value = getattr(model_instance, self.attname)\n # We only create a new slug if none was set yet.\n if not value and add:\n value = force_text(self.create_slug(model_instance))\n setattr(model_instance, self.attname, value)\n return value\n\n def deconstruct(self):\n name, path, args, kwargs = super().deconstruct()\n kwargs['populate_from'] = self._populate_from\n return name, path, args, kwargs\n", "path": "readthedocs/builds/version_slug.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nContains logic for handling version slugs.\n\nHandling slugs for versions is not too straightforward. We need to allow some\ncharacters which are uncommon in usual slugs. They are dots and underscores.\nUsually we want the slug to be the name of the tag or branch corresponding VCS\nversion. However we need to strip url-destroying characters like slashes.\n\nSo the syntax for version slugs should be:\n\n* Start with a lowercase ascii char or a digit.\n* All other characters must be lowercase ascii chars, digits or dots.\n\nIf uniqueness is not met for a slug in a project, we append a dash and a letter\nstarting with ``a``. We keep increasing that letter until we have a unique\nslug. This is used since using numbers in tags is too common and appending\nanother number would be confusing.\n\"\"\"\n\nimport math\nimport re\nimport string\nfrom operator import truediv\n\nfrom django.db import models\nfrom django.utils.encoding import force_text\nfrom slugify import slugify as unicode_slugify\n\n\ndef get_fields_with_model(cls):\n \"\"\"\n Replace deprecated function of the same name in Model._meta.\n\n This replaces deprecated function (as of Django 1.10) in Model._meta as\n prescrived in the Django docs.\n https://docs.djangoproject.com/en/1.11/ref/models/meta/#migrating-from-the-old-api\n \"\"\"\n return [(f, f.model if f.model != cls else None)\n for f in cls._meta.get_fields()\n if not f.is_relation or f.one_to_one or\n (f.many_to_one and f.related_model)]\n\n\n# Regex breakdown:\n# [a-z0-9] -- start with alphanumeric value\n# [-._a-z0-9] -- allow dash, dot, underscore, digit, lowercase ascii\n# *? -- allow multiple of those, but be not greedy about the matching\n# (?: ... ) -- wrap everything so that the pattern cannot escape when used in\n# regexes.\nVERSION_SLUG_REGEX = '(?:[a-z0-9A-Z][-._a-z0-9A-Z]*?)'\n\n\nclass VersionSlugField(models.CharField):\n\n \"\"\"\n Inspired by ``django_extensions.db.fields.AutoSlugField``.\n\n Uses ``unicode-slugify`` to generate the slug.\n \"\"\"\n\n ok_chars = '-._' # dash, dot, underscore\n test_pattern = re.compile('^{pattern}$'.format(pattern=VERSION_SLUG_REGEX))\n fallback_slug = 'unknown'\n\n def __init__(self, *args, **kwargs):\n kwargs.setdefault('db_index', True)\n\n populate_from = kwargs.pop('populate_from', None)\n if populate_from is None:\n raise ValueError(\"missing 'populate_from' argument\")\n else:\n self._populate_from = populate_from\n super().__init__(*args, **kwargs)\n\n def get_queryset(self, model_cls, slug_field):\n # pylint: disable=protected-access\n for field, model in get_fields_with_model(model_cls):\n if model and field == slug_field:\n return model._default_manager.all()\n return model_cls._default_manager.all()\n\n def _normalize(self, content):\n \"\"\"\n Normalize some invalid characters (/, %, !, ?) to become a dash (``-``).\n\n .. note::\n\n We replace these characters to a dash to keep compatibility with the\n old behavior and also because it makes this more readable.\n\n For example, ``release/1.0`` will become ``release-1.0``.\n \"\"\"\n return re.sub('[/%!?]', '-', content)\n\n def slugify(self, content):\n \"\"\"\n Make ``content`` a valid slug.\n\n It uses ``unicode-slugify`` behind the scenes which works properly with\n Unicode characters.\n \"\"\"\n if not content:\n return ''\n\n normalized = self._normalize(content)\n slugified = unicode_slugify(\n normalized,\n only_ascii=True,\n spaces=False,\n lower=True,\n ok=self.ok_chars,\n space_replacement='-',\n )\n\n # Remove first character wile it's an invalid character for the\n # beginning of the slug\n slugified = slugified.lstrip(self.ok_chars)\n\n if not slugified:\n return self.fallback_slug\n return slugified\n\n def uniquifying_suffix(self, iteration):\n \"\"\"\n Create a unique suffix.\n\n This creates a suffix based on the number given as ``iteration``. It\n will return a value encoded as lowercase ascii letter. So we have an\n alphabet of 26 letters. The returned suffix will be for example ``_yh``\n where ``yh`` is the encoding of ``iteration``. The length of it will be\n ``math.log(iteration, 26)``.\n\n Examples::\n\n uniquifying_suffix(0) == '_a'\n uniquifying_suffix(25) == '_z'\n uniquifying_suffix(26) == '_ba'\n uniquifying_suffix(52) == '_ca'\n \"\"\"\n alphabet = string.ascii_lowercase\n length = len(alphabet)\n if iteration == 0:\n power = 0\n else:\n power = int(math.log(iteration, length))\n current = iteration\n suffix = ''\n for exp in reversed(list(range(0, power + 1))):\n digit = int(truediv(current, length ** exp))\n suffix += alphabet[digit]\n current = current % length ** exp\n return '_{suffix}'.format(suffix=suffix)\n\n def create_slug(self, model_instance):\n \"\"\"Generate a unique slug for a model instance.\"\"\"\n # pylint: disable=protected-access\n\n # get fields to populate from and slug field to set\n slug_field = model_instance._meta.get_field(self.attname)\n\n slug = self.slugify(getattr(model_instance, self._populate_from))\n count = 0\n\n # strip slug depending on max_length attribute of the slug field\n # and clean-up\n slug_len = slug_field.max_length\n if slug_len:\n slug = slug[:slug_len]\n original_slug = slug\n\n # exclude the current model instance from the queryset used in finding\n # the next valid slug\n queryset = self.get_queryset(model_instance.__class__, slug_field)\n if model_instance.pk:\n queryset = queryset.exclude(pk=model_instance.pk)\n\n # form a kwarg dict used to implement any unique_together constraints\n kwargs = {}\n for params in model_instance._meta.unique_together:\n if self.attname in params:\n for param in params:\n kwargs[param] = getattr(model_instance, param, None)\n kwargs[self.attname] = slug\n\n # increases the number while searching for the next valid slug\n # depending on the given slug, clean-up\n while not slug or queryset.filter(**kwargs).exists():\n slug = original_slug\n end = self.uniquifying_suffix(count)\n end_len = len(end)\n if slug_len and len(slug) + end_len > slug_len:\n slug = slug[:slug_len - end_len]\n slug = slug + end\n kwargs[self.attname] = slug\n count += 1\n\n assert self.test_pattern.match(slug), (\n 'Invalid generated slug: {slug}'.format(slug=slug)\n )\n return slug\n\n def pre_save(self, model_instance, add):\n value = getattr(model_instance, self.attname)\n # We only create a new slug if none was set yet.\n if not value and add:\n value = force_text(self.create_slug(model_instance))\n setattr(model_instance, self.attname, value)\n return value\n\n def deconstruct(self):\n name, path, args, kwargs = super().deconstruct()\n kwargs['populate_from'] = self._populate_from\n return name, path, args, kwargs\n", "path": "readthedocs/builds/version_slug.py"}]}
2,268
621
gh_patches_debug_4890
rasdani/github-patches
git_diff
gratipay__gratipay.com-2162
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- 500 from credit card page Reticketing from #2155. https://app.getsentry.com/gittip/gittip/group/16333701/ (login required) ``` TypeError: 'NoneType' object has no attribute '__getitem__' Stacktrace (most recent call last): File "site-packages/algorithm.py", line 288, in run new_state = function(**deps.as_kwargs) File "aspen/algorithms/website.py", line 88, in get_response_for_resource return {'response': resource.respond(request)} File "aspen/resources/dynamic_resource.py", line 68, in respond response = self.get_response(context) File "aspen/resources/negotiated_resource.py", line 98, in get_response response.body = render(context) File "aspen/renderers/__init__.py", line 99, in __call__ return self.render_content(context) File "site-packages/aspen_jinja2_renderer.py", line 66, in render_content return self.compiled.render(context).encode(charset) File "jinja2/environment.py", line 969, in render return self.environment.handle_exception(exc_info, True) File "jinja2/environment.py", line 742, in handle_exception reraise(exc_type, exc_value, tb) File "/app/www/credit-card.html.spt", line 1, in top-level template code import traceback File "/app/templates/base.html", line 54, in top-level template code {% block killbox %} File "/app/templates/base.html", line 56, in block "killbox" {% block box %}{% endblock %} File "/app/www/credit-card.html.spt", line 56, in block "box" Gittip.payments.cc.init("{{ balanced.Marketplace.my_marketplace.uri }}", "{{ user.participant.username }}"); ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `gittip/billing/__init__.py` Content: ``` 1 """This module encapsulates billing logic and db access. 2 3 There are three pieces of information for each participant related to billing: 4 5 balanced_customer_href 6 * NULL - This participant has never been billed. 7 * 'deadbeef' - This participant has had a Balanced account created for 8 them, either by adding a credit card or a bank account. 9 last_bill_result 10 * NULL - This participant has not had their credit card charged yet. 11 * '' - This participant has a working card. 12 * <message> - An error message. 13 last_ach_result 14 * NULL - This participant has not wired up a bank account yet. 15 * '' - This participant has a working bank account. 16 * <message> - An error message. 17 18 """ 19 from __future__ import unicode_literals 20 21 import balanced 22 import stripe 23 from aspen.utils import typecheck 24 25 26 def get_balanced_account(db, username, balanced_customer_href): 27 """Find or create a balanced.Account. 28 """ 29 typecheck( username, unicode 30 , balanced_customer_href, (unicode, None) 31 ) 32 33 if balanced_customer_href is None: 34 customer = balanced.Customer(meta={ 35 'username': username, 36 }).save() 37 BALANCED_ACCOUNT = """\ 38 39 UPDATE participants 40 SET balanced_customer_href=%s 41 WHERE username=%s 42 43 """ 44 db.run(BALANCED_ACCOUNT, (customer.href, username)) 45 else: 46 customer = balanced.Customer.fetch(balanced_customer_href) 47 return customer 48 49 50 def associate(db, thing, username, balanced_customer_href, balanced_thing_uri): 51 """Given four unicodes, return a unicode. 52 53 This function attempts to associate the credit card or bank account details 54 referenced by balanced_thing_uri with a Balanced Account. If it fails we 55 log and return a unicode describing the failure. Even for failure we keep 56 balanced_customer_href; we don't reset it to None/NULL. It's useful for 57 loading the previous (bad) info from Balanced in order to prepopulate the 58 form. 59 60 """ 61 typecheck( username, unicode 62 , balanced_customer_href, (unicode, None, balanced.Customer) 63 , balanced_thing_uri, unicode 64 , thing, unicode 65 ) 66 67 if isinstance(balanced_customer_href, balanced.Customer): 68 balanced_account = balanced_customer_href 69 else: 70 balanced_account = get_balanced_account( db 71 , username 72 , balanced_customer_href 73 ) 74 invalidate_on_balanced(thing, balanced_account.href) 75 SQL = "UPDATE participants SET last_%s_result=%%s WHERE username=%%s" 76 try: 77 if thing == "credit card": 78 SQL %= "bill" 79 obj = balanced.Card.fetch(balanced_thing_uri) 80 #add = balanced_account.add_card 81 82 else: 83 assert thing == "bank account", thing # sanity check 84 SQL %= "ach" 85 obj = balanced.BankAccount.fetch(balanced_thing_uri) 86 #add = balanced_account.add_bank_account 87 88 obj.associate_to_customer(balanced_account) 89 except balanced.exc.HTTPError as err: 90 error = err.message.message.decode('UTF-8') # XXX UTF-8? 91 else: 92 error = '' 93 typecheck(error, unicode) 94 95 db.run(SQL, (error, username)) 96 return error 97 98 99 def invalidate_on_balanced(thing, balanced_customer_href): 100 """XXX Things in balanced cannot be deleted at the moment. 101 102 Instead we mark all valid cards as invalid which will restrict against 103 anyone being able to issue charges against them in the future. 104 105 See: https://github.com/balanced/balanced-api/issues/22 106 107 """ 108 assert thing in ("credit card", "bank account") 109 typecheck(balanced_customer_href, (str, unicode)) 110 111 customer = balanced.Customer.fetch(balanced_customer_href) 112 things = customer.cards if thing == "credit card" else customer.bank_accounts 113 114 for _thing in things: 115 _thing.unstore() 116 117 118 def clear(db, thing, username, balanced_customer_href): 119 typecheck( thing, unicode 120 , username, unicode 121 , balanced_customer_href, (unicode, str) 122 ) 123 assert thing in ("credit card", "bank account"), thing 124 invalidate_on_balanced(thing, balanced_customer_href) 125 CLEAR = """\ 126 127 UPDATE participants 128 SET last_%s_result=NULL 129 WHERE username=%%s 130 131 """ % ("bill" if thing == "credit card" else "ach") 132 db.run(CLEAR, (username,)) 133 134 135 def store_error(db, thing, username, msg): 136 typecheck(thing, unicode, username, unicode, msg, unicode) 137 assert thing in ("credit card", "bank account"), thing 138 ERROR = """\ 139 140 UPDATE participants 141 SET last_%s_result=%%s 142 WHERE username=%%s 143 144 """ % ("bill" if thing == "credit card" else "ach") 145 db.run(ERROR, (msg, username)) 146 147 148 # Card 149 # ==== 150 # While we're migrating data we need to support loading data from both Stripe 151 # and Balanced. 152 153 class StripeCard(object): 154 """This is a dict-like wrapper around a Stripe PaymentMethod. 155 """ 156 157 _customer = None # underlying stripe.Customer object 158 159 def __init__(self, stripe_customer_id): 160 """Given a Stripe customer id, load data from Stripe. 161 """ 162 if stripe_customer_id is not None: 163 self._customer = stripe.Customer.retrieve(stripe_customer_id) 164 165 def _get(self, name, default=""): 166 """Given a name, return a string. 167 """ 168 out = "" 169 if self._customer is not None: 170 out = self._customer.get('active_card', {}).get(name, "") 171 if out is None: 172 out = default 173 return out 174 175 def __getitem__(self, name): 176 """Given a name, return a string. 177 """ 178 if name == 'id': 179 out = self._customer.id if self._customer is not None else None 180 elif name == 'last4': 181 out = self._get('last4') 182 if out: 183 out = "************" + out 184 else: 185 name = { 'address_1': 'address_line1' 186 , 'address_2': 'address_line2' 187 , 'state': 'address_state' 188 , 'zip': 'address_zip' 189 }.get(name, name) 190 out = self._get(name) 191 return out 192 193 194 class BalancedThing(object): 195 """Represent either a credit card or a bank account. 196 """ 197 198 thing_type = None # either 'card' or 'bank_account' 199 keys_to_attr_paths = None # set to a mapping in subclasses 200 201 _customer = None # underlying balanced.Customer object 202 _thing = None # underlying balanced.{BankAccount,Card} object 203 204 def __getitem__(self, key): 205 """Given a name, return a unicode. 206 207 Allow subclasses to provide a flat set of keys, which, under the hood, 208 might be nested attributes and/or keys. The traversal path is relative 209 to _thing (not self!). 210 211 """ 212 attr_path = self.keys_to_attr_paths.get(key, key) 213 214 out = None 215 if self._customer is not None and self._thing is not None: 216 out = self._thing 217 for val in attr_path.split('.'): 218 if type(out) is dict: 219 # this lets us reach into the meta dict 220 out = out.get(val) 221 else: 222 try: 223 out = getattr(out, val) 224 except AttributeError: 225 raise KeyError("{} not found".format(val)) 226 if out is None: 227 break 228 return out 229 230 def __init__(self, balanced_customer_href): 231 """Given a Balanced account_uri, load data from Balanced. 232 """ 233 if balanced_customer_href is None: 234 return 235 236 # XXX Indexing is borken. See: 237 # https://github.com/balanced/balanced-python/issues/10 238 239 self._customer = balanced.Customer.fetch(balanced_customer_href) 240 241 things = getattr(self._customer, self.thing_type+'s')\ 242 .filter(is_valid=True).all() 243 nvalid = len(things) 244 245 if nvalid == 0: 246 self._thing = None 247 elif nvalid == 1: 248 self._thing = things[0] 249 else: 250 msg = "%s has %d valid %ss" 251 msg %= (balanced_customer_href, len(things), self.thing_type) 252 raise RuntimeError(msg) 253 254 @property 255 def is_setup(self): 256 return self._thing is not None 257 258 259 class BalancedCard(BalancedThing): 260 """This is a dict-like wrapper around a Balanced credit card. 261 """ 262 263 thing_type = 'card' 264 265 keys_to_attr_paths = { 266 'id': 'customer.href', 267 'address_1': 'address.line1', 268 'address_2': 'meta.address_2', 269 'country': 'meta.country', 270 'city_town': 'meta.city_town', 271 'zip': 'address.postal_code', 272 # gittip is saving the state in the meta field 273 # for compatibility with legacy customers 274 'state': 'meta.region', 275 'last4': 'number', 276 'last_four': 'number', 277 } 278 279 280 class BalancedBankAccount(BalancedThing): 281 """This is a dict-like wrapper around a Balanced bank account. 282 """ 283 284 thing_type = 'bank_account' 285 286 keys_to_attr_paths = { 287 'customer_href': 'customer.href', 288 } 289 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/gittip/billing/__init__.py b/gittip/billing/__init__.py --- a/gittip/billing/__init__.py +++ b/gittip/billing/__init__.py @@ -225,6 +225,11 @@ raise KeyError("{} not found".format(val)) if out is None: break + + if out is None: + # Default to ''; see https://github.com/gittip/www.gittip.com/issues/2161. + out = '' + return out def __init__(self, balanced_customer_href):
{"golden_diff": "diff --git a/gittip/billing/__init__.py b/gittip/billing/__init__.py\n--- a/gittip/billing/__init__.py\n+++ b/gittip/billing/__init__.py\n@@ -225,6 +225,11 @@\n raise KeyError(\"{} not found\".format(val))\n if out is None:\n break\n+\n+ if out is None:\n+ # Default to ''; see https://github.com/gittip/www.gittip.com/issues/2161.\n+ out = ''\n+\n return out\n \n def __init__(self, balanced_customer_href):\n", "issue": "500 from credit card page\nReticketing from #2155.\n\nhttps://app.getsentry.com/gittip/gittip/group/16333701/ (login required)\n\n```\nTypeError: 'NoneType' object has no attribute '__getitem__'\n\nStacktrace (most recent call last):\n\n File \"site-packages/algorithm.py\", line 288, in run\n new_state = function(**deps.as_kwargs)\n File \"aspen/algorithms/website.py\", line 88, in get_response_for_resource\n return {'response': resource.respond(request)}\n File \"aspen/resources/dynamic_resource.py\", line 68, in respond\n response = self.get_response(context)\n File \"aspen/resources/negotiated_resource.py\", line 98, in get_response\n response.body = render(context)\n File \"aspen/renderers/__init__.py\", line 99, in __call__\n return self.render_content(context)\n File \"site-packages/aspen_jinja2_renderer.py\", line 66, in render_content\n return self.compiled.render(context).encode(charset)\n File \"jinja2/environment.py\", line 969, in render\n return self.environment.handle_exception(exc_info, True)\n File \"jinja2/environment.py\", line 742, in handle_exception\n reraise(exc_type, exc_value, tb)\n File \"/app/www/credit-card.html.spt\", line 1, in top-level template code\n import traceback\n File \"/app/templates/base.html\", line 54, in top-level template code\n {% block killbox %}\n File \"/app/templates/base.html\", line 56, in block \"killbox\"\n {% block box %}{% endblock %}\n File \"/app/www/credit-card.html.spt\", line 56, in block \"box\"\n Gittip.payments.cc.init(\"{{ balanced.Marketplace.my_marketplace.uri }}\", \"{{ user.participant.username }}\");\n```\n\n", "before_files": [{"content": "\"\"\"This module encapsulates billing logic and db access.\n\nThere are three pieces of information for each participant related to billing:\n\n balanced_customer_href\n * NULL - This participant has never been billed.\n * 'deadbeef' - This participant has had a Balanced account created for\n them, either by adding a credit card or a bank account.\n last_bill_result\n * NULL - This participant has not had their credit card charged yet.\n * '' - This participant has a working card.\n * <message> - An error message.\n last_ach_result\n * NULL - This participant has not wired up a bank account yet.\n * '' - This participant has a working bank account.\n * <message> - An error message.\n\n\"\"\"\nfrom __future__ import unicode_literals\n\nimport balanced\nimport stripe\nfrom aspen.utils import typecheck\n\n\ndef get_balanced_account(db, username, balanced_customer_href):\n \"\"\"Find or create a balanced.Account.\n \"\"\"\n typecheck( username, unicode\n , balanced_customer_href, (unicode, None)\n )\n\n if balanced_customer_href is None:\n customer = balanced.Customer(meta={\n 'username': username,\n }).save()\n BALANCED_ACCOUNT = \"\"\"\\\n\n UPDATE participants\n SET balanced_customer_href=%s\n WHERE username=%s\n\n \"\"\"\n db.run(BALANCED_ACCOUNT, (customer.href, username))\n else:\n customer = balanced.Customer.fetch(balanced_customer_href)\n return customer\n\n\ndef associate(db, thing, username, balanced_customer_href, balanced_thing_uri):\n \"\"\"Given four unicodes, return a unicode.\n\n This function attempts to associate the credit card or bank account details\n referenced by balanced_thing_uri with a Balanced Account. If it fails we\n log and return a unicode describing the failure. Even for failure we keep\n balanced_customer_href; we don't reset it to None/NULL. It's useful for\n loading the previous (bad) info from Balanced in order to prepopulate the\n form.\n\n \"\"\"\n typecheck( username, unicode\n , balanced_customer_href, (unicode, None, balanced.Customer)\n , balanced_thing_uri, unicode\n , thing, unicode\n )\n\n if isinstance(balanced_customer_href, balanced.Customer):\n balanced_account = balanced_customer_href\n else:\n balanced_account = get_balanced_account( db\n , username\n , balanced_customer_href\n )\n invalidate_on_balanced(thing, balanced_account.href)\n SQL = \"UPDATE participants SET last_%s_result=%%s WHERE username=%%s\"\n try:\n if thing == \"credit card\":\n SQL %= \"bill\"\n obj = balanced.Card.fetch(balanced_thing_uri)\n #add = balanced_account.add_card\n\n else:\n assert thing == \"bank account\", thing # sanity check\n SQL %= \"ach\"\n obj = balanced.BankAccount.fetch(balanced_thing_uri)\n #add = balanced_account.add_bank_account\n\n obj.associate_to_customer(balanced_account)\n except balanced.exc.HTTPError as err:\n error = err.message.message.decode('UTF-8') # XXX UTF-8?\n else:\n error = ''\n typecheck(error, unicode)\n\n db.run(SQL, (error, username))\n return error\n\n\ndef invalidate_on_balanced(thing, balanced_customer_href):\n \"\"\"XXX Things in balanced cannot be deleted at the moment.\n\n Instead we mark all valid cards as invalid which will restrict against\n anyone being able to issue charges against them in the future.\n\n See: https://github.com/balanced/balanced-api/issues/22\n\n \"\"\"\n assert thing in (\"credit card\", \"bank account\")\n typecheck(balanced_customer_href, (str, unicode))\n\n customer = balanced.Customer.fetch(balanced_customer_href)\n things = customer.cards if thing == \"credit card\" else customer.bank_accounts\n\n for _thing in things:\n _thing.unstore()\n\n\ndef clear(db, thing, username, balanced_customer_href):\n typecheck( thing, unicode\n , username, unicode\n , balanced_customer_href, (unicode, str)\n )\n assert thing in (\"credit card\", \"bank account\"), thing\n invalidate_on_balanced(thing, balanced_customer_href)\n CLEAR = \"\"\"\\\n\n UPDATE participants\n SET last_%s_result=NULL\n WHERE username=%%s\n\n \"\"\" % (\"bill\" if thing == \"credit card\" else \"ach\")\n db.run(CLEAR, (username,))\n\n\ndef store_error(db, thing, username, msg):\n typecheck(thing, unicode, username, unicode, msg, unicode)\n assert thing in (\"credit card\", \"bank account\"), thing\n ERROR = \"\"\"\\\n\n UPDATE participants\n SET last_%s_result=%%s\n WHERE username=%%s\n\n \"\"\" % (\"bill\" if thing == \"credit card\" else \"ach\")\n db.run(ERROR, (msg, username))\n\n\n# Card\n# ====\n# While we're migrating data we need to support loading data from both Stripe\n# and Balanced.\n\nclass StripeCard(object):\n \"\"\"This is a dict-like wrapper around a Stripe PaymentMethod.\n \"\"\"\n\n _customer = None # underlying stripe.Customer object\n\n def __init__(self, stripe_customer_id):\n \"\"\"Given a Stripe customer id, load data from Stripe.\n \"\"\"\n if stripe_customer_id is not None:\n self._customer = stripe.Customer.retrieve(stripe_customer_id)\n\n def _get(self, name, default=\"\"):\n \"\"\"Given a name, return a string.\n \"\"\"\n out = \"\"\n if self._customer is not None:\n out = self._customer.get('active_card', {}).get(name, \"\")\n if out is None:\n out = default\n return out\n\n def __getitem__(self, name):\n \"\"\"Given a name, return a string.\n \"\"\"\n if name == 'id':\n out = self._customer.id if self._customer is not None else None\n elif name == 'last4':\n out = self._get('last4')\n if out:\n out = \"************\" + out\n else:\n name = { 'address_1': 'address_line1'\n , 'address_2': 'address_line2'\n , 'state': 'address_state'\n , 'zip': 'address_zip'\n }.get(name, name)\n out = self._get(name)\n return out\n\n\nclass BalancedThing(object):\n \"\"\"Represent either a credit card or a bank account.\n \"\"\"\n\n thing_type = None # either 'card' or 'bank_account'\n keys_to_attr_paths = None # set to a mapping in subclasses\n\n _customer = None # underlying balanced.Customer object\n _thing = None # underlying balanced.{BankAccount,Card} object\n\n def __getitem__(self, key):\n \"\"\"Given a name, return a unicode.\n\n Allow subclasses to provide a flat set of keys, which, under the hood,\n might be nested attributes and/or keys. The traversal path is relative\n to _thing (not self!).\n\n \"\"\"\n attr_path = self.keys_to_attr_paths.get(key, key)\n\n out = None\n if self._customer is not None and self._thing is not None:\n out = self._thing\n for val in attr_path.split('.'):\n if type(out) is dict:\n # this lets us reach into the meta dict\n out = out.get(val)\n else:\n try:\n out = getattr(out, val)\n except AttributeError:\n raise KeyError(\"{} not found\".format(val))\n if out is None:\n break\n return out\n\n def __init__(self, balanced_customer_href):\n \"\"\"Given a Balanced account_uri, load data from Balanced.\n \"\"\"\n if balanced_customer_href is None:\n return\n\n # XXX Indexing is borken. See:\n # https://github.com/balanced/balanced-python/issues/10\n\n self._customer = balanced.Customer.fetch(balanced_customer_href)\n\n things = getattr(self._customer, self.thing_type+'s')\\\n .filter(is_valid=True).all()\n nvalid = len(things)\n\n if nvalid == 0:\n self._thing = None\n elif nvalid == 1:\n self._thing = things[0]\n else:\n msg = \"%s has %d valid %ss\"\n msg %= (balanced_customer_href, len(things), self.thing_type)\n raise RuntimeError(msg)\n\n @property\n def is_setup(self):\n return self._thing is not None\n\n\nclass BalancedCard(BalancedThing):\n \"\"\"This is a dict-like wrapper around a Balanced credit card.\n \"\"\"\n\n thing_type = 'card'\n\n keys_to_attr_paths = {\n 'id': 'customer.href',\n 'address_1': 'address.line1',\n 'address_2': 'meta.address_2',\n 'country': 'meta.country',\n 'city_town': 'meta.city_town',\n 'zip': 'address.postal_code',\n # gittip is saving the state in the meta field\n # for compatibility with legacy customers\n 'state': 'meta.region',\n 'last4': 'number',\n 'last_four': 'number',\n }\n\n\nclass BalancedBankAccount(BalancedThing):\n \"\"\"This is a dict-like wrapper around a Balanced bank account.\n \"\"\"\n\n thing_type = 'bank_account'\n\n keys_to_attr_paths = {\n 'customer_href': 'customer.href',\n }\n", "path": "gittip/billing/__init__.py"}], "after_files": [{"content": "\"\"\"This module encapsulates billing logic and db access.\n\nThere are three pieces of information for each participant related to billing:\n\n balanced_customer_href\n * NULL - This participant has never been billed.\n * 'deadbeef' - This participant has had a Balanced account created for\n them, either by adding a credit card or a bank account.\n last_bill_result\n * NULL - This participant has not had their credit card charged yet.\n * '' - This participant has a working card.\n * <message> - An error message.\n last_ach_result\n * NULL - This participant has not wired up a bank account yet.\n * '' - This participant has a working bank account.\n * <message> - An error message.\n\n\"\"\"\nfrom __future__ import unicode_literals\n\nimport balanced\nimport stripe\nfrom aspen.utils import typecheck\n\n\ndef get_balanced_account(db, username, balanced_customer_href):\n \"\"\"Find or create a balanced.Account.\n \"\"\"\n typecheck( username, unicode\n , balanced_customer_href, (unicode, None)\n )\n\n if balanced_customer_href is None:\n customer = balanced.Customer(meta={\n 'username': username,\n }).save()\n BALANCED_ACCOUNT = \"\"\"\\\n\n UPDATE participants\n SET balanced_customer_href=%s\n WHERE username=%s\n\n \"\"\"\n db.run(BALANCED_ACCOUNT, (customer.href, username))\n else:\n customer = balanced.Customer.fetch(balanced_customer_href)\n return customer\n\n\ndef associate(db, thing, username, balanced_customer_href, balanced_thing_uri):\n \"\"\"Given four unicodes, return a unicode.\n\n This function attempts to associate the credit card or bank account details\n referenced by balanced_thing_uri with a Balanced Account. If it fails we\n log and return a unicode describing the failure. Even for failure we keep\n balanced_customer_href; we don't reset it to None/NULL. It's useful for\n loading the previous (bad) info from Balanced in order to prepopulate the\n form.\n\n \"\"\"\n typecheck( username, unicode\n , balanced_customer_href, (unicode, None, balanced.Customer)\n , balanced_thing_uri, unicode\n , thing, unicode\n )\n\n if isinstance(balanced_customer_href, balanced.Customer):\n balanced_account = balanced_customer_href\n else:\n balanced_account = get_balanced_account( db\n , username\n , balanced_customer_href\n )\n invalidate_on_balanced(thing, balanced_account.href)\n SQL = \"UPDATE participants SET last_%s_result=%%s WHERE username=%%s\"\n try:\n if thing == \"credit card\":\n SQL %= \"bill\"\n obj = balanced.Card.fetch(balanced_thing_uri)\n #add = balanced_account.add_card\n\n else:\n assert thing == \"bank account\", thing # sanity check\n SQL %= \"ach\"\n obj = balanced.BankAccount.fetch(balanced_thing_uri)\n #add = balanced_account.add_bank_account\n\n obj.associate_to_customer(balanced_account)\n except balanced.exc.HTTPError as err:\n error = err.message.message.decode('UTF-8') # XXX UTF-8?\n else:\n error = ''\n typecheck(error, unicode)\n\n db.run(SQL, (error, username))\n return error\n\n\ndef invalidate_on_balanced(thing, balanced_customer_href):\n \"\"\"XXX Things in balanced cannot be deleted at the moment.\n\n Instead we mark all valid cards as invalid which will restrict against\n anyone being able to issue charges against them in the future.\n\n See: https://github.com/balanced/balanced-api/issues/22\n\n \"\"\"\n assert thing in (\"credit card\", \"bank account\")\n typecheck(balanced_customer_href, (str, unicode))\n\n customer = balanced.Customer.fetch(balanced_customer_href)\n things = customer.cards if thing == \"credit card\" else customer.bank_accounts\n\n for _thing in things:\n _thing.unstore()\n\n\ndef clear(db, thing, username, balanced_customer_href):\n typecheck( thing, unicode\n , username, unicode\n , balanced_customer_href, (unicode, str)\n )\n assert thing in (\"credit card\", \"bank account\"), thing\n invalidate_on_balanced(thing, balanced_customer_href)\n CLEAR = \"\"\"\\\n\n UPDATE participants\n SET last_%s_result=NULL\n WHERE username=%%s\n\n \"\"\" % (\"bill\" if thing == \"credit card\" else \"ach\")\n db.run(CLEAR, (username,))\n\n\ndef store_error(db, thing, username, msg):\n typecheck(thing, unicode, username, unicode, msg, unicode)\n assert thing in (\"credit card\", \"bank account\"), thing\n ERROR = \"\"\"\\\n\n UPDATE participants\n SET last_%s_result=%%s\n WHERE username=%%s\n\n \"\"\" % (\"bill\" if thing == \"credit card\" else \"ach\")\n db.run(ERROR, (msg, username))\n\n\n# Card\n# ====\n# While we're migrating data we need to support loading data from both Stripe\n# and Balanced.\n\nclass StripeCard(object):\n \"\"\"This is a dict-like wrapper around a Stripe PaymentMethod.\n \"\"\"\n\n _customer = None # underlying stripe.Customer object\n\n def __init__(self, stripe_customer_id):\n \"\"\"Given a Stripe customer id, load data from Stripe.\n \"\"\"\n if stripe_customer_id is not None:\n self._customer = stripe.Customer.retrieve(stripe_customer_id)\n\n def _get(self, name, default=\"\"):\n \"\"\"Given a name, return a string.\n \"\"\"\n out = \"\"\n if self._customer is not None:\n out = self._customer.get('active_card', {}).get(name, \"\")\n if out is None:\n out = default\n return out\n\n def __getitem__(self, name):\n \"\"\"Given a name, return a string.\n \"\"\"\n if name == 'id':\n out = self._customer.id if self._customer is not None else None\n elif name == 'last4':\n out = self._get('last4')\n if out:\n out = \"************\" + out\n else:\n name = { 'address_1': 'address_line1'\n , 'address_2': 'address_line2'\n , 'state': 'address_state'\n , 'zip': 'address_zip'\n }.get(name, name)\n out = self._get(name)\n return out\n\n\nclass BalancedThing(object):\n \"\"\"Represent either a credit card or a bank account.\n \"\"\"\n\n thing_type = None # either 'card' or 'bank_account'\n keys_to_attr_paths = None # set to a mapping in subclasses\n\n _customer = None # underlying balanced.Customer object\n _thing = None # underlying balanced.{BankAccount,Card} object\n\n def __getitem__(self, key):\n \"\"\"Given a name, return a unicode.\n\n Allow subclasses to provide a flat set of keys, which, under the hood,\n might be nested attributes and/or keys. The traversal path is relative\n to _thing (not self!).\n\n \"\"\"\n attr_path = self.keys_to_attr_paths.get(key, key)\n\n out = None\n if self._customer is not None and self._thing is not None:\n out = self._thing\n for val in attr_path.split('.'):\n if type(out) is dict:\n # this lets us reach into the meta dict\n out = out.get(val)\n else:\n try:\n out = getattr(out, val)\n except AttributeError:\n raise KeyError(\"{} not found\".format(val))\n if out is None:\n break\n\n if out is None:\n # Default to ''; see https://github.com/gittip/www.gittip.com/issues/2161.\n out = ''\n\n return out\n\n def __init__(self, balanced_customer_href):\n \"\"\"Given a Balanced account_uri, load data from Balanced.\n \"\"\"\n if balanced_customer_href is None:\n return\n\n # XXX Indexing is borken. See:\n # https://github.com/balanced/balanced-python/issues/10\n\n self._customer = balanced.Customer.fetch(balanced_customer_href)\n\n things = getattr(self._customer, self.thing_type+'s')\\\n .filter(is_valid=True).all()\n nvalid = len(things)\n\n if nvalid == 0:\n self._thing = None\n elif nvalid == 1:\n self._thing = things[0]\n else:\n msg = \"%s has %d valid %ss\"\n msg %= (balanced_customer_href, len(things), self.thing_type)\n raise RuntimeError(msg)\n\n @property\n def is_setup(self):\n return self._thing is not None\n\n\nclass BalancedCard(BalancedThing):\n \"\"\"This is a dict-like wrapper around a Balanced credit card.\n \"\"\"\n\n thing_type = 'card'\n\n keys_to_attr_paths = {\n 'id': 'customer.href',\n 'address_1': 'address.line1',\n 'address_2': 'meta.address_2',\n 'country': 'meta.country',\n 'city_town': 'meta.city_town',\n 'zip': 'address.postal_code',\n # gittip is saving the state in the meta field\n # for compatibility with legacy customers\n 'state': 'meta.region',\n 'last4': 'number',\n 'last_four': 'number',\n }\n\n\nclass BalancedBankAccount(BalancedThing):\n \"\"\"This is a dict-like wrapper around a Balanced bank account.\n \"\"\"\n\n thing_type = 'bank_account'\n\n keys_to_attr_paths = {\n 'customer_href': 'customer.href',\n }\n", "path": "gittip/billing/__init__.py"}]}
3,539
137
gh_patches_debug_16542
rasdani/github-patches
git_diff
Kinto__kinto-1637
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Crash in batch view deserialization ``` AttributeError: 'bytes' object has no attribute 'get' File "pyramid/tweens.py", line 39, in excview_tween response = handler(request) File "kinto/core/events.py", line 76, in tween response = handler(request) File "pyramid/router.py", line 156, in handle_request view_name File "pyramid/view.py", line 642, in _call_view response = view_callable(context, request) File "pyramid/config/views.py", line 181, in __call__ return view(context, request) File "pyramid/viewderivers.py", line 390, in attr_view return view(context, request) File "pyramid/viewderivers.py", line 368, in predicate_wrapper return view(context, request) File "pyramid/viewderivers.py", line 439, in rendered_view result = view(context, request) File "pyramid/viewderivers.py", line 148, in _requestonly_view response = view(request) File "cornice/service.py", line 487, in wrapper validator(request, **args) File "cornice/validators/_colander.py", line 113, in validator deserialized = schema.deserialize(cstruct) File "colander/__init__.py", line 2073, in deserialize appstruct = self.typ.deserialize(self, cstruct) File "colander/__init__.py", line 724, in deserialize return self._impl(node, cstruct, callback) File "colander/__init__.py", line 683, in _impl sub_result = callback(subnode, subval) File "colander/__init__.py", line 722, in callback return subnode.deserialize(subcstruct) File "kinto/core/views/batch.py", line 68, in deserialize defaults = cstruct.get('defaults') ``` Crash in batch view deserialization ``` AttributeError: 'bytes' object has no attribute 'get' File "pyramid/tweens.py", line 39, in excview_tween response = handler(request) File "kinto/core/events.py", line 76, in tween response = handler(request) File "pyramid/router.py", line 156, in handle_request view_name File "pyramid/view.py", line 642, in _call_view response = view_callable(context, request) File "pyramid/config/views.py", line 181, in __call__ return view(context, request) File "pyramid/viewderivers.py", line 390, in attr_view return view(context, request) File "pyramid/viewderivers.py", line 368, in predicate_wrapper return view(context, request) File "pyramid/viewderivers.py", line 439, in rendered_view result = view(context, request) File "pyramid/viewderivers.py", line 148, in _requestonly_view response = view(request) File "cornice/service.py", line 487, in wrapper validator(request, **args) File "cornice/validators/_colander.py", line 113, in validator deserialized = schema.deserialize(cstruct) File "colander/__init__.py", line 2073, in deserialize appstruct = self.typ.deserialize(self, cstruct) File "colander/__init__.py", line 724, in deserialize return self._impl(node, cstruct, callback) File "colander/__init__.py", line 683, in _impl sub_result = callback(subnode, subval) File "colander/__init__.py", line 722, in callback return subnode.deserialize(subcstruct) File "kinto/core/views/batch.py", line 68, in deserialize defaults = cstruct.get('defaults') ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kinto/core/views/batch.py` Content: ``` 1 import logging 2 3 import colander 4 from cornice.validators import colander_validator 5 from pyramid import httpexceptions 6 from pyramid.security import NO_PERMISSION_REQUIRED 7 8 from kinto.core import errors 9 from kinto.core import Service 10 from kinto.core.errors import ErrorSchema 11 from kinto.core.utils import merge_dicts, build_request, build_response 12 13 14 subrequest_logger = logging.getLogger('subrequest.summary') 15 16 valid_http_method = colander.OneOf(('GET', 'HEAD', 'DELETE', 'TRACE', 17 'POST', 'PUT', 'PATCH')) 18 19 20 def string_values(node, cstruct): 21 """Validate that a ``colander.Mapping`` only has strings in its values. 22 23 .. warning:: 24 25 Should be associated to a ``colander.Mapping`` schema node. 26 """ 27 are_strings = [isinstance(v, str) for v in cstruct.values()] 28 if not all(are_strings): 29 error_msg = '{} contains non string value'.format(cstruct) 30 raise colander.Invalid(node, error_msg) 31 32 33 class BatchRequestSchema(colander.MappingSchema): 34 method = colander.SchemaNode(colander.String(), 35 validator=valid_http_method, 36 missing=colander.drop) 37 path = colander.SchemaNode(colander.String(), 38 validator=colander.Regex('^/')) 39 headers = colander.SchemaNode(colander.Mapping(unknown='preserve'), 40 validator=string_values, 41 missing=colander.drop) 42 body = colander.SchemaNode(colander.Mapping(unknown='preserve'), 43 missing=colander.drop) 44 45 @staticmethod 46 def schema_type(): 47 return colander.Mapping(unknown='raise') 48 49 50 class BatchPayloadSchema(colander.MappingSchema): 51 defaults = BatchRequestSchema(missing=colander.drop).clone() 52 requests = colander.SchemaNode(colander.Sequence(), 53 BatchRequestSchema()) 54 55 @staticmethod 56 def schema_type(): 57 return colander.Mapping(unknown='raise') 58 59 def __init__(self, *args, **kwargs): 60 super().__init__(*args, **kwargs) 61 # On defaults, path is not mandatory. 62 self.get('defaults').get('path').missing = colander.drop 63 64 def deserialize(self, cstruct=colander.null): 65 """Preprocess received data to carefully merge defaults. 66 """ 67 if cstruct is not colander.null: 68 defaults = cstruct.get('defaults') 69 requests = cstruct.get('requests') 70 if isinstance(defaults, dict) and isinstance(requests, list): 71 for request in requests: 72 if isinstance(request, dict): 73 merge_dicts(request, defaults) 74 return super().deserialize(cstruct) 75 76 77 class BatchRequest(colander.MappingSchema): 78 body = BatchPayloadSchema() 79 80 81 class BatchResponseSchema(colander.MappingSchema): 82 status = colander.SchemaNode(colander.Integer()) 83 path = colander.SchemaNode(colander.String()) 84 headers = colander.SchemaNode(colander.Mapping(unknown='preserve'), 85 validator=string_values, 86 missing=colander.drop) 87 body = colander.SchemaNode(colander.Mapping(unknown='preserve'), 88 missing=colander.drop) 89 90 91 class BatchResponseBodySchema(colander.MappingSchema): 92 responses = colander.SequenceSchema(BatchResponseSchema(missing=colander.drop)) 93 94 95 class BatchResponse(colander.MappingSchema): 96 body = BatchResponseBodySchema() 97 98 99 class ErrorResponseSchema(colander.MappingSchema): 100 body = ErrorSchema() 101 102 103 batch_responses = { 104 '200': BatchResponse(description='Return a list of operation responses.'), 105 '400': ErrorResponseSchema(description='The request was badly formatted.'), 106 'default': ErrorResponseSchema(description='an unknown error occurred.') 107 } 108 109 batch = Service(name='batch', path='/batch', 110 description='Batch operations') 111 112 113 @batch.post(schema=BatchRequest, 114 validators=(colander_validator,), 115 permission=NO_PERMISSION_REQUIRED, 116 tags=['Batch'], operation_id='batch', 117 response_schemas=batch_responses) 118 def post_batch(request): 119 requests = request.validated['body']['requests'] 120 121 request.log_context(batch_size=len(requests)) 122 123 limit = request.registry.settings['batch_max_requests'] 124 if limit and len(requests) > int(limit): 125 error_msg = 'Number of requests is limited to {}'.format(limit) 126 request.errors.add('body', 'requests', error_msg) 127 return 128 129 if any([batch.path in req['path'] for req in requests]): 130 error_msg = 'Recursive call on {} endpoint is forbidden.'.format(batch.path) 131 request.errors.add('body', 'requests', error_msg) 132 return 133 134 responses = [] 135 136 for subrequest_spec in requests: 137 subrequest = build_request(request, subrequest_spec) 138 139 log_context = {**request.log_context(), 140 'path': subrequest.path, 141 'method': subrequest.method} 142 try: 143 # Invoke subrequest without individual transaction. 144 resp, subrequest = request.follow_subrequest(subrequest, 145 use_tweens=False) 146 except httpexceptions.HTTPException as e: 147 # Since some request in the batch failed, we need to stop the parent request 148 # through Pyramid's transaction manager. 5XX errors are already caught by 149 # pyramid_tm's commit_veto 150 # https://github.com/Kinto/kinto/issues/624 151 if e.status_code == 409: 152 request.tm.abort() 153 154 if e.content_type == 'application/json': 155 resp = e 156 else: 157 # JSONify raw Pyramid errors. 158 resp = errors.http_error(e) 159 160 subrequest_logger.info('subrequest.summary', extra=log_context) 161 162 dict_resp = build_response(resp, subrequest) 163 responses.append(dict_resp) 164 165 return { 166 'responses': responses 167 } 168 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kinto/core/views/batch.py b/kinto/core/views/batch.py --- a/kinto/core/views/batch.py +++ b/kinto/core/views/batch.py @@ -9,6 +9,7 @@ from kinto.core import Service from kinto.core.errors import ErrorSchema from kinto.core.utils import merge_dicts, build_request, build_response +from kinto.core.resource.viewset import CONTENT_TYPES subrequest_logger = logging.getLogger('subrequest.summary') @@ -112,6 +113,7 @@ @batch.post(schema=BatchRequest, validators=(colander_validator,), + content_type=CONTENT_TYPES, permission=NO_PERMISSION_REQUIRED, tags=['Batch'], operation_id='batch', response_schemas=batch_responses)
{"golden_diff": "diff --git a/kinto/core/views/batch.py b/kinto/core/views/batch.py\n--- a/kinto/core/views/batch.py\n+++ b/kinto/core/views/batch.py\n@@ -9,6 +9,7 @@\n from kinto.core import Service\n from kinto.core.errors import ErrorSchema\n from kinto.core.utils import merge_dicts, build_request, build_response\n+from kinto.core.resource.viewset import CONTENT_TYPES\n \n \n subrequest_logger = logging.getLogger('subrequest.summary')\n@@ -112,6 +113,7 @@\n \n @batch.post(schema=BatchRequest,\n validators=(colander_validator,),\n+ content_type=CONTENT_TYPES,\n permission=NO_PERMISSION_REQUIRED,\n tags=['Batch'], operation_id='batch',\n response_schemas=batch_responses)\n", "issue": "Crash in batch view deserialization\n```\r\nAttributeError: 'bytes' object has no attribute 'get'\r\n File \"pyramid/tweens.py\", line 39, in excview_tween\r\n response = handler(request)\r\n File \"kinto/core/events.py\", line 76, in tween\r\n response = handler(request)\r\n File \"pyramid/router.py\", line 156, in handle_request\r\n view_name\r\n File \"pyramid/view.py\", line 642, in _call_view\r\n response = view_callable(context, request)\r\n File \"pyramid/config/views.py\", line 181, in __call__\r\n return view(context, request)\r\n File \"pyramid/viewderivers.py\", line 390, in attr_view\r\n return view(context, request)\r\n File \"pyramid/viewderivers.py\", line 368, in predicate_wrapper\r\n return view(context, request)\r\n File \"pyramid/viewderivers.py\", line 439, in rendered_view\r\n result = view(context, request)\r\n File \"pyramid/viewderivers.py\", line 148, in _requestonly_view\r\n response = view(request)\r\n File \"cornice/service.py\", line 487, in wrapper\r\n validator(request, **args)\r\n File \"cornice/validators/_colander.py\", line 113, in validator\r\n deserialized = schema.deserialize(cstruct)\r\n File \"colander/__init__.py\", line 2073, in deserialize\r\n appstruct = self.typ.deserialize(self, cstruct)\r\n File \"colander/__init__.py\", line 724, in deserialize\r\n return self._impl(node, cstruct, callback)\r\n File \"colander/__init__.py\", line 683, in _impl\r\n sub_result = callback(subnode, subval)\r\n File \"colander/__init__.py\", line 722, in callback\r\n return subnode.deserialize(subcstruct)\r\n File \"kinto/core/views/batch.py\", line 68, in deserialize\r\n defaults = cstruct.get('defaults')\r\n```\nCrash in batch view deserialization\n```\r\nAttributeError: 'bytes' object has no attribute 'get'\r\n File \"pyramid/tweens.py\", line 39, in excview_tween\r\n response = handler(request)\r\n File \"kinto/core/events.py\", line 76, in tween\r\n response = handler(request)\r\n File \"pyramid/router.py\", line 156, in handle_request\r\n view_name\r\n File \"pyramid/view.py\", line 642, in _call_view\r\n response = view_callable(context, request)\r\n File \"pyramid/config/views.py\", line 181, in __call__\r\n return view(context, request)\r\n File \"pyramid/viewderivers.py\", line 390, in attr_view\r\n return view(context, request)\r\n File \"pyramid/viewderivers.py\", line 368, in predicate_wrapper\r\n return view(context, request)\r\n File \"pyramid/viewderivers.py\", line 439, in rendered_view\r\n result = view(context, request)\r\n File \"pyramid/viewderivers.py\", line 148, in _requestonly_view\r\n response = view(request)\r\n File \"cornice/service.py\", line 487, in wrapper\r\n validator(request, **args)\r\n File \"cornice/validators/_colander.py\", line 113, in validator\r\n deserialized = schema.deserialize(cstruct)\r\n File \"colander/__init__.py\", line 2073, in deserialize\r\n appstruct = self.typ.deserialize(self, cstruct)\r\n File \"colander/__init__.py\", line 724, in deserialize\r\n return self._impl(node, cstruct, callback)\r\n File \"colander/__init__.py\", line 683, in _impl\r\n sub_result = callback(subnode, subval)\r\n File \"colander/__init__.py\", line 722, in callback\r\n return subnode.deserialize(subcstruct)\r\n File \"kinto/core/views/batch.py\", line 68, in deserialize\r\n defaults = cstruct.get('defaults')\r\n```\n", "before_files": [{"content": "import logging\n\nimport colander\nfrom cornice.validators import colander_validator\nfrom pyramid import httpexceptions\nfrom pyramid.security import NO_PERMISSION_REQUIRED\n\nfrom kinto.core import errors\nfrom kinto.core import Service\nfrom kinto.core.errors import ErrorSchema\nfrom kinto.core.utils import merge_dicts, build_request, build_response\n\n\nsubrequest_logger = logging.getLogger('subrequest.summary')\n\nvalid_http_method = colander.OneOf(('GET', 'HEAD', 'DELETE', 'TRACE',\n 'POST', 'PUT', 'PATCH'))\n\n\ndef string_values(node, cstruct):\n \"\"\"Validate that a ``colander.Mapping`` only has strings in its values.\n\n .. warning::\n\n Should be associated to a ``colander.Mapping`` schema node.\n \"\"\"\n are_strings = [isinstance(v, str) for v in cstruct.values()]\n if not all(are_strings):\n error_msg = '{} contains non string value'.format(cstruct)\n raise colander.Invalid(node, error_msg)\n\n\nclass BatchRequestSchema(colander.MappingSchema):\n method = colander.SchemaNode(colander.String(),\n validator=valid_http_method,\n missing=colander.drop)\n path = colander.SchemaNode(colander.String(),\n validator=colander.Regex('^/'))\n headers = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n validator=string_values,\n missing=colander.drop)\n body = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n missing=colander.drop)\n\n @staticmethod\n def schema_type():\n return colander.Mapping(unknown='raise')\n\n\nclass BatchPayloadSchema(colander.MappingSchema):\n defaults = BatchRequestSchema(missing=colander.drop).clone()\n requests = colander.SchemaNode(colander.Sequence(),\n BatchRequestSchema())\n\n @staticmethod\n def schema_type():\n return colander.Mapping(unknown='raise')\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # On defaults, path is not mandatory.\n self.get('defaults').get('path').missing = colander.drop\n\n def deserialize(self, cstruct=colander.null):\n \"\"\"Preprocess received data to carefully merge defaults.\n \"\"\"\n if cstruct is not colander.null:\n defaults = cstruct.get('defaults')\n requests = cstruct.get('requests')\n if isinstance(defaults, dict) and isinstance(requests, list):\n for request in requests:\n if isinstance(request, dict):\n merge_dicts(request, defaults)\n return super().deserialize(cstruct)\n\n\nclass BatchRequest(colander.MappingSchema):\n body = BatchPayloadSchema()\n\n\nclass BatchResponseSchema(colander.MappingSchema):\n status = colander.SchemaNode(colander.Integer())\n path = colander.SchemaNode(colander.String())\n headers = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n validator=string_values,\n missing=colander.drop)\n body = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n missing=colander.drop)\n\n\nclass BatchResponseBodySchema(colander.MappingSchema):\n responses = colander.SequenceSchema(BatchResponseSchema(missing=colander.drop))\n\n\nclass BatchResponse(colander.MappingSchema):\n body = BatchResponseBodySchema()\n\n\nclass ErrorResponseSchema(colander.MappingSchema):\n body = ErrorSchema()\n\n\nbatch_responses = {\n '200': BatchResponse(description='Return a list of operation responses.'),\n '400': ErrorResponseSchema(description='The request was badly formatted.'),\n 'default': ErrorResponseSchema(description='an unknown error occurred.')\n}\n\nbatch = Service(name='batch', path='/batch',\n description='Batch operations')\n\n\[email protected](schema=BatchRequest,\n validators=(colander_validator,),\n permission=NO_PERMISSION_REQUIRED,\n tags=['Batch'], operation_id='batch',\n response_schemas=batch_responses)\ndef post_batch(request):\n requests = request.validated['body']['requests']\n\n request.log_context(batch_size=len(requests))\n\n limit = request.registry.settings['batch_max_requests']\n if limit and len(requests) > int(limit):\n error_msg = 'Number of requests is limited to {}'.format(limit)\n request.errors.add('body', 'requests', error_msg)\n return\n\n if any([batch.path in req['path'] for req in requests]):\n error_msg = 'Recursive call on {} endpoint is forbidden.'.format(batch.path)\n request.errors.add('body', 'requests', error_msg)\n return\n\n responses = []\n\n for subrequest_spec in requests:\n subrequest = build_request(request, subrequest_spec)\n\n log_context = {**request.log_context(),\n 'path': subrequest.path,\n 'method': subrequest.method}\n try:\n # Invoke subrequest without individual transaction.\n resp, subrequest = request.follow_subrequest(subrequest,\n use_tweens=False)\n except httpexceptions.HTTPException as e:\n # Since some request in the batch failed, we need to stop the parent request\n # through Pyramid's transaction manager. 5XX errors are already caught by\n # pyramid_tm's commit_veto\n # https://github.com/Kinto/kinto/issues/624\n if e.status_code == 409:\n request.tm.abort()\n\n if e.content_type == 'application/json':\n resp = e\n else:\n # JSONify raw Pyramid errors.\n resp = errors.http_error(e)\n\n subrequest_logger.info('subrequest.summary', extra=log_context)\n\n dict_resp = build_response(resp, subrequest)\n responses.append(dict_resp)\n\n return {\n 'responses': responses\n }\n", "path": "kinto/core/views/batch.py"}], "after_files": [{"content": "import logging\n\nimport colander\nfrom cornice.validators import colander_validator\nfrom pyramid import httpexceptions\nfrom pyramid.security import NO_PERMISSION_REQUIRED\n\nfrom kinto.core import errors\nfrom kinto.core import Service\nfrom kinto.core.errors import ErrorSchema\nfrom kinto.core.utils import merge_dicts, build_request, build_response\nfrom kinto.core.resource.viewset import CONTENT_TYPES\n\n\nsubrequest_logger = logging.getLogger('subrequest.summary')\n\nvalid_http_method = colander.OneOf(('GET', 'HEAD', 'DELETE', 'TRACE',\n 'POST', 'PUT', 'PATCH'))\n\n\ndef string_values(node, cstruct):\n \"\"\"Validate that a ``colander.Mapping`` only has strings in its values.\n\n .. warning::\n\n Should be associated to a ``colander.Mapping`` schema node.\n \"\"\"\n are_strings = [isinstance(v, str) for v in cstruct.values()]\n if not all(are_strings):\n error_msg = '{} contains non string value'.format(cstruct)\n raise colander.Invalid(node, error_msg)\n\n\nclass BatchRequestSchema(colander.MappingSchema):\n method = colander.SchemaNode(colander.String(),\n validator=valid_http_method,\n missing=colander.drop)\n path = colander.SchemaNode(colander.String(),\n validator=colander.Regex('^/'))\n headers = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n validator=string_values,\n missing=colander.drop)\n body = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n missing=colander.drop)\n\n @staticmethod\n def schema_type():\n return colander.Mapping(unknown='raise')\n\n\nclass BatchPayloadSchema(colander.MappingSchema):\n defaults = BatchRequestSchema(missing=colander.drop).clone()\n requests = colander.SchemaNode(colander.Sequence(),\n BatchRequestSchema())\n\n @staticmethod\n def schema_type():\n return colander.Mapping(unknown='raise')\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # On defaults, path is not mandatory.\n self.get('defaults').get('path').missing = colander.drop\n\n def deserialize(self, cstruct=colander.null):\n \"\"\"Preprocess received data to carefully merge defaults.\n \"\"\"\n if cstruct is not colander.null:\n defaults = cstruct.get('defaults')\n requests = cstruct.get('requests')\n if isinstance(defaults, dict) and isinstance(requests, list):\n for request in requests:\n if isinstance(request, dict):\n merge_dicts(request, defaults)\n return super().deserialize(cstruct)\n\n\nclass BatchRequest(colander.MappingSchema):\n body = BatchPayloadSchema()\n\n\nclass BatchResponseSchema(colander.MappingSchema):\n status = colander.SchemaNode(colander.Integer())\n path = colander.SchemaNode(colander.String())\n headers = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n validator=string_values,\n missing=colander.drop)\n body = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n missing=colander.drop)\n\n\nclass BatchResponseBodySchema(colander.MappingSchema):\n responses = colander.SequenceSchema(BatchResponseSchema(missing=colander.drop))\n\n\nclass BatchResponse(colander.MappingSchema):\n body = BatchResponseBodySchema()\n\n\nclass ErrorResponseSchema(colander.MappingSchema):\n body = ErrorSchema()\n\n\nbatch_responses = {\n '200': BatchResponse(description='Return a list of operation responses.'),\n '400': ErrorResponseSchema(description='The request was badly formatted.'),\n 'default': ErrorResponseSchema(description='an unknown error occurred.')\n}\n\nbatch = Service(name='batch', path='/batch',\n description='Batch operations')\n\n\[email protected](schema=BatchRequest,\n validators=(colander_validator,),\n content_type=CONTENT_TYPES,\n permission=NO_PERMISSION_REQUIRED,\n tags=['Batch'], operation_id='batch',\n response_schemas=batch_responses)\ndef post_batch(request):\n requests = request.validated['body']['requests']\n\n request.log_context(batch_size=len(requests))\n\n limit = request.registry.settings['batch_max_requests']\n if limit and len(requests) > int(limit):\n error_msg = 'Number of requests is limited to {}'.format(limit)\n request.errors.add('body', 'requests', error_msg)\n return\n\n if any([batch.path in req['path'] for req in requests]):\n error_msg = 'Recursive call on {} endpoint is forbidden.'.format(batch.path)\n request.errors.add('body', 'requests', error_msg)\n return\n\n responses = []\n\n for subrequest_spec in requests:\n subrequest = build_request(request, subrequest_spec)\n\n log_context = {**request.log_context(),\n 'path': subrequest.path,\n 'method': subrequest.method}\n try:\n # Invoke subrequest without individual transaction.\n resp, subrequest = request.follow_subrequest(subrequest,\n use_tweens=False)\n except httpexceptions.HTTPException as e:\n # Since some request in the batch failed, we need to stop the parent request\n # through Pyramid's transaction manager. 5XX errors are already caught by\n # pyramid_tm's commit_veto\n # https://github.com/Kinto/kinto/issues/624\n if e.status_code == 409:\n request.tm.abort()\n\n if e.content_type == 'application/json':\n resp = e\n else:\n # JSONify raw Pyramid errors.\n resp = errors.http_error(e)\n\n subrequest_logger.info('subrequest.summary', extra=log_context)\n\n dict_resp = build_response(resp, subrequest)\n responses.append(dict_resp)\n\n return {\n 'responses': responses\n }\n", "path": "kinto/core/views/batch.py"}]}
2,792
167
gh_patches_debug_8658
rasdani/github-patches
git_diff
benoitc__gunicorn-931
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- "Connection reset by peer" in handle_request isn't handled (or ignored) We see a lot of these errors with HAProxy 1.5 in front of gunicorn: ``` 2014-10-23 17:17:24,366 DEBUG OPTIONS /haproxy_check 2014-10-23 17:17:24,384 DEBUG Ignoring connection reset 2014-10-23 17:17:24,700 DEBUG OPTIONS /haproxy_check 2014-10-23 17:17:24,719 DEBUG Ignoring connection reset 2014-10-23 17:17:24,753 DEBUG OPTIONS /haproxy_check 2014-10-23 17:17:24,770 ERROR Error handling request Traceback (most recent call last): File "/opt/lp-apps/webshell/lib/python2.7/site-packages/gunicorn/workers/async.py", line 108, in handle_request resp.write(item) File "/opt/lp-apps/webshell/lib/python2.7/site-packages/gunicorn/http/wsgi.py", line 344, in write util.write(self.sock, arg, self.chunked) File "/opt/lp-apps/webshell/lib/python2.7/site-packages/gunicorn/util.py", line 300, in write return write_chunk(sock, data) File "/opt/lp-apps/webshell/lib/python2.7/site-packages/gunicorn/util.py", line 295, in write_chunk sock.sendall(chunk) File "/opt/lp-apps/webshell/lib/python2.7/site-packages/gevent/socket.py", line 458, in sendall data_sent += self.send(_get_memory(data, data_sent), flags) File "/opt/lp-apps/webshell/lib/python2.7/site-packages/gevent/socket.py", line 435, in send return sock.send(data, flags) error: [Errno 104] Connection reset by peer 2014-10-23 17:17:24,770 DEBUG Closing connection. ``` It seems that in the async worker, the error is sometimes caught and ignored in `handle`, but it also sometimes happens in `handle_request`. It should probably be ignored there as well? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `gunicorn/workers/async.py` Content: ``` 1 # -*- coding: utf-8 - 2 # 3 # This file is part of gunicorn released under the MIT license. 4 # See the NOTICE for more information. 5 6 from datetime import datetime 7 import errno 8 import socket 9 import ssl 10 import sys 11 12 import gunicorn.http as http 13 import gunicorn.http.wsgi as wsgi 14 import gunicorn.util as util 15 import gunicorn.workers.base as base 16 from gunicorn import six 17 18 ALREADY_HANDLED = object() 19 20 21 class AsyncWorker(base.Worker): 22 23 def __init__(self, *args, **kwargs): 24 super(AsyncWorker, self).__init__(*args, **kwargs) 25 self.worker_connections = self.cfg.worker_connections 26 27 def timeout_ctx(self): 28 raise NotImplementedError() 29 30 def handle(self, listener, client, addr): 31 req = None 32 try: 33 parser = http.RequestParser(self.cfg, client) 34 try: 35 listener_name = listener.getsockname() 36 if not self.cfg.keepalive: 37 req = six.next(parser) 38 self.handle_request(listener_name, req, client, addr) 39 else: 40 # keepalive loop 41 proxy_protocol_info = {} 42 while True: 43 req = None 44 with self.timeout_ctx(): 45 req = six.next(parser) 46 if not req: 47 break 48 if req.proxy_protocol_info: 49 proxy_protocol_info = req.proxy_protocol_info 50 else: 51 req.proxy_protocol_info = proxy_protocol_info 52 self.handle_request(listener_name, req, client, addr) 53 except http.errors.NoMoreData as e: 54 self.log.debug("Ignored premature client disconnection. %s", e) 55 except StopIteration as e: 56 self.log.debug("Closing connection. %s", e) 57 except ssl.SSLError: 58 exc_info = sys.exc_info() 59 # pass to next try-except level 60 six.reraise(exc_info[0], exc_info[1], exc_info[2]) 61 except socket.error: 62 exc_info = sys.exc_info() 63 # pass to next try-except level 64 six.reraise(exc_info[0], exc_info[1], exc_info[2]) 65 except Exception as e: 66 self.handle_error(req, client, addr, e) 67 except ssl.SSLError as e: 68 if e.args[0] == ssl.SSL_ERROR_EOF: 69 self.log.debug("ssl connection closed") 70 client.close() 71 else: 72 self.log.debug("Error processing SSL request.") 73 self.handle_error(req, client, addr, e) 74 except socket.error as e: 75 if e.args[0] not in (errno.EPIPE, errno.ECONNRESET): 76 self.log.exception("Socket error processing request.") 77 else: 78 if e.args[0] == errno.ECONNRESET: 79 self.log.debug("Ignoring connection reset") 80 else: 81 self.log.debug("Ignoring EPIPE") 82 except Exception as e: 83 self.handle_error(req, client, addr, e) 84 finally: 85 util.close(client) 86 87 def handle_request(self, listener_name, req, sock, addr): 88 request_start = datetime.now() 89 environ = {} 90 resp = None 91 try: 92 self.cfg.pre_request(self, req) 93 resp, environ = wsgi.create(req, sock, addr, 94 listener_name, self.cfg) 95 environ["wsgi.multithread"] = True 96 self.nr += 1 97 if self.alive and self.nr >= self.max_requests: 98 self.log.info("Autorestarting worker after current request.") 99 resp.force_close() 100 self.alive = False 101 102 if not self.cfg.keepalive: 103 resp.force_close() 104 105 respiter = self.wsgi(environ, resp.start_response) 106 if respiter == ALREADY_HANDLED: 107 return False 108 try: 109 if isinstance(respiter, environ['wsgi.file_wrapper']): 110 resp.write_file(respiter) 111 else: 112 for item in respiter: 113 resp.write(item) 114 resp.close() 115 request_time = datetime.now() - request_start 116 self.log.access(resp, req, environ, request_time) 117 finally: 118 if hasattr(respiter, "close"): 119 respiter.close() 120 if resp.should_close(): 121 raise StopIteration() 122 except StopIteration: 123 raise 124 except Exception: 125 if resp and resp.headers_sent: 126 # If the requests have already been sent, we should close the 127 # connection to indicate the error. 128 self.log.exception("Error handling request") 129 try: 130 sock.shutdown(socket.SHUT_RDWR) 131 sock.close() 132 except socket.error: 133 pass 134 raise StopIteration() 135 raise 136 finally: 137 try: 138 self.cfg.post_request(self, req, environ, resp) 139 except Exception: 140 self.log.exception("Exception in post_request hook") 141 return True 142 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/gunicorn/workers/async.py b/gunicorn/workers/async.py --- a/gunicorn/workers/async.py +++ b/gunicorn/workers/async.py @@ -118,6 +118,10 @@ raise StopIteration() except StopIteration: raise + except socket.error: + # If the original exception was a socket.error we delegate + # handling it to the caller (where handle() might ignore it + six.reraise(*sys.exc_info()) except Exception: if resp and resp.headers_sent: # If the requests have already been sent, we should close the
{"golden_diff": "diff --git a/gunicorn/workers/async.py b/gunicorn/workers/async.py\n--- a/gunicorn/workers/async.py\n+++ b/gunicorn/workers/async.py\n@@ -118,6 +118,10 @@\n raise StopIteration()\n except StopIteration:\n raise\n+ except socket.error:\n+ # If the original exception was a socket.error we delegate\n+ # handling it to the caller (where handle() might ignore it \n+ six.reraise(*sys.exc_info())\n except Exception:\n if resp and resp.headers_sent:\n # If the requests have already been sent, we should close the\n", "issue": "\"Connection reset by peer\" in handle_request isn't handled (or ignored)\nWe see a lot of these errors with HAProxy 1.5 in front of gunicorn:\n\n```\n2014-10-23 17:17:24,366 DEBUG OPTIONS /haproxy_check\n2014-10-23 17:17:24,384 DEBUG Ignoring connection reset\n2014-10-23 17:17:24,700 DEBUG OPTIONS /haproxy_check\n2014-10-23 17:17:24,719 DEBUG Ignoring connection reset\n2014-10-23 17:17:24,753 DEBUG OPTIONS /haproxy_check\n2014-10-23 17:17:24,770 ERROR Error handling request\nTraceback (most recent call last):\n File \"/opt/lp-apps/webshell/lib/python2.7/site-packages/gunicorn/workers/async.py\", line 108, in handle_request\n resp.write(item)\n File \"/opt/lp-apps/webshell/lib/python2.7/site-packages/gunicorn/http/wsgi.py\", line 344, in write\n util.write(self.sock, arg, self.chunked)\n File \"/opt/lp-apps/webshell/lib/python2.7/site-packages/gunicorn/util.py\", line 300, in write\n return write_chunk(sock, data)\n File \"/opt/lp-apps/webshell/lib/python2.7/site-packages/gunicorn/util.py\", line 295, in write_chunk\n sock.sendall(chunk)\n File \"/opt/lp-apps/webshell/lib/python2.7/site-packages/gevent/socket.py\", line 458, in sendall\n data_sent += self.send(_get_memory(data, data_sent), flags)\n File \"/opt/lp-apps/webshell/lib/python2.7/site-packages/gevent/socket.py\", line 435, in send\n return sock.send(data, flags)\nerror: [Errno 104] Connection reset by peer\n2014-10-23 17:17:24,770 DEBUG Closing connection.\n```\n\nIt seems that in the async worker, the error is sometimes caught and ignored in `handle`, but it also sometimes happens in `handle_request`. It should probably be ignored there as well?\n\n", "before_files": [{"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\nfrom datetime import datetime\nimport errno\nimport socket\nimport ssl\nimport sys\n\nimport gunicorn.http as http\nimport gunicorn.http.wsgi as wsgi\nimport gunicorn.util as util\nimport gunicorn.workers.base as base\nfrom gunicorn import six\n\nALREADY_HANDLED = object()\n\n\nclass AsyncWorker(base.Worker):\n\n def __init__(self, *args, **kwargs):\n super(AsyncWorker, self).__init__(*args, **kwargs)\n self.worker_connections = self.cfg.worker_connections\n\n def timeout_ctx(self):\n raise NotImplementedError()\n\n def handle(self, listener, client, addr):\n req = None\n try:\n parser = http.RequestParser(self.cfg, client)\n try:\n listener_name = listener.getsockname()\n if not self.cfg.keepalive:\n req = six.next(parser)\n self.handle_request(listener_name, req, client, addr)\n else:\n # keepalive loop\n proxy_protocol_info = {}\n while True:\n req = None\n with self.timeout_ctx():\n req = six.next(parser)\n if not req:\n break\n if req.proxy_protocol_info:\n proxy_protocol_info = req.proxy_protocol_info\n else:\n req.proxy_protocol_info = proxy_protocol_info\n self.handle_request(listener_name, req, client, addr)\n except http.errors.NoMoreData as e:\n self.log.debug(\"Ignored premature client disconnection. %s\", e)\n except StopIteration as e:\n self.log.debug(\"Closing connection. %s\", e)\n except ssl.SSLError:\n exc_info = sys.exc_info()\n # pass to next try-except level\n six.reraise(exc_info[0], exc_info[1], exc_info[2])\n except socket.error:\n exc_info = sys.exc_info()\n # pass to next try-except level\n six.reraise(exc_info[0], exc_info[1], exc_info[2])\n except Exception as e:\n self.handle_error(req, client, addr, e)\n except ssl.SSLError as e:\n if e.args[0] == ssl.SSL_ERROR_EOF:\n self.log.debug(\"ssl connection closed\")\n client.close()\n else:\n self.log.debug(\"Error processing SSL request.\")\n self.handle_error(req, client, addr, e)\n except socket.error as e:\n if e.args[0] not in (errno.EPIPE, errno.ECONNRESET):\n self.log.exception(\"Socket error processing request.\")\n else:\n if e.args[0] == errno.ECONNRESET:\n self.log.debug(\"Ignoring connection reset\")\n else:\n self.log.debug(\"Ignoring EPIPE\")\n except Exception as e:\n self.handle_error(req, client, addr, e)\n finally:\n util.close(client)\n\n def handle_request(self, listener_name, req, sock, addr):\n request_start = datetime.now()\n environ = {}\n resp = None\n try:\n self.cfg.pre_request(self, req)\n resp, environ = wsgi.create(req, sock, addr,\n listener_name, self.cfg)\n environ[\"wsgi.multithread\"] = True\n self.nr += 1\n if self.alive and self.nr >= self.max_requests:\n self.log.info(\"Autorestarting worker after current request.\")\n resp.force_close()\n self.alive = False\n\n if not self.cfg.keepalive:\n resp.force_close()\n\n respiter = self.wsgi(environ, resp.start_response)\n if respiter == ALREADY_HANDLED:\n return False\n try:\n if isinstance(respiter, environ['wsgi.file_wrapper']):\n resp.write_file(respiter)\n else:\n for item in respiter:\n resp.write(item)\n resp.close()\n request_time = datetime.now() - request_start\n self.log.access(resp, req, environ, request_time)\n finally:\n if hasattr(respiter, \"close\"):\n respiter.close()\n if resp.should_close():\n raise StopIteration()\n except StopIteration:\n raise\n except Exception:\n if resp and resp.headers_sent:\n # If the requests have already been sent, we should close the\n # connection to indicate the error.\n self.log.exception(\"Error handling request\")\n try:\n sock.shutdown(socket.SHUT_RDWR)\n sock.close()\n except socket.error:\n pass\n raise StopIteration()\n raise\n finally:\n try:\n self.cfg.post_request(self, req, environ, resp)\n except Exception:\n self.log.exception(\"Exception in post_request hook\")\n return True\n", "path": "gunicorn/workers/async.py"}], "after_files": [{"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\nfrom datetime import datetime\nimport errno\nimport socket\nimport ssl\nimport sys\n\nimport gunicorn.http as http\nimport gunicorn.http.wsgi as wsgi\nimport gunicorn.util as util\nimport gunicorn.workers.base as base\nfrom gunicorn import six\n\nALREADY_HANDLED = object()\n\n\nclass AsyncWorker(base.Worker):\n\n def __init__(self, *args, **kwargs):\n super(AsyncWorker, self).__init__(*args, **kwargs)\n self.worker_connections = self.cfg.worker_connections\n\n def timeout_ctx(self):\n raise NotImplementedError()\n\n def handle(self, listener, client, addr):\n req = None\n try:\n parser = http.RequestParser(self.cfg, client)\n try:\n listener_name = listener.getsockname()\n if not self.cfg.keepalive:\n req = six.next(parser)\n self.handle_request(listener_name, req, client, addr)\n else:\n # keepalive loop\n proxy_protocol_info = req.proxy_protocol_info\n while True:\n req = None\n with self.timeout_ctx():\n req = six.next(parser)\n if not req:\n break\n req.proxy_protocol_info = proxy_protocol_info\n self.handle_request(listener_name, req, client, addr)\n except http.errors.NoMoreData as e:\n self.log.debug(\"Ignored premature client disconnection. %s\", e)\n except StopIteration as e:\n self.log.debug(\"Closing connection. %s\", e)\n except ssl.SSLError:\n exc_info = sys.exc_info()\n # pass to next try-except level\n six.reraise(exc_info[0], exc_info[1], exc_info[2])\n except socket.error:\n exc_info = sys.exc_info()\n # pass to next try-except level\n six.reraise(exc_info[0], exc_info[1], exc_info[2])\n except Exception as e:\n self.handle_error(req, client, addr, e)\n except ssl.SSLError as e:\n if e.args[0] == ssl.SSL_ERROR_EOF:\n self.log.debug(\"ssl connection closed\")\n client.close()\n else:\n self.log.debug(\"Error processing SSL request.\")\n self.handle_error(req, client, addr, e)\n except socket.error as e:\n if e.args[0] not in (errno.EPIPE, errno.ECONNRESET):\n self.log.exception(\"Socket error processing request.\")\n else:\n if e.args[0] == errno.ECONNRESET:\n self.log.debug(\"Ignoring connection reset\")\n else:\n self.log.debug(\"Ignoring EPIPE\")\n except Exception as e:\n self.handle_error(req, client, addr, e)\n finally:\n util.close(client)\n\n def handle_request(self, listener_name, req, sock, addr):\n request_start = datetime.now()\n environ = {}\n resp = None\n try:\n self.cfg.pre_request(self, req)\n resp, environ = wsgi.create(req, sock, addr,\n listener_name, self.cfg)\n environ[\"wsgi.multithread\"] = True\n self.nr += 1\n if self.alive and self.nr >= self.max_requests:\n self.log.info(\"Autorestarting worker after current request.\")\n resp.force_close()\n self.alive = False\n\n if not self.cfg.keepalive:\n resp.force_close()\n\n respiter = self.wsgi(environ, resp.start_response)\n if respiter == ALREADY_HANDLED:\n return False\n try:\n if isinstance(respiter, environ['wsgi.file_wrapper']):\n resp.write_file(respiter)\n else:\n for item in respiter:\n resp.write(item)\n resp.close()\n request_time = datetime.now() - request_start\n self.log.access(resp, req, environ, request_time)\n finally:\n if hasattr(respiter, \"close\"):\n respiter.close()\n if resp.should_close():\n raise StopIteration()\n except StopIteration:\n raise\n except socket.error:\n # If the original exception was a socket.error we delegate\n # handling it to the caller (where handle() might ignore it \n six.reraise(*sys.exc_info())\n except Exception:\n if resp and resp.headers_sent:\n # If the requests have already been sent, we should close the\n # connection to indicate the error.\n self.log.exception(\"Error handling request\")\n try:\n sock.shutdown(socket.SHUT_RDWR)\n sock.close()\n except socket.error:\n pass\n raise StopIteration()\n raise\n finally:\n try:\n self.cfg.post_request(self, req, environ, resp)\n except Exception:\n self.log.exception(\"Exception in post_request hook\")\n return True\n", "path": "gunicorn/workers/async.py"}]}
2,143
142
gh_patches_debug_10477
rasdani/github-patches
git_diff
freedomofpress__securedrop-6102
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- 1.8.0 translation feedback: say replies are saved, not stored ## Description From [AO](https://weblate.securedrop.org/translate/securedrop/securedrop/en/?checksum=a6f950dfa57047f3): "Your reply has been saved." might be better than "Your reply has been stored." --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `securedrop/journalist_app/main.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 from datetime import datetime 3 from typing import Union 4 5 import werkzeug 6 from flask import (Blueprint, request, current_app, session, url_for, redirect, 7 render_template, g, flash, abort, Markup, escape) 8 from flask_babel import gettext 9 from sqlalchemy.orm import joinedload 10 from sqlalchemy.sql import func 11 12 import store 13 14 from db import db 15 from models import SeenReply, Source, SourceStar, Submission, Reply 16 from journalist_app.forms import ReplyForm 17 from journalist_app.utils import (validate_user, bulk_delete, download, 18 confirm_bulk_delete, get_source) 19 from sdconfig import SDConfig 20 21 22 def make_blueprint(config: SDConfig) -> Blueprint: 23 view = Blueprint('main', __name__) 24 25 @view.route('/login', methods=('GET', 'POST')) 26 def login() -> Union[str, werkzeug.Response]: 27 if request.method == 'POST': 28 user = validate_user(request.form['username'], 29 request.form['password'], 30 request.form['token']) 31 if user: 32 current_app.logger.info("'{}' logged in with the two-factor code {}" 33 .format(request.form['username'], 34 request.form['token'])) 35 36 # Update access metadata 37 user.last_access = datetime.utcnow() 38 db.session.add(user) 39 db.session.commit() 40 41 session['uid'] = user.id 42 session['nonce'] = user.session_nonce 43 return redirect(url_for('main.index')) 44 45 return render_template("login.html") 46 47 @view.route('/logout') 48 def logout() -> werkzeug.Response: 49 session.pop('uid', None) 50 session.pop('expires', None) 51 session.pop('nonce', None) 52 return redirect(url_for('main.index')) 53 54 @view.route("/") 55 def index() -> str: 56 # Gather the count of unread submissions for each source 57 # ID. This query will be joined in the queries for starred and 58 # unstarred sources below, and the unread counts added to 59 # their result sets as an extra column. 60 unread_stmt = ( 61 db.session.query(Submission.source_id, func.count("*").label("num_unread")) 62 .filter_by(seen_files=None, seen_messages=None) 63 .group_by(Submission.source_id) 64 .subquery() 65 ) 66 67 # Query for starred sources, along with their unread 68 # submission counts. 69 starred = ( 70 db.session.query(Source, unread_stmt.c.num_unread) 71 .filter_by(pending=False, deleted_at=None) 72 .filter(Source.last_updated.isnot(None)) 73 .filter(SourceStar.starred.is_(True)) 74 .outerjoin(SourceStar) 75 .options(joinedload(Source.submissions)) 76 .options(joinedload(Source.star)) 77 .outerjoin(unread_stmt, Source.id == unread_stmt.c.source_id) 78 .order_by(Source.last_updated.desc()) 79 .all() 80 ) 81 82 # Now, add "num_unread" attributes to the source entities. 83 for source, num_unread in starred: 84 source.num_unread = num_unread or 0 85 starred = [source for source, num_unread in starred] 86 87 # Query for sources without stars, along with their unread 88 # submission counts. 89 unstarred = ( 90 db.session.query(Source, unread_stmt.c.num_unread) 91 .filter_by(pending=False, deleted_at=None) 92 .filter(Source.last_updated.isnot(None)) 93 .filter(~Source.star.has(SourceStar.starred.is_(True))) 94 .options(joinedload(Source.submissions)) 95 .options(joinedload(Source.star)) 96 .outerjoin(unread_stmt, Source.id == unread_stmt.c.source_id) 97 .order_by(Source.last_updated.desc()) 98 .all() 99 ) 100 101 # Again, add "num_unread" attributes to the source entities. 102 for source, num_unread in unstarred: 103 source.num_unread = num_unread or 0 104 unstarred = [source for source, num_unread in unstarred] 105 106 response = render_template("index.html", unstarred=unstarred, starred=starred) 107 return response 108 109 @view.route('/reply', methods=('POST',)) 110 def reply() -> werkzeug.Response: 111 """Attempt to send a Reply from a Journalist to a Source. Empty 112 messages are rejected, and an informative error message is flashed 113 on the client. In the case of unexpected errors involving database 114 transactions (potentially caused by racing request threads that 115 modify the same the database object) logging is done in such a way 116 so as not to write potentially sensitive information to disk, and a 117 generic error message is flashed on the client. 118 119 Returns: 120 flask.Response: The user is redirected to the same Source 121 collection view, regardless if the Reply is created 122 successfully. 123 """ 124 form = ReplyForm() 125 if not form.validate_on_submit(): 126 for error in form.message.errors: 127 flash(error, "error") 128 return redirect(url_for('col.col', filesystem_id=g.filesystem_id)) 129 130 g.source.interaction_count += 1 131 filename = "{0}-{1}-reply.gpg".format(g.source.interaction_count, 132 g.source.journalist_filename) 133 current_app.crypto_util.encrypt( 134 form.message.data, 135 [current_app.crypto_util.get_fingerprint(g.filesystem_id), 136 config.JOURNALIST_KEY], 137 output=current_app.storage.path(g.filesystem_id, filename), 138 ) 139 140 try: 141 reply = Reply(g.user, g.source, filename) 142 db.session.add(reply) 143 db.session.flush() 144 seen_reply = SeenReply(reply_id=reply.id, journalist_id=g.user.id) 145 db.session.add(seen_reply) 146 db.session.commit() 147 store.async_add_checksum_for_file(reply) 148 except Exception as exc: 149 flash(gettext( 150 "An unexpected error occurred! Please " 151 "inform your admin."), "error") 152 # We take a cautious approach to logging here because we're dealing 153 # with responses to sources. It's possible the exception message 154 # could contain information we don't want to write to disk. 155 current_app.logger.error( 156 "Reply from '{}' (ID {}) failed: {}!".format(g.user.username, 157 g.user.id, 158 exc.__class__)) 159 else: 160 161 flash( 162 Markup( 163 "<b>{}</b> {}".format( 164 # Translators: Precedes a message confirming the success of an operation. 165 escape(gettext("Success!")), 166 escape(gettext("Your reply has been stored.")) 167 ) 168 ), 'success') 169 finally: 170 return redirect(url_for('col.col', filesystem_id=g.filesystem_id)) 171 172 @view.route('/bulk', methods=('POST',)) 173 def bulk() -> Union[str, werkzeug.Response]: 174 action = request.form['action'] 175 error_redirect = url_for('col.col', filesystem_id=g.filesystem_id) 176 doc_names_selected = request.form.getlist('doc_names_selected') 177 selected_docs = [doc for doc in g.source.collection 178 if doc.filename in doc_names_selected] 179 if selected_docs == []: 180 if action == 'download': 181 flash( 182 Markup( 183 "<b>{}</b> {}".format( 184 # Translators: Error shown when a user has not selected items to act on. 185 escape(gettext("Nothing Selected")), 186 escape(gettext("You must select one or more items for download")) 187 ) 188 ), 'error') 189 elif action in ('delete', 'confirm_delete'): 190 flash( 191 Markup( 192 "<b>{}</b> {}".format( 193 # Translators: Error shown when a user has not selected items to act on. 194 escape(gettext("Nothing Selected")), 195 escape(gettext("You must select one or more items for deletion")) 196 ) 197 ), 'error') 198 199 return redirect(error_redirect) 200 201 if action == 'download': 202 source = get_source(g.filesystem_id) 203 return download( 204 source.journalist_filename, selected_docs, on_error_redirect=error_redirect 205 ) 206 elif action == 'delete': 207 return bulk_delete(g.filesystem_id, selected_docs) 208 elif action == 'confirm_delete': 209 return confirm_bulk_delete(g.filesystem_id, selected_docs) 210 else: 211 abort(400) 212 213 @view.route('/download_unread/<filesystem_id>') 214 def download_unread_filesystem_id(filesystem_id: str) -> werkzeug.Response: 215 unseen_submissions = ( 216 Submission.query.join(Source) 217 .filter( 218 Source.deleted_at.is_(None), 219 Source.filesystem_id == filesystem_id 220 ) 221 .filter(~Submission.seen_files.any(), ~Submission.seen_messages.any()) 222 .all() 223 ) 224 if len(unseen_submissions) == 0: 225 flash(gettext("No unread submissions for this source."), "error") 226 return redirect(url_for('col.col', filesystem_id=filesystem_id)) 227 source = get_source(filesystem_id) 228 return download(source.journalist_filename, unseen_submissions) 229 230 return view 231 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/securedrop/journalist_app/main.py b/securedrop/journalist_app/main.py --- a/securedrop/journalist_app/main.py +++ b/securedrop/journalist_app/main.py @@ -163,7 +163,8 @@ "<b>{}</b> {}".format( # Translators: Precedes a message confirming the success of an operation. escape(gettext("Success!")), - escape(gettext("Your reply has been stored.")) + escape(gettext("The source will receive your reply " + "next time they log in.")) ) ), 'success') finally:
{"golden_diff": "diff --git a/securedrop/journalist_app/main.py b/securedrop/journalist_app/main.py\n--- a/securedrop/journalist_app/main.py\n+++ b/securedrop/journalist_app/main.py\n@@ -163,7 +163,8 @@\n \"<b>{}</b> {}\".format(\n # Translators: Precedes a message confirming the success of an operation.\n escape(gettext(\"Success!\")),\n- escape(gettext(\"Your reply has been stored.\"))\n+ escape(gettext(\"The source will receive your reply \"\n+ \"next time they log in.\"))\n )\n ), 'success')\n finally:\n", "issue": "1.8.0 translation feedback: say replies are saved, not stored\n## Description\r\n\r\nFrom [AO](https://weblate.securedrop.org/translate/securedrop/securedrop/en/?checksum=a6f950dfa57047f3): \"Your reply has been saved.\" might be better than \"Your reply has been stored.\"\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom datetime import datetime\nfrom typing import Union\n\nimport werkzeug\nfrom flask import (Blueprint, request, current_app, session, url_for, redirect,\n render_template, g, flash, abort, Markup, escape)\nfrom flask_babel import gettext\nfrom sqlalchemy.orm import joinedload\nfrom sqlalchemy.sql import func\n\nimport store\n\nfrom db import db\nfrom models import SeenReply, Source, SourceStar, Submission, Reply\nfrom journalist_app.forms import ReplyForm\nfrom journalist_app.utils import (validate_user, bulk_delete, download,\n confirm_bulk_delete, get_source)\nfrom sdconfig import SDConfig\n\n\ndef make_blueprint(config: SDConfig) -> Blueprint:\n view = Blueprint('main', __name__)\n\n @view.route('/login', methods=('GET', 'POST'))\n def login() -> Union[str, werkzeug.Response]:\n if request.method == 'POST':\n user = validate_user(request.form['username'],\n request.form['password'],\n request.form['token'])\n if user:\n current_app.logger.info(\"'{}' logged in with the two-factor code {}\"\n .format(request.form['username'],\n request.form['token']))\n\n # Update access metadata\n user.last_access = datetime.utcnow()\n db.session.add(user)\n db.session.commit()\n\n session['uid'] = user.id\n session['nonce'] = user.session_nonce\n return redirect(url_for('main.index'))\n\n return render_template(\"login.html\")\n\n @view.route('/logout')\n def logout() -> werkzeug.Response:\n session.pop('uid', None)\n session.pop('expires', None)\n session.pop('nonce', None)\n return redirect(url_for('main.index'))\n\n @view.route(\"/\")\n def index() -> str:\n # Gather the count of unread submissions for each source\n # ID. This query will be joined in the queries for starred and\n # unstarred sources below, and the unread counts added to\n # their result sets as an extra column.\n unread_stmt = (\n db.session.query(Submission.source_id, func.count(\"*\").label(\"num_unread\"))\n .filter_by(seen_files=None, seen_messages=None)\n .group_by(Submission.source_id)\n .subquery()\n )\n\n # Query for starred sources, along with their unread\n # submission counts.\n starred = (\n db.session.query(Source, unread_stmt.c.num_unread)\n .filter_by(pending=False, deleted_at=None)\n .filter(Source.last_updated.isnot(None))\n .filter(SourceStar.starred.is_(True))\n .outerjoin(SourceStar)\n .options(joinedload(Source.submissions))\n .options(joinedload(Source.star))\n .outerjoin(unread_stmt, Source.id == unread_stmt.c.source_id)\n .order_by(Source.last_updated.desc())\n .all()\n )\n\n # Now, add \"num_unread\" attributes to the source entities.\n for source, num_unread in starred:\n source.num_unread = num_unread or 0\n starred = [source for source, num_unread in starred]\n\n # Query for sources without stars, along with their unread\n # submission counts.\n unstarred = (\n db.session.query(Source, unread_stmt.c.num_unread)\n .filter_by(pending=False, deleted_at=None)\n .filter(Source.last_updated.isnot(None))\n .filter(~Source.star.has(SourceStar.starred.is_(True)))\n .options(joinedload(Source.submissions))\n .options(joinedload(Source.star))\n .outerjoin(unread_stmt, Source.id == unread_stmt.c.source_id)\n .order_by(Source.last_updated.desc())\n .all()\n )\n\n # Again, add \"num_unread\" attributes to the source entities.\n for source, num_unread in unstarred:\n source.num_unread = num_unread or 0\n unstarred = [source for source, num_unread in unstarred]\n\n response = render_template(\"index.html\", unstarred=unstarred, starred=starred)\n return response\n\n @view.route('/reply', methods=('POST',))\n def reply() -> werkzeug.Response:\n \"\"\"Attempt to send a Reply from a Journalist to a Source. Empty\n messages are rejected, and an informative error message is flashed\n on the client. In the case of unexpected errors involving database\n transactions (potentially caused by racing request threads that\n modify the same the database object) logging is done in such a way\n so as not to write potentially sensitive information to disk, and a\n generic error message is flashed on the client.\n\n Returns:\n flask.Response: The user is redirected to the same Source\n collection view, regardless if the Reply is created\n successfully.\n \"\"\"\n form = ReplyForm()\n if not form.validate_on_submit():\n for error in form.message.errors:\n flash(error, \"error\")\n return redirect(url_for('col.col', filesystem_id=g.filesystem_id))\n\n g.source.interaction_count += 1\n filename = \"{0}-{1}-reply.gpg\".format(g.source.interaction_count,\n g.source.journalist_filename)\n current_app.crypto_util.encrypt(\n form.message.data,\n [current_app.crypto_util.get_fingerprint(g.filesystem_id),\n config.JOURNALIST_KEY],\n output=current_app.storage.path(g.filesystem_id, filename),\n )\n\n try:\n reply = Reply(g.user, g.source, filename)\n db.session.add(reply)\n db.session.flush()\n seen_reply = SeenReply(reply_id=reply.id, journalist_id=g.user.id)\n db.session.add(seen_reply)\n db.session.commit()\n store.async_add_checksum_for_file(reply)\n except Exception as exc:\n flash(gettext(\n \"An unexpected error occurred! Please \"\n \"inform your admin.\"), \"error\")\n # We take a cautious approach to logging here because we're dealing\n # with responses to sources. It's possible the exception message\n # could contain information we don't want to write to disk.\n current_app.logger.error(\n \"Reply from '{}' (ID {}) failed: {}!\".format(g.user.username,\n g.user.id,\n exc.__class__))\n else:\n\n flash(\n Markup(\n \"<b>{}</b> {}\".format(\n # Translators: Precedes a message confirming the success of an operation.\n escape(gettext(\"Success!\")),\n escape(gettext(\"Your reply has been stored.\"))\n )\n ), 'success')\n finally:\n return redirect(url_for('col.col', filesystem_id=g.filesystem_id))\n\n @view.route('/bulk', methods=('POST',))\n def bulk() -> Union[str, werkzeug.Response]:\n action = request.form['action']\n error_redirect = url_for('col.col', filesystem_id=g.filesystem_id)\n doc_names_selected = request.form.getlist('doc_names_selected')\n selected_docs = [doc for doc in g.source.collection\n if doc.filename in doc_names_selected]\n if selected_docs == []:\n if action == 'download':\n flash(\n Markup(\n \"<b>{}</b> {}\".format(\n # Translators: Error shown when a user has not selected items to act on.\n escape(gettext(\"Nothing Selected\")),\n escape(gettext(\"You must select one or more items for download\"))\n )\n ), 'error')\n elif action in ('delete', 'confirm_delete'):\n flash(\n Markup(\n \"<b>{}</b> {}\".format(\n # Translators: Error shown when a user has not selected items to act on.\n escape(gettext(\"Nothing Selected\")),\n escape(gettext(\"You must select one or more items for deletion\"))\n )\n ), 'error')\n\n return redirect(error_redirect)\n\n if action == 'download':\n source = get_source(g.filesystem_id)\n return download(\n source.journalist_filename, selected_docs, on_error_redirect=error_redirect\n )\n elif action == 'delete':\n return bulk_delete(g.filesystem_id, selected_docs)\n elif action == 'confirm_delete':\n return confirm_bulk_delete(g.filesystem_id, selected_docs)\n else:\n abort(400)\n\n @view.route('/download_unread/<filesystem_id>')\n def download_unread_filesystem_id(filesystem_id: str) -> werkzeug.Response:\n unseen_submissions = (\n Submission.query.join(Source)\n .filter(\n Source.deleted_at.is_(None),\n Source.filesystem_id == filesystem_id\n )\n .filter(~Submission.seen_files.any(), ~Submission.seen_messages.any())\n .all()\n )\n if len(unseen_submissions) == 0:\n flash(gettext(\"No unread submissions for this source.\"), \"error\")\n return redirect(url_for('col.col', filesystem_id=filesystem_id))\n source = get_source(filesystem_id)\n return download(source.journalist_filename, unseen_submissions)\n\n return view\n", "path": "securedrop/journalist_app/main.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom datetime import datetime\nfrom typing import Union\n\nimport werkzeug\nfrom flask import (Blueprint, request, current_app, session, url_for, redirect,\n render_template, g, flash, abort, Markup, escape)\nfrom flask_babel import gettext\nfrom sqlalchemy.orm import joinedload\nfrom sqlalchemy.sql import func\n\nimport store\n\nfrom db import db\nfrom models import SeenReply, Source, SourceStar, Submission, Reply\nfrom journalist_app.forms import ReplyForm\nfrom journalist_app.utils import (validate_user, bulk_delete, download,\n confirm_bulk_delete, get_source)\nfrom sdconfig import SDConfig\n\n\ndef make_blueprint(config: SDConfig) -> Blueprint:\n view = Blueprint('main', __name__)\n\n @view.route('/login', methods=('GET', 'POST'))\n def login() -> Union[str, werkzeug.Response]:\n if request.method == 'POST':\n user = validate_user(request.form['username'],\n request.form['password'],\n request.form['token'])\n if user:\n current_app.logger.info(\"'{}' logged in with the two-factor code {}\"\n .format(request.form['username'],\n request.form['token']))\n\n # Update access metadata\n user.last_access = datetime.utcnow()\n db.session.add(user)\n db.session.commit()\n\n session['uid'] = user.id\n session['nonce'] = user.session_nonce\n return redirect(url_for('main.index'))\n\n return render_template(\"login.html\")\n\n @view.route('/logout')\n def logout() -> werkzeug.Response:\n session.pop('uid', None)\n session.pop('expires', None)\n session.pop('nonce', None)\n return redirect(url_for('main.index'))\n\n @view.route(\"/\")\n def index() -> str:\n # Gather the count of unread submissions for each source\n # ID. This query will be joined in the queries for starred and\n # unstarred sources below, and the unread counts added to\n # their result sets as an extra column.\n unread_stmt = (\n db.session.query(Submission.source_id, func.count(\"*\").label(\"num_unread\"))\n .filter_by(seen_files=None, seen_messages=None)\n .group_by(Submission.source_id)\n .subquery()\n )\n\n # Query for starred sources, along with their unread\n # submission counts.\n starred = (\n db.session.query(Source, unread_stmt.c.num_unread)\n .filter_by(pending=False, deleted_at=None)\n .filter(Source.last_updated.isnot(None))\n .filter(SourceStar.starred.is_(True))\n .outerjoin(SourceStar)\n .options(joinedload(Source.submissions))\n .options(joinedload(Source.star))\n .outerjoin(unread_stmt, Source.id == unread_stmt.c.source_id)\n .order_by(Source.last_updated.desc())\n .all()\n )\n\n # Now, add \"num_unread\" attributes to the source entities.\n for source, num_unread in starred:\n source.num_unread = num_unread or 0\n starred = [source for source, num_unread in starred]\n\n # Query for sources without stars, along with their unread\n # submission counts.\n unstarred = (\n db.session.query(Source, unread_stmt.c.num_unread)\n .filter_by(pending=False, deleted_at=None)\n .filter(Source.last_updated.isnot(None))\n .filter(~Source.star.has(SourceStar.starred.is_(True)))\n .options(joinedload(Source.submissions))\n .options(joinedload(Source.star))\n .outerjoin(unread_stmt, Source.id == unread_stmt.c.source_id)\n .order_by(Source.last_updated.desc())\n .all()\n )\n\n # Again, add \"num_unread\" attributes to the source entities.\n for source, num_unread in unstarred:\n source.num_unread = num_unread or 0\n unstarred = [source for source, num_unread in unstarred]\n\n response = render_template(\"index.html\", unstarred=unstarred, starred=starred)\n return response\n\n @view.route('/reply', methods=('POST',))\n def reply() -> werkzeug.Response:\n \"\"\"Attempt to send a Reply from a Journalist to a Source. Empty\n messages are rejected, and an informative error message is flashed\n on the client. In the case of unexpected errors involving database\n transactions (potentially caused by racing request threads that\n modify the same the database object) logging is done in such a way\n so as not to write potentially sensitive information to disk, and a\n generic error message is flashed on the client.\n\n Returns:\n flask.Response: The user is redirected to the same Source\n collection view, regardless if the Reply is created\n successfully.\n \"\"\"\n form = ReplyForm()\n if not form.validate_on_submit():\n for error in form.message.errors:\n flash(error, \"error\")\n return redirect(url_for('col.col', filesystem_id=g.filesystem_id))\n\n g.source.interaction_count += 1\n filename = \"{0}-{1}-reply.gpg\".format(g.source.interaction_count,\n g.source.journalist_filename)\n current_app.crypto_util.encrypt(\n form.message.data,\n [current_app.crypto_util.get_fingerprint(g.filesystem_id),\n config.JOURNALIST_KEY],\n output=current_app.storage.path(g.filesystem_id, filename),\n )\n\n try:\n reply = Reply(g.user, g.source, filename)\n db.session.add(reply)\n db.session.flush()\n seen_reply = SeenReply(reply_id=reply.id, journalist_id=g.user.id)\n db.session.add(seen_reply)\n db.session.commit()\n store.async_add_checksum_for_file(reply)\n except Exception as exc:\n flash(gettext(\n \"An unexpected error occurred! Please \"\n \"inform your admin.\"), \"error\")\n # We take a cautious approach to logging here because we're dealing\n # with responses to sources. It's possible the exception message\n # could contain information we don't want to write to disk.\n current_app.logger.error(\n \"Reply from '{}' (ID {}) failed: {}!\".format(g.user.username,\n g.user.id,\n exc.__class__))\n else:\n\n flash(\n Markup(\n \"<b>{}</b> {}\".format(\n # Translators: Precedes a message confirming the success of an operation.\n escape(gettext(\"Success!\")),\n escape(gettext(\"The source will receive your reply \"\n \"next time they log in.\"))\n )\n ), 'success')\n finally:\n return redirect(url_for('col.col', filesystem_id=g.filesystem_id))\n\n @view.route('/bulk', methods=('POST',))\n def bulk() -> Union[str, werkzeug.Response]:\n action = request.form['action']\n error_redirect = url_for('col.col', filesystem_id=g.filesystem_id)\n doc_names_selected = request.form.getlist('doc_names_selected')\n selected_docs = [doc for doc in g.source.collection\n if doc.filename in doc_names_selected]\n if selected_docs == []:\n if action == 'download':\n flash(\n Markup(\n \"<b>{}</b> {}\".format(\n # Translators: Error shown when a user has not selected items to act on.\n escape(gettext(\"Nothing Selected\")),\n escape(gettext(\"You must select one or more items for download\"))\n )\n ), 'error')\n elif action in ('delete', 'confirm_delete'):\n flash(\n Markup(\n \"<b>{}</b> {}\".format(\n # Translators: Error shown when a user has not selected items to act on.\n escape(gettext(\"Nothing Selected\")),\n escape(gettext(\"You must select one or more items for deletion\"))\n )\n ), 'error')\n\n return redirect(error_redirect)\n\n if action == 'download':\n source = get_source(g.filesystem_id)\n return download(\n source.journalist_filename, selected_docs, on_error_redirect=error_redirect\n )\n elif action == 'delete':\n return bulk_delete(g.filesystem_id, selected_docs)\n elif action == 'confirm_delete':\n return confirm_bulk_delete(g.filesystem_id, selected_docs)\n else:\n abort(400)\n\n @view.route('/download_unread/<filesystem_id>')\n def download_unread_filesystem_id(filesystem_id: str) -> werkzeug.Response:\n unseen_submissions = (\n Submission.query.join(Source)\n .filter(\n Source.deleted_at.is_(None),\n Source.filesystem_id == filesystem_id\n )\n .filter(~Submission.seen_files.any(), ~Submission.seen_messages.any())\n .all()\n )\n if len(unseen_submissions) == 0:\n flash(gettext(\"No unread submissions for this source.\"), \"error\")\n return redirect(url_for('col.col', filesystem_id=filesystem_id))\n source = get_source(filesystem_id)\n return download(source.journalist_filename, unseen_submissions)\n\n return view\n", "path": "securedrop/journalist_app/main.py"}]}
2,838
143
gh_patches_debug_9587
rasdani/github-patches
git_diff
freedomofpress__securedrop-2475
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Session expiring do not display a localized logout message. # Bug ## Description Like #2391, if a source has their session expire, they will not be shown a localized message when they log out. ## Steps to Reproduce Set session expire to 30 seconds. Log in. Set locale to not-english. Wait 30 seconds. Refresh. See no-localized flashed message. ## Expected Behavior The logout message is localized. ## Actual Behavior It is not. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `securedrop/source_app/__init__.py` Content: ``` 1 from datetime import datetime, timedelta 2 from flask import (Flask, render_template, flash, Markup, request, g, session, 3 url_for, redirect) 4 from flask_babel import gettext 5 from flask_assets import Environment 6 from flask_wtf.csrf import CSRFProtect 7 from jinja2 import evalcontextfilter 8 from os import path 9 from sqlalchemy.orm.exc import NoResultFound 10 11 import crypto_util 12 import i18n 13 import store 14 import template_filters 15 import version 16 17 from db import Source, db_session 18 from request_that_secures_file_uploads import RequestThatSecuresFileUploads 19 from source_app import main, info, api 20 from source_app.decorators import ignore_static 21 from source_app.utils import logged_in 22 23 24 def create_app(config): 25 app = Flask(__name__, 26 template_folder=config.SOURCE_TEMPLATES_DIR, 27 static_folder=path.join(config.SECUREDROP_ROOT, 'static')) 28 app.request_class = RequestThatSecuresFileUploads 29 app.config.from_object(config.SourceInterfaceFlaskConfig) 30 31 # The default CSRF token expiration is 1 hour. Since large uploads can 32 # take longer than an hour over Tor, we increase the valid window to 24h. 33 app.config['WTF_CSRF_TIME_LIMIT'] = 60 * 60 * 24 34 CSRFProtect(app) 35 36 assets = Environment(app) 37 app.config['assets'] = assets 38 39 i18n.setup_app(app) 40 41 app.jinja_env.trim_blocks = True 42 app.jinja_env.lstrip_blocks = True 43 app.jinja_env.globals['version'] = version.__version__ 44 if getattr(config, 'CUSTOM_HEADER_IMAGE', None): 45 app.jinja_env.globals['header_image'] = config.CUSTOM_HEADER_IMAGE 46 app.jinja_env.globals['use_custom_header_image'] = True 47 else: 48 app.jinja_env.globals['header_image'] = 'logo.png' 49 app.jinja_env.globals['use_custom_header_image'] = False 50 51 app.jinja_env.filters['rel_datetime_format'] = \ 52 template_filters.rel_datetime_format 53 app.jinja_env.filters['nl2br'] = evalcontextfilter(template_filters.nl2br) 54 app.jinja_env.filters['filesizeformat'] = template_filters.filesizeformat 55 56 for module in [main, info, api]: 57 app.register_blueprint(module.make_blueprint(config)) 58 59 @app.before_request 60 @ignore_static 61 def check_tor2web(): 62 # ignore_static here so we only flash a single message warning 63 # about Tor2Web, corresponding to the initial page load. 64 if 'X-tor2web' in request.headers: 65 flash(Markup(gettext( 66 '<strong>WARNING:</strong> You appear to be using Tor2Web. ' 67 'This <strong>does not</strong> provide anonymity. ' 68 '<a href="{url}">Why is this dangerous?</a>') 69 .format(url=url_for('info.tor2web_warning'))), 70 "banner-warning") 71 72 @app.before_request 73 @ignore_static 74 def setup_g(): 75 """Store commonly used values in Flask's special g object""" 76 g.locale = i18n.get_locale() 77 g.text_direction = i18n.get_text_direction(g.locale) 78 g.html_lang = i18n.locale_to_rfc_5646(g.locale) 79 g.locales = i18n.get_locale2name() 80 81 if 'expires' in session and datetime.utcnow() >= session['expires']: 82 session.clear() 83 msg = render_template('session_timeout.html') 84 flash(Markup(msg), "important") 85 86 session['expires'] = datetime.utcnow() + \ 87 timedelta(minutes=getattr(config, 88 'SESSION_EXPIRATION_MINUTES', 89 30)) 90 91 # ignore_static here because `crypto_util.hash_codename` is scrypt 92 # (very time consuming), and we don't need to waste time running if 93 # we're just serving a static resource that won't need to access 94 # these common values. 95 if logged_in(): 96 g.codename = session['codename'] 97 g.filesystem_id = crypto_util.hash_codename(g.codename) 98 try: 99 g.source = Source.query \ 100 .filter(Source.filesystem_id == g.filesystem_id) \ 101 .one() 102 except NoResultFound as e: 103 app.logger.error( 104 "Found no Sources when one was expected: %s" % 105 (e,)) 106 del session['logged_in'] 107 del session['codename'] 108 return redirect(url_for('main.index')) 109 g.loc = store.path(g.filesystem_id) 110 111 @app.teardown_appcontext 112 def shutdown_session(exception=None): 113 """Automatically remove database sessions at the end of the request, or 114 when the application shuts down""" 115 db_session.remove() 116 117 @app.errorhandler(404) 118 def page_not_found(error): 119 return render_template('notfound.html'), 404 120 121 @app.errorhandler(500) 122 def internal_error(error): 123 return render_template('error.html'), 500 124 125 return app 126 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/securedrop/source_app/__init__.py b/securedrop/source_app/__init__.py --- a/securedrop/source_app/__init__.py +++ b/securedrop/source_app/__init__.py @@ -79,8 +79,11 @@ g.locales = i18n.get_locale2name() if 'expires' in session and datetime.utcnow() >= session['expires']: - session.clear() msg = render_template('session_timeout.html') + + # clear the session after we render the message so it's localized + session.clear() + flash(Markup(msg), "important") session['expires'] = datetime.utcnow() + \
{"golden_diff": "diff --git a/securedrop/source_app/__init__.py b/securedrop/source_app/__init__.py\n--- a/securedrop/source_app/__init__.py\n+++ b/securedrop/source_app/__init__.py\n@@ -79,8 +79,11 @@\n g.locales = i18n.get_locale2name()\n \n if 'expires' in session and datetime.utcnow() >= session['expires']:\n- session.clear()\n msg = render_template('session_timeout.html')\n+\n+ # clear the session after we render the message so it's localized\n+ session.clear()\n+\n flash(Markup(msg), \"important\")\n \n session['expires'] = datetime.utcnow() + \\\n", "issue": "Session expiring do not display a localized logout message.\n# Bug\r\n\r\n## Description\r\n\r\nLike #2391, if a source has their session expire, they will not be shown a localized message when they log out.\r\n\r\n## Steps to Reproduce\r\n\r\nSet session expire to 30 seconds. Log in. Set locale to not-english. Wait 30 seconds. Refresh. See no-localized flashed message.\r\n\r\n## Expected Behavior\r\n\r\nThe logout message is localized.\r\n\r\n## Actual Behavior\r\n\r\nIt is not.\n", "before_files": [{"content": "from datetime import datetime, timedelta\nfrom flask import (Flask, render_template, flash, Markup, request, g, session,\n url_for, redirect)\nfrom flask_babel import gettext\nfrom flask_assets import Environment\nfrom flask_wtf.csrf import CSRFProtect\nfrom jinja2 import evalcontextfilter\nfrom os import path\nfrom sqlalchemy.orm.exc import NoResultFound\n\nimport crypto_util\nimport i18n\nimport store\nimport template_filters\nimport version\n\nfrom db import Source, db_session\nfrom request_that_secures_file_uploads import RequestThatSecuresFileUploads\nfrom source_app import main, info, api\nfrom source_app.decorators import ignore_static\nfrom source_app.utils import logged_in\n\n\ndef create_app(config):\n app = Flask(__name__,\n template_folder=config.SOURCE_TEMPLATES_DIR,\n static_folder=path.join(config.SECUREDROP_ROOT, 'static'))\n app.request_class = RequestThatSecuresFileUploads\n app.config.from_object(config.SourceInterfaceFlaskConfig)\n\n # The default CSRF token expiration is 1 hour. Since large uploads can\n # take longer than an hour over Tor, we increase the valid window to 24h.\n app.config['WTF_CSRF_TIME_LIMIT'] = 60 * 60 * 24\n CSRFProtect(app)\n\n assets = Environment(app)\n app.config['assets'] = assets\n\n i18n.setup_app(app)\n\n app.jinja_env.trim_blocks = True\n app.jinja_env.lstrip_blocks = True\n app.jinja_env.globals['version'] = version.__version__\n if getattr(config, 'CUSTOM_HEADER_IMAGE', None):\n app.jinja_env.globals['header_image'] = config.CUSTOM_HEADER_IMAGE\n app.jinja_env.globals['use_custom_header_image'] = True\n else:\n app.jinja_env.globals['header_image'] = 'logo.png'\n app.jinja_env.globals['use_custom_header_image'] = False\n\n app.jinja_env.filters['rel_datetime_format'] = \\\n template_filters.rel_datetime_format\n app.jinja_env.filters['nl2br'] = evalcontextfilter(template_filters.nl2br)\n app.jinja_env.filters['filesizeformat'] = template_filters.filesizeformat\n\n for module in [main, info, api]:\n app.register_blueprint(module.make_blueprint(config))\n\n @app.before_request\n @ignore_static\n def check_tor2web():\n # ignore_static here so we only flash a single message warning\n # about Tor2Web, corresponding to the initial page load.\n if 'X-tor2web' in request.headers:\n flash(Markup(gettext(\n '<strong>WARNING:</strong> You appear to be using Tor2Web. '\n 'This <strong>does not</strong> provide anonymity. '\n '<a href=\"{url}\">Why is this dangerous?</a>')\n .format(url=url_for('info.tor2web_warning'))),\n \"banner-warning\")\n\n @app.before_request\n @ignore_static\n def setup_g():\n \"\"\"Store commonly used values in Flask's special g object\"\"\"\n g.locale = i18n.get_locale()\n g.text_direction = i18n.get_text_direction(g.locale)\n g.html_lang = i18n.locale_to_rfc_5646(g.locale)\n g.locales = i18n.get_locale2name()\n\n if 'expires' in session and datetime.utcnow() >= session['expires']:\n session.clear()\n msg = render_template('session_timeout.html')\n flash(Markup(msg), \"important\")\n\n session['expires'] = datetime.utcnow() + \\\n timedelta(minutes=getattr(config,\n 'SESSION_EXPIRATION_MINUTES',\n 30))\n\n # ignore_static here because `crypto_util.hash_codename` is scrypt\n # (very time consuming), and we don't need to waste time running if\n # we're just serving a static resource that won't need to access\n # these common values.\n if logged_in():\n g.codename = session['codename']\n g.filesystem_id = crypto_util.hash_codename(g.codename)\n try:\n g.source = Source.query \\\n .filter(Source.filesystem_id == g.filesystem_id) \\\n .one()\n except NoResultFound as e:\n app.logger.error(\n \"Found no Sources when one was expected: %s\" %\n (e,))\n del session['logged_in']\n del session['codename']\n return redirect(url_for('main.index'))\n g.loc = store.path(g.filesystem_id)\n\n @app.teardown_appcontext\n def shutdown_session(exception=None):\n \"\"\"Automatically remove database sessions at the end of the request, or\n when the application shuts down\"\"\"\n db_session.remove()\n\n @app.errorhandler(404)\n def page_not_found(error):\n return render_template('notfound.html'), 404\n\n @app.errorhandler(500)\n def internal_error(error):\n return render_template('error.html'), 500\n\n return app\n", "path": "securedrop/source_app/__init__.py"}], "after_files": [{"content": "from datetime import datetime, timedelta\nfrom flask import (Flask, render_template, flash, Markup, request, g, session,\n url_for, redirect)\nfrom flask_babel import gettext\nfrom flask_assets import Environment\nfrom flask_wtf.csrf import CSRFProtect\nfrom jinja2 import evalcontextfilter\nfrom os import path\nfrom sqlalchemy.orm.exc import NoResultFound\n\nimport crypto_util\nimport i18n\nimport store\nimport template_filters\nimport version\n\nfrom db import Source, db_session\nfrom request_that_secures_file_uploads import RequestThatSecuresFileUploads\nfrom source_app import main, info, api\nfrom source_app.decorators import ignore_static\nfrom source_app.utils import logged_in\n\n\ndef create_app(config):\n app = Flask(__name__,\n template_folder=config.SOURCE_TEMPLATES_DIR,\n static_folder=path.join(config.SECUREDROP_ROOT, 'static'))\n app.request_class = RequestThatSecuresFileUploads\n app.config.from_object(config.SourceInterfaceFlaskConfig)\n\n # The default CSRF token expiration is 1 hour. Since large uploads can\n # take longer than an hour over Tor, we increase the valid window to 24h.\n app.config['WTF_CSRF_TIME_LIMIT'] = 60 * 60 * 24\n CSRFProtect(app)\n\n assets = Environment(app)\n app.config['assets'] = assets\n\n i18n.setup_app(app)\n\n app.jinja_env.trim_blocks = True\n app.jinja_env.lstrip_blocks = True\n app.jinja_env.globals['version'] = version.__version__\n if getattr(config, 'CUSTOM_HEADER_IMAGE', None):\n app.jinja_env.globals['header_image'] = config.CUSTOM_HEADER_IMAGE\n app.jinja_env.globals['use_custom_header_image'] = True\n else:\n app.jinja_env.globals['header_image'] = 'logo.png'\n app.jinja_env.globals['use_custom_header_image'] = False\n\n app.jinja_env.filters['rel_datetime_format'] = \\\n template_filters.rel_datetime_format\n app.jinja_env.filters['nl2br'] = evalcontextfilter(template_filters.nl2br)\n app.jinja_env.filters['filesizeformat'] = template_filters.filesizeformat\n\n for module in [main, info, api]:\n app.register_blueprint(module.make_blueprint(config))\n\n @app.before_request\n @ignore_static\n def check_tor2web():\n # ignore_static here so we only flash a single message warning\n # about Tor2Web, corresponding to the initial page load.\n if 'X-tor2web' in request.headers:\n flash(Markup(gettext(\n '<strong>WARNING:</strong> You appear to be using Tor2Web. '\n 'This <strong>does not</strong> provide anonymity. '\n '<a href=\"{url}\">Why is this dangerous?</a>')\n .format(url=url_for('info.tor2web_warning'))),\n \"banner-warning\")\n\n @app.before_request\n @ignore_static\n def setup_g():\n \"\"\"Store commonly used values in Flask's special g object\"\"\"\n g.locale = i18n.get_locale()\n g.text_direction = i18n.get_text_direction(g.locale)\n g.html_lang = i18n.locale_to_rfc_5646(g.locale)\n g.locales = i18n.get_locale2name()\n\n if 'expires' in session and datetime.utcnow() >= session['expires']:\n msg = render_template('session_timeout.html')\n\n # clear the session after we render the message so it's localized\n session.clear()\n\n flash(Markup(msg), \"important\")\n\n session['expires'] = datetime.utcnow() + \\\n timedelta(minutes=getattr(config,\n 'SESSION_EXPIRATION_MINUTES',\n 30))\n\n # ignore_static here because `crypto_util.hash_codename` is scrypt\n # (very time consuming), and we don't need to waste time running if\n # we're just serving a static resource that won't need to access\n # these common values.\n if logged_in():\n g.codename = session['codename']\n g.filesystem_id = crypto_util.hash_codename(g.codename)\n try:\n g.source = Source.query \\\n .filter(Source.filesystem_id == g.filesystem_id) \\\n .one()\n except NoResultFound as e:\n app.logger.error(\n \"Found no Sources when one was expected: %s\" %\n (e,))\n del session['logged_in']\n del session['codename']\n return redirect(url_for('main.index'))\n g.loc = store.path(g.filesystem_id)\n\n @app.teardown_appcontext\n def shutdown_session(exception=None):\n \"\"\"Automatically remove database sessions at the end of the request, or\n when the application shuts down\"\"\"\n db_session.remove()\n\n @app.errorhandler(404)\n def page_not_found(error):\n return render_template('notfound.html'), 404\n\n @app.errorhandler(500)\n def internal_error(error):\n return render_template('error.html'), 500\n\n return app\n", "path": "securedrop/source_app/__init__.py"}]}
1,736
152
gh_patches_debug_29478
rasdani/github-patches
git_diff
vyperlang__vyper-3409
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Immutable variables can be read before assignment ### Version Information * vyper Version (output of `vyper --version`): 0.3.6+commit.4a2124d * OS: osx * Python Version: 3.9.13 ### What's your issue about? Immutable variables can be read before assignment, their memory location during constructor execution is accessed, but that memory is not initialized yet. Its value is not necessarily `empty(type)` since memory could have been written to ephemerally. This should not compile: ```python # @version 0.3.6 a:immutable(uint256) b:uint256 @payable @external def __init__(s:address): self.b = a a = 12 ``` A more interesting example where transient memory written during execution of `create_copy_of()` is written into the storage variable `b`: ```python # @version 0.3.6 a:immutable(uint256) b:uint256 @payable @external def __init__(): s:address = 0x9D0464996170c6B9e75eED71c68B99dDEDf279e8 #random contract containing code that is copied to memory before redeploying c:address = create_copy_of(s) self.b = a a = 12 ``` ### How can it be fixed? check that immutable variables are assigned before usage --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `vyper/codegen/module.py` Content: ``` 1 # a contract.vy -- all functions and constructor 2 3 from typing import Any, List, Optional 4 5 from vyper import ast as vy_ast 6 from vyper.codegen.core import shr 7 from vyper.codegen.function_definitions import FuncIRInfo, generate_ir_for_function 8 from vyper.codegen.global_context import GlobalContext 9 from vyper.codegen.ir_node import IRnode 10 from vyper.exceptions import CompilerPanic 11 12 13 def _topsort_helper(functions, lookup): 14 # single pass to get a global topological sort of functions (so that each 15 # function comes after each of its callees). may have duplicates, which get 16 # filtered out in _topsort() 17 18 ret = [] 19 for f in functions: 20 # called_functions is a list of ContractFunctions, need to map 21 # back to FunctionDefs. 22 callees = [lookup[t.name] for t in f._metadata["type"].called_functions] 23 ret.extend(_topsort_helper(callees, lookup)) 24 ret.append(f) 25 26 return ret 27 28 29 def _topsort(functions): 30 lookup = {f.name: f for f in functions} 31 # strip duplicates 32 return list(dict.fromkeys(_topsort_helper(functions, lookup))) 33 34 35 def _is_constructor(func_ast): 36 return func_ast._metadata["type"].is_constructor 37 38 39 def _is_fallback(func_ast): 40 return func_ast._metadata["type"].is_fallback 41 42 43 def _is_internal(func_ast): 44 return func_ast._metadata["type"].is_internal 45 46 47 def _is_payable(func_ast): 48 return func_ast._metadata["type"].is_payable 49 50 51 # codegen for all runtime functions + callvalue/calldata checks + method selector routines 52 def _runtime_ir(runtime_functions, global_ctx): 53 # categorize the runtime functions because we will organize the runtime 54 # code into the following sections: 55 # payable functions, nonpayable functions, fallback function, internal_functions 56 internal_functions = [f for f in runtime_functions if _is_internal(f)] 57 58 external_functions = [f for f in runtime_functions if not _is_internal(f)] 59 default_function = next((f for f in external_functions if _is_fallback(f)), None) 60 61 # functions that need to go exposed in the selector section 62 regular_functions = [f for f in external_functions if not _is_fallback(f)] 63 payables = [f for f in regular_functions if _is_payable(f)] 64 nonpayables = [f for f in regular_functions if not _is_payable(f)] 65 66 # create a map of the IR functions since they might live in both 67 # runtime and deploy code (if init function calls them) 68 internal_functions_ir: list[IRnode] = [] 69 70 for func_ast in internal_functions: 71 func_ir = generate_ir_for_function(func_ast, global_ctx, False) 72 internal_functions_ir.append(func_ir) 73 74 # for some reason, somebody may want to deploy a contract with no 75 # external functions, or more likely, a "pure data" contract which 76 # contains immutables 77 if len(external_functions) == 0: 78 # TODO: prune internal functions in this case? dead code eliminator 79 # might not eliminate them, since internal function jumpdest is at the 80 # first instruction in the contract. 81 runtime = ["seq"] + internal_functions_ir 82 return runtime 83 84 # note: if the user does not provide one, the default fallback function 85 # reverts anyway. so it does not hurt to batch the payable check. 86 default_is_nonpayable = default_function is None or not _is_payable(default_function) 87 88 # when a contract has a nonpayable default function, 89 # we can do a single check for all nonpayable functions 90 batch_payable_check = len(nonpayables) > 0 and default_is_nonpayable 91 skip_nonpayable_check = batch_payable_check 92 93 selector_section = ["seq"] 94 95 for func_ast in payables: 96 func_ir = generate_ir_for_function(func_ast, global_ctx, False) 97 selector_section.append(func_ir) 98 99 if batch_payable_check: 100 selector_section.append(["assert", ["iszero", "callvalue"]]) 101 102 for func_ast in nonpayables: 103 func_ir = generate_ir_for_function(func_ast, global_ctx, skip_nonpayable_check) 104 selector_section.append(func_ir) 105 106 if default_function: 107 fallback_ir = generate_ir_for_function(default_function, global_ctx, skip_nonpayable_check) 108 else: 109 fallback_ir = IRnode.from_list( 110 ["revert", 0, 0], annotation="Default function", error_msg="fallback function" 111 ) 112 113 # ensure the external jumptable section gets closed out 114 # (for basic block hygiene and also for zksync interpreter) 115 # NOTE: this jump gets optimized out in assembly since the 116 # fallback label is the immediate next instruction, 117 close_selector_section = ["goto", "fallback"] 118 119 runtime = [ 120 "seq", 121 ["with", "_calldata_method_id", shr(224, ["calldataload", 0]), selector_section], 122 close_selector_section, 123 ["label", "fallback", ["var_list"], fallback_ir], 124 ] 125 126 # note: dead code eliminator will clean dead functions 127 runtime.extend(internal_functions_ir) 128 129 return runtime 130 131 132 # take a GlobalContext, and generate the runtime and deploy IR 133 def generate_ir_for_module(global_ctx: GlobalContext) -> tuple[IRnode, IRnode]: 134 # order functions so that each function comes after all of its callees 135 function_defs = _topsort(global_ctx.functions) 136 137 init_function: Optional[vy_ast.FunctionDef] = None 138 139 # generate all FuncIRInfos 140 for f in function_defs: 141 func_t = f._metadata["type"] 142 func_t._ir_info = FuncIRInfo(func_t) 143 144 runtime_functions = [f for f in function_defs if not _is_constructor(f)] 145 init_function = next((f for f in function_defs if _is_constructor(f)), None) 146 147 runtime = _runtime_ir(runtime_functions, global_ctx) 148 149 deploy_code: List[Any] = ["seq"] 150 immutables_len = global_ctx.immutable_section_bytes 151 if init_function: 152 # TODO might be cleaner to separate this into an _init_ir helper func 153 init_func_ir = generate_ir_for_function( 154 init_function, global_ctx, skip_nonpayable_check=False, is_ctor_context=True 155 ) 156 deploy_code.append(init_func_ir) 157 158 # pass the amount of memory allocated for the init function 159 # so that deployment does not clobber while preparing immutables 160 # note: (deploy mem_ofst, code, extra_padding) 161 init_mem_used = init_function._metadata["type"]._ir_info.frame_info.mem_used 162 deploy_code.append(["deploy", init_mem_used, runtime, immutables_len]) 163 164 # internal functions come after everything else 165 internal_functions = [f for f in runtime_functions if _is_internal(f)] 166 for f in internal_functions: 167 func_ir = generate_ir_for_function( 168 f, global_ctx, skip_nonpayable_check=False, is_ctor_context=True 169 ) 170 # note: we depend on dead code eliminator to clean dead function defs 171 deploy_code.append(func_ir) 172 173 else: 174 if immutables_len != 0: 175 raise CompilerPanic("unreachable") 176 deploy_code.append(["deploy", 0, runtime, 0]) 177 178 return IRnode.from_list(deploy_code), IRnode.from_list(runtime) 179 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/vyper/codegen/module.py b/vyper/codegen/module.py --- a/vyper/codegen/module.py +++ b/vyper/codegen/module.py @@ -153,12 +153,31 @@ init_func_ir = generate_ir_for_function( init_function, global_ctx, skip_nonpayable_check=False, is_ctor_context=True ) - deploy_code.append(init_func_ir) # pass the amount of memory allocated for the init function # so that deployment does not clobber while preparing immutables # note: (deploy mem_ofst, code, extra_padding) init_mem_used = init_function._metadata["type"]._ir_info.frame_info.mem_used + + # force msize to be initialized past the end of immutables section + # so that builtins which use `msize` for "dynamic" memory + # allocation do not clobber uninitialized immutables. + # cf. GH issue 3101. + # note mload/iload X touches bytes from X to X+32, and msize rounds up + # to the nearest 32, so `iload`ing `immutables_len - 32` guarantees + # that `msize` will refer to a memory location of at least + # `<immutables_start> + immutables_len` (where <immutables_start> == + # `_mem_deploy_end` as defined in the assembler). + # note: + # mload 32 => msize == 64 + # mload 33 => msize == 96 + # assumption in general: (mload X) => msize == ceil32(X + 32) + # see py-evm extend_memory: after_size = ceil32(start_position + size) + if immutables_len > 0: + deploy_code.append(["iload", max(0, immutables_len - 32)]) + + deploy_code.append(init_func_ir) + deploy_code.append(["deploy", init_mem_used, runtime, immutables_len]) # internal functions come after everything else
{"golden_diff": "diff --git a/vyper/codegen/module.py b/vyper/codegen/module.py\n--- a/vyper/codegen/module.py\n+++ b/vyper/codegen/module.py\n@@ -153,12 +153,31 @@\n init_func_ir = generate_ir_for_function(\n init_function, global_ctx, skip_nonpayable_check=False, is_ctor_context=True\n )\n- deploy_code.append(init_func_ir)\n \n # pass the amount of memory allocated for the init function\n # so that deployment does not clobber while preparing immutables\n # note: (deploy mem_ofst, code, extra_padding)\n init_mem_used = init_function._metadata[\"type\"]._ir_info.frame_info.mem_used\n+\n+ # force msize to be initialized past the end of immutables section\n+ # so that builtins which use `msize` for \"dynamic\" memory\n+ # allocation do not clobber uninitialized immutables.\n+ # cf. GH issue 3101.\n+ # note mload/iload X touches bytes from X to X+32, and msize rounds up\n+ # to the nearest 32, so `iload`ing `immutables_len - 32` guarantees\n+ # that `msize` will refer to a memory location of at least\n+ # `<immutables_start> + immutables_len` (where <immutables_start> ==\n+ # `_mem_deploy_end` as defined in the assembler).\n+ # note:\n+ # mload 32 => msize == 64\n+ # mload 33 => msize == 96\n+ # assumption in general: (mload X) => msize == ceil32(X + 32)\n+ # see py-evm extend_memory: after_size = ceil32(start_position + size)\n+ if immutables_len > 0:\n+ deploy_code.append([\"iload\", max(0, immutables_len - 32)])\n+\n+ deploy_code.append(init_func_ir)\n+\n deploy_code.append([\"deploy\", init_mem_used, runtime, immutables_len])\n \n # internal functions come after everything else\n", "issue": "Immutable variables can be read before assignment\n### Version Information\r\n\r\n* vyper Version (output of `vyper --version`): 0.3.6+commit.4a2124d\r\n* OS: osx\r\n* Python Version: 3.9.13\r\n \r\n### What's your issue about?\r\n\r\nImmutable variables can be read before assignment, their memory location during constructor execution is accessed, but that memory is not initialized yet. Its value is not necessarily `empty(type)` since memory could have been written to ephemerally.\r\n\r\nThis should not compile:\r\n\r\n```python\r\n# @version 0.3.6\r\n\r\na:immutable(uint256)\r\nb:uint256\r\n\r\n@payable\r\n@external\r\ndef __init__(s:address):\r\n self.b = a\r\n a = 12\r\n```\r\n\r\nA more interesting example where transient memory written during execution of `create_copy_of()` is written into the storage variable `b`:\r\n\r\n```python\r\n# @version 0.3.6\r\n\r\na:immutable(uint256)\r\nb:uint256\r\n\r\n@payable\r\n@external\r\ndef __init__():\r\n s:address = 0x9D0464996170c6B9e75eED71c68B99dDEDf279e8 #random contract containing code that is copied to memory before redeploying\r\n c:address = create_copy_of(s)\r\n self.b = a\r\n a = 12\r\n```\r\n\r\n### How can it be fixed?\r\n\r\ncheck that immutable variables are assigned before usage\r\n\n", "before_files": [{"content": "# a contract.vy -- all functions and constructor\n\nfrom typing import Any, List, Optional\n\nfrom vyper import ast as vy_ast\nfrom vyper.codegen.core import shr\nfrom vyper.codegen.function_definitions import FuncIRInfo, generate_ir_for_function\nfrom vyper.codegen.global_context import GlobalContext\nfrom vyper.codegen.ir_node import IRnode\nfrom vyper.exceptions import CompilerPanic\n\n\ndef _topsort_helper(functions, lookup):\n # single pass to get a global topological sort of functions (so that each\n # function comes after each of its callees). may have duplicates, which get\n # filtered out in _topsort()\n\n ret = []\n for f in functions:\n # called_functions is a list of ContractFunctions, need to map\n # back to FunctionDefs.\n callees = [lookup[t.name] for t in f._metadata[\"type\"].called_functions]\n ret.extend(_topsort_helper(callees, lookup))\n ret.append(f)\n\n return ret\n\n\ndef _topsort(functions):\n lookup = {f.name: f for f in functions}\n # strip duplicates\n return list(dict.fromkeys(_topsort_helper(functions, lookup)))\n\n\ndef _is_constructor(func_ast):\n return func_ast._metadata[\"type\"].is_constructor\n\n\ndef _is_fallback(func_ast):\n return func_ast._metadata[\"type\"].is_fallback\n\n\ndef _is_internal(func_ast):\n return func_ast._metadata[\"type\"].is_internal\n\n\ndef _is_payable(func_ast):\n return func_ast._metadata[\"type\"].is_payable\n\n\n# codegen for all runtime functions + callvalue/calldata checks + method selector routines\ndef _runtime_ir(runtime_functions, global_ctx):\n # categorize the runtime functions because we will organize the runtime\n # code into the following sections:\n # payable functions, nonpayable functions, fallback function, internal_functions\n internal_functions = [f for f in runtime_functions if _is_internal(f)]\n\n external_functions = [f for f in runtime_functions if not _is_internal(f)]\n default_function = next((f for f in external_functions if _is_fallback(f)), None)\n\n # functions that need to go exposed in the selector section\n regular_functions = [f for f in external_functions if not _is_fallback(f)]\n payables = [f for f in regular_functions if _is_payable(f)]\n nonpayables = [f for f in regular_functions if not _is_payable(f)]\n\n # create a map of the IR functions since they might live in both\n # runtime and deploy code (if init function calls them)\n internal_functions_ir: list[IRnode] = []\n\n for func_ast in internal_functions:\n func_ir = generate_ir_for_function(func_ast, global_ctx, False)\n internal_functions_ir.append(func_ir)\n\n # for some reason, somebody may want to deploy a contract with no\n # external functions, or more likely, a \"pure data\" contract which\n # contains immutables\n if len(external_functions) == 0:\n # TODO: prune internal functions in this case? dead code eliminator\n # might not eliminate them, since internal function jumpdest is at the\n # first instruction in the contract.\n runtime = [\"seq\"] + internal_functions_ir\n return runtime\n\n # note: if the user does not provide one, the default fallback function\n # reverts anyway. so it does not hurt to batch the payable check.\n default_is_nonpayable = default_function is None or not _is_payable(default_function)\n\n # when a contract has a nonpayable default function,\n # we can do a single check for all nonpayable functions\n batch_payable_check = len(nonpayables) > 0 and default_is_nonpayable\n skip_nonpayable_check = batch_payable_check\n\n selector_section = [\"seq\"]\n\n for func_ast in payables:\n func_ir = generate_ir_for_function(func_ast, global_ctx, False)\n selector_section.append(func_ir)\n\n if batch_payable_check:\n selector_section.append([\"assert\", [\"iszero\", \"callvalue\"]])\n\n for func_ast in nonpayables:\n func_ir = generate_ir_for_function(func_ast, global_ctx, skip_nonpayable_check)\n selector_section.append(func_ir)\n\n if default_function:\n fallback_ir = generate_ir_for_function(default_function, global_ctx, skip_nonpayable_check)\n else:\n fallback_ir = IRnode.from_list(\n [\"revert\", 0, 0], annotation=\"Default function\", error_msg=\"fallback function\"\n )\n\n # ensure the external jumptable section gets closed out\n # (for basic block hygiene and also for zksync interpreter)\n # NOTE: this jump gets optimized out in assembly since the\n # fallback label is the immediate next instruction,\n close_selector_section = [\"goto\", \"fallback\"]\n\n runtime = [\n \"seq\",\n [\"with\", \"_calldata_method_id\", shr(224, [\"calldataload\", 0]), selector_section],\n close_selector_section,\n [\"label\", \"fallback\", [\"var_list\"], fallback_ir],\n ]\n\n # note: dead code eliminator will clean dead functions\n runtime.extend(internal_functions_ir)\n\n return runtime\n\n\n# take a GlobalContext, and generate the runtime and deploy IR\ndef generate_ir_for_module(global_ctx: GlobalContext) -> tuple[IRnode, IRnode]:\n # order functions so that each function comes after all of its callees\n function_defs = _topsort(global_ctx.functions)\n\n init_function: Optional[vy_ast.FunctionDef] = None\n\n # generate all FuncIRInfos\n for f in function_defs:\n func_t = f._metadata[\"type\"]\n func_t._ir_info = FuncIRInfo(func_t)\n\n runtime_functions = [f for f in function_defs if not _is_constructor(f)]\n init_function = next((f for f in function_defs if _is_constructor(f)), None)\n\n runtime = _runtime_ir(runtime_functions, global_ctx)\n\n deploy_code: List[Any] = [\"seq\"]\n immutables_len = global_ctx.immutable_section_bytes\n if init_function:\n # TODO might be cleaner to separate this into an _init_ir helper func\n init_func_ir = generate_ir_for_function(\n init_function, global_ctx, skip_nonpayable_check=False, is_ctor_context=True\n )\n deploy_code.append(init_func_ir)\n\n # pass the amount of memory allocated for the init function\n # so that deployment does not clobber while preparing immutables\n # note: (deploy mem_ofst, code, extra_padding)\n init_mem_used = init_function._metadata[\"type\"]._ir_info.frame_info.mem_used\n deploy_code.append([\"deploy\", init_mem_used, runtime, immutables_len])\n\n # internal functions come after everything else\n internal_functions = [f for f in runtime_functions if _is_internal(f)]\n for f in internal_functions:\n func_ir = generate_ir_for_function(\n f, global_ctx, skip_nonpayable_check=False, is_ctor_context=True\n )\n # note: we depend on dead code eliminator to clean dead function defs\n deploy_code.append(func_ir)\n\n else:\n if immutables_len != 0:\n raise CompilerPanic(\"unreachable\")\n deploy_code.append([\"deploy\", 0, runtime, 0])\n\n return IRnode.from_list(deploy_code), IRnode.from_list(runtime)\n", "path": "vyper/codegen/module.py"}], "after_files": [{"content": "# a contract.vy -- all functions and constructor\n\nfrom typing import Any, List, Optional\n\nfrom vyper import ast as vy_ast\nfrom vyper.codegen.core import shr\nfrom vyper.codegen.function_definitions import FuncIRInfo, generate_ir_for_function\nfrom vyper.codegen.global_context import GlobalContext\nfrom vyper.codegen.ir_node import IRnode\nfrom vyper.exceptions import CompilerPanic\n\n\ndef _topsort_helper(functions, lookup):\n # single pass to get a global topological sort of functions (so that each\n # function comes after each of its callees). may have duplicates, which get\n # filtered out in _topsort()\n\n ret = []\n for f in functions:\n # called_functions is a list of ContractFunctions, need to map\n # back to FunctionDefs.\n callees = [lookup[t.name] for t in f._metadata[\"type\"].called_functions]\n ret.extend(_topsort_helper(callees, lookup))\n ret.append(f)\n\n return ret\n\n\ndef _topsort(functions):\n lookup = {f.name: f for f in functions}\n # strip duplicates\n return list(dict.fromkeys(_topsort_helper(functions, lookup)))\n\n\ndef _is_constructor(func_ast):\n return func_ast._metadata[\"type\"].is_constructor\n\n\ndef _is_fallback(func_ast):\n return func_ast._metadata[\"type\"].is_fallback\n\n\ndef _is_internal(func_ast):\n return func_ast._metadata[\"type\"].is_internal\n\n\ndef _is_payable(func_ast):\n return func_ast._metadata[\"type\"].is_payable\n\n\n# codegen for all runtime functions + callvalue/calldata checks + method selector routines\ndef _runtime_ir(runtime_functions, global_ctx):\n # categorize the runtime functions because we will organize the runtime\n # code into the following sections:\n # payable functions, nonpayable functions, fallback function, internal_functions\n internal_functions = [f for f in runtime_functions if _is_internal(f)]\n\n external_functions = [f for f in runtime_functions if not _is_internal(f)]\n default_function = next((f for f in external_functions if _is_fallback(f)), None)\n\n # functions that need to go exposed in the selector section\n regular_functions = [f for f in external_functions if not _is_fallback(f)]\n payables = [f for f in regular_functions if _is_payable(f)]\n nonpayables = [f for f in regular_functions if not _is_payable(f)]\n\n # create a map of the IR functions since they might live in both\n # runtime and deploy code (if init function calls them)\n internal_functions_ir: list[IRnode] = []\n\n for func_ast in internal_functions:\n func_ir = generate_ir_for_function(func_ast, global_ctx, False)\n internal_functions_ir.append(func_ir)\n\n # for some reason, somebody may want to deploy a contract with no\n # external functions, or more likely, a \"pure data\" contract which\n # contains immutables\n if len(external_functions) == 0:\n # TODO: prune internal functions in this case? dead code eliminator\n # might not eliminate them, since internal function jumpdest is at the\n # first instruction in the contract.\n runtime = [\"seq\"] + internal_functions_ir\n return runtime\n\n # note: if the user does not provide one, the default fallback function\n # reverts anyway. so it does not hurt to batch the payable check.\n default_is_nonpayable = default_function is None or not _is_payable(default_function)\n\n # when a contract has a nonpayable default function,\n # we can do a single check for all nonpayable functions\n batch_payable_check = len(nonpayables) > 0 and default_is_nonpayable\n skip_nonpayable_check = batch_payable_check\n\n selector_section = [\"seq\"]\n\n for func_ast in payables:\n func_ir = generate_ir_for_function(func_ast, global_ctx, False)\n selector_section.append(func_ir)\n\n if batch_payable_check:\n selector_section.append([\"assert\", [\"iszero\", \"callvalue\"]])\n\n for func_ast in nonpayables:\n func_ir = generate_ir_for_function(func_ast, global_ctx, skip_nonpayable_check)\n selector_section.append(func_ir)\n\n if default_function:\n fallback_ir = generate_ir_for_function(default_function, global_ctx, skip_nonpayable_check)\n else:\n fallback_ir = IRnode.from_list(\n [\"revert\", 0, 0], annotation=\"Default function\", error_msg=\"fallback function\"\n )\n\n # ensure the external jumptable section gets closed out\n # (for basic block hygiene and also for zksync interpreter)\n # NOTE: this jump gets optimized out in assembly since the\n # fallback label is the immediate next instruction,\n close_selector_section = [\"goto\", \"fallback\"]\n\n runtime = [\n \"seq\",\n [\"with\", \"_calldata_method_id\", shr(224, [\"calldataload\", 0]), selector_section],\n close_selector_section,\n [\"label\", \"fallback\", [\"var_list\"], fallback_ir],\n ]\n\n # note: dead code eliminator will clean dead functions\n runtime.extend(internal_functions_ir)\n\n return runtime\n\n\n# take a GlobalContext, and generate the runtime and deploy IR\ndef generate_ir_for_module(global_ctx: GlobalContext) -> tuple[IRnode, IRnode]:\n # order functions so that each function comes after all of its callees\n function_defs = _topsort(global_ctx.functions)\n\n init_function: Optional[vy_ast.FunctionDef] = None\n\n # generate all FuncIRInfos\n for f in function_defs:\n func_t = f._metadata[\"type\"]\n func_t._ir_info = FuncIRInfo(func_t)\n\n runtime_functions = [f for f in function_defs if not _is_constructor(f)]\n init_function = next((f for f in function_defs if _is_constructor(f)), None)\n\n runtime = _runtime_ir(runtime_functions, global_ctx)\n\n deploy_code: List[Any] = [\"seq\"]\n immutables_len = global_ctx.immutable_section_bytes\n if init_function:\n # TODO might be cleaner to separate this into an _init_ir helper func\n init_func_ir = generate_ir_for_function(\n init_function, global_ctx, skip_nonpayable_check=False, is_ctor_context=True\n )\n\n # pass the amount of memory allocated for the init function\n # so that deployment does not clobber while preparing immutables\n # note: (deploy mem_ofst, code, extra_padding)\n init_mem_used = init_function._metadata[\"type\"]._ir_info.frame_info.mem_used\n\n # force msize to be initialized past the end of immutables section\n # so that builtins which use `msize` for \"dynamic\" memory\n # allocation do not clobber uninitialized immutables.\n # cf. GH issue 3101.\n # note mload/iload X touches bytes from X to X+32, and msize rounds up\n # to the nearest 32, so `iload`ing `immutables_len - 32` guarantees\n # that `msize` will refer to a memory location of at least\n # `<immutables_start> + immutables_len` (where <immutables_start> ==\n # `_mem_deploy_end` as defined in the assembler).\n # note:\n # mload 32 => msize == 64\n # mload 33 => msize == 96\n # assumption in general: (mload X) => msize == ceil32(X + 32)\n # see py-evm extend_memory: after_size = ceil32(start_position + size)\n if immutables_len > 0:\n deploy_code.append([\"iload\", max(0, immutables_len - 32)])\n\n deploy_code.append(init_func_ir)\n\n deploy_code.append([\"deploy\", init_mem_used, runtime, immutables_len])\n\n # internal functions come after everything else\n internal_functions = [f for f in runtime_functions if _is_internal(f)]\n for f in internal_functions:\n func_ir = generate_ir_for_function(\n f, global_ctx, skip_nonpayable_check=False, is_ctor_context=True\n )\n # note: we depend on dead code eliminator to clean dead function defs\n deploy_code.append(func_ir)\n\n else:\n if immutables_len != 0:\n raise CompilerPanic(\"unreachable\")\n deploy_code.append([\"deploy\", 0, runtime, 0])\n\n return IRnode.from_list(deploy_code), IRnode.from_list(runtime)\n", "path": "vyper/codegen/module.py"}]}
2,674
486
gh_patches_debug_10365
rasdani/github-patches
git_diff
acl-org__acl-anthology-2399
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Ingestion request: NEJLT vol 7 & 8 This is a new (to the anthology) venue. * **Venue name:** Northern European Journal of Language Technology (NEJLT) * **Website:** [nejlt.org](https://www.nejlt.org/) * Papers at https://doi.org/10.3384/nejlt.2000-1533.8.1 and https://doi.org/10.3384/nejlt.2000-1533.7.1 I propose: * volume identifier `nejlt` * volume titles in the format _Northern European Journal of Language Technology, Volume n_ We're ready to send over two volumes, for 2021 and 2022. Iff and when this is OK with you, I'm happy to go assemble ACLPUB format volumes and send them. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bin/anthology/data.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # 3 # Copyright 2019 Marcel Bollmann <[email protected]> 4 # 5 # Licensed under the Apache License, Version 2.0 (the "License"); 6 # you may not use this file except in compliance with the License. 7 # You may obtain a copy of the License at 8 # 9 # http://www.apache.org/licenses/LICENSE-2.0 10 # 11 # Unless required by applicable law or agreed to in writing, software 12 # distributed under the License is distributed on an "AS IS" BASIS, 13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 # See the License for the specific language governing permissions and 15 # limitations under the License. 16 17 ################################################################################ 18 # This file contains all constants and functions that have hardcoded data (such 19 # as URLs or journal titles) which does not come from the XML. This is to 20 # provide a single file where such hardcoded data can be looked up and/or 21 # changed. 22 ################################################################################ 23 24 import os 25 import re 26 27 from typing import Tuple 28 29 30 # this is the canonical URL. In contrast to all other 31 # URL templates, it always links to the official anthology. 32 CANONICAL_URL_TEMPLATE = "https://aclanthology.org/{}" 33 34 # the prefix is used in different programs and we need to set it everywhere 35 # We use a environment variable to set this and not have to forward the value 36 # through all the programs. If this does not look like the best idea, keep in mind 37 # that the structure is historically grown -- from 2019 to 2020 :-) 38 try: 39 ANTHOLOGY_PREFIX = os.environ["ANTHOLOGY_PREFIX"] 40 except: 41 ANTHOLOGY_PREFIX = "https://aclanthology.org" 42 43 ATTACHMENT_PREFIX = ANTHOLOGY_PREFIX + "/attachments" 44 ATTACHMENT_TEMPLATE = ATTACHMENT_PREFIX + "/{}" 45 46 PDF_LOCATION_TEMPLATE = ANTHOLOGY_PREFIX + "/{}.pdf" 47 PDF_THUMBNAIL_LOCATION_TEMPLATE = ANTHOLOGY_PREFIX + "/thumb/{}.jpg" 48 49 VIDEO_LOCATION_TEMPLATE = ANTHOLOGY_PREFIX + "/{}" 50 51 # Regular expression matching full Anthology IDs 52 ANTHOLOGY_ID_REGEX = r"[A-Z]\d{2}-\d{4}" 53 54 # Anthology file location on server 55 # Defaults to ~/anthology-files 56 ANTHOLOGY_FILE_DIR = os.environ.get( 57 "ANTHOLOGY_FILES", os.path.join(os.environ["HOME"], "anthology-files") 58 ) 59 60 # Names of XML elements that may appear multiple times 61 LIST_ELEMENTS = ( 62 "attachment", 63 "author", 64 "editor", 65 "video", 66 "revision", 67 "erratum", 68 "award", 69 "pwcdataset", 70 "video", 71 "venue", 72 "colocated", 73 ) 74 75 # New-style IDs that should be handled as journals 76 JOURNAL_IDS = ("cl", "tacl", "tal", "lilt") 77 78 # Constants associated with DOI assignation 79 DOI_URL_PREFIX = "https://dx.doi.org/" 80 DOI_PREFIX = "10.18653/v1/" 81 82 # Default ingestion date (= unknown) 83 UNKNOWN_INGEST_DATE = "1900-01-01" 84 85 # The venue format must match this pattern 86 VENUE_FORMAT = r"^[a-z\d]+$" 87 88 89 def match_volume_and_issue(booktitle) -> Tuple[str, str]: 90 """Parses a volume name and issue name from a title. 91 92 Examples: 93 - <booktitle>Computational Linguistics, Volume 26, Number 1, March 2000</booktitle> 94 - <booktitle>Traitement Automatique des Langues 2011 Volume 52 Numéro 1</booktitle> 95 - <booktitle>Computational Linguistics, Volume 26, Number 1, March 2000</booktitle> 96 97 :param booktitle: The booktitle 98 :return: the volume and issue numbers 99 """ 100 volume_no = re.search(r"Volume\s*(\d+)", booktitle, flags=re.IGNORECASE) 101 if volume_no is not None: 102 volume_no = volume_no.group(1) 103 104 issue_no = re.search( 105 r"(Number|Numéro|Issue)\s*(\d+-?\d*)", booktitle, flags=re.IGNORECASE 106 ) 107 if issue_no is not None: 108 issue_no = issue_no.group(2) 109 110 return volume_no, issue_no 111 112 113 def get_journal_info(top_level_id, volume_title) -> Tuple[str, str, str]: 114 """Returns info about the journal: title, volume no., and issue no. 115 Currently (Feb 2023), this information is parsed from the <booktitle> tag! 116 We should move instead to an explicit representation. See 117 118 https://github.com/acl-org/acl-anthology/issues/2379 119 120 :param top_level_id: The collection ID 121 :param volume_title: The text from the <booktitle> tag 122 :return: The journal title, volume number, and issue number 123 """ 124 125 # TODO: consider moving this from code to data (perhaps 126 # under <booktitle> in the volume metadata 127 128 top_level_id = top_level_id.split(".")[-1] # for new-style IDs; is a no-op otherwise 129 130 journal_title = None 131 volume_no = None 132 issue_no = None 133 134 if top_level_id == "cl": 135 # <booktitle>Computational Linguistics, Volume 26, Number 1, March 2000</booktitle> 136 journal_title = "Computational Linguistics" 137 volume_no, issue_no = match_volume_and_issue(volume_title) 138 139 elif top_level_id == "lilt": 140 # <booktitle>Linguistic Issues in Language Technology, Volume 10, 2015</booktitle> 141 journal_title = "Linguistic Issues in Language Technology" 142 volume_no, _ = match_volume_and_issue(volume_title) 143 144 elif top_level_id == "tal": 145 # <booktitle>Traitement Automatique des Langues 2011 Volume 52 Numéro 1</booktitle> 146 journal_title = "Traitement Automatique des Langues" 147 volume_no, issue_no = match_volume_and_issue(volume_title) 148 149 elif top_level_id[0] == "J": 150 # <booktitle>Computational Linguistics, Volume 26, Number 1, March 2000</booktitle> 151 year = int(top_level_id[1:3]) 152 if year >= 65 and year <= 83: 153 journal_title = "American Journal of Computational Linguistics" 154 else: 155 journal_title = "Computational Linguistics" 156 157 volume_no, issue_no = match_volume_and_issue(volume_title) 158 159 elif top_level_id[0] == "Q" or top_level_id == "tacl": 160 journal_title = "Transactions of the Association for Computational Linguistics" 161 volume_no, _ = match_volume_and_issue(volume_title) 162 163 else: 164 journal_title = volume_title 165 166 return journal_title, volume_no, issue_no 167 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bin/anthology/data.py b/bin/anthology/data.py --- a/bin/anthology/data.py +++ b/bin/anthology/data.py @@ -146,6 +146,10 @@ journal_title = "Traitement Automatique des Langues" volume_no, issue_no = match_volume_and_issue(volume_title) + elif top_level_id == "nejlt": + journal_title = "Northern European Journal of Language Technology" + volume_no, _ = match_volume_and_issue(volume_title) + elif top_level_id[0] == "J": # <booktitle>Computational Linguistics, Volume 26, Number 1, March 2000</booktitle> year = int(top_level_id[1:3])
{"golden_diff": "diff --git a/bin/anthology/data.py b/bin/anthology/data.py\n--- a/bin/anthology/data.py\n+++ b/bin/anthology/data.py\n@@ -146,6 +146,10 @@\n journal_title = \"Traitement Automatique des Langues\"\n volume_no, issue_no = match_volume_and_issue(volume_title)\n \n+ elif top_level_id == \"nejlt\":\n+ journal_title = \"Northern European Journal of Language Technology\"\n+ volume_no, _ = match_volume_and_issue(volume_title)\n+\n elif top_level_id[0] == \"J\":\n # <booktitle>Computational Linguistics, Volume 26, Number 1, March 2000</booktitle>\n year = int(top_level_id[1:3])\n", "issue": "Ingestion request: NEJLT vol 7 & 8\nThis is a new (to the anthology) venue.\r\n\r\n* **Venue name:** Northern European Journal of Language Technology (NEJLT)\r\n* **Website:** [nejlt.org](https://www.nejlt.org/)\r\n* Papers at https://doi.org/10.3384/nejlt.2000-1533.8.1 and https://doi.org/10.3384/nejlt.2000-1533.7.1\r\n\r\nI propose:\r\n* volume identifier `nejlt`\r\n* volume titles in the format _Northern European Journal of Language Technology, Volume n_\r\n\r\nWe're ready to send over two volumes, for 2021 and 2022. Iff and when this is OK with you, I'm happy to go assemble ACLPUB format volumes and send them.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Marcel Bollmann <[email protected]>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n################################################################################\n# This file contains all constants and functions that have hardcoded data (such\n# as URLs or journal titles) which does not come from the XML. This is to\n# provide a single file where such hardcoded data can be looked up and/or\n# changed.\n################################################################################\n\nimport os\nimport re\n\nfrom typing import Tuple\n\n\n# this is the canonical URL. In contrast to all other\n# URL templates, it always links to the official anthology.\nCANONICAL_URL_TEMPLATE = \"https://aclanthology.org/{}\"\n\n# the prefix is used in different programs and we need to set it everywhere\n# We use a environment variable to set this and not have to forward the value\n# through all the programs. If this does not look like the best idea, keep in mind\n# that the structure is historically grown -- from 2019 to 2020 :-)\ntry:\n ANTHOLOGY_PREFIX = os.environ[\"ANTHOLOGY_PREFIX\"]\nexcept:\n ANTHOLOGY_PREFIX = \"https://aclanthology.org\"\n\nATTACHMENT_PREFIX = ANTHOLOGY_PREFIX + \"/attachments\"\nATTACHMENT_TEMPLATE = ATTACHMENT_PREFIX + \"/{}\"\n\nPDF_LOCATION_TEMPLATE = ANTHOLOGY_PREFIX + \"/{}.pdf\"\nPDF_THUMBNAIL_LOCATION_TEMPLATE = ANTHOLOGY_PREFIX + \"/thumb/{}.jpg\"\n\nVIDEO_LOCATION_TEMPLATE = ANTHOLOGY_PREFIX + \"/{}\"\n\n# Regular expression matching full Anthology IDs\nANTHOLOGY_ID_REGEX = r\"[A-Z]\\d{2}-\\d{4}\"\n\n# Anthology file location on server\n# Defaults to ~/anthology-files\nANTHOLOGY_FILE_DIR = os.environ.get(\n \"ANTHOLOGY_FILES\", os.path.join(os.environ[\"HOME\"], \"anthology-files\")\n)\n\n# Names of XML elements that may appear multiple times\nLIST_ELEMENTS = (\n \"attachment\",\n \"author\",\n \"editor\",\n \"video\",\n \"revision\",\n \"erratum\",\n \"award\",\n \"pwcdataset\",\n \"video\",\n \"venue\",\n \"colocated\",\n)\n\n# New-style IDs that should be handled as journals\nJOURNAL_IDS = (\"cl\", \"tacl\", \"tal\", \"lilt\")\n\n# Constants associated with DOI assignation\nDOI_URL_PREFIX = \"https://dx.doi.org/\"\nDOI_PREFIX = \"10.18653/v1/\"\n\n# Default ingestion date (= unknown)\nUNKNOWN_INGEST_DATE = \"1900-01-01\"\n\n# The venue format must match this pattern\nVENUE_FORMAT = r\"^[a-z\\d]+$\"\n\n\ndef match_volume_and_issue(booktitle) -> Tuple[str, str]:\n \"\"\"Parses a volume name and issue name from a title.\n\n Examples:\n - <booktitle>Computational Linguistics, Volume 26, Number 1, March 2000</booktitle>\n - <booktitle>Traitement Automatique des Langues 2011 Volume 52 Num\u00e9ro 1</booktitle>\n - <booktitle>Computational Linguistics, Volume 26, Number 1, March 2000</booktitle>\n\n :param booktitle: The booktitle\n :return: the volume and issue numbers\n \"\"\"\n volume_no = re.search(r\"Volume\\s*(\\d+)\", booktitle, flags=re.IGNORECASE)\n if volume_no is not None:\n volume_no = volume_no.group(1)\n\n issue_no = re.search(\n r\"(Number|Num\u00e9ro|Issue)\\s*(\\d+-?\\d*)\", booktitle, flags=re.IGNORECASE\n )\n if issue_no is not None:\n issue_no = issue_no.group(2)\n\n return volume_no, issue_no\n\n\ndef get_journal_info(top_level_id, volume_title) -> Tuple[str, str, str]:\n \"\"\"Returns info about the journal: title, volume no., and issue no.\n Currently (Feb 2023), this information is parsed from the <booktitle> tag!\n We should move instead to an explicit representation. See\n\n https://github.com/acl-org/acl-anthology/issues/2379\n\n :param top_level_id: The collection ID\n :param volume_title: The text from the <booktitle> tag\n :return: The journal title, volume number, and issue number\n \"\"\"\n\n # TODO: consider moving this from code to data (perhaps\n # under <booktitle> in the volume metadata\n\n top_level_id = top_level_id.split(\".\")[-1] # for new-style IDs; is a no-op otherwise\n\n journal_title = None\n volume_no = None\n issue_no = None\n\n if top_level_id == \"cl\":\n # <booktitle>Computational Linguistics, Volume 26, Number 1, March 2000</booktitle>\n journal_title = \"Computational Linguistics\"\n volume_no, issue_no = match_volume_and_issue(volume_title)\n\n elif top_level_id == \"lilt\":\n # <booktitle>Linguistic Issues in Language Technology, Volume 10, 2015</booktitle>\n journal_title = \"Linguistic Issues in Language Technology\"\n volume_no, _ = match_volume_and_issue(volume_title)\n\n elif top_level_id == \"tal\":\n # <booktitle>Traitement Automatique des Langues 2011 Volume 52 Num\u00e9ro 1</booktitle>\n journal_title = \"Traitement Automatique des Langues\"\n volume_no, issue_no = match_volume_and_issue(volume_title)\n\n elif top_level_id[0] == \"J\":\n # <booktitle>Computational Linguistics, Volume 26, Number 1, March 2000</booktitle>\n year = int(top_level_id[1:3])\n if year >= 65 and year <= 83:\n journal_title = \"American Journal of Computational Linguistics\"\n else:\n journal_title = \"Computational Linguistics\"\n\n volume_no, issue_no = match_volume_and_issue(volume_title)\n\n elif top_level_id[0] == \"Q\" or top_level_id == \"tacl\":\n journal_title = \"Transactions of the Association for Computational Linguistics\"\n volume_no, _ = match_volume_and_issue(volume_title)\n\n else:\n journal_title = volume_title\n\n return journal_title, volume_no, issue_no\n", "path": "bin/anthology/data.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Marcel Bollmann <[email protected]>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n################################################################################\n# This file contains all constants and functions that have hardcoded data (such\n# as URLs or journal titles) which does not come from the XML. This is to\n# provide a single file where such hardcoded data can be looked up and/or\n# changed.\n################################################################################\n\nimport os\nimport re\n\nfrom typing import Tuple\n\n\n# this is the canonical URL. In contrast to all other\n# URL templates, it always links to the official anthology.\nCANONICAL_URL_TEMPLATE = \"https://aclanthology.org/{}\"\n\n# the prefix is used in different programs and we need to set it everywhere\n# We use a environment variable to set this and not have to forward the value\n# through all the programs. If this does not look like the best idea, keep in mind\n# that the structure is historically grown -- from 2019 to 2020 :-)\ntry:\n ANTHOLOGY_PREFIX = os.environ[\"ANTHOLOGY_PREFIX\"]\nexcept:\n ANTHOLOGY_PREFIX = \"https://aclanthology.org\"\n\nATTACHMENT_PREFIX = ANTHOLOGY_PREFIX + \"/attachments\"\nATTACHMENT_TEMPLATE = ATTACHMENT_PREFIX + \"/{}\"\n\nPDF_LOCATION_TEMPLATE = ANTHOLOGY_PREFIX + \"/{}.pdf\"\nPDF_THUMBNAIL_LOCATION_TEMPLATE = ANTHOLOGY_PREFIX + \"/thumb/{}.jpg\"\n\nVIDEO_LOCATION_TEMPLATE = ANTHOLOGY_PREFIX + \"/{}\"\n\n# Regular expression matching full Anthology IDs\nANTHOLOGY_ID_REGEX = r\"[A-Z]\\d{2}-\\d{4}\"\n\n# Anthology file location on server\n# Defaults to ~/anthology-files\nANTHOLOGY_FILE_DIR = os.environ.get(\n \"ANTHOLOGY_FILES\", os.path.join(os.environ[\"HOME\"], \"anthology-files\")\n)\n\n# Names of XML elements that may appear multiple times\nLIST_ELEMENTS = (\n \"attachment\",\n \"author\",\n \"editor\",\n \"video\",\n \"revision\",\n \"erratum\",\n \"award\",\n \"pwcdataset\",\n \"video\",\n \"venue\",\n \"colocated\",\n)\n\n# New-style IDs that should be handled as journals\nJOURNAL_IDS = (\"cl\", \"tacl\", \"tal\", \"lilt\")\n\n# Constants associated with DOI assignation\nDOI_URL_PREFIX = \"https://dx.doi.org/\"\nDOI_PREFIX = \"10.18653/v1/\"\n\n# Default ingestion date (= unknown)\nUNKNOWN_INGEST_DATE = \"1900-01-01\"\n\n# The venue format must match this pattern\nVENUE_FORMAT = r\"^[a-z\\d]+$\"\n\n\ndef match_volume_and_issue(booktitle) -> Tuple[str, str]:\n \"\"\"Parses a volume name and issue name from a title.\n\n Examples:\n - <booktitle>Computational Linguistics, Volume 26, Number 1, March 2000</booktitle>\n - <booktitle>Traitement Automatique des Langues 2011 Volume 52 Num\u00e9ro 1</booktitle>\n - <booktitle>Computational Linguistics, Volume 26, Number 1, March 2000</booktitle>\n\n :param booktitle: The booktitle\n :return: the volume and issue numbers\n \"\"\"\n volume_no = re.search(r\"Volume\\s*(\\d+)\", booktitle, flags=re.IGNORECASE)\n if volume_no is not None:\n volume_no = volume_no.group(1)\n\n issue_no = re.search(\n r\"(Number|Num\u00e9ro|Issue)\\s*(\\d+-?\\d*)\", booktitle, flags=re.IGNORECASE\n )\n if issue_no is not None:\n issue_no = issue_no.group(2)\n\n return volume_no, issue_no\n\n\ndef get_journal_info(top_level_id, volume_title) -> Tuple[str, str, str]:\n \"\"\"Returns info about the journal: title, volume no., and issue no.\n Currently (Feb 2023), this information is parsed from the <booktitle> tag!\n We should move instead to an explicit representation. See\n\n https://github.com/acl-org/acl-anthology/issues/2379\n\n :param top_level_id: The collection ID\n :param volume_title: The text from the <booktitle> tag\n :return: The journal title, volume number, and issue number\n \"\"\"\n\n # TODO: consider moving this from code to data (perhaps\n # under <booktitle> in the volume metadata\n\n top_level_id = top_level_id.split(\".\")[-1] # for new-style IDs; is a no-op otherwise\n\n journal_title = None\n volume_no = None\n issue_no = None\n\n if top_level_id == \"cl\":\n # <booktitle>Computational Linguistics, Volume 26, Number 1, March 2000</booktitle>\n journal_title = \"Computational Linguistics\"\n volume_no, issue_no = match_volume_and_issue(volume_title)\n\n elif top_level_id == \"lilt\":\n # <booktitle>Linguistic Issues in Language Technology, Volume 10, 2015</booktitle>\n journal_title = \"Linguistic Issues in Language Technology\"\n volume_no, _ = match_volume_and_issue(volume_title)\n\n elif top_level_id == \"tal\":\n # <booktitle>Traitement Automatique des Langues 2011 Volume 52 Num\u00e9ro 1</booktitle>\n journal_title = \"Traitement Automatique des Langues\"\n volume_no, issue_no = match_volume_and_issue(volume_title)\n\n elif top_level_id == \"nejlt\":\n journal_title = \"Northern European Journal of Language Technology\"\n volume_no, _ = match_volume_and_issue(volume_title)\n\n elif top_level_id[0] == \"J\":\n # <booktitle>Computational Linguistics, Volume 26, Number 1, March 2000</booktitle>\n year = int(top_level_id[1:3])\n if year >= 65 and year <= 83:\n journal_title = \"American Journal of Computational Linguistics\"\n else:\n journal_title = \"Computational Linguistics\"\n\n volume_no, issue_no = match_volume_and_issue(volume_title)\n\n elif top_level_id[0] == \"Q\" or top_level_id == \"tacl\":\n journal_title = \"Transactions of the Association for Computational Linguistics\"\n volume_no, _ = match_volume_and_issue(volume_title)\n\n else:\n journal_title = volume_title\n\n return journal_title, volume_no, issue_no\n", "path": "bin/anthology/data.py"}]}
2,405
173
gh_patches_debug_16444
rasdani/github-patches
git_diff
bentoml__BentoML-3636
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- feature: `client.health` ### Feature request client to have a health function: ```python client.health() await client.async_health() ``` ### Motivation For HTTP, would probably just need to invoke `/readyz`, and for gRPC is to invoke the `Health` rpc from `grpc.health.v1.HealthServicer`. ### Other _No response_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/bentoml/_internal/client/http.py` Content: ``` 1 from __future__ import annotations 2 3 import json 4 import time 5 import socket 6 import typing as t 7 import logging 8 import urllib.error 9 import urllib.request 10 from http.client import HTTPConnection 11 from urllib.parse import urlparse 12 13 import aiohttp 14 import starlette.requests 15 import starlette.datastructures 16 17 from . import Client 18 from .. import io_descriptors as io 19 from ..service import Service 20 from ...exceptions import RemoteException 21 from ...exceptions import BentoMLException 22 from ..configuration import get_debug_mode 23 from ..service.inference_api import InferenceAPI 24 25 logger = logging.getLogger(__name__) 26 27 28 class HTTPClient(Client): 29 @staticmethod 30 def wait_until_server_ready( 31 host: str, 32 port: int, 33 timeout: int = 30, 34 check_interval: int = 1, 35 # set kwargs here to omit gRPC kwargs 36 **kwargs: t.Any, 37 ) -> None: 38 start_time = time.time() 39 status = None 40 41 logger.debug("Waiting for host %s to be ready.", f"{host}:{port}") 42 while time.time() - start_time < timeout: 43 try: 44 conn = HTTPConnection(host, port) 45 conn.request("GET", "/readyz") 46 status = conn.getresponse().status 47 if status == 200: 48 break 49 else: 50 time.sleep(check_interval) 51 except ( 52 ConnectionError, 53 urllib.error.URLError, 54 socket.timeout, 55 ConnectionRefusedError, 56 ): 57 logger.debug("Server is not ready. Retrying...") 58 time.sleep(check_interval) 59 60 # try to connect one more time and raise exception. 61 try: 62 conn = HTTPConnection(host, port) 63 conn.request("GET", "/readyz") 64 status = conn.getresponse().status 65 if status != 200: 66 raise TimeoutError( 67 f"Timed out waiting {timeout} seconds for server at '{host}:{port}' to be ready." 68 ) 69 except ( 70 ConnectionError, 71 urllib.error.URLError, 72 socket.timeout, 73 ConnectionRefusedError, 74 TimeoutError, 75 ) as err: 76 logger.error("Caught exception while connecting to %s:%s:", host, port) 77 logger.error(err) 78 raise 79 80 @classmethod 81 def from_url(cls, server_url: str, **kwargs: t.Any) -> HTTPClient: 82 server_url = server_url if "://" in server_url else "http://" + server_url 83 url_parts = urlparse(server_url) 84 85 # TODO: SSL support 86 conn = HTTPConnection(url_parts.netloc) 87 conn.set_debuglevel(logging.DEBUG if get_debug_mode() else 0) 88 conn.request("GET", url_parts.path + "/docs.json") 89 resp = conn.getresponse() 90 if resp.status != 200: 91 raise RemoteException( 92 f"Failed to get OpenAPI schema from the server: {resp.status} {resp.reason}:\n{resp.read()}" 93 ) 94 openapi_spec = json.load(resp) 95 conn.close() 96 97 dummy_service = Service(openapi_spec["info"]["title"]) 98 99 for route, spec in openapi_spec["paths"].items(): 100 for meth_spec in spec.values(): 101 if "tags" in meth_spec and "Service APIs" in meth_spec["tags"]: 102 if "x-bentoml-io-descriptor" not in meth_spec["requestBody"]: 103 # TODO: better message stating min version for from_url to work 104 raise BentoMLException( 105 f"Malformed BentoML spec received from BentoML server {server_url}" 106 ) 107 if "x-bentoml-io-descriptor" not in meth_spec["responses"]["200"]: 108 raise BentoMLException( 109 f"Malformed BentoML spec received from BentoML server {server_url}" 110 ) 111 if "x-bentoml-name" not in meth_spec: 112 raise BentoMLException( 113 f"Malformed BentoML spec received from BentoML server {server_url}" 114 ) 115 try: 116 api = InferenceAPI( 117 None, 118 io.from_spec( 119 meth_spec["requestBody"]["x-bentoml-io-descriptor"] 120 ), 121 io.from_spec( 122 meth_spec["responses"]["200"]["x-bentoml-io-descriptor"] 123 ), 124 name=meth_spec["x-bentoml-name"], 125 doc=meth_spec["description"], 126 route=route.lstrip("/"), 127 ) 128 dummy_service.apis[meth_spec["x-bentoml-name"]] = api 129 except BentoMLException as e: 130 logger.error( 131 "Failed to instantiate client for API %s: ", 132 meth_spec["x-bentoml-name"], 133 e, 134 ) 135 136 return cls(dummy_service, server_url) 137 138 async def _call( 139 self, inp: t.Any = None, *, _bentoml_api: InferenceAPI, **kwargs: t.Any 140 ) -> t.Any: 141 # All gRPC kwargs should be poped out. 142 kwargs = {k: v for k, v in kwargs.items() if not k.startswith("_grpc_")} 143 api = _bentoml_api 144 145 if api.multi_input: 146 if inp is not None: 147 raise BentoMLException( 148 f"'{api.name}' takes multiple inputs; all inputs must be passed as keyword arguments." 149 ) 150 fake_resp = await api.input.to_http_response(kwargs, None) 151 else: 152 fake_resp = await api.input.to_http_response(inp, None) 153 req_body = fake_resp.body 154 155 async with aiohttp.ClientSession(self.server_url) as sess: 156 async with sess.post( 157 "/" + api.route, 158 data=req_body, 159 headers={"content-type": fake_resp.headers["content-type"]}, 160 ) as resp: 161 if resp.status != 200: 162 raise BentoMLException( 163 f"Error making request: {resp.status}: {str(await resp.read())}" 164 ) 165 166 fake_req = starlette.requests.Request(scope={"type": "http"}) 167 headers = starlette.datastructures.Headers(headers=resp.headers) 168 fake_req._body = await resp.read() 169 # Request.headers sets a _headers variable. We will need to set this 170 # value to our fake request object. 171 fake_req._headers = headers # type: ignore (request._headers is property) 172 173 return await api.output.from_http_request(fake_req) 174 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/bentoml/_internal/client/http.py b/src/bentoml/_internal/client/http.py --- a/src/bentoml/_internal/client/http.py +++ b/src/bentoml/_internal/client/http.py @@ -4,6 +4,7 @@ import time import socket import typing as t +import asyncio import logging import urllib.error import urllib.request @@ -77,6 +78,14 @@ logger.error(err) raise + async def async_health(self) -> t.Any: + async with aiohttp.ClientSession(self.server_url) as sess: + async with sess.get("/readyz") as resp: + return resp + + def health(self) -> t.Any: + return asyncio.run(self.async_health()) + @classmethod def from_url(cls, server_url: str, **kwargs: t.Any) -> HTTPClient: server_url = server_url if "://" in server_url else "http://" + server_url
{"golden_diff": "diff --git a/src/bentoml/_internal/client/http.py b/src/bentoml/_internal/client/http.py\n--- a/src/bentoml/_internal/client/http.py\n+++ b/src/bentoml/_internal/client/http.py\n@@ -4,6 +4,7 @@\n import time\n import socket\n import typing as t\n+import asyncio\n import logging\n import urllib.error\n import urllib.request\n@@ -77,6 +78,14 @@\n logger.error(err)\n raise\n \n+ async def async_health(self) -> t.Any:\n+ async with aiohttp.ClientSession(self.server_url) as sess:\n+ async with sess.get(\"/readyz\") as resp:\n+ return resp\n+\n+ def health(self) -> t.Any:\n+ return asyncio.run(self.async_health())\n+\n @classmethod\n def from_url(cls, server_url: str, **kwargs: t.Any) -> HTTPClient:\n server_url = server_url if \"://\" in server_url else \"http://\" + server_url\n", "issue": "feature: `client.health`\n### Feature request\n\nclient to have a health function:\r\n```python\r\nclient.health()\r\nawait client.async_health()\r\n```\n\n### Motivation\n\nFor HTTP, would probably just need to invoke `/readyz`, and for gRPC is to invoke the `Health` rpc from `grpc.health.v1.HealthServicer`.\n\n### Other\n\n_No response_\n", "before_files": [{"content": "from __future__ import annotations\n\nimport json\nimport time\nimport socket\nimport typing as t\nimport logging\nimport urllib.error\nimport urllib.request\nfrom http.client import HTTPConnection\nfrom urllib.parse import urlparse\n\nimport aiohttp\nimport starlette.requests\nimport starlette.datastructures\n\nfrom . import Client\nfrom .. import io_descriptors as io\nfrom ..service import Service\nfrom ...exceptions import RemoteException\nfrom ...exceptions import BentoMLException\nfrom ..configuration import get_debug_mode\nfrom ..service.inference_api import InferenceAPI\n\nlogger = logging.getLogger(__name__)\n\n\nclass HTTPClient(Client):\n @staticmethod\n def wait_until_server_ready(\n host: str,\n port: int,\n timeout: int = 30,\n check_interval: int = 1,\n # set kwargs here to omit gRPC kwargs\n **kwargs: t.Any,\n ) -> None:\n start_time = time.time()\n status = None\n\n logger.debug(\"Waiting for host %s to be ready.\", f\"{host}:{port}\")\n while time.time() - start_time < timeout:\n try:\n conn = HTTPConnection(host, port)\n conn.request(\"GET\", \"/readyz\")\n status = conn.getresponse().status\n if status == 200:\n break\n else:\n time.sleep(check_interval)\n except (\n ConnectionError,\n urllib.error.URLError,\n socket.timeout,\n ConnectionRefusedError,\n ):\n logger.debug(\"Server is not ready. Retrying...\")\n time.sleep(check_interval)\n\n # try to connect one more time and raise exception.\n try:\n conn = HTTPConnection(host, port)\n conn.request(\"GET\", \"/readyz\")\n status = conn.getresponse().status\n if status != 200:\n raise TimeoutError(\n f\"Timed out waiting {timeout} seconds for server at '{host}:{port}' to be ready.\"\n )\n except (\n ConnectionError,\n urllib.error.URLError,\n socket.timeout,\n ConnectionRefusedError,\n TimeoutError,\n ) as err:\n logger.error(\"Caught exception while connecting to %s:%s:\", host, port)\n logger.error(err)\n raise\n\n @classmethod\n def from_url(cls, server_url: str, **kwargs: t.Any) -> HTTPClient:\n server_url = server_url if \"://\" in server_url else \"http://\" + server_url\n url_parts = urlparse(server_url)\n\n # TODO: SSL support\n conn = HTTPConnection(url_parts.netloc)\n conn.set_debuglevel(logging.DEBUG if get_debug_mode() else 0)\n conn.request(\"GET\", url_parts.path + \"/docs.json\")\n resp = conn.getresponse()\n if resp.status != 200:\n raise RemoteException(\n f\"Failed to get OpenAPI schema from the server: {resp.status} {resp.reason}:\\n{resp.read()}\"\n )\n openapi_spec = json.load(resp)\n conn.close()\n\n dummy_service = Service(openapi_spec[\"info\"][\"title\"])\n\n for route, spec in openapi_spec[\"paths\"].items():\n for meth_spec in spec.values():\n if \"tags\" in meth_spec and \"Service APIs\" in meth_spec[\"tags\"]:\n if \"x-bentoml-io-descriptor\" not in meth_spec[\"requestBody\"]:\n # TODO: better message stating min version for from_url to work\n raise BentoMLException(\n f\"Malformed BentoML spec received from BentoML server {server_url}\"\n )\n if \"x-bentoml-io-descriptor\" not in meth_spec[\"responses\"][\"200\"]:\n raise BentoMLException(\n f\"Malformed BentoML spec received from BentoML server {server_url}\"\n )\n if \"x-bentoml-name\" not in meth_spec:\n raise BentoMLException(\n f\"Malformed BentoML spec received from BentoML server {server_url}\"\n )\n try:\n api = InferenceAPI(\n None,\n io.from_spec(\n meth_spec[\"requestBody\"][\"x-bentoml-io-descriptor\"]\n ),\n io.from_spec(\n meth_spec[\"responses\"][\"200\"][\"x-bentoml-io-descriptor\"]\n ),\n name=meth_spec[\"x-bentoml-name\"],\n doc=meth_spec[\"description\"],\n route=route.lstrip(\"/\"),\n )\n dummy_service.apis[meth_spec[\"x-bentoml-name\"]] = api\n except BentoMLException as e:\n logger.error(\n \"Failed to instantiate client for API %s: \",\n meth_spec[\"x-bentoml-name\"],\n e,\n )\n\n return cls(dummy_service, server_url)\n\n async def _call(\n self, inp: t.Any = None, *, _bentoml_api: InferenceAPI, **kwargs: t.Any\n ) -> t.Any:\n # All gRPC kwargs should be poped out.\n kwargs = {k: v for k, v in kwargs.items() if not k.startswith(\"_grpc_\")}\n api = _bentoml_api\n\n if api.multi_input:\n if inp is not None:\n raise BentoMLException(\n f\"'{api.name}' takes multiple inputs; all inputs must be passed as keyword arguments.\"\n )\n fake_resp = await api.input.to_http_response(kwargs, None)\n else:\n fake_resp = await api.input.to_http_response(inp, None)\n req_body = fake_resp.body\n\n async with aiohttp.ClientSession(self.server_url) as sess:\n async with sess.post(\n \"/\" + api.route,\n data=req_body,\n headers={\"content-type\": fake_resp.headers[\"content-type\"]},\n ) as resp:\n if resp.status != 200:\n raise BentoMLException(\n f\"Error making request: {resp.status}: {str(await resp.read())}\"\n )\n\n fake_req = starlette.requests.Request(scope={\"type\": \"http\"})\n headers = starlette.datastructures.Headers(headers=resp.headers)\n fake_req._body = await resp.read()\n # Request.headers sets a _headers variable. We will need to set this\n # value to our fake request object.\n fake_req._headers = headers # type: ignore (request._headers is property)\n\n return await api.output.from_http_request(fake_req)\n", "path": "src/bentoml/_internal/client/http.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport json\nimport time\nimport socket\nimport typing as t\nimport asyncio\nimport logging\nimport urllib.error\nimport urllib.request\nfrom http.client import HTTPConnection\nfrom urllib.parse import urlparse\n\nimport aiohttp\nimport starlette.requests\nimport starlette.datastructures\n\nfrom . import Client\nfrom .. import io_descriptors as io\nfrom ..service import Service\nfrom ...exceptions import RemoteException\nfrom ...exceptions import BentoMLException\nfrom ..configuration import get_debug_mode\nfrom ..service.inference_api import InferenceAPI\n\nlogger = logging.getLogger(__name__)\n\n\nclass HTTPClient(Client):\n @staticmethod\n def wait_until_server_ready(\n host: str,\n port: int,\n timeout: int = 30,\n check_interval: int = 1,\n # set kwargs here to omit gRPC kwargs\n **kwargs: t.Any,\n ) -> None:\n start_time = time.time()\n status = None\n\n logger.debug(\"Waiting for host %s to be ready.\", f\"{host}:{port}\")\n while time.time() - start_time < timeout:\n try:\n conn = HTTPConnection(host, port)\n conn.request(\"GET\", \"/readyz\")\n status = conn.getresponse().status\n if status == 200:\n break\n else:\n time.sleep(check_interval)\n except (\n ConnectionError,\n urllib.error.URLError,\n socket.timeout,\n ConnectionRefusedError,\n ):\n logger.debug(\"Server is not ready. Retrying...\")\n time.sleep(check_interval)\n\n # try to connect one more time and raise exception.\n try:\n conn = HTTPConnection(host, port)\n conn.request(\"GET\", \"/readyz\")\n status = conn.getresponse().status\n if status != 200:\n raise TimeoutError(\n f\"Timed out waiting {timeout} seconds for server at '{host}:{port}' to be ready.\"\n )\n except (\n ConnectionError,\n urllib.error.URLError,\n socket.timeout,\n ConnectionRefusedError,\n TimeoutError,\n ) as err:\n logger.error(\"Caught exception while connecting to %s:%s:\", host, port)\n logger.error(err)\n raise\n\n async def async_health(self) -> t.Any:\n async with aiohttp.ClientSession(self.server_url) as sess:\n async with sess.get(\"/readyz\") as resp:\n return resp\n\n def health(self) -> t.Any:\n return asyncio.run(self.async_health())\n\n @classmethod\n def from_url(cls, server_url: str, **kwargs: t.Any) -> HTTPClient:\n server_url = server_url if \"://\" in server_url else \"http://\" + server_url\n url_parts = urlparse(server_url)\n\n # TODO: SSL support\n conn = HTTPConnection(url_parts.netloc)\n conn.set_debuglevel(logging.DEBUG if get_debug_mode() else 0)\n conn.request(\"GET\", url_parts.path + \"/docs.json\")\n resp = conn.getresponse()\n if resp.status != 200:\n raise RemoteException(\n f\"Failed to get OpenAPI schema from the server: {resp.status} {resp.reason}:\\n{resp.read()}\"\n )\n openapi_spec = json.load(resp)\n conn.close()\n\n dummy_service = Service(openapi_spec[\"info\"][\"title\"])\n\n for route, spec in openapi_spec[\"paths\"].items():\n for meth_spec in spec.values():\n if \"tags\" in meth_spec and \"Service APIs\" in meth_spec[\"tags\"]:\n if \"x-bentoml-io-descriptor\" not in meth_spec[\"requestBody\"]:\n # TODO: better message stating min version for from_url to work\n raise BentoMLException(\n f\"Malformed BentoML spec received from BentoML server {server_url}\"\n )\n if \"x-bentoml-io-descriptor\" not in meth_spec[\"responses\"][\"200\"]:\n raise BentoMLException(\n f\"Malformed BentoML spec received from BentoML server {server_url}\"\n )\n if \"x-bentoml-name\" not in meth_spec:\n raise BentoMLException(\n f\"Malformed BentoML spec received from BentoML server {server_url}\"\n )\n try:\n api = InferenceAPI(\n None,\n io.from_spec(\n meth_spec[\"requestBody\"][\"x-bentoml-io-descriptor\"]\n ),\n io.from_spec(\n meth_spec[\"responses\"][\"200\"][\"x-bentoml-io-descriptor\"]\n ),\n name=meth_spec[\"x-bentoml-name\"],\n doc=meth_spec[\"description\"],\n route=route.lstrip(\"/\"),\n )\n dummy_service.apis[meth_spec[\"x-bentoml-name\"]] = api\n except BentoMLException as e:\n logger.error(\n \"Failed to instantiate client for API %s: \",\n meth_spec[\"x-bentoml-name\"],\n e,\n )\n\n return cls(dummy_service, server_url)\n\n async def _call(\n self, inp: t.Any = None, *, _bentoml_api: InferenceAPI, **kwargs: t.Any\n ) -> t.Any:\n # All gRPC kwargs should be poped out.\n kwargs = {k: v for k, v in kwargs.items() if not k.startswith(\"_grpc_\")}\n api = _bentoml_api\n\n if api.multi_input:\n if inp is not None:\n raise BentoMLException(\n f\"'{api.name}' takes multiple inputs; all inputs must be passed as keyword arguments.\"\n )\n fake_resp = await api.input.to_http_response(kwargs, None)\n else:\n fake_resp = await api.input.to_http_response(inp, None)\n req_body = fake_resp.body\n\n async with aiohttp.ClientSession(self.server_url) as sess:\n async with sess.post(\n \"/\" + api.route,\n data=req_body,\n headers={\"content-type\": fake_resp.headers[\"content-type\"]},\n ) as resp:\n if resp.status != 200:\n raise BentoMLException(\n f\"Error making request: {resp.status}: {str(await resp.read())}\"\n )\n\n fake_req = starlette.requests.Request(scope={\"type\": \"http\"})\n headers = starlette.datastructures.Headers(headers=resp.headers)\n fake_req._body = await resp.read()\n # Request.headers sets a _headers variable. We will need to set this\n # value to our fake request object.\n fake_req._headers = headers # type: ignore (request._headers is property)\n\n return await api.output.from_http_request(fake_req)\n", "path": "src/bentoml/_internal/client/http.py"}]}
2,120
220
gh_patches_debug_1179
rasdani/github-patches
git_diff
locustio__locust-1395
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Update flask version Our minimum required flask version is too old (saw at least one person having an issue https://stackoverflow.com/questions/61969924/typeerror-when-i-run-a-locustfile-py) https://flask.palletsprojects.com/en/1.1.x/changelog/#version-0-12-5 is a minimum, but we should probably go to 1.x right away. I can do the PR --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 import ast 3 import os 4 import re 5 import sys 6 7 from setuptools import find_packages, setup 8 9 ROOT_PATH = os.path.abspath(os.path.dirname(__file__)) 10 11 # parse version from locust/__init__.py 12 _version_re = re.compile(r'__version__\s+=\s+(.*)') 13 _init_file = os.path.join(ROOT_PATH, "locust", "__init__.py") 14 with open(_init_file, 'rb') as f: 15 version = str(ast.literal_eval(_version_re.search( 16 f.read().decode('utf-8')).group(1))) 17 18 setup( 19 name='locust', 20 version=version, 21 install_requires=[ 22 "gevent>=1.5.0", 23 "flask>=0.10.1", 24 "requests>=2.9.1", 25 "msgpack>=0.6.2", 26 "pyzmq>=16.0.2", 27 "geventhttpclient>=1.4.2", 28 "ConfigArgParse>=1.0", 29 "psutil>=5.6.7", 30 "Flask-BasicAuth>=0.2.0" 31 ], 32 test_suite="locust.test", 33 tests_require=[ 34 'cryptography', 35 'mock', 36 'pyquery', 37 ], 38 ) 39 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -20,7 +20,7 @@ version=version, install_requires=[ "gevent>=1.5.0", - "flask>=0.10.1", + "flask>=1.1.2", "requests>=2.9.1", "msgpack>=0.6.2", "pyzmq>=16.0.2",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -20,7 +20,7 @@\n version=version,\n install_requires=[\n \"gevent>=1.5.0\",\n- \"flask>=0.10.1\", \n+ \"flask>=1.1.2\", \n \"requests>=2.9.1\", \n \"msgpack>=0.6.2\", \n \"pyzmq>=16.0.2\",\n", "issue": "Update flask version\nOur minimum required flask version is too old (saw at least one person having an issue https://stackoverflow.com/questions/61969924/typeerror-when-i-run-a-locustfile-py)\r\n\r\nhttps://flask.palletsprojects.com/en/1.1.x/changelog/#version-0-12-5 is a minimum, but we should probably go to 1.x right away.\r\n\r\nI can do the PR\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport ast\nimport os\nimport re\nimport sys\n\nfrom setuptools import find_packages, setup\n\nROOT_PATH = os.path.abspath(os.path.dirname(__file__))\n\n# parse version from locust/__init__.py\n_version_re = re.compile(r'__version__\\s+=\\s+(.*)')\n_init_file = os.path.join(ROOT_PATH, \"locust\", \"__init__.py\")\nwith open(_init_file, 'rb') as f:\n version = str(ast.literal_eval(_version_re.search(\n f.read().decode('utf-8')).group(1)))\n\nsetup(\n name='locust',\n version=version,\n install_requires=[\n \"gevent>=1.5.0\",\n \"flask>=0.10.1\", \n \"requests>=2.9.1\", \n \"msgpack>=0.6.2\", \n \"pyzmq>=16.0.2\", \n \"geventhttpclient>=1.4.2\",\n \"ConfigArgParse>=1.0\",\n \"psutil>=5.6.7\",\n \"Flask-BasicAuth>=0.2.0\"\n ],\n test_suite=\"locust.test\",\n tests_require=[\n 'cryptography',\n 'mock',\n 'pyquery',\n ], \n)\n", "path": "setup.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport ast\nimport os\nimport re\nimport sys\n\nfrom setuptools import find_packages, setup\n\nROOT_PATH = os.path.abspath(os.path.dirname(__file__))\n\n# parse version from locust/__init__.py\n_version_re = re.compile(r'__version__\\s+=\\s+(.*)')\n_init_file = os.path.join(ROOT_PATH, \"locust\", \"__init__.py\")\nwith open(_init_file, 'rb') as f:\n version = str(ast.literal_eval(_version_re.search(\n f.read().decode('utf-8')).group(1)))\n\nsetup(\n name='locust',\n version=version,\n install_requires=[\n \"gevent>=1.5.0\",\n \"flask>=1.1.2\", \n \"requests>=2.9.1\", \n \"msgpack>=0.6.2\", \n \"pyzmq>=16.0.2\", \n \"geventhttpclient>=1.4.2\",\n \"ConfigArgParse>=1.0\",\n \"psutil>=5.6.7\",\n \"Flask-BasicAuth>=0.2.0\"\n ],\n test_suite=\"locust.test\",\n tests_require=[\n 'cryptography',\n 'mock',\n 'pyquery',\n ], \n)\n", "path": "setup.py"}]}
712
115
gh_patches_debug_18495
rasdani/github-patches
git_diff
apache__airflow-8230
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Airflow webserver not starting with SQLAlchemy==1.3.16 **Apache Airflow version**: 1.10.9 **Environment**: Ubuntu 18.04 LTS - **Cloud provider or hardware configuration**: - **OS** (e.g. from /etc/os-release):Ubuntu 18.04 LTS **What happened**: airflow webserver error airflow@airflow:~$ airflow webserver [2020-04-08 09:45:49,843] {settings.py:253} INFO - settings.configure_orm(): Using pool settings. pool_size=5, max_overflow=10, pool_recycle=1800, pid=30494 ____________ _____________ ____ |__( )_________ __/__ /________ __ ____ /| |_ /__ ___/_ /_ __ /_ __ \_ | /| / / ___ ___ | / _ / _ __/ _ / / /_/ /_ |/ |/ / _/_/ |_/_/ /_/ /_/ /_/ \____/____/|__/ [2020-04-08 09:45:50,462] {__init__.py:51} INFO - Using executor LocalExecutor [2020-04-08 09:45:50,463] {dagbag.py:403} INFO - Filling up the DagBag from /home/airflow/airflow/dags Traceback (most recent call last): File "/home/airflow/.local/bin/airflow", line 37, in <module> args.func(args) File "/home/airflow/.local/lib/python3.6/site-packages/airflow/utils/cli.py", line 75, in wrapper return f(*args, **kwargs) File "/home/airflow/.local/lib/python3.6/site-packages/airflow/bin/cli.py", line 900, in webserver app = cached_app_rbac(None) if settings.RBAC else cached_app(None) File "/home/airflow/.local/lib/python3.6/site-packages/airflow/www/app.py", line 233, in cached_app app = create_app(config, testing) File "/home/airflow/.local/lib/python3.6/site-packages/airflow/www/app.py", line 103, in create_app models.Chart, Session, name="Charts", category="Data Profiling")) File "/home/airflow/.local/lib/python3.6/site-packages/flask_admin/contrib/sqla/view.py", line 330, in __init__ menu_icon_value=menu_icon_value) File "/home/airflow/.local/lib/python3.6/site-packages/flask_admin/model/base.py", line 818, in __init__ self._refresh_cache() File "/home/airflow/.local/lib/python3.6/site-packages/flask_admin/model/base.py", line 913, in _refresh_cache self._search_supported = self.init_search() File "/home/airflow/.local/lib/python3.6/site-packages/flask_admin/contrib/sqla/view.py", line 581, in init_search if tools.is_hybrid_property(self.model, name): File "/home/airflow/.local/lib/python3.6/site-packages/flask_admin/contrib/sqla/tools.py", line 209, in is_hybrid_property return last_name in get_hybrid_properties(last_model) File "/home/airflow/.local/lib/python3.6/site-packages/flask_admin/contrib/sqla/tools.py", line 190, in get_hybrid_properties for key, prop in inspect(model).all_orm_descriptors.items() File "/home/airflow/.local/lib/python3.6/site-packages/sqlalchemy/inspection.py", line 72, in inspect "available for object of type %s" % type_ sqlalchemy.exc.NoInspectionAvailable: No inspection system is available for object of type <class 'method'> **What you expected to happen**: to start <!-- What do you think went wrong? --> **How to reproduce it**: Install airflow with pip3 and postgres from ubuntu which is 10. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `airflow/models/chart.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # 3 # Licensed to the Apache Software Foundation (ASF) under one 4 # or more contributor license agreements. See the NOTICE file 5 # distributed with this work for additional information 6 # regarding copyright ownership. The ASF licenses this file 7 # to you under the Apache License, Version 2.0 (the 8 # "License"); you may not use this file except in compliance 9 # with the License. You may obtain a copy of the License at 10 # 11 # http://www.apache.org/licenses/LICENSE-2.0 12 # 13 # Unless required by applicable law or agreed to in writing, 14 # software distributed under the License is distributed on an 15 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 16 # KIND, either express or implied. See the License for the 17 # specific language governing permissions and limitations 18 # under the License. 19 20 from sqlalchemy import Column, String, Integer, Boolean, ForeignKey, Text 21 from sqlalchemy.orm import relationship 22 23 from airflow.models.base import Base, ID_LEN 24 from airflow.utils.sqlalchemy import UtcDateTime 25 from airflow.utils import timezone 26 27 28 class Chart(Base): 29 __tablename__ = "chart" 30 31 id = Column(Integer, primary_key=True) 32 label = Column(String(200)) 33 conn_id = Column(String(ID_LEN), nullable=False) 34 user_id = Column(Integer(), ForeignKey('users.id'), nullable=True) 35 chart_type = Column(String(100), default="line") 36 sql_layout = Column(String(50), default="series") 37 sql = Column(Text, default="SELECT series, x, y FROM table") 38 y_log_scale = Column(Boolean) 39 show_datatable = Column(Boolean) 40 show_sql = Column(Boolean, default=True) 41 height = Column(Integer, default=600) 42 default_params = Column(String(5000), default="{}") 43 owner = relationship( 44 "User", cascade=False, cascade_backrefs=False, backref='charts') 45 x_is_date = Column(Boolean, default=True) 46 iteration_no = Column(Integer, default=0) 47 last_modified = Column(UtcDateTime, default=timezone.utcnow) 48 49 def __repr__(self): 50 return self.label 51 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/airflow/models/chart.py b/airflow/models/chart.py --- a/airflow/models/chart.py +++ b/airflow/models/chart.py @@ -21,6 +21,7 @@ from sqlalchemy.orm import relationship from airflow.models.base import Base, ID_LEN +from airflow.models.user import User from airflow.utils.sqlalchemy import UtcDateTime from airflow.utils import timezone @@ -41,7 +42,7 @@ height = Column(Integer, default=600) default_params = Column(String(5000), default="{}") owner = relationship( - "User", cascade=False, cascade_backrefs=False, backref='charts') + User, cascade=False, cascade_backrefs=False, backref='charts') x_is_date = Column(Boolean, default=True) iteration_no = Column(Integer, default=0) last_modified = Column(UtcDateTime, default=timezone.utcnow)
{"golden_diff": "diff --git a/airflow/models/chart.py b/airflow/models/chart.py\n--- a/airflow/models/chart.py\n+++ b/airflow/models/chart.py\n@@ -21,6 +21,7 @@\n from sqlalchemy.orm import relationship\n \n from airflow.models.base import Base, ID_LEN\n+from airflow.models.user import User\n from airflow.utils.sqlalchemy import UtcDateTime\n from airflow.utils import timezone\n \n@@ -41,7 +42,7 @@\n height = Column(Integer, default=600)\n default_params = Column(String(5000), default=\"{}\")\n owner = relationship(\n- \"User\", cascade=False, cascade_backrefs=False, backref='charts')\n+ User, cascade=False, cascade_backrefs=False, backref='charts')\n x_is_date = Column(Boolean, default=True)\n iteration_no = Column(Integer, default=0)\n last_modified = Column(UtcDateTime, default=timezone.utcnow)\n", "issue": "Airflow webserver not starting with SQLAlchemy==1.3.16\n\r\n**Apache Airflow version**: 1.10.9\r\n**Environment**: Ubuntu 18.04 LTS\r\n\r\n- **Cloud provider or hardware configuration**:\r\n- **OS** (e.g. from /etc/os-release):Ubuntu 18.04 LTS\r\n\r\n**What happened**: airflow webserver error\r\n\r\nairflow@airflow:~$ airflow webserver\r\n[2020-04-08 09:45:49,843] {settings.py:253} INFO - settings.configure_orm(): Using pool settings. pool_size=5, max_overflow=10, pool_recycle=1800, pid=30494\r\n ____________ _____________\r\n ____ |__( )_________ __/__ /________ __\r\n____ /| |_ /__ ___/_ /_ __ /_ __ \\_ | /| / /\r\n___ ___ | / _ / _ __/ _ / / /_/ /_ |/ |/ /\r\n _/_/ |_/_/ /_/ /_/ /_/ \\____/____/|__/\r\n[2020-04-08 09:45:50,462] {__init__.py:51} INFO - Using executor LocalExecutor\r\n[2020-04-08 09:45:50,463] {dagbag.py:403} INFO - Filling up the DagBag from /home/airflow/airflow/dags\r\nTraceback (most recent call last):\r\n File \"/home/airflow/.local/bin/airflow\", line 37, in <module>\r\n args.func(args)\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/airflow/utils/cli.py\", line 75, in wrapper\r\n return f(*args, **kwargs)\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/airflow/bin/cli.py\", line 900, in webserver\r\n app = cached_app_rbac(None) if settings.RBAC else cached_app(None)\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/airflow/www/app.py\", line 233, in cached_app\r\n app = create_app(config, testing)\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/airflow/www/app.py\", line 103, in create_app\r\n models.Chart, Session, name=\"Charts\", category=\"Data Profiling\"))\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/flask_admin/contrib/sqla/view.py\", line 330, in __init__\r\n menu_icon_value=menu_icon_value)\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/flask_admin/model/base.py\", line 818, in __init__\r\n self._refresh_cache()\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/flask_admin/model/base.py\", line 913, in _refresh_cache\r\n self._search_supported = self.init_search()\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/flask_admin/contrib/sqla/view.py\", line 581, in init_search\r\n if tools.is_hybrid_property(self.model, name):\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/flask_admin/contrib/sqla/tools.py\", line 209, in is_hybrid_property\r\n return last_name in get_hybrid_properties(last_model)\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/flask_admin/contrib/sqla/tools.py\", line 190, in get_hybrid_properties\r\n for key, prop in inspect(model).all_orm_descriptors.items()\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/sqlalchemy/inspection.py\", line 72, in inspect\r\n \"available for object of type %s\" % type_\r\nsqlalchemy.exc.NoInspectionAvailable: No inspection system is available for object of type <class 'method'>\r\n\r\n**What you expected to happen**: to start\r\n\r\n<!-- What do you think went wrong? -->\r\n\r\n**How to reproduce it**:\r\nInstall airflow with pip3 and postgres from ubuntu which is 10.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom sqlalchemy import Column, String, Integer, Boolean, ForeignKey, Text\nfrom sqlalchemy.orm import relationship\n\nfrom airflow.models.base import Base, ID_LEN\nfrom airflow.utils.sqlalchemy import UtcDateTime\nfrom airflow.utils import timezone\n\n\nclass Chart(Base):\n __tablename__ = \"chart\"\n\n id = Column(Integer, primary_key=True)\n label = Column(String(200))\n conn_id = Column(String(ID_LEN), nullable=False)\n user_id = Column(Integer(), ForeignKey('users.id'), nullable=True)\n chart_type = Column(String(100), default=\"line\")\n sql_layout = Column(String(50), default=\"series\")\n sql = Column(Text, default=\"SELECT series, x, y FROM table\")\n y_log_scale = Column(Boolean)\n show_datatable = Column(Boolean)\n show_sql = Column(Boolean, default=True)\n height = Column(Integer, default=600)\n default_params = Column(String(5000), default=\"{}\")\n owner = relationship(\n \"User\", cascade=False, cascade_backrefs=False, backref='charts')\n x_is_date = Column(Boolean, default=True)\n iteration_no = Column(Integer, default=0)\n last_modified = Column(UtcDateTime, default=timezone.utcnow)\n\n def __repr__(self):\n return self.label\n", "path": "airflow/models/chart.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom sqlalchemy import Column, String, Integer, Boolean, ForeignKey, Text\nfrom sqlalchemy.orm import relationship\n\nfrom airflow.models.base import Base, ID_LEN\nfrom airflow.models.user import User\nfrom airflow.utils.sqlalchemy import UtcDateTime\nfrom airflow.utils import timezone\n\n\nclass Chart(Base):\n __tablename__ = \"chart\"\n\n id = Column(Integer, primary_key=True)\n label = Column(String(200))\n conn_id = Column(String(ID_LEN), nullable=False)\n user_id = Column(Integer(), ForeignKey('users.id'), nullable=True)\n chart_type = Column(String(100), default=\"line\")\n sql_layout = Column(String(50), default=\"series\")\n sql = Column(Text, default=\"SELECT series, x, y FROM table\")\n y_log_scale = Column(Boolean)\n show_datatable = Column(Boolean)\n show_sql = Column(Boolean, default=True)\n height = Column(Integer, default=600)\n default_params = Column(String(5000), default=\"{}\")\n owner = relationship(\n User, cascade=False, cascade_backrefs=False, backref='charts')\n x_is_date = Column(Boolean, default=True)\n iteration_no = Column(Integer, default=0)\n last_modified = Column(UtcDateTime, default=timezone.utcnow)\n\n def __repr__(self):\n return self.label\n", "path": "airflow/models/chart.py"}]}
1,792
204
gh_patches_debug_21535
rasdani/github-patches
git_diff
ansible-collections__community.general-3263
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- apache2_module: Relies on a2enmod / misleading error on CentOS ### Summary The ansible `apache2_module` module relies on Debian-style (also sometimes called "Debianisms") binaries as `a2enmod` for managing `httpd`/`apache` modules. On CentOS this is not the case as this distribution doesn't use these binaries. On CentOS this module emits a misleading error: ```` None not found. Perhaps this system does not use None to manage apache ```` Responsible code: https://github.com/ansible-collections/community.general/blob/da11a98cb734e99cc57f4ae6ec09d9199875c39b/plugins/modules/web_infrastructure/apache2_module.py#L207 ### Issue Type Bug Report ### Component Name apache2_module ### Ansible Version ```console (paste below) $ ansible --version ansible 2.9.10 config file = None configured module search path = ['/home/build/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/local/lib/python3.8/dist-packages/ansible executable location = /usr/local/bin/ansible python version = 3.8.10 (default, Jun 2 2021, 10:49:15) [GCC 9.4.0] ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed (Empty) ``` ### OS / Environment WSL 2/Ubuntu 20.04 LTS (ansible client); CentOS 7.x (ansible target). ### Steps to Reproduce <!--- Paste example playbooks or commands between quotes below --> ```yaml (paste below) - name: Enable mod_substitute apache2_module: name: mod_substitute state: present ``` ### Expected Results `apache_module` ansible module manages the modules without errors. ### Actual Results ```console (paste below) None not found. Perhaps this system does not use None to manage apache ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `plugins/modules/web_infrastructure/apache2_module.py` Content: ``` 1 #!/usr/bin/python 2 # -*- coding: utf-8 -*- 3 4 # (c) 2013-2014, Christian Berendt <[email protected]> 5 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) 6 7 from __future__ import absolute_import, division, print_function 8 __metaclass__ = type 9 10 11 DOCUMENTATION = ''' 12 --- 13 module: apache2_module 14 author: 15 - Christian Berendt (@berendt) 16 - Ralf Hertel (@n0trax) 17 - Robin Roth (@robinro) 18 short_description: Enables/disables a module of the Apache2 webserver. 19 description: 20 - Enables or disables a specified module of the Apache2 webserver. 21 options: 22 name: 23 type: str 24 description: 25 - Name of the module to enable/disable as given to C(a2enmod/a2dismod). 26 required: true 27 identifier: 28 type: str 29 description: 30 - Identifier of the module as listed by C(apache2ctl -M). 31 This is optional and usually determined automatically by the common convention of 32 appending C(_module) to I(name) as well as custom exception for popular modules. 33 required: False 34 force: 35 description: 36 - Force disabling of default modules and override Debian warnings. 37 required: false 38 type: bool 39 default: False 40 state: 41 type: str 42 description: 43 - Desired state of the module. 44 choices: ['present', 'absent'] 45 default: present 46 ignore_configcheck: 47 description: 48 - Ignore configuration checks about inconsistent module configuration. Especially for mpm_* modules. 49 type: bool 50 default: False 51 requirements: ["a2enmod","a2dismod"] 52 notes: 53 - This does not work on RedHat-based distributions. It does work on Debian- and SuSE-based distributions. 54 Whether it works on others depend on whether the C(a2enmod) and C(a2dismod) tools are available or not. 55 ''' 56 57 EXAMPLES = ''' 58 - name: Enable the Apache2 module wsgi 59 community.general.apache2_module: 60 state: present 61 name: wsgi 62 63 - name: Disables the Apache2 module wsgi 64 community.general.apache2_module: 65 state: absent 66 name: wsgi 67 68 - name: Disable default modules for Debian 69 community.general.apache2_module: 70 state: absent 71 name: autoindex 72 force: True 73 74 - name: Disable mpm_worker and ignore warnings about missing mpm module 75 community.general.apache2_module: 76 state: absent 77 name: mpm_worker 78 ignore_configcheck: True 79 80 - name: Enable dump_io module, which is identified as dumpio_module inside apache2 81 community.general.apache2_module: 82 state: present 83 name: dump_io 84 identifier: dumpio_module 85 ''' 86 87 RETURN = ''' 88 result: 89 description: message about action taken 90 returned: always 91 type: str 92 warnings: 93 description: list of warning messages 94 returned: when needed 95 type: list 96 rc: 97 description: return code of underlying command 98 returned: failed 99 type: int 100 stdout: 101 description: stdout of underlying command 102 returned: failed 103 type: str 104 stderr: 105 description: stderr of underlying command 106 returned: failed 107 type: str 108 ''' 109 110 import re 111 112 # import module snippets 113 from ansible.module_utils.basic import AnsibleModule 114 115 _re_threaded = re.compile(r'threaded: *yes') 116 117 118 def _run_threaded(module): 119 control_binary = _get_ctl_binary(module) 120 result, stdout, stderr = module.run_command([control_binary, "-V"]) 121 122 return bool(_re_threaded.search(stdout)) 123 124 125 def _get_ctl_binary(module): 126 for command in ['apache2ctl', 'apachectl']: 127 ctl_binary = module.get_bin_path(command) 128 if ctl_binary is not None: 129 return ctl_binary 130 131 module.fail_json(msg="Neither of apache2ctl nor apachctl found. At least one apache control binary is necessary.") 132 133 134 def _module_is_enabled(module): 135 control_binary = _get_ctl_binary(module) 136 result, stdout, stderr = module.run_command([control_binary, "-M"]) 137 138 if result != 0: 139 error_msg = "Error executing %s: %s" % (control_binary, stderr) 140 if module.params['ignore_configcheck']: 141 if 'AH00534' in stderr and 'mpm_' in module.params['name']: 142 module.warnings.append( 143 "No MPM module loaded! apache2 reload AND other module actions" 144 " will fail if no MPM module is loaded immediately." 145 ) 146 else: 147 module.warnings.append(error_msg) 148 return False 149 else: 150 module.fail_json(msg=error_msg) 151 152 searchstring = ' ' + module.params['identifier'] 153 return searchstring in stdout 154 155 156 def create_apache_identifier(name): 157 """ 158 By convention if a module is loaded via name, it appears in apache2ctl -M as 159 name_module. 160 161 Some modules don't follow this convention and we use replacements for those.""" 162 163 # a2enmod name replacement to apache2ctl -M names 164 text_workarounds = [ 165 ('shib', 'mod_shib'), 166 ('shib2', 'mod_shib'), 167 ('evasive', 'evasive20_module'), 168 ] 169 170 # re expressions to extract subparts of names 171 re_workarounds = [ 172 ('php', re.compile(r'^(php\d)\.')), 173 ] 174 175 for a2enmod_spelling, module_name in text_workarounds: 176 if a2enmod_spelling in name: 177 return module_name 178 179 for search, reexpr in re_workarounds: 180 if search in name: 181 try: 182 rematch = reexpr.search(name) 183 return rematch.group(1) + '_module' 184 except AttributeError: 185 pass 186 187 return name + '_module' 188 189 190 def _set_state(module, state): 191 name = module.params['name'] 192 force = module.params['force'] 193 194 want_enabled = state == 'present' 195 state_string = {'present': 'enabled', 'absent': 'disabled'}[state] 196 a2mod_binary = {'present': 'a2enmod', 'absent': 'a2dismod'}[state] 197 success_msg = "Module %s %s" % (name, state_string) 198 199 if _module_is_enabled(module) != want_enabled: 200 if module.check_mode: 201 module.exit_json(changed=True, 202 result=success_msg, 203 warnings=module.warnings) 204 205 a2mod_binary = [module.get_bin_path(a2mod_binary)] 206 if a2mod_binary is None: 207 module.fail_json(msg="%s not found. Perhaps this system does not use %s to manage apache" % (a2mod_binary, a2mod_binary)) 208 209 if not want_enabled and force: 210 # force exists only for a2dismod on debian 211 a2mod_binary.append('-f') 212 213 result, stdout, stderr = module.run_command(a2mod_binary + [name]) 214 215 if _module_is_enabled(module) == want_enabled: 216 module.exit_json(changed=True, 217 result=success_msg, 218 warnings=module.warnings) 219 else: 220 msg = ( 221 'Failed to set module {name} to {state}:\n' 222 '{stdout}\n' 223 'Maybe the module identifier ({identifier}) was guessed incorrectly.' 224 'Consider setting the "identifier" option.' 225 ).format( 226 name=name, 227 state=state_string, 228 stdout=stdout, 229 identifier=module.params['identifier'] 230 ) 231 module.fail_json(msg=msg, 232 rc=result, 233 stdout=stdout, 234 stderr=stderr) 235 else: 236 module.exit_json(changed=False, 237 result=success_msg, 238 warnings=module.warnings) 239 240 241 def main(): 242 module = AnsibleModule( 243 argument_spec=dict( 244 name=dict(required=True), 245 identifier=dict(type='str'), 246 force=dict(type='bool', default=False), 247 state=dict(default='present', choices=['absent', 'present']), 248 ignore_configcheck=dict(type='bool', default=False), 249 ), 250 supports_check_mode=True, 251 ) 252 253 module.warnings = [] 254 255 name = module.params['name'] 256 if name == 'cgi' and _run_threaded(module): 257 module.fail_json(msg="Your MPM seems to be threaded. No automatic actions on module cgi possible.") 258 259 if not module.params['identifier']: 260 module.params['identifier'] = create_apache_identifier(module.params['name']) 261 262 if module.params['state'] in ['present', 'absent']: 263 _set_state(module, module.params['state']) 264 265 266 if __name__ == '__main__': 267 main() 268 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/plugins/modules/web_infrastructure/apache2_module.py b/plugins/modules/web_infrastructure/apache2_module.py --- a/plugins/modules/web_infrastructure/apache2_module.py +++ b/plugins/modules/web_infrastructure/apache2_module.py @@ -202,15 +202,17 @@ result=success_msg, warnings=module.warnings) - a2mod_binary = [module.get_bin_path(a2mod_binary)] - if a2mod_binary is None: + a2mod_binary_path = module.get_bin_path(a2mod_binary) + if a2mod_binary_path is None: module.fail_json(msg="%s not found. Perhaps this system does not use %s to manage apache" % (a2mod_binary, a2mod_binary)) + a2mod_binary_cmd = [a2mod_binary_path] + if not want_enabled and force: # force exists only for a2dismod on debian - a2mod_binary.append('-f') + a2mod_binary_cmd.append('-f') - result, stdout, stderr = module.run_command(a2mod_binary + [name]) + result, stdout, stderr = module.run_command(a2mod_binary_cmd + [name]) if _module_is_enabled(module) == want_enabled: module.exit_json(changed=True,
{"golden_diff": "diff --git a/plugins/modules/web_infrastructure/apache2_module.py b/plugins/modules/web_infrastructure/apache2_module.py\n--- a/plugins/modules/web_infrastructure/apache2_module.py\n+++ b/plugins/modules/web_infrastructure/apache2_module.py\n@@ -202,15 +202,17 @@\n result=success_msg,\n warnings=module.warnings)\n \n- a2mod_binary = [module.get_bin_path(a2mod_binary)]\n- if a2mod_binary is None:\n+ a2mod_binary_path = module.get_bin_path(a2mod_binary)\n+ if a2mod_binary_path is None:\n module.fail_json(msg=\"%s not found. Perhaps this system does not use %s to manage apache\" % (a2mod_binary, a2mod_binary))\n \n+ a2mod_binary_cmd = [a2mod_binary_path]\n+\n if not want_enabled and force:\n # force exists only for a2dismod on debian\n- a2mod_binary.append('-f')\n+ a2mod_binary_cmd.append('-f')\n \n- result, stdout, stderr = module.run_command(a2mod_binary + [name])\n+ result, stdout, stderr = module.run_command(a2mod_binary_cmd + [name])\n \n if _module_is_enabled(module) == want_enabled:\n module.exit_json(changed=True,\n", "issue": "apache2_module: Relies on a2enmod / misleading error on CentOS\n### Summary\n\nThe ansible `apache2_module` module relies on Debian-style (also sometimes called \"Debianisms\") binaries as `a2enmod` for managing `httpd`/`apache` modules. On CentOS this is not the case as this distribution doesn't use these binaries. \r\n\r\nOn CentOS this module emits a misleading error:\r\n````\r\nNone not found. Perhaps this system does not use None to manage apache\r\n````\r\n\r\nResponsible code:\r\nhttps://github.com/ansible-collections/community.general/blob/da11a98cb734e99cc57f4ae6ec09d9199875c39b/plugins/modules/web_infrastructure/apache2_module.py#L207\n\n### Issue Type\n\nBug Report\n\n### Component Name\n\napache2_module\n\n### Ansible Version\n\n```console (paste below)\r\n$ ansible --version\r\n\r\nansible 2.9.10\r\n config file = None\r\n configured module search path = ['/home/build/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']\r\n ansible python module location = /usr/local/lib/python3.8/dist-packages/ansible\r\n executable location = /usr/local/bin/ansible\r\n python version = 3.8.10 (default, Jun 2 2021, 10:49:15) [GCC 9.4.0]\r\n```\r\n\n\n### Configuration\n\n```console (paste below)\r\n$ ansible-config dump --only-changed\r\n(Empty)\r\n```\r\n\n\n### OS / Environment\n\nWSL 2/Ubuntu 20.04 LTS (ansible client); CentOS 7.x (ansible target).\n\n### Steps to Reproduce\n\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml (paste below)\r\n - name: Enable mod_substitute\r\n apache2_module:\r\n name: mod_substitute\r\n state: present\r\n```\r\n\n\n### Expected Results\n\n`apache_module` ansible module manages the modules without errors.\n\n### Actual Results\n\n```console (paste below)\r\nNone not found. Perhaps this system does not use None to manage apache\r\n```\r\n\n\n### Code of Conduct\n\n- [X] I agree to follow the Ansible Code of Conduct\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2013-2014, Christian Berendt <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = '''\n---\nmodule: apache2_module\nauthor:\n - Christian Berendt (@berendt)\n - Ralf Hertel (@n0trax)\n - Robin Roth (@robinro)\nshort_description: Enables/disables a module of the Apache2 webserver.\ndescription:\n - Enables or disables a specified module of the Apache2 webserver.\noptions:\n name:\n type: str\n description:\n - Name of the module to enable/disable as given to C(a2enmod/a2dismod).\n required: true\n identifier:\n type: str\n description:\n - Identifier of the module as listed by C(apache2ctl -M).\n This is optional and usually determined automatically by the common convention of\n appending C(_module) to I(name) as well as custom exception for popular modules.\n required: False\n force:\n description:\n - Force disabling of default modules and override Debian warnings.\n required: false\n type: bool\n default: False\n state:\n type: str\n description:\n - Desired state of the module.\n choices: ['present', 'absent']\n default: present\n ignore_configcheck:\n description:\n - Ignore configuration checks about inconsistent module configuration. Especially for mpm_* modules.\n type: bool\n default: False\nrequirements: [\"a2enmod\",\"a2dismod\"]\nnotes:\n - This does not work on RedHat-based distributions. It does work on Debian- and SuSE-based distributions.\n Whether it works on others depend on whether the C(a2enmod) and C(a2dismod) tools are available or not.\n'''\n\nEXAMPLES = '''\n- name: Enable the Apache2 module wsgi\n community.general.apache2_module:\n state: present\n name: wsgi\n\n- name: Disables the Apache2 module wsgi\n community.general.apache2_module:\n state: absent\n name: wsgi\n\n- name: Disable default modules for Debian\n community.general.apache2_module:\n state: absent\n name: autoindex\n force: True\n\n- name: Disable mpm_worker and ignore warnings about missing mpm module\n community.general.apache2_module:\n state: absent\n name: mpm_worker\n ignore_configcheck: True\n\n- name: Enable dump_io module, which is identified as dumpio_module inside apache2\n community.general.apache2_module:\n state: present\n name: dump_io\n identifier: dumpio_module\n'''\n\nRETURN = '''\nresult:\n description: message about action taken\n returned: always\n type: str\nwarnings:\n description: list of warning messages\n returned: when needed\n type: list\nrc:\n description: return code of underlying command\n returned: failed\n type: int\nstdout:\n description: stdout of underlying command\n returned: failed\n type: str\nstderr:\n description: stderr of underlying command\n returned: failed\n type: str\n'''\n\nimport re\n\n# import module snippets\nfrom ansible.module_utils.basic import AnsibleModule\n\n_re_threaded = re.compile(r'threaded: *yes')\n\n\ndef _run_threaded(module):\n control_binary = _get_ctl_binary(module)\n result, stdout, stderr = module.run_command([control_binary, \"-V\"])\n\n return bool(_re_threaded.search(stdout))\n\n\ndef _get_ctl_binary(module):\n for command in ['apache2ctl', 'apachectl']:\n ctl_binary = module.get_bin_path(command)\n if ctl_binary is not None:\n return ctl_binary\n\n module.fail_json(msg=\"Neither of apache2ctl nor apachctl found. At least one apache control binary is necessary.\")\n\n\ndef _module_is_enabled(module):\n control_binary = _get_ctl_binary(module)\n result, stdout, stderr = module.run_command([control_binary, \"-M\"])\n\n if result != 0:\n error_msg = \"Error executing %s: %s\" % (control_binary, stderr)\n if module.params['ignore_configcheck']:\n if 'AH00534' in stderr and 'mpm_' in module.params['name']:\n module.warnings.append(\n \"No MPM module loaded! apache2 reload AND other module actions\"\n \" will fail if no MPM module is loaded immediately.\"\n )\n else:\n module.warnings.append(error_msg)\n return False\n else:\n module.fail_json(msg=error_msg)\n\n searchstring = ' ' + module.params['identifier']\n return searchstring in stdout\n\n\ndef create_apache_identifier(name):\n \"\"\"\n By convention if a module is loaded via name, it appears in apache2ctl -M as\n name_module.\n\n Some modules don't follow this convention and we use replacements for those.\"\"\"\n\n # a2enmod name replacement to apache2ctl -M names\n text_workarounds = [\n ('shib', 'mod_shib'),\n ('shib2', 'mod_shib'),\n ('evasive', 'evasive20_module'),\n ]\n\n # re expressions to extract subparts of names\n re_workarounds = [\n ('php', re.compile(r'^(php\\d)\\.')),\n ]\n\n for a2enmod_spelling, module_name in text_workarounds:\n if a2enmod_spelling in name:\n return module_name\n\n for search, reexpr in re_workarounds:\n if search in name:\n try:\n rematch = reexpr.search(name)\n return rematch.group(1) + '_module'\n except AttributeError:\n pass\n\n return name + '_module'\n\n\ndef _set_state(module, state):\n name = module.params['name']\n force = module.params['force']\n\n want_enabled = state == 'present'\n state_string = {'present': 'enabled', 'absent': 'disabled'}[state]\n a2mod_binary = {'present': 'a2enmod', 'absent': 'a2dismod'}[state]\n success_msg = \"Module %s %s\" % (name, state_string)\n\n if _module_is_enabled(module) != want_enabled:\n if module.check_mode:\n module.exit_json(changed=True,\n result=success_msg,\n warnings=module.warnings)\n\n a2mod_binary = [module.get_bin_path(a2mod_binary)]\n if a2mod_binary is None:\n module.fail_json(msg=\"%s not found. Perhaps this system does not use %s to manage apache\" % (a2mod_binary, a2mod_binary))\n\n if not want_enabled and force:\n # force exists only for a2dismod on debian\n a2mod_binary.append('-f')\n\n result, stdout, stderr = module.run_command(a2mod_binary + [name])\n\n if _module_is_enabled(module) == want_enabled:\n module.exit_json(changed=True,\n result=success_msg,\n warnings=module.warnings)\n else:\n msg = (\n 'Failed to set module {name} to {state}:\\n'\n '{stdout}\\n'\n 'Maybe the module identifier ({identifier}) was guessed incorrectly.'\n 'Consider setting the \"identifier\" option.'\n ).format(\n name=name,\n state=state_string,\n stdout=stdout,\n identifier=module.params['identifier']\n )\n module.fail_json(msg=msg,\n rc=result,\n stdout=stdout,\n stderr=stderr)\n else:\n module.exit_json(changed=False,\n result=success_msg,\n warnings=module.warnings)\n\n\ndef main():\n module = AnsibleModule(\n argument_spec=dict(\n name=dict(required=True),\n identifier=dict(type='str'),\n force=dict(type='bool', default=False),\n state=dict(default='present', choices=['absent', 'present']),\n ignore_configcheck=dict(type='bool', default=False),\n ),\n supports_check_mode=True,\n )\n\n module.warnings = []\n\n name = module.params['name']\n if name == 'cgi' and _run_threaded(module):\n module.fail_json(msg=\"Your MPM seems to be threaded. No automatic actions on module cgi possible.\")\n\n if not module.params['identifier']:\n module.params['identifier'] = create_apache_identifier(module.params['name'])\n\n if module.params['state'] in ['present', 'absent']:\n _set_state(module, module.params['state'])\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/web_infrastructure/apache2_module.py"}], "after_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2013-2014, Christian Berendt <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = '''\n---\nmodule: apache2_module\nauthor:\n - Christian Berendt (@berendt)\n - Ralf Hertel (@n0trax)\n - Robin Roth (@robinro)\nshort_description: Enables/disables a module of the Apache2 webserver.\ndescription:\n - Enables or disables a specified module of the Apache2 webserver.\noptions:\n name:\n type: str\n description:\n - Name of the module to enable/disable as given to C(a2enmod/a2dismod).\n required: true\n identifier:\n type: str\n description:\n - Identifier of the module as listed by C(apache2ctl -M).\n This is optional and usually determined automatically by the common convention of\n appending C(_module) to I(name) as well as custom exception for popular modules.\n required: False\n force:\n description:\n - Force disabling of default modules and override Debian warnings.\n required: false\n type: bool\n default: False\n state:\n type: str\n description:\n - Desired state of the module.\n choices: ['present', 'absent']\n default: present\n ignore_configcheck:\n description:\n - Ignore configuration checks about inconsistent module configuration. Especially for mpm_* modules.\n type: bool\n default: False\nrequirements: [\"a2enmod\",\"a2dismod\"]\nnotes:\n - This does not work on RedHat-based distributions. It does work on Debian- and SuSE-based distributions.\n Whether it works on others depend on whether the C(a2enmod) and C(a2dismod) tools are available or not.\n'''\n\nEXAMPLES = '''\n- name: Enable the Apache2 module wsgi\n community.general.apache2_module:\n state: present\n name: wsgi\n\n- name: Disables the Apache2 module wsgi\n community.general.apache2_module:\n state: absent\n name: wsgi\n\n- name: Disable default modules for Debian\n community.general.apache2_module:\n state: absent\n name: autoindex\n force: True\n\n- name: Disable mpm_worker and ignore warnings about missing mpm module\n community.general.apache2_module:\n state: absent\n name: mpm_worker\n ignore_configcheck: True\n\n- name: Enable dump_io module, which is identified as dumpio_module inside apache2\n community.general.apache2_module:\n state: present\n name: dump_io\n identifier: dumpio_module\n'''\n\nRETURN = '''\nresult:\n description: message about action taken\n returned: always\n type: str\nwarnings:\n description: list of warning messages\n returned: when needed\n type: list\nrc:\n description: return code of underlying command\n returned: failed\n type: int\nstdout:\n description: stdout of underlying command\n returned: failed\n type: str\nstderr:\n description: stderr of underlying command\n returned: failed\n type: str\n'''\n\nimport re\n\n# import module snippets\nfrom ansible.module_utils.basic import AnsibleModule\n\n_re_threaded = re.compile(r'threaded: *yes')\n\n\ndef _run_threaded(module):\n control_binary = _get_ctl_binary(module)\n result, stdout, stderr = module.run_command([control_binary, \"-V\"])\n\n return bool(_re_threaded.search(stdout))\n\n\ndef _get_ctl_binary(module):\n for command in ['apache2ctl', 'apachectl']:\n ctl_binary = module.get_bin_path(command)\n if ctl_binary is not None:\n return ctl_binary\n\n module.fail_json(msg=\"Neither of apache2ctl nor apachctl found. At least one apache control binary is necessary.\")\n\n\ndef _module_is_enabled(module):\n control_binary = _get_ctl_binary(module)\n result, stdout, stderr = module.run_command([control_binary, \"-M\"])\n\n if result != 0:\n error_msg = \"Error executing %s: %s\" % (control_binary, stderr)\n if module.params['ignore_configcheck']:\n if 'AH00534' in stderr and 'mpm_' in module.params['name']:\n module.warnings.append(\n \"No MPM module loaded! apache2 reload AND other module actions\"\n \" will fail if no MPM module is loaded immediately.\"\n )\n else:\n module.warnings.append(error_msg)\n return False\n else:\n module.fail_json(msg=error_msg)\n\n searchstring = ' ' + module.params['identifier']\n return searchstring in stdout\n\n\ndef create_apache_identifier(name):\n \"\"\"\n By convention if a module is loaded via name, it appears in apache2ctl -M as\n name_module.\n\n Some modules don't follow this convention and we use replacements for those.\"\"\"\n\n # a2enmod name replacement to apache2ctl -M names\n text_workarounds = [\n ('shib', 'mod_shib'),\n ('shib2', 'mod_shib'),\n ('evasive', 'evasive20_module'),\n ]\n\n # re expressions to extract subparts of names\n re_workarounds = [\n ('php', re.compile(r'^(php\\d)\\.')),\n ]\n\n for a2enmod_spelling, module_name in text_workarounds:\n if a2enmod_spelling in name:\n return module_name\n\n for search, reexpr in re_workarounds:\n if search in name:\n try:\n rematch = reexpr.search(name)\n return rematch.group(1) + '_module'\n except AttributeError:\n pass\n\n return name + '_module'\n\n\ndef _set_state(module, state):\n name = module.params['name']\n force = module.params['force']\n\n want_enabled = state == 'present'\n state_string = {'present': 'enabled', 'absent': 'disabled'}[state]\n a2mod_binary = {'present': 'a2enmod', 'absent': 'a2dismod'}[state]\n success_msg = \"Module %s %s\" % (name, state_string)\n\n if _module_is_enabled(module) != want_enabled:\n if module.check_mode:\n module.exit_json(changed=True,\n result=success_msg,\n warnings=module.warnings)\n\n a2mod_binary_path = module.get_bin_path(a2mod_binary)\n if a2mod_binary_path is None:\n module.fail_json(msg=\"%s not found. Perhaps this system does not use %s to manage apache\" % (a2mod_binary, a2mod_binary))\n\n a2mod_binary_cmd = [a2mod_binary_path]\n\n if not want_enabled and force:\n # force exists only for a2dismod on debian\n a2mod_binary_cmd.append('-f')\n\n result, stdout, stderr = module.run_command(a2mod_binary_cmd + [name])\n\n if _module_is_enabled(module) == want_enabled:\n module.exit_json(changed=True,\n result=success_msg,\n warnings=module.warnings)\n else:\n msg = (\n 'Failed to set module {name} to {state}:\\n'\n '{stdout}\\n'\n 'Maybe the module identifier ({identifier}) was guessed incorrectly.'\n 'Consider setting the \"identifier\" option.'\n ).format(\n name=name,\n state=state_string,\n stdout=stdout,\n identifier=module.params['identifier']\n )\n module.fail_json(msg=msg,\n rc=result,\n stdout=stdout,\n stderr=stderr)\n else:\n module.exit_json(changed=False,\n result=success_msg,\n warnings=module.warnings)\n\n\ndef main():\n module = AnsibleModule(\n argument_spec=dict(\n name=dict(required=True),\n identifier=dict(type='str'),\n force=dict(type='bool', default=False),\n state=dict(default='present', choices=['absent', 'present']),\n ignore_configcheck=dict(type='bool', default=False),\n ),\n supports_check_mode=True,\n )\n\n module.warnings = []\n\n name = module.params['name']\n if name == 'cgi' and _run_threaded(module):\n module.fail_json(msg=\"Your MPM seems to be threaded. No automatic actions on module cgi possible.\")\n\n if not module.params['identifier']:\n module.params['identifier'] = create_apache_identifier(module.params['name'])\n\n if module.params['state'] in ['present', 'absent']:\n _set_state(module, module.params['state'])\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/web_infrastructure/apache2_module.py"}]}
3,376
288
gh_patches_debug_17420
rasdani/github-patches
git_diff
pytorch__ignite-2676
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Scheduled workflow failed Oh no, something went wrong in the scheduled workflow **PyTorch version tests with commit 98844bf82b963a429d22b09f650cb0af2023bf20**. Please look into it: https://github.com/pytorch/ignite/actions/runs/2923090334 Feel free to close this if this was just a one-off error. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ignite/metrics/gan/utils.py` Content: ``` 1 from typing import Callable, Optional, Union 2 3 import torch 4 from packaging.version import Version 5 6 from ignite.metrics.metric import Metric 7 8 9 class InceptionModel(torch.nn.Module): 10 r"""Inception Model pre-trained on the ImageNet Dataset. 11 12 Args: 13 return_features: set it to `True` if you want the model to return features from the last pooling 14 layer instead of prediction probabilities. 15 device: specifies which device updates are accumulated on. Setting the 16 metric's device to be the same as your ``update`` arguments ensures the ``update`` method is 17 non-blocking. By default, CPU. 18 """ 19 20 def __init__(self, return_features: bool, device: Union[str, torch.device] = "cpu") -> None: 21 try: 22 from torchvision import models 23 except ImportError: 24 raise RuntimeError("This module requires torchvision to be installed.") 25 super(InceptionModel, self).__init__() 26 self._device = device 27 if Version(torch.__version__) <= Version("1.7.0"): 28 model_kwargs = {"pretrained": True} 29 else: 30 model_kwargs = {"weights": models.Inception_V3_Weights.DEFAULT} 31 32 self.model = models.inception_v3(**model_kwargs).to(self._device) 33 34 if return_features: 35 self.model.fc = torch.nn.Identity() 36 else: 37 self.model.fc = torch.nn.Sequential(self.model.fc, torch.nn.Softmax(dim=1)) 38 self.model.eval() 39 40 @torch.no_grad() 41 def forward(self, data: torch.Tensor) -> torch.Tensor: 42 if data.dim() != 4: 43 raise ValueError(f"Inputs should be a tensor of dim 4, got {data.dim()}") 44 if data.shape[1] != 3: 45 raise ValueError(f"Inputs should be a tensor with 3 channels, got {data.shape}") 46 if data.device != torch.device(self._device): 47 data = data.to(self._device) 48 return self.model(data) 49 50 51 class _BaseInceptionMetric(Metric): 52 def __init__( 53 self, 54 num_features: Optional[int], 55 feature_extractor: Optional[torch.nn.Module], 56 output_transform: Callable = lambda x: x, 57 device: Union[str, torch.device] = torch.device("cpu"), 58 ) -> None: 59 60 if num_features is None: 61 raise ValueError("Argument num_features must be provided, if feature_extractor is specified.") 62 63 if feature_extractor is None: 64 feature_extractor = torch.nn.Identity() 65 66 if num_features <= 0: 67 raise ValueError(f"Argument num_features must be greater to zero, got: {num_features}") 68 69 if not isinstance(feature_extractor, torch.nn.Module): 70 raise TypeError( 71 f"Argument feature_extractor must be of type torch.nn.Module, got {type(self._feature_extractor)}" 72 ) 73 74 self._num_features = num_features 75 self._feature_extractor = feature_extractor.to(device) 76 77 super(_BaseInceptionMetric, self).__init__(output_transform=output_transform, device=device) 78 79 def _check_feature_shapes(self, samples: torch.Tensor) -> None: 80 81 if samples.dim() != 2: 82 raise ValueError(f"feature_extractor output must be a tensor of dim 2, got: {samples.dim()}") 83 84 if samples.shape[0] == 0: 85 raise ValueError(f"Batch size should be greater than one, got: {samples.shape[0]}") 86 87 if samples.shape[1] != self._num_features: 88 raise ValueError( 89 f"num_features returned by feature_extractor should be {self._num_features}, got: {samples.shape[1]}" 90 ) 91 92 def _extract_features(self, inputs: torch.Tensor) -> torch.Tensor: 93 94 inputs = inputs.detach() 95 96 if inputs.device != torch.device(self._device): 97 inputs = inputs.to(self._device) 98 99 with torch.no_grad(): 100 outputs = self._feature_extractor(inputs).to(self._device, dtype=torch.float64) 101 self._check_feature_shapes(outputs) 102 103 return outputs 104 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ignite/metrics/gan/utils.py b/ignite/metrics/gan/utils.py --- a/ignite/metrics/gan/utils.py +++ b/ignite/metrics/gan/utils.py @@ -19,12 +19,13 @@ def __init__(self, return_features: bool, device: Union[str, torch.device] = "cpu") -> None: try: + import torchvision from torchvision import models except ImportError: raise RuntimeError("This module requires torchvision to be installed.") super(InceptionModel, self).__init__() self._device = device - if Version(torch.__version__) <= Version("1.7.0"): + if Version(torchvision.__version__) < Version("0.13.0"): model_kwargs = {"pretrained": True} else: model_kwargs = {"weights": models.Inception_V3_Weights.DEFAULT}
{"golden_diff": "diff --git a/ignite/metrics/gan/utils.py b/ignite/metrics/gan/utils.py\n--- a/ignite/metrics/gan/utils.py\n+++ b/ignite/metrics/gan/utils.py\n@@ -19,12 +19,13 @@\n \n def __init__(self, return_features: bool, device: Union[str, torch.device] = \"cpu\") -> None:\n try:\n+ import torchvision\n from torchvision import models\n except ImportError:\n raise RuntimeError(\"This module requires torchvision to be installed.\")\n super(InceptionModel, self).__init__()\n self._device = device\n- if Version(torch.__version__) <= Version(\"1.7.0\"):\n+ if Version(torchvision.__version__) < Version(\"0.13.0\"):\n model_kwargs = {\"pretrained\": True}\n else:\n model_kwargs = {\"weights\": models.Inception_V3_Weights.DEFAULT}\n", "issue": "Scheduled workflow failed\nOh no, something went wrong in the scheduled workflow **PyTorch version tests with commit 98844bf82b963a429d22b09f650cb0af2023bf20**.\nPlease look into it:\n\nhttps://github.com/pytorch/ignite/actions/runs/2923090334\n\nFeel free to close this if this was just a one-off error.\n\n", "before_files": [{"content": "from typing import Callable, Optional, Union\n\nimport torch\nfrom packaging.version import Version\n\nfrom ignite.metrics.metric import Metric\n\n\nclass InceptionModel(torch.nn.Module):\n r\"\"\"Inception Model pre-trained on the ImageNet Dataset.\n\n Args:\n return_features: set it to `True` if you want the model to return features from the last pooling\n layer instead of prediction probabilities.\n device: specifies which device updates are accumulated on. Setting the\n metric's device to be the same as your ``update`` arguments ensures the ``update`` method is\n non-blocking. By default, CPU.\n \"\"\"\n\n def __init__(self, return_features: bool, device: Union[str, torch.device] = \"cpu\") -> None:\n try:\n from torchvision import models\n except ImportError:\n raise RuntimeError(\"This module requires torchvision to be installed.\")\n super(InceptionModel, self).__init__()\n self._device = device\n if Version(torch.__version__) <= Version(\"1.7.0\"):\n model_kwargs = {\"pretrained\": True}\n else:\n model_kwargs = {\"weights\": models.Inception_V3_Weights.DEFAULT}\n\n self.model = models.inception_v3(**model_kwargs).to(self._device)\n\n if return_features:\n self.model.fc = torch.nn.Identity()\n else:\n self.model.fc = torch.nn.Sequential(self.model.fc, torch.nn.Softmax(dim=1))\n self.model.eval()\n\n @torch.no_grad()\n def forward(self, data: torch.Tensor) -> torch.Tensor:\n if data.dim() != 4:\n raise ValueError(f\"Inputs should be a tensor of dim 4, got {data.dim()}\")\n if data.shape[1] != 3:\n raise ValueError(f\"Inputs should be a tensor with 3 channels, got {data.shape}\")\n if data.device != torch.device(self._device):\n data = data.to(self._device)\n return self.model(data)\n\n\nclass _BaseInceptionMetric(Metric):\n def __init__(\n self,\n num_features: Optional[int],\n feature_extractor: Optional[torch.nn.Module],\n output_transform: Callable = lambda x: x,\n device: Union[str, torch.device] = torch.device(\"cpu\"),\n ) -> None:\n\n if num_features is None:\n raise ValueError(\"Argument num_features must be provided, if feature_extractor is specified.\")\n\n if feature_extractor is None:\n feature_extractor = torch.nn.Identity()\n\n if num_features <= 0:\n raise ValueError(f\"Argument num_features must be greater to zero, got: {num_features}\")\n\n if not isinstance(feature_extractor, torch.nn.Module):\n raise TypeError(\n f\"Argument feature_extractor must be of type torch.nn.Module, got {type(self._feature_extractor)}\"\n )\n\n self._num_features = num_features\n self._feature_extractor = feature_extractor.to(device)\n\n super(_BaseInceptionMetric, self).__init__(output_transform=output_transform, device=device)\n\n def _check_feature_shapes(self, samples: torch.Tensor) -> None:\n\n if samples.dim() != 2:\n raise ValueError(f\"feature_extractor output must be a tensor of dim 2, got: {samples.dim()}\")\n\n if samples.shape[0] == 0:\n raise ValueError(f\"Batch size should be greater than one, got: {samples.shape[0]}\")\n\n if samples.shape[1] != self._num_features:\n raise ValueError(\n f\"num_features returned by feature_extractor should be {self._num_features}, got: {samples.shape[1]}\"\n )\n\n def _extract_features(self, inputs: torch.Tensor) -> torch.Tensor:\n\n inputs = inputs.detach()\n\n if inputs.device != torch.device(self._device):\n inputs = inputs.to(self._device)\n\n with torch.no_grad():\n outputs = self._feature_extractor(inputs).to(self._device, dtype=torch.float64)\n self._check_feature_shapes(outputs)\n\n return outputs\n", "path": "ignite/metrics/gan/utils.py"}], "after_files": [{"content": "from typing import Callable, Optional, Union\n\nimport torch\nfrom packaging.version import Version\n\nfrom ignite.metrics.metric import Metric\n\n\nclass InceptionModel(torch.nn.Module):\n r\"\"\"Inception Model pre-trained on the ImageNet Dataset.\n\n Args:\n return_features: set it to `True` if you want the model to return features from the last pooling\n layer instead of prediction probabilities.\n device: specifies which device updates are accumulated on. Setting the\n metric's device to be the same as your ``update`` arguments ensures the ``update`` method is\n non-blocking. By default, CPU.\n \"\"\"\n\n def __init__(self, return_features: bool, device: Union[str, torch.device] = \"cpu\") -> None:\n try:\n import torchvision\n from torchvision import models\n except ImportError:\n raise RuntimeError(\"This module requires torchvision to be installed.\")\n super(InceptionModel, self).__init__()\n self._device = device\n if Version(torchvision.__version__) < Version(\"0.13.0\"):\n model_kwargs = {\"pretrained\": True}\n else:\n model_kwargs = {\"weights\": models.Inception_V3_Weights.DEFAULT}\n\n self.model = models.inception_v3(**model_kwargs).to(self._device)\n\n if return_features:\n self.model.fc = torch.nn.Identity()\n else:\n self.model.fc = torch.nn.Sequential(self.model.fc, torch.nn.Softmax(dim=1))\n self.model.eval()\n\n @torch.no_grad()\n def forward(self, data: torch.Tensor) -> torch.Tensor:\n if data.dim() != 4:\n raise ValueError(f\"Inputs should be a tensor of dim 4, got {data.dim()}\")\n if data.shape[1] != 3:\n raise ValueError(f\"Inputs should be a tensor with 3 channels, got {data.shape}\")\n if data.device != torch.device(self._device):\n data = data.to(self._device)\n return self.model(data)\n\n\nclass _BaseInceptionMetric(Metric):\n def __init__(\n self,\n num_features: Optional[int],\n feature_extractor: Optional[torch.nn.Module],\n output_transform: Callable = lambda x: x,\n device: Union[str, torch.device] = torch.device(\"cpu\"),\n ) -> None:\n\n if num_features is None:\n raise ValueError(\"Argument num_features must be provided, if feature_extractor is specified.\")\n\n if feature_extractor is None:\n feature_extractor = torch.nn.Identity()\n\n if num_features <= 0:\n raise ValueError(f\"Argument num_features must be greater to zero, got: {num_features}\")\n\n if not isinstance(feature_extractor, torch.nn.Module):\n raise TypeError(\n f\"Argument feature_extractor must be of type torch.nn.Module, got {type(self._feature_extractor)}\"\n )\n\n self._num_features = num_features\n self._feature_extractor = feature_extractor.to(device)\n\n super(_BaseInceptionMetric, self).__init__(output_transform=output_transform, device=device)\n\n def _check_feature_shapes(self, samples: torch.Tensor) -> None:\n\n if samples.dim() != 2:\n raise ValueError(f\"feature_extractor output must be a tensor of dim 2, got: {samples.dim()}\")\n\n if samples.shape[0] == 0:\n raise ValueError(f\"Batch size should be greater than one, got: {samples.shape[0]}\")\n\n if samples.shape[1] != self._num_features:\n raise ValueError(\n f\"num_features returned by feature_extractor should be {self._num_features}, got: {samples.shape[1]}\"\n )\n\n def _extract_features(self, inputs: torch.Tensor) -> torch.Tensor:\n\n inputs = inputs.detach()\n\n if inputs.device != torch.device(self._device):\n inputs = inputs.to(self._device)\n\n with torch.no_grad():\n outputs = self._feature_extractor(inputs).to(self._device, dtype=torch.float64)\n self._check_feature_shapes(outputs)\n\n return outputs\n", "path": "ignite/metrics/gan/utils.py"}]}
1,420
198
gh_patches_debug_18182
rasdani/github-patches
git_diff
conda-forge__conda-smithy-1120
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- recipe-lint parser error on template Issue: `conda smithy recipe-lint` encounters a parser error when running on [this recipe](https://github.com/conda-forge/staged-recipes/pull/8838/files#diff-857178a7191e6e87c83b1b9b76ec0e19): ```  100% (...)   ~/src/staged-recipes/recipes/itk-meshtopolydata    cf2ec104  add-itk-meshtopolydata  INSERT  conda smithy recipe-lint . Traceback (most recent call last): File "/home/matt/bin/miniconda3/bin/conda-smithy", line 10, in <module> sys.exit(main()) File "/home/matt/bin/miniconda3/lib/python3.7/site-packages/conda_smithy/cli.py", line 471, in main args.subcommand_func(args) File "/home/matt/bin/miniconda3/lib/python3.7/site-packages/conda_smithy/cli.py", line 366, in __call__ return_hints=True, File "/home/matt/bin/miniconda3/lib/python3.7/site-packages/conda_smithy/lint_recipe.py", line 536, in main meta = ruamel.yaml.load(content, ruamel.yaml.RoundTripLoader) File "/home/matt/bin/miniconda3/lib/python3.7/site-packages/ruamel/yaml/main.py", line 935, in load return loader._constructor.get_single_data() File "/home/matt/bin/miniconda3/lib/python3.7/site-packages/ruamel/yaml/constructor.py", line 109, in get_single_data node = self.composer.get_single_node() File "/home/matt/bin/miniconda3/lib/python3.7/site-packages/ruamel/yaml/composer.py", line 78, in get_single_node document = self.compose_document() File "/home/matt/bin/miniconda3/lib/python3.7/site-packages/ruamel/yaml/composer.py", line 101, in compose_document node = self.compose_node(None, None) File "/home/matt/bin/miniconda3/lib/python3.7/site-packages/ruamel/yaml/composer.py", line 138, in compose_node node = self.compose_mapping_node(anchor) File "/home/matt/bin/miniconda3/lib/python3.7/site-packages/ruamel/yaml/composer.py", line 218, in compose_mapping_node item_value = self.compose_node(node, item_key) File "/home/matt/bin/miniconda3/lib/python3.7/site-packages/ruamel/yaml/composer.py", line 138, in compose_node node = self.compose_mapping_node(anchor) File "/home/matt/bin/miniconda3/lib/python3.7/site-packages/ruamel/yaml/composer.py", line 211, in compose_mapping_node while not self.parser.check_event(MappingEndEvent): File "/home/matt/bin/miniconda3/lib/python3.7/site-packages/ruamel/yaml/parser.py", line 141, in check_event self.current_event = self.state() File "/home/matt/bin/miniconda3/lib/python3.7/site-packages/ruamel/yaml/parser.py", line 581, in parse_block_mapping_key token.start_mark, ruamel.yaml.parser.ParserError: while parsing a block mapping in "<unicode string>", line 30, column 3: number: 0 ^ (line: 30) expected <block end>, but found '<scalar>' in "<unicode string>", line 35, column 71: ... vv itk_meshtopolydata-0.3.4-cp["0"]["2"]-cp["0"]["2"]m-macosx_10 ... ^ (line: 35) ``` <br/> Environment (<code>conda list</code>): <details> ``` $ conda list # packages in environment at /home/matt/bin/miniconda3/envs/itk-recipes: # # Name Version Build Channel _libgcc_mutex 0.1 main asn1crypto 0.24.0 py37_1003 conda-forge beautifulsoup4 4.7.1 py37_1001 conda-forge bzip2 1.0.8 h516909a_0 conda-forge ca-certificates 2019.6.16 hecc5488_0 conda-forge certifi 2019.6.16 py37_1 conda-forge cffi 1.12.3 py37h8022711_0 conda-forge chardet 3.0.4 py37_1003 conda-forge click 7.0 py_0 conda-forge conda 4.7.5 py37_0 conda-forge conda-build 3.18.7 py37_1 conda-forge conda-package-handling 1.3.11 py37_0 conda-forge conda-verify 3.1.1 py37_1000 conda-forge cryptography 2.7 py37h72c5cf5_0 conda-forge filelock 3.0.10 py_0 conda-forge future 0.17.1 py37_1000 conda-forge glob2 0.7 py_0 conda-forge icu 58.2 hf484d3e_1000 conda-forge idna 2.8 py37_1000 conda-forge jinja2 2.10.1 py_0 conda-forge libarchive 3.3.3 hb44662c_1005 conda-forge libffi 3.2.1 he1b5a44_1006 conda-forge libgcc-ng 9.1.0 hdf63c60_0 libiconv 1.15 h516909a_1005 conda-forge liblief 0.9.0 hf8a498c_1 conda-forge libstdcxx-ng 9.1.0 hdf63c60_0 libxml2 2.9.9 h13577e0_1 conda-forge lz4-c 1.8.3 he1b5a44_1001 conda-forge lzo 2.10 h14c3975_1000 conda-forge markupsafe 1.1.1 py37h14c3975_0 conda-forge ncurses 6.1 hf484d3e_1002 conda-forge openssl 1.1.1c h516909a_0 conda-forge patchelf 0.10 he1b5a44_0 conda-forge pip 19.1.1 py37_0 conda-forge pkginfo 1.5.0.1 py_0 conda-forge psutil 5.6.3 py37h516909a_0 conda-forge py-lief 0.9.0 py37he1b5a44_1 conda-forge pycosat 0.6.3 py37h14c3975_1001 conda-forge pycparser 2.19 py37_1 conda-forge pyopenssl 19.0.0 py37_0 conda-forge pysocks 1.7.0 py37_0 conda-forge python 3.7.3 h33d41f4_1 conda-forge python-libarchive-c 2.8 py37_1004 conda-forge pytz 2019.1 py_0 conda-forge pyyaml 5.1.1 py37h516909a_0 conda-forge readline 8.0 hf8c457e_0 conda-forge requests 2.22.0 py37_1 conda-forge ruamel_yaml 0.15.71 py37h14c3975_1000 conda-forge setuptools 41.0.1 py37_0 conda-forge six 1.12.0 py37_1000 conda-forge soupsieve 1.9.2 py37_0 conda-forge sqlite 3.29.0 hcee41ef_0 conda-forge tk 8.6.9 hed695b0_1002 conda-forge tqdm 4.32.2 py_0 conda-forge urllib3 1.25.3 py37_0 conda-forge wheel 0.33.4 py37_0 conda-forge xz 5.2.4 h14c3975_1001 conda-forge yaml 0.1.7 h14c3975_1001 conda-forge zlib 1.2.11 h516909a_1005 conda-forge zstd 1.4.0 h3b9ef0a_0 conda-forge ``` </details> <br/> Details about <code>conda</code> and system ( <code>conda info</code> ): <details> ``` $ conda info active environment : itk-recipes active env location : /home/matt/bin/miniconda3/envs/itk-recipes shell level : 2 user config file : /home/matt/.condarc populated config files : /home/matt/.condarc conda version : 4.7.8 conda-build version : 3.18.8 python version : 3.7.3.final.0 virtual packages : __cuda=9.1 base environment : /home/matt/bin/miniconda3 (writable) channel URLs : https://conda.anaconda.org/conda-forge/linux-64 https://conda.anaconda.org/conda-forge/noarch https://repo.anaconda.com/pkgs/main/linux-64 https://repo.anaconda.com/pkgs/main/noarch https://repo.anaconda.com/pkgs/r/linux-64 https://repo.anaconda.com/pkgs/r/noarch package cache : /home/matt/bin/miniconda3/pkgs /home/matt/.conda/pkgs envs directories : /home/matt/bin/miniconda3/envs /home/matt/.conda/envs platform : linux-64 user-agent : conda/4.7.8 requests/2.21.0 CPython/3.7.3 Linux/4.15.0-54-generic ubuntu/18.04.2 glibc/2.27 UID:GID : 1000:1000 netrc file : None offline mode : False ``` </details> @scopatz @ocefpaf --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `conda_smithy/utils.py` Content: ``` 1 import shutil 2 import tempfile 3 import jinja2 4 import datetime 5 import time 6 import os 7 from collections import defaultdict 8 from contextlib import contextmanager 9 10 import ruamel.yaml 11 12 13 # define global yaml API 14 # roundrip-loader and allowing duplicate keys 15 # for handling # [filter] / # [not filter] 16 yaml = ruamel.yaml.YAML(typ='rt') 17 yaml.allow_duplicate_keys = True 18 19 20 @contextmanager 21 def tmp_directory(): 22 tmp_dir = tempfile.mkdtemp("_recipe") 23 yield tmp_dir 24 shutil.rmtree(tmp_dir) 25 26 27 class NullUndefined(jinja2.Undefined): 28 def __unicode__(self): 29 return self._undefined_name 30 31 def __getattr__(self, name): 32 return "{}.{}".format(self, name) 33 34 def __getitem__(self, name): 35 return '{}["{}"]'.format(self, name) 36 37 38 class MockOS(dict): 39 def __init__(self): 40 self.environ = defaultdict(lambda: "") 41 self.sep = "/" 42 43 44 def render_meta_yaml(text): 45 env = jinja2.Environment(undefined=NullUndefined) 46 47 # stub out cb3 jinja2 functions - they are not important for linting 48 # if we don't stub them out, the ruamel.yaml load fails to interpret them 49 # we can't just use conda-build's api.render functionality, because it would apply selectors 50 env.globals.update( 51 dict( 52 compiler=lambda x: x + "_compiler_stub", 53 pin_subpackage=lambda *args, **kwargs: "subpackage_stub", 54 pin_compatible=lambda *args, **kwargs: "compatible_pin_stub", 55 cdt=lambda *args, **kwargs: "cdt_stub", 56 load_file_regex=lambda *args, **kwargs: defaultdict(lambda: ""), 57 datetime=datetime, 58 time=time, 59 target_platform="linux-64", 60 ) 61 ) 62 mockos = MockOS() 63 content = env.from_string(text).render(os=mockos, environ=mockos.environ) 64 return content 65 66 67 @contextmanager 68 def update_conda_forge_config(feedstock_directory): 69 """Utility method used to update conda forge configuration files 70 71 Uage: 72 >>> with update_conda_forge_config(somepath) as cfg: 73 ... cfg['foo'] = 'bar' 74 """ 75 forge_yaml = os.path.join(feedstock_directory, "conda-forge.yml") 76 if os.path.exists(forge_yaml): 77 with open(forge_yaml, "r") as fh: 78 code = yaml.load(fh) 79 else: 80 code = {} 81 82 # Code could come in as an empty list. 83 if not code: 84 code = {} 85 86 yield code 87 88 with open(forge_yaml, "w") as fh: 89 fh.write(yaml.dump(code)) 90 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/conda_smithy/utils.py b/conda_smithy/utils.py --- a/conda_smithy/utils.py +++ b/conda_smithy/utils.py @@ -4,6 +4,7 @@ import datetime import time import os +import sys from collections import defaultdict from contextlib import contextmanager @@ -13,7 +14,7 @@ # define global yaml API # roundrip-loader and allowing duplicate keys # for handling # [filter] / # [not filter] -yaml = ruamel.yaml.YAML(typ='rt') +yaml = ruamel.yaml.YAML(typ="rt") yaml.allow_duplicate_keys = True @@ -60,7 +61,9 @@ ) ) mockos = MockOS() - content = env.from_string(text).render(os=mockos, environ=mockos.environ) + py_ver = "3.7" + context = {"os": mockos, "environ": mockos.environ, "PY_VER": py_ver} + content = env.from_string(text).render(context) return content
{"golden_diff": "diff --git a/conda_smithy/utils.py b/conda_smithy/utils.py\n--- a/conda_smithy/utils.py\n+++ b/conda_smithy/utils.py\n@@ -4,6 +4,7 @@\n import datetime\n import time\n import os\n+import sys\n from collections import defaultdict\n from contextlib import contextmanager\n \n@@ -13,7 +14,7 @@\n # define global yaml API\n # roundrip-loader and allowing duplicate keys\n # for handling # [filter] / # [not filter]\n-yaml = ruamel.yaml.YAML(typ='rt')\n+yaml = ruamel.yaml.YAML(typ=\"rt\")\n yaml.allow_duplicate_keys = True\n \n \n@@ -60,7 +61,9 @@\n )\n )\n mockos = MockOS()\n- content = env.from_string(text).render(os=mockos, environ=mockos.environ)\n+ py_ver = \"3.7\"\n+ context = {\"os\": mockos, \"environ\": mockos.environ, \"PY_VER\": py_ver}\n+ content = env.from_string(text).render(context)\n return content\n", "issue": "recipe-lint parser error on template\nIssue:\r\n\r\n`conda smithy recipe-lint` encounters a parser error when running on [this recipe](https://github.com/conda-forge/staged-recipes/pull/8838/files#diff-857178a7191e6e87c83b1b9b76ec0e19):\r\n\r\n```\r\n \uf240 100% (...) \ue0b0 \uf07c ~/src/staged-recipes/recipes/itk-meshtopolydata \ue0b0 \uf113 \ue729 cf2ec104 \uf126 add-itk-meshtopolydata \ue0b0 INSERT \ue0b0 conda smithy recipe-lint . Traceback (most recent call last):\r\n File \"/home/matt/bin/miniconda3/bin/conda-smithy\", line 10, in <module>\r\n sys.exit(main())\r\n File \"/home/matt/bin/miniconda3/lib/python3.7/site-packages/conda_smithy/cli.py\", line 471, in main\r\n args.subcommand_func(args)\r\n File \"/home/matt/bin/miniconda3/lib/python3.7/site-packages/conda_smithy/cli.py\", line 366, in __call__\r\n return_hints=True,\r\n File \"/home/matt/bin/miniconda3/lib/python3.7/site-packages/conda_smithy/lint_recipe.py\", line 536, in main\r\n meta = ruamel.yaml.load(content, ruamel.yaml.RoundTripLoader)\r\n File \"/home/matt/bin/miniconda3/lib/python3.7/site-packages/ruamel/yaml/main.py\", line 935, in load\r\n return loader._constructor.get_single_data()\r\n File \"/home/matt/bin/miniconda3/lib/python3.7/site-packages/ruamel/yaml/constructor.py\", line 109, in get_single_data\r\n node = self.composer.get_single_node()\r\n File \"/home/matt/bin/miniconda3/lib/python3.7/site-packages/ruamel/yaml/composer.py\", line 78, in get_single_node\r\n document = self.compose_document()\r\n File \"/home/matt/bin/miniconda3/lib/python3.7/site-packages/ruamel/yaml/composer.py\", line 101, in compose_document\r\n node = self.compose_node(None, None)\r\n File \"/home/matt/bin/miniconda3/lib/python3.7/site-packages/ruamel/yaml/composer.py\", line 138, in compose_node\r\n node = self.compose_mapping_node(anchor)\r\n File \"/home/matt/bin/miniconda3/lib/python3.7/site-packages/ruamel/yaml/composer.py\", line 218, in compose_mapping_node\r\n item_value = self.compose_node(node, item_key)\r\n File \"/home/matt/bin/miniconda3/lib/python3.7/site-packages/ruamel/yaml/composer.py\", line 138, in compose_node\r\n node = self.compose_mapping_node(anchor)\r\n File \"/home/matt/bin/miniconda3/lib/python3.7/site-packages/ruamel/yaml/composer.py\", line 211, in compose_mapping_node\r\n while not self.parser.check_event(MappingEndEvent):\r\n File \"/home/matt/bin/miniconda3/lib/python3.7/site-packages/ruamel/yaml/parser.py\", line 141, in check_event\r\n self.current_event = self.state()\r\n File \"/home/matt/bin/miniconda3/lib/python3.7/site-packages/ruamel/yaml/parser.py\", line 581, in parse_block_mapping_key\r\n token.start_mark,\r\nruamel.yaml.parser.ParserError: while parsing a block mapping\r\n in \"<unicode string>\", line 30, column 3:\r\n number: 0\r\n ^ (line: 30)\r\nexpected <block end>, but found '<scalar>'\r\n in \"<unicode string>\", line 35, column 71:\r\n ... vv itk_meshtopolydata-0.3.4-cp[\"0\"][\"2\"]-cp[\"0\"][\"2\"]m-macosx_10 ...\r\n ^ (line: 35)\r\n\r\n\r\n```\r\n<br/>\r\nEnvironment (<code>conda list</code>):\r\n<details>\r\n\r\n```\r\n$ conda list\r\n# packages in environment at /home/matt/bin/miniconda3/envs/itk-recipes:\r\n#\r\n# Name Version Build Channel\r\n_libgcc_mutex 0.1 main \r\nasn1crypto 0.24.0 py37_1003 conda-forge\r\nbeautifulsoup4 4.7.1 py37_1001 conda-forge\r\nbzip2 1.0.8 h516909a_0 conda-forge\r\nca-certificates 2019.6.16 hecc5488_0 conda-forge\r\ncertifi 2019.6.16 py37_1 conda-forge\r\ncffi 1.12.3 py37h8022711_0 conda-forge\r\nchardet 3.0.4 py37_1003 conda-forge\r\nclick 7.0 py_0 conda-forge\r\nconda 4.7.5 py37_0 conda-forge\r\nconda-build 3.18.7 py37_1 conda-forge\r\nconda-package-handling 1.3.11 py37_0 conda-forge\r\nconda-verify 3.1.1 py37_1000 conda-forge\r\ncryptography 2.7 py37h72c5cf5_0 conda-forge\r\nfilelock 3.0.10 py_0 conda-forge\r\nfuture 0.17.1 py37_1000 conda-forge\r\nglob2 0.7 py_0 conda-forge\r\nicu 58.2 hf484d3e_1000 conda-forge\r\nidna 2.8 py37_1000 conda-forge\r\njinja2 2.10.1 py_0 conda-forge\r\nlibarchive 3.3.3 hb44662c_1005 conda-forge\r\nlibffi 3.2.1 he1b5a44_1006 conda-forge\r\nlibgcc-ng 9.1.0 hdf63c60_0 \r\nlibiconv 1.15 h516909a_1005 conda-forge\r\nliblief 0.9.0 hf8a498c_1 conda-forge\r\nlibstdcxx-ng 9.1.0 hdf63c60_0 \r\nlibxml2 2.9.9 h13577e0_1 conda-forge\r\nlz4-c 1.8.3 he1b5a44_1001 conda-forge\r\nlzo 2.10 h14c3975_1000 conda-forge\r\nmarkupsafe 1.1.1 py37h14c3975_0 conda-forge\r\nncurses 6.1 hf484d3e_1002 conda-forge\r\nopenssl 1.1.1c h516909a_0 conda-forge\r\npatchelf 0.10 he1b5a44_0 conda-forge\r\npip 19.1.1 py37_0 conda-forge\r\npkginfo 1.5.0.1 py_0 conda-forge\r\npsutil 5.6.3 py37h516909a_0 conda-forge\r\npy-lief 0.9.0 py37he1b5a44_1 conda-forge\r\npycosat 0.6.3 py37h14c3975_1001 conda-forge\r\npycparser 2.19 py37_1 conda-forge\r\npyopenssl 19.0.0 py37_0 conda-forge\r\npysocks 1.7.0 py37_0 conda-forge\r\npython 3.7.3 h33d41f4_1 conda-forge\r\npython-libarchive-c 2.8 py37_1004 conda-forge\r\npytz 2019.1 py_0 conda-forge\r\npyyaml 5.1.1 py37h516909a_0 conda-forge\r\nreadline 8.0 hf8c457e_0 conda-forge\r\nrequests 2.22.0 py37_1 conda-forge\r\nruamel_yaml 0.15.71 py37h14c3975_1000 conda-forge\r\nsetuptools 41.0.1 py37_0 conda-forge\r\nsix 1.12.0 py37_1000 conda-forge\r\nsoupsieve 1.9.2 py37_0 conda-forge\r\nsqlite 3.29.0 hcee41ef_0 conda-forge\r\ntk 8.6.9 hed695b0_1002 conda-forge\r\ntqdm 4.32.2 py_0 conda-forge\r\nurllib3 1.25.3 py37_0 conda-forge\r\nwheel 0.33.4 py37_0 conda-forge\r\nxz 5.2.4 h14c3975_1001 conda-forge\r\nyaml 0.1.7 h14c3975_1001 conda-forge\r\nzlib 1.2.11 h516909a_1005 conda-forge\r\nzstd 1.4.0 h3b9ef0a_0 conda-forge\r\n\r\n```\r\n</details>\r\n\r\n<br/>\r\nDetails about <code>conda</code> and system ( <code>conda info</code> ):\r\n<details>\r\n\r\n```\r\n$ conda info\r\n\r\n active environment : itk-recipes\r\n active env location : /home/matt/bin/miniconda3/envs/itk-recipes\r\n shell level : 2\r\n user config file : /home/matt/.condarc\r\n populated config files : /home/matt/.condarc\r\n conda version : 4.7.8\r\n conda-build version : 3.18.8\r\n python version : 3.7.3.final.0\r\n virtual packages : __cuda=9.1\r\n base environment : /home/matt/bin/miniconda3 (writable)\r\n channel URLs : https://conda.anaconda.org/conda-forge/linux-64\r\n https://conda.anaconda.org/conda-forge/noarch\r\n https://repo.anaconda.com/pkgs/main/linux-64\r\n https://repo.anaconda.com/pkgs/main/noarch\r\n https://repo.anaconda.com/pkgs/r/linux-64\r\n https://repo.anaconda.com/pkgs/r/noarch\r\n package cache : /home/matt/bin/miniconda3/pkgs\r\n /home/matt/.conda/pkgs\r\n envs directories : /home/matt/bin/miniconda3/envs\r\n /home/matt/.conda/envs\r\n platform : linux-64\r\n user-agent : conda/4.7.8 requests/2.21.0 CPython/3.7.3 Linux/4.15.0-54-generic ubuntu/18.04.2 glibc/2.27\r\n UID:GID : 1000:1000\r\n netrc file : None\r\n offline mode : False\r\n\r\n\r\n```\r\n</details>\r\n\r\n@scopatz @ocefpaf \n", "before_files": [{"content": "import shutil\nimport tempfile\nimport jinja2\nimport datetime\nimport time\nimport os\nfrom collections import defaultdict\nfrom contextlib import contextmanager\n\nimport ruamel.yaml\n\n\n# define global yaml API\n# roundrip-loader and allowing duplicate keys\n# for handling # [filter] / # [not filter]\nyaml = ruamel.yaml.YAML(typ='rt')\nyaml.allow_duplicate_keys = True\n\n\n@contextmanager\ndef tmp_directory():\n tmp_dir = tempfile.mkdtemp(\"_recipe\")\n yield tmp_dir\n shutil.rmtree(tmp_dir)\n\n\nclass NullUndefined(jinja2.Undefined):\n def __unicode__(self):\n return self._undefined_name\n\n def __getattr__(self, name):\n return \"{}.{}\".format(self, name)\n\n def __getitem__(self, name):\n return '{}[\"{}\"]'.format(self, name)\n\n\nclass MockOS(dict):\n def __init__(self):\n self.environ = defaultdict(lambda: \"\")\n self.sep = \"/\"\n\n\ndef render_meta_yaml(text):\n env = jinja2.Environment(undefined=NullUndefined)\n\n # stub out cb3 jinja2 functions - they are not important for linting\n # if we don't stub them out, the ruamel.yaml load fails to interpret them\n # we can't just use conda-build's api.render functionality, because it would apply selectors\n env.globals.update(\n dict(\n compiler=lambda x: x + \"_compiler_stub\",\n pin_subpackage=lambda *args, **kwargs: \"subpackage_stub\",\n pin_compatible=lambda *args, **kwargs: \"compatible_pin_stub\",\n cdt=lambda *args, **kwargs: \"cdt_stub\",\n load_file_regex=lambda *args, **kwargs: defaultdict(lambda: \"\"),\n datetime=datetime,\n time=time,\n target_platform=\"linux-64\",\n )\n )\n mockos = MockOS()\n content = env.from_string(text).render(os=mockos, environ=mockos.environ)\n return content\n\n\n@contextmanager\ndef update_conda_forge_config(feedstock_directory):\n \"\"\"Utility method used to update conda forge configuration files\n\n Uage:\n >>> with update_conda_forge_config(somepath) as cfg:\n ... cfg['foo'] = 'bar'\n \"\"\"\n forge_yaml = os.path.join(feedstock_directory, \"conda-forge.yml\")\n if os.path.exists(forge_yaml):\n with open(forge_yaml, \"r\") as fh:\n code = yaml.load(fh)\n else:\n code = {}\n\n # Code could come in as an empty list.\n if not code:\n code = {}\n\n yield code\n\n with open(forge_yaml, \"w\") as fh:\n fh.write(yaml.dump(code))\n", "path": "conda_smithy/utils.py"}], "after_files": [{"content": "import shutil\nimport tempfile\nimport jinja2\nimport datetime\nimport time\nimport os\nimport sys\nfrom collections import defaultdict\nfrom contextlib import contextmanager\n\nimport ruamel.yaml\n\n\n# define global yaml API\n# roundrip-loader and allowing duplicate keys\n# for handling # [filter] / # [not filter]\nyaml = ruamel.yaml.YAML(typ=\"rt\")\nyaml.allow_duplicate_keys = True\n\n\n@contextmanager\ndef tmp_directory():\n tmp_dir = tempfile.mkdtemp(\"_recipe\")\n yield tmp_dir\n shutil.rmtree(tmp_dir)\n\n\nclass NullUndefined(jinja2.Undefined):\n def __unicode__(self):\n return self._undefined_name\n\n def __getattr__(self, name):\n return \"{}.{}\".format(self, name)\n\n def __getitem__(self, name):\n return '{}[\"{}\"]'.format(self, name)\n\n\nclass MockOS(dict):\n def __init__(self):\n self.environ = defaultdict(lambda: \"\")\n self.sep = \"/\"\n\n\ndef render_meta_yaml(text):\n env = jinja2.Environment(undefined=NullUndefined)\n\n # stub out cb3 jinja2 functions - they are not important for linting\n # if we don't stub them out, the ruamel.yaml load fails to interpret them\n # we can't just use conda-build's api.render functionality, because it would apply selectors\n env.globals.update(\n dict(\n compiler=lambda x: x + \"_compiler_stub\",\n pin_subpackage=lambda *args, **kwargs: \"subpackage_stub\",\n pin_compatible=lambda *args, **kwargs: \"compatible_pin_stub\",\n cdt=lambda *args, **kwargs: \"cdt_stub\",\n load_file_regex=lambda *args, **kwargs: defaultdict(lambda: \"\"),\n datetime=datetime,\n time=time,\n target_platform=\"linux-64\",\n )\n )\n mockos = MockOS()\n py_ver = \"3.7\"\n context = {\"os\": mockos, \"environ\": mockos.environ, \"PY_VER\": py_ver}\n content = env.from_string(text).render(context)\n return content\n\n\n@contextmanager\ndef update_conda_forge_config(feedstock_directory):\n \"\"\"Utility method used to update conda forge configuration files\n\n Uage:\n >>> with update_conda_forge_config(somepath) as cfg:\n ... cfg['foo'] = 'bar'\n \"\"\"\n forge_yaml = os.path.join(feedstock_directory, \"conda-forge.yml\")\n if os.path.exists(forge_yaml):\n with open(forge_yaml, \"r\") as fh:\n code = yaml.load(fh)\n else:\n code = {}\n\n # Code could come in as an empty list.\n if not code:\n code = {}\n\n yield code\n\n with open(forge_yaml, \"w\") as fh:\n fh.write(yaml.dump(code))\n", "path": "conda_smithy/utils.py"}]}
3,907
242
gh_patches_debug_11941
rasdani/github-patches
git_diff
SeldonIO__MLServer-1104
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- mlserver >= 1.3 doesn't work with MLflow After the release of `mlserver==1.3.1`, one of our tests in `https://github.com/mlflow/mlflow` started failing with the following error. We're investigating the cause. https://github.com/mlflow/mlflow/actions/runs/4716614587/jobs/8364498235?pr=8241#step:7:8138 ``` 2023-04-17 03:00:01,885 [mlserver.parallel] DEBUG - Starting response processing loop... 2023-04-17 03:00:01,894 [mlserver.rest] INFO - HTTP server running on http://0.0.0.0:8080/ 2023-04-17 03:00:01,943 [mlserver.metrics] INFO - Metrics server running on http://0.0.0.0:8082/ 2023-04-17 03:00:01,944 [mlserver.metrics] INFO - Prometheus scraping endpoint can be accessed on http://0.0.0.0:8082/metrics 2023-04-17 03:00:01,963 [mlserver.grpc] INFO - gRPC server running on http://0.0.0.0:8081/ INFO: 172.17.0.1:59636 - "GET /ping HTTP/1.1" 404 Not Found INFO: 172.17.0.1:59644 - "GET /ping HTTP/1.1" 404 Not Found 2023-04-17 03:00:03,801 [mlserver] INFO - Loaded model 'mlflow-model' succesfully. 2023-04-17 03:00:03,802 [mlserver] INFO - Loaded model 'mlflow-model' succesfully. 2023-04-17 03:00:03,804 [mlserver] INFO - Loaded model 'mlflow-model' succesfully. INFO: 172.17.0.1:59646 - "GET /ping HTTP/1.1" 404 Not Found INFO: 172.17.0.1:59660 - "GET /ping HTTP/1.1" 404 Not Found INFO: 172.17.0.1:59674 - "GET /ping HTTP/1.1" 404 Not Found INFO: 172.17.0.1:59690 - "GET /ping HTTP/1.1" 404 Not Found INFO: 172.17.0.1:59702 - "GET /ping HTTP/1.1" 404 Not Found INFO: 172.17.0.1:59708 - "GET /ping HTTP/1.1" 404 Not Found INFO: 172.17.0.1:59712 - "GET /ping HTTP/1.1" 404 Not Found INFO: 172.17.0.1:59718 - "GET /ping HTTP/1.1" 404 Not Found INFO: 172.17.0.1:42498 - "GET /ping HTTP/1.1" 404 Not Found INFO: 172.17.0.1:42512 - "GET /ping HTTP/1.1" 404 Not Found INFO: 172.17.0.1:42528 - "GET /ping HTTP/1.1" 404 Not Found INFO: 172.17.0.1:42530 - "GET /ping HTTP/1.1" 404 Not Found INFO: 172.17.0.1:42536 - "GET /ping HTTP/1.1" 404 Not Found INFO: 172.17.0.1:42544 - "GET /ping HTTP/1.1" 404 Not Found INFO: 172.17.0.1:42546 - "GET /ping HTTP/1.1" 404 Not Found INFO: 172.17.0.1:42552 - "GET /ping HTTP/1.1" 404 Not Found ... ``` The error above indicates that the `/ping` endpoint is not registered. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mlserver/parallel/registry.py` Content: ``` 1 import asyncio 2 import os 3 import shutil 4 5 from typing import Optional, Dict, List 6 7 from ..settings import ModelSettings 8 from ..utils import to_absolute_path 9 from ..model import MLModel 10 from ..settings import Settings 11 from ..env import Environment, compute_hash 12 from ..registry import model_initialiser 13 14 from .errors import EnvironmentNotFound 15 from .logging import logger 16 from .pool import InferencePool, InferencePoolHook 17 18 ENV_HASH_ATTR = "__env_hash__" 19 20 21 def _set_environment_hash(model: MLModel, env_hash: Optional[str]): 22 setattr(model, ENV_HASH_ATTR, env_hash) 23 24 25 def _get_environment_hash(model: MLModel) -> Optional[str]: 26 return getattr(model, ENV_HASH_ATTR, None) 27 28 29 def _get_env_tarball(model: MLModel) -> Optional[str]: 30 model_settings = model.settings 31 if model_settings.parameters is None: 32 return None 33 34 env_tarball = model_settings.parameters.environment_tarball 35 if env_tarball is None: 36 return None 37 38 return to_absolute_path(model_settings, env_tarball) 39 40 41 class InferencePoolRegistry: 42 """ 43 Keeps track of the different inference pools loaded in the server. 44 Each inference pool will generally be used to load a different environment. 45 """ 46 47 def __init__( 48 self, settings: Settings, on_worker_stop: List[InferencePoolHook] = [] 49 ): 50 self._settings = settings 51 self._on_worker_stop = on_worker_stop 52 self._default_pool = InferencePool( 53 self._settings, on_worker_stop=on_worker_stop 54 ) 55 self._pools: Dict[str, InferencePool] = {} 56 57 os.makedirs(self._settings.environments_dir, exist_ok=True) 58 59 async def _get_or_create(self, model: MLModel) -> InferencePool: 60 env_tarball = _get_env_tarball(model) 61 if not env_tarball: 62 return self._default_pool 63 64 env_hash = await compute_hash(env_tarball) 65 if env_hash in self._pools: 66 return self._pools[env_hash] 67 68 env = await self._extract_tarball(env_hash, env_tarball) 69 pool = InferencePool( 70 self._settings, env=env, on_worker_stop=self._on_worker_stop 71 ) 72 self._pools[env_hash] = pool 73 return pool 74 75 async def _extract_tarball(self, env_hash: str, env_tarball: str) -> Environment: 76 env_path = self._get_env_path(env_hash) 77 if os.path.isdir(env_path): 78 # If env has already been extracted, use that 79 return Environment(env_path, env_hash) 80 81 os.makedirs(env_path) 82 return await Environment.from_tarball(env_tarball, env_path, env_hash) 83 84 def _get_env_path(self, env_hash: str) -> str: 85 return os.path.join(self._settings.environments_dir, env_hash) 86 87 async def _find(self, model: MLModel) -> InferencePool: 88 env_hash = _get_environment_hash(model) 89 if not env_hash: 90 return self._default_pool 91 92 if env_hash not in self._pools: 93 raise EnvironmentNotFound(model, env_hash) 94 95 return self._pools[env_hash] 96 97 def _should_load_model(self, model_settings: ModelSettings): 98 if model_settings.parallel_workers is not None: 99 logger.warning( 100 "DEPRECATED!! The `parallel_workers` setting at the model-level " 101 "has now been deprecated and moved " 102 "to the top-level server " 103 "settings. " 104 "This field will be removed in MLServer 1.2.0. " 105 "To access the new field, you can either update the " 106 "`settings.json` file, or update the `MLSERVER_PARALLEL_WORKERS` " 107 "environment variable. " 108 f"The current value of the server-level's `parallel_workers` field is " 109 f"'{self._settings.parallel_workers}'." 110 ) 111 112 # NOTE: This is a remnant from the previous architecture for parallel 113 # workers, where each worker had its own pool. 114 # For backwards compatibility, we will respect when a model disables 115 # parallel inference. 116 if model_settings.parallel_workers <= 0: 117 return False 118 119 if not self._settings.parallel_workers: 120 return False 121 122 return True 123 124 def model_initialiser(self, model_settings: ModelSettings) -> MLModel: 125 """ 126 Used to initialise a model object in the ModelRegistry. 127 """ 128 if not self._should_load_model(model_settings): 129 # If parallel inference should not be used, instantiate the model 130 # as normal. 131 return model_initialiser(model_settings) 132 133 # Otherwise, return a dummy model for now and wait for the load_model 134 # hook to create the actual thing. 135 # This avoids instantiating the model's actual class within the 136 # main process. 137 return MLModel(model_settings) 138 139 async def load_model(self, model: MLModel) -> MLModel: 140 if not self._should_load_model(model.settings): 141 # Skip load if model has disabled parallel workers 142 return model 143 144 # TODO: If load fails, should we remove pool if empty? 145 pool = await self._get_or_create(model) 146 loaded = await pool.load_model(model) 147 _set_environment_hash(loaded, pool.env_hash) 148 return loaded 149 150 async def reload_model(self, old_model: MLModel, new_model: MLModel) -> MLModel: 151 if not self._should_load_model(new_model.settings): 152 # TODO: What would happen if old_model had parallel inference 153 # enabled and is disabled in new_model (and viceversa)? 154 # Skip reload if model has disabled parallel workers 155 return new_model 156 157 old_hash = _get_environment_hash(old_model) 158 new_pool = await self._get_or_create(new_model) 159 160 loaded = await new_pool.reload_model(old_model, new_model) 161 _set_environment_hash(loaded, new_pool.env_hash) 162 if old_hash != new_pool.env_hash: 163 # Environment has changed in the new version, so unload the old one 164 await self.unload_model(old_model) 165 166 return loaded 167 168 async def unload_model(self, model: MLModel) -> MLModel: 169 if not self._should_load_model(model.settings): 170 # Skip unload if model has disabled parallel workers 171 return model 172 173 pool = await self._find(model) 174 unloaded = await pool.unload_model(model) 175 176 if pool != self._default_pool and pool.empty(): 177 logger.info(f"Inference pool with hash '{pool.env_hash}' is now empty") 178 await self._close_pool(pool.env_hash) 179 180 return unloaded 181 182 async def close(self): 183 await asyncio.gather( 184 self._close_pool(None), 185 *[self._close_pool(env_hash) for env_hash in self._pools], 186 ) 187 188 async def _close_pool(self, env_hash: Optional[str] = None): 189 pool = self._default_pool 190 pool_name = "default inference pool" 191 if env_hash: 192 pool = self._pools[env_hash] 193 pool_name = f"inference pool with hash '{env_hash}'" 194 195 logger.info(f"Waiting for shutdown of {pool_name}...") 196 await pool.close() 197 logger.info(f"Shutdown of {pool_name} complete") 198 199 if env_hash: 200 del self._pools[env_hash] 201 env_path = self._get_env_path(env_hash) 202 shutil.rmtree(env_path) 203 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mlserver/parallel/registry.py b/mlserver/parallel/registry.py --- a/mlserver/parallel/registry.py +++ b/mlserver/parallel/registry.py @@ -130,6 +130,12 @@ # as normal. return model_initialiser(model_settings) + parameters = model_settings.parameters + if not parameters or not parameters.environment_tarball: + # If model is not using a custom environment, instantiate the model + # as normal. + return model_initialiser(model_settings) + # Otherwise, return a dummy model for now and wait for the load_model # hook to create the actual thing. # This avoids instantiating the model's actual class within the
{"golden_diff": "diff --git a/mlserver/parallel/registry.py b/mlserver/parallel/registry.py\n--- a/mlserver/parallel/registry.py\n+++ b/mlserver/parallel/registry.py\n@@ -130,6 +130,12 @@\n # as normal.\n return model_initialiser(model_settings)\n \n+ parameters = model_settings.parameters\n+ if not parameters or not parameters.environment_tarball:\n+ # If model is not using a custom environment, instantiate the model\n+ # as normal.\n+ return model_initialiser(model_settings)\n+\n # Otherwise, return a dummy model for now and wait for the load_model\n # hook to create the actual thing.\n # This avoids instantiating the model's actual class within the\n", "issue": "mlserver >= 1.3 doesn't work with MLflow\nAfter the release of `mlserver==1.3.1`, one of our tests in `https://github.com/mlflow/mlflow` started failing with the following error. We're investigating the cause.\r\n\r\nhttps://github.com/mlflow/mlflow/actions/runs/4716614587/jobs/8364498235?pr=8241#step:7:8138\r\n\r\n```\r\n2023-04-17 03:00:01,885 [mlserver.parallel] DEBUG - Starting response processing loop...\r\n2023-04-17 03:00:01,894 [mlserver.rest] INFO - HTTP server running on http://0.0.0.0:8080/\r\n2023-04-17 03:00:01,943 [mlserver.metrics] INFO - Metrics server running on http://0.0.0.0:8082/\r\n2023-04-17 03:00:01,944 [mlserver.metrics] INFO - Prometheus scraping endpoint can be accessed on http://0.0.0.0:8082/metrics\r\n2023-04-17 03:00:01,963 [mlserver.grpc] INFO - gRPC server running on http://0.0.0.0:8081/\r\nINFO: 172.17.0.1:59636 - \"GET /ping HTTP/1.1\" 404 Not Found\r\nINFO: 172.17.0.1:59644 - \"GET /ping HTTP/1.1\" 404 Not Found\r\n2023-04-17 03:00:03,801 [mlserver] INFO - Loaded model 'mlflow-model' succesfully.\r\n2023-04-17 03:00:03,802 [mlserver] INFO - Loaded model 'mlflow-model' succesfully.\r\n2023-04-17 03:00:03,804 [mlserver] INFO - Loaded model 'mlflow-model' succesfully.\r\nINFO: 172.17.0.1:59646 - \"GET /ping HTTP/1.1\" 404 Not Found\r\nINFO: 172.17.0.1:59660 - \"GET /ping HTTP/1.1\" 404 Not Found\r\nINFO: 172.17.0.1:59674 - \"GET /ping HTTP/1.1\" 404 Not Found\r\nINFO: 172.17.0.1:59690 - \"GET /ping HTTP/1.1\" 404 Not Found\r\nINFO: 172.17.0.1:59702 - \"GET /ping HTTP/1.1\" 404 Not Found\r\nINFO: 172.17.0.1:59708 - \"GET /ping HTTP/1.1\" 404 Not Found\r\nINFO: 172.17.0.1:59712 - \"GET /ping HTTP/1.1\" 404 Not Found\r\nINFO: 172.17.0.1:59718 - \"GET /ping HTTP/1.1\" 404 Not Found\r\nINFO: 172.17.0.1:42498 - \"GET /ping HTTP/1.1\" 404 Not Found\r\nINFO: 172.17.0.1:42512 - \"GET /ping HTTP/1.1\" 404 Not Found\r\nINFO: 172.17.0.1:42528 - \"GET /ping HTTP/1.1\" 404 Not Found\r\nINFO: 172.17.0.1:42530 - \"GET /ping HTTP/1.1\" 404 Not Found\r\nINFO: 172.17.0.1:42536 - \"GET /ping HTTP/1.1\" 404 Not Found\r\nINFO: 172.17.0.1:42544 - \"GET /ping HTTP/1.1\" 404 Not Found\r\nINFO: 172.17.0.1:42546 - \"GET /ping HTTP/1.1\" 404 Not Found\r\nINFO: 172.17.0.1:42552 - \"GET /ping HTTP/1.1\" 404 Not Found\r\n...\r\n```\r\n\r\nThe error above indicates that the `/ping` endpoint is not registered.\n", "before_files": [{"content": "import asyncio\nimport os\nimport shutil\n\nfrom typing import Optional, Dict, List\n\nfrom ..settings import ModelSettings\nfrom ..utils import to_absolute_path\nfrom ..model import MLModel\nfrom ..settings import Settings\nfrom ..env import Environment, compute_hash\nfrom ..registry import model_initialiser\n\nfrom .errors import EnvironmentNotFound\nfrom .logging import logger\nfrom .pool import InferencePool, InferencePoolHook\n\nENV_HASH_ATTR = \"__env_hash__\"\n\n\ndef _set_environment_hash(model: MLModel, env_hash: Optional[str]):\n setattr(model, ENV_HASH_ATTR, env_hash)\n\n\ndef _get_environment_hash(model: MLModel) -> Optional[str]:\n return getattr(model, ENV_HASH_ATTR, None)\n\n\ndef _get_env_tarball(model: MLModel) -> Optional[str]:\n model_settings = model.settings\n if model_settings.parameters is None:\n return None\n\n env_tarball = model_settings.parameters.environment_tarball\n if env_tarball is None:\n return None\n\n return to_absolute_path(model_settings, env_tarball)\n\n\nclass InferencePoolRegistry:\n \"\"\"\n Keeps track of the different inference pools loaded in the server.\n Each inference pool will generally be used to load a different environment.\n \"\"\"\n\n def __init__(\n self, settings: Settings, on_worker_stop: List[InferencePoolHook] = []\n ):\n self._settings = settings\n self._on_worker_stop = on_worker_stop\n self._default_pool = InferencePool(\n self._settings, on_worker_stop=on_worker_stop\n )\n self._pools: Dict[str, InferencePool] = {}\n\n os.makedirs(self._settings.environments_dir, exist_ok=True)\n\n async def _get_or_create(self, model: MLModel) -> InferencePool:\n env_tarball = _get_env_tarball(model)\n if not env_tarball:\n return self._default_pool\n\n env_hash = await compute_hash(env_tarball)\n if env_hash in self._pools:\n return self._pools[env_hash]\n\n env = await self._extract_tarball(env_hash, env_tarball)\n pool = InferencePool(\n self._settings, env=env, on_worker_stop=self._on_worker_stop\n )\n self._pools[env_hash] = pool\n return pool\n\n async def _extract_tarball(self, env_hash: str, env_tarball: str) -> Environment:\n env_path = self._get_env_path(env_hash)\n if os.path.isdir(env_path):\n # If env has already been extracted, use that\n return Environment(env_path, env_hash)\n\n os.makedirs(env_path)\n return await Environment.from_tarball(env_tarball, env_path, env_hash)\n\n def _get_env_path(self, env_hash: str) -> str:\n return os.path.join(self._settings.environments_dir, env_hash)\n\n async def _find(self, model: MLModel) -> InferencePool:\n env_hash = _get_environment_hash(model)\n if not env_hash:\n return self._default_pool\n\n if env_hash not in self._pools:\n raise EnvironmentNotFound(model, env_hash)\n\n return self._pools[env_hash]\n\n def _should_load_model(self, model_settings: ModelSettings):\n if model_settings.parallel_workers is not None:\n logger.warning(\n \"DEPRECATED!! The `parallel_workers` setting at the model-level \"\n \"has now been deprecated and moved \"\n \"to the top-level server \"\n \"settings. \"\n \"This field will be removed in MLServer 1.2.0. \"\n \"To access the new field, you can either update the \"\n \"`settings.json` file, or update the `MLSERVER_PARALLEL_WORKERS` \"\n \"environment variable. \"\n f\"The current value of the server-level's `parallel_workers` field is \"\n f\"'{self._settings.parallel_workers}'.\"\n )\n\n # NOTE: This is a remnant from the previous architecture for parallel\n # workers, where each worker had its own pool.\n # For backwards compatibility, we will respect when a model disables\n # parallel inference.\n if model_settings.parallel_workers <= 0:\n return False\n\n if not self._settings.parallel_workers:\n return False\n\n return True\n\n def model_initialiser(self, model_settings: ModelSettings) -> MLModel:\n \"\"\"\n Used to initialise a model object in the ModelRegistry.\n \"\"\"\n if not self._should_load_model(model_settings):\n # If parallel inference should not be used, instantiate the model\n # as normal.\n return model_initialiser(model_settings)\n\n # Otherwise, return a dummy model for now and wait for the load_model\n # hook to create the actual thing.\n # This avoids instantiating the model's actual class within the\n # main process.\n return MLModel(model_settings)\n\n async def load_model(self, model: MLModel) -> MLModel:\n if not self._should_load_model(model.settings):\n # Skip load if model has disabled parallel workers\n return model\n\n # TODO: If load fails, should we remove pool if empty?\n pool = await self._get_or_create(model)\n loaded = await pool.load_model(model)\n _set_environment_hash(loaded, pool.env_hash)\n return loaded\n\n async def reload_model(self, old_model: MLModel, new_model: MLModel) -> MLModel:\n if not self._should_load_model(new_model.settings):\n # TODO: What would happen if old_model had parallel inference\n # enabled and is disabled in new_model (and viceversa)?\n # Skip reload if model has disabled parallel workers\n return new_model\n\n old_hash = _get_environment_hash(old_model)\n new_pool = await self._get_or_create(new_model)\n\n loaded = await new_pool.reload_model(old_model, new_model)\n _set_environment_hash(loaded, new_pool.env_hash)\n if old_hash != new_pool.env_hash:\n # Environment has changed in the new version, so unload the old one\n await self.unload_model(old_model)\n\n return loaded\n\n async def unload_model(self, model: MLModel) -> MLModel:\n if not self._should_load_model(model.settings):\n # Skip unload if model has disabled parallel workers\n return model\n\n pool = await self._find(model)\n unloaded = await pool.unload_model(model)\n\n if pool != self._default_pool and pool.empty():\n logger.info(f\"Inference pool with hash '{pool.env_hash}' is now empty\")\n await self._close_pool(pool.env_hash)\n\n return unloaded\n\n async def close(self):\n await asyncio.gather(\n self._close_pool(None),\n *[self._close_pool(env_hash) for env_hash in self._pools],\n )\n\n async def _close_pool(self, env_hash: Optional[str] = None):\n pool = self._default_pool\n pool_name = \"default inference pool\"\n if env_hash:\n pool = self._pools[env_hash]\n pool_name = f\"inference pool with hash '{env_hash}'\"\n\n logger.info(f\"Waiting for shutdown of {pool_name}...\")\n await pool.close()\n logger.info(f\"Shutdown of {pool_name} complete\")\n\n if env_hash:\n del self._pools[env_hash]\n env_path = self._get_env_path(env_hash)\n shutil.rmtree(env_path)\n", "path": "mlserver/parallel/registry.py"}], "after_files": [{"content": "import asyncio\nimport os\nimport shutil\n\nfrom typing import Optional, Dict, List\n\nfrom ..settings import ModelSettings\nfrom ..utils import to_absolute_path\nfrom ..model import MLModel\nfrom ..settings import Settings\nfrom ..env import Environment, compute_hash\nfrom ..registry import model_initialiser\n\nfrom .errors import EnvironmentNotFound\nfrom .logging import logger\nfrom .pool import InferencePool, InferencePoolHook\n\nENV_HASH_ATTR = \"__env_hash__\"\n\n\ndef _set_environment_hash(model: MLModel, env_hash: Optional[str]):\n setattr(model, ENV_HASH_ATTR, env_hash)\n\n\ndef _get_environment_hash(model: MLModel) -> Optional[str]:\n return getattr(model, ENV_HASH_ATTR, None)\n\n\ndef _get_env_tarball(model: MLModel) -> Optional[str]:\n model_settings = model.settings\n if model_settings.parameters is None:\n return None\n\n env_tarball = model_settings.parameters.environment_tarball\n if env_tarball is None:\n return None\n\n return to_absolute_path(model_settings, env_tarball)\n\n\nclass InferencePoolRegistry:\n \"\"\"\n Keeps track of the different inference pools loaded in the server.\n Each inference pool will generally be used to load a different environment.\n \"\"\"\n\n def __init__(\n self, settings: Settings, on_worker_stop: List[InferencePoolHook] = []\n ):\n self._settings = settings\n self._on_worker_stop = on_worker_stop\n self._default_pool = InferencePool(\n self._settings, on_worker_stop=on_worker_stop\n )\n self._pools: Dict[str, InferencePool] = {}\n\n os.makedirs(self._settings.environments_dir, exist_ok=True)\n\n async def _get_or_create(self, model: MLModel) -> InferencePool:\n env_tarball = _get_env_tarball(model)\n if not env_tarball:\n return self._default_pool\n\n env_hash = await compute_hash(env_tarball)\n if env_hash in self._pools:\n return self._pools[env_hash]\n\n env = await self._extract_tarball(env_hash, env_tarball)\n pool = InferencePool(\n self._settings, env=env, on_worker_stop=self._on_worker_stop\n )\n self._pools[env_hash] = pool\n return pool\n\n async def _extract_tarball(self, env_hash: str, env_tarball: str) -> Environment:\n env_path = self._get_env_path(env_hash)\n if os.path.isdir(env_path):\n # If env has already been extracted, use that\n return Environment(env_path, env_hash)\n\n os.makedirs(env_path)\n return await Environment.from_tarball(env_tarball, env_path, env_hash)\n\n def _get_env_path(self, env_hash: str) -> str:\n return os.path.join(self._settings.environments_dir, env_hash)\n\n async def _find(self, model: MLModel) -> InferencePool:\n env_hash = _get_environment_hash(model)\n if not env_hash:\n return self._default_pool\n\n if env_hash not in self._pools:\n raise EnvironmentNotFound(model, env_hash)\n\n return self._pools[env_hash]\n\n def _should_load_model(self, model_settings: ModelSettings):\n if model_settings.parallel_workers is not None:\n logger.warning(\n \"DEPRECATED!! The `parallel_workers` setting at the model-level \"\n \"has now been deprecated and moved \"\n \"to the top-level server \"\n \"settings. \"\n \"This field will be removed in MLServer 1.2.0. \"\n \"To access the new field, you can either update the \"\n \"`settings.json` file, or update the `MLSERVER_PARALLEL_WORKERS` \"\n \"environment variable. \"\n f\"The current value of the server-level's `parallel_workers` field is \"\n f\"'{self._settings.parallel_workers}'.\"\n )\n\n # NOTE: This is a remnant from the previous architecture for parallel\n # workers, where each worker had its own pool.\n # For backwards compatibility, we will respect when a model disables\n # parallel inference.\n if model_settings.parallel_workers <= 0:\n return False\n\n if not self._settings.parallel_workers:\n return False\n\n return True\n\n def model_initialiser(self, model_settings: ModelSettings) -> MLModel:\n \"\"\"\n Used to initialise a model object in the ModelRegistry.\n \"\"\"\n if not self._should_load_model(model_settings):\n # If parallel inference should not be used, instantiate the model\n # as normal.\n return model_initialiser(model_settings)\n\n parameters = model_settings.parameters\n if not parameters or not parameters.environment_tarball:\n # If model is not using a custom environment, instantiate the model\n # as normal.\n return model_initialiser(model_settings)\n\n # Otherwise, return a dummy model for now and wait for the load_model\n # hook to create the actual thing.\n # This avoids instantiating the model's actual class within the\n # main process.\n return MLModel(model_settings)\n\n async def load_model(self, model: MLModel) -> MLModel:\n if not self._should_load_model(model.settings):\n # Skip load if model has disabled parallel workers\n return model\n\n # TODO: If load fails, should we remove pool if empty?\n pool = await self._get_or_create(model)\n loaded = await pool.load_model(model)\n _set_environment_hash(loaded, pool.env_hash)\n return loaded\n\n async def reload_model(self, old_model: MLModel, new_model: MLModel) -> MLModel:\n if not self._should_load_model(new_model.settings):\n # TODO: What would happen if old_model had parallel inference\n # enabled and is disabled in new_model (and viceversa)?\n # Skip reload if model has disabled parallel workers\n return new_model\n\n old_hash = _get_environment_hash(old_model)\n new_pool = await self._get_or_create(new_model)\n\n loaded = await new_pool.reload_model(old_model, new_model)\n _set_environment_hash(loaded, new_pool.env_hash)\n if old_hash != new_pool.env_hash:\n # Environment has changed in the new version, so unload the old one\n await self.unload_model(old_model)\n\n return loaded\n\n async def unload_model(self, model: MLModel) -> MLModel:\n if not self._should_load_model(model.settings):\n # Skip unload if model has disabled parallel workers\n return model\n\n pool = await self._find(model)\n unloaded = await pool.unload_model(model)\n\n if pool != self._default_pool and pool.empty():\n logger.info(f\"Inference pool with hash '{pool.env_hash}' is now empty\")\n await self._close_pool(pool.env_hash)\n\n return unloaded\n\n async def close(self):\n await asyncio.gather(\n self._close_pool(None),\n *[self._close_pool(env_hash) for env_hash in self._pools],\n )\n\n async def _close_pool(self, env_hash: Optional[str] = None):\n pool = self._default_pool\n pool_name = \"default inference pool\"\n if env_hash:\n pool = self._pools[env_hash]\n pool_name = f\"inference pool with hash '{env_hash}'\"\n\n logger.info(f\"Waiting for shutdown of {pool_name}...\")\n await pool.close()\n logger.info(f\"Shutdown of {pool_name} complete\")\n\n if env_hash:\n del self._pools[env_hash]\n env_path = self._get_env_path(env_hash)\n shutil.rmtree(env_path)\n", "path": "mlserver/parallel/registry.py"}]}
3,539
161
gh_patches_debug_34450
rasdani/github-patches
git_diff
medtagger__MedTagger-88
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Do not drop tables in functional tests ## Expected Behavior Functional tests should clean the tables in Postgres and HBase. ## Actual Behavior Functional tests drops all the available tables and create them again. This may be (and probably is) time consuming. ## Additional comment Please compare both implementations before merge. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `backend/medtagger/clients/hbase_client.py` Content: ``` 1 """Module responsible for definition of client for HBase database.""" 2 import logging 3 from typing import Iterable, List, Mapping, Tuple, Any 4 5 import happybase 6 from retrying import retry 7 from thriftpy.transport import TTransportException 8 9 from medtagger.config import AppConfiguration 10 11 logger = logging.getLogger(__name__) 12 13 configuration = AppConfiguration() 14 host = configuration.get('hbase', 'host', fallback='localhost') 15 port = configuration.getint('hbase', 'port', fallback=9090) 16 size = configuration.getint('hbase', 'connection_pool_size', fallback=10) 17 try: 18 HBASE_CONNECTION_POOL = happybase.ConnectionPool(size, host=host, port=port) 19 except (TTransportException, BrokenPipeError): 20 logger.warning('Could not connect to HBase. Is it down?') 21 22 23 def is_alive() -> bool: 24 """Return boolean information if HBase is alive or not.""" 25 try: 26 happybase.ConnectionPool(1, host=host, port=port) 27 return True 28 except (TTransportException, BrokenPipeError): 29 return False 30 31 32 class HBaseClient(object): 33 """Client for HBase. 34 35 How to use this client? 36 ----------------------- 37 This is a wrapper for HappyBase Connection. Client uses HappyBase's Connection Pool, so don't worry about closing 38 connection, etc. This client should do everything inside below methods. 39 40 WATCH OUT: Script that migrates HBase schema may not work properly if you want to change column names! 41 In such case please run your migration manually! 42 43 Example: 44 45 >>> hbase_client = HBaseClient() 46 >>> data = hbase_client.get('my_table_name', 'row_key') 47 >>> ... 48 49 """ 50 51 ORIGINAL_SLICES_TABLE = 'original_slices' 52 CONVERTED_SLICES_TABLE = 'converted_slices' 53 LABEL_SELECTION_BINARY_MASK_TABLE = 'label_selection_binary_mask' 54 55 HBASE_SCHEMA = { 56 ORIGINAL_SLICES_TABLE: ['image'], 57 CONVERTED_SLICES_TABLE: ['image'], 58 LABEL_SELECTION_BINARY_MASK_TABLE: ['binary_mask'], 59 } 60 61 def __init__(self) -> None: 62 """Initialize client.""" 63 pass 64 65 @staticmethod 66 @retry(stop_max_attempt_number=3, wait_random_min=200, wait_random_max=1000, 67 retry_on_exception=lambda ex: isinstance(ex, (TTransportException, BrokenPipeError))) 68 def get_all_keys(table_name: str, starts_with: str = None) -> Iterable[str]: 69 """Fetch all keys for given table. 70 71 :param table_name: name of a table 72 :param starts_with: prefix for keys 73 :return: iterator for table keys 74 """ 75 with HBASE_CONNECTION_POOL.connection() as connection: 76 row_prefix = str.encode(starts_with) if starts_with else None 77 table = connection.table(table_name) 78 for key, _ in table.scan(row_prefix=row_prefix, filter=str.encode('KeyOnlyFilter()')): 79 yield key.decode('utf-8') 80 81 @staticmethod 82 @retry(stop_max_attempt_number=3, wait_random_min=200, wait_random_max=1000, 83 retry_on_exception=lambda ex: isinstance(ex, (TTransportException, BrokenPipeError))) 84 def get_all_rows(table_name: str, columns: List, starts_with: str = None) -> Iterable[Tuple[str, Any]]: 85 """Fetch all rows for given table. 86 87 :param table_name: name of a table 88 :param starts_with: prefix for keys 89 :param columns: list of columns to fetch 90 :return: iterator for table keys 91 """ 92 with HBASE_CONNECTION_POOL.connection() as connection: 93 row_prefix = str.encode(starts_with) if starts_with else None 94 table = connection.table(table_name) 95 for key, value in table.scan(row_prefix=row_prefix, columns=columns): 96 yield key.decode('utf-8'), value 97 98 @staticmethod 99 @retry(stop_max_attempt_number=3, wait_random_min=200, wait_random_max=1000, 100 retry_on_exception=lambda ex: isinstance(ex, (TTransportException, BrokenPipeError))) 101 def get(table_name: str, key: str, columns: List[str] = None) -> Mapping: 102 """Fetch a single row from HBase table. 103 104 :param table_name: name of a table 105 :param key: key representing a row 106 :param columns: columns which should be loaded (by default all) 107 :return: mapping returned by HBase 108 """ 109 hbase_key = str.encode(key) 110 with HBASE_CONNECTION_POOL.connection() as connection: 111 table = connection.table(table_name) 112 return table.row(hbase_key, columns=columns) 113 114 @staticmethod 115 @retry(stop_max_attempt_number=3, wait_random_min=200, wait_random_max=1000, 116 retry_on_exception=lambda ex: isinstance(ex, (TTransportException, BrokenPipeError))) 117 def put(table_name: str, key: str, value: Any) -> None: 118 """Add new entry into HBase table. 119 120 :param table_name: name of a table 121 :param key: key under value should be stored 122 :param value: value which should be stored 123 """ 124 hbase_key = str.encode(key) 125 with HBASE_CONNECTION_POOL.connection() as connection: 126 table = connection.table(table_name) 127 table.put(hbase_key, value) 128 129 @staticmethod 130 @retry(stop_max_attempt_number=3, wait_random_min=200, wait_random_max=1000, 131 retry_on_exception=lambda ex: isinstance(ex, (TTransportException, BrokenPipeError))) 132 def check_if_exists(table_name: str, key: str) -> bool: 133 """Scan database and check if given key exists. 134 135 :param table_name: name of a table 136 :param key: HBase key 137 :return: boolean information if such key exists or not 138 """ 139 hbase_key = str.encode(key) 140 with HBASE_CONNECTION_POOL.connection() as connection: 141 table = connection.table(table_name) 142 results = table.scan(row_start=hbase_key, row_stop=hbase_key, 143 filter=str.encode('KeyOnlyFilter() AND FirstKeyOnlyFilter()'), limit=1) 144 return next(results, None) is not None 145 ``` Path: `backend/scripts/utils.py` Content: ``` 1 """Storage for all utility functions.""" 2 from starbase import Connection 3 from medtagger.config import AppConfiguration 4 5 6 def get_connection_to_hbase() -> Connection: 7 """Fetch configuration data and create HBase connection. 8 9 :return: connection to HBase using Starbase library 10 """ 11 configuration = AppConfiguration() 12 host = configuration.get('hbase', 'host', fallback='localhost') 13 port = configuration.getint('hbase', 'rest_port', fallback=8080) 14 return Connection(host=host, port=port) 15 16 17 def user_agrees(prompt_message: str) -> bool: 18 """Ask user a question and ask him/her for True/False answer (default answer is False). 19 20 :param prompt_message: message that will be prompted to user 21 :return: boolean information if user agrees or not 22 """ 23 answer = input(prompt_message + ' [y/N] ') 24 return answer.lower() in ['y', 'yes', 't', 'true'] 25 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/backend/medtagger/clients/hbase_client.py b/backend/medtagger/clients/hbase_client.py --- a/backend/medtagger/clients/hbase_client.py +++ b/backend/medtagger/clients/hbase_client.py @@ -111,6 +111,21 @@ table = connection.table(table_name) return table.row(hbase_key, columns=columns) + @staticmethod + @retry(stop_max_attempt_number=3, wait_random_min=200, wait_random_max=1000, + retry_on_exception=lambda ex: isinstance(ex, (TTransportException, BrokenPipeError))) + def delete(table_name: str, key: str, columns: List[str] = None) -> None: + """Delete a single row (or values from colums in given row) in HBase table. + + :param table_name: name of a table + :param key: key representing a row + :param columns: columns which should be cleared + """ + hbase_key = str.encode(key) + with HBASE_CONNECTION_POOL.connection() as connection: + table = connection.table(table_name) + table.delete(hbase_key, columns=columns) + @staticmethod @retry(stop_max_attempt_number=3, wait_random_min=200, wait_random_max=1000, retry_on_exception=lambda ex: isinstance(ex, (TTransportException, BrokenPipeError))) diff --git a/backend/scripts/utils.py b/backend/scripts/utils.py --- a/backend/scripts/utils.py +++ b/backend/scripts/utils.py @@ -1,8 +1,13 @@ """Storage for all utility functions.""" from starbase import Connection +from retrying import retry +import requests + from medtagger.config import AppConfiguration +@retry(stop_max_attempt_number=5, wait_random_min=200, wait_random_max=1000, + retry_on_exception=lambda ex: isinstance(ex, requests.ConnectionError)) def get_connection_to_hbase() -> Connection: """Fetch configuration data and create HBase connection. @@ -11,7 +16,9 @@ configuration = AppConfiguration() host = configuration.get('hbase', 'host', fallback='localhost') port = configuration.getint('hbase', 'rest_port', fallback=8080) - return Connection(host=host, port=port) + connection = Connection(host=host, port=port) + connection.tables() # Test if the connection was properly set up + return connection def user_agrees(prompt_message: str) -> bool:
{"golden_diff": "diff --git a/backend/medtagger/clients/hbase_client.py b/backend/medtagger/clients/hbase_client.py\n--- a/backend/medtagger/clients/hbase_client.py\n+++ b/backend/medtagger/clients/hbase_client.py\n@@ -111,6 +111,21 @@\n table = connection.table(table_name)\n return table.row(hbase_key, columns=columns)\n \n+ @staticmethod\n+ @retry(stop_max_attempt_number=3, wait_random_min=200, wait_random_max=1000,\n+ retry_on_exception=lambda ex: isinstance(ex, (TTransportException, BrokenPipeError)))\n+ def delete(table_name: str, key: str, columns: List[str] = None) -> None:\n+ \"\"\"Delete a single row (or values from colums in given row) in HBase table.\n+\n+ :param table_name: name of a table\n+ :param key: key representing a row\n+ :param columns: columns which should be cleared\n+ \"\"\"\n+ hbase_key = str.encode(key)\n+ with HBASE_CONNECTION_POOL.connection() as connection:\n+ table = connection.table(table_name)\n+ table.delete(hbase_key, columns=columns)\n+\n @staticmethod\n @retry(stop_max_attempt_number=3, wait_random_min=200, wait_random_max=1000,\n retry_on_exception=lambda ex: isinstance(ex, (TTransportException, BrokenPipeError)))\ndiff --git a/backend/scripts/utils.py b/backend/scripts/utils.py\n--- a/backend/scripts/utils.py\n+++ b/backend/scripts/utils.py\n@@ -1,8 +1,13 @@\n \"\"\"Storage for all utility functions.\"\"\"\n from starbase import Connection\n+from retrying import retry\n+import requests\n+\n from medtagger.config import AppConfiguration\n \n \n+@retry(stop_max_attempt_number=5, wait_random_min=200, wait_random_max=1000,\n+ retry_on_exception=lambda ex: isinstance(ex, requests.ConnectionError))\n def get_connection_to_hbase() -> Connection:\n \"\"\"Fetch configuration data and create HBase connection.\n \n@@ -11,7 +16,9 @@\n configuration = AppConfiguration()\n host = configuration.get('hbase', 'host', fallback='localhost')\n port = configuration.getint('hbase', 'rest_port', fallback=8080)\n- return Connection(host=host, port=port)\n+ connection = Connection(host=host, port=port)\n+ connection.tables() # Test if the connection was properly set up\n+ return connection\n \n \n def user_agrees(prompt_message: str) -> bool:\n", "issue": "Do not drop tables in functional tests\n## Expected Behavior\r\n\r\nFunctional tests should clean the tables in Postgres and HBase.\r\n\r\n## Actual Behavior\r\n\r\nFunctional tests drops all the available tables and create them again. This may be (and probably is) time consuming.\r\n\r\n## Additional comment\r\n\r\nPlease compare both implementations before merge.\n", "before_files": [{"content": "\"\"\"Module responsible for definition of client for HBase database.\"\"\"\nimport logging\nfrom typing import Iterable, List, Mapping, Tuple, Any\n\nimport happybase\nfrom retrying import retry\nfrom thriftpy.transport import TTransportException\n\nfrom medtagger.config import AppConfiguration\n\nlogger = logging.getLogger(__name__)\n\nconfiguration = AppConfiguration()\nhost = configuration.get('hbase', 'host', fallback='localhost')\nport = configuration.getint('hbase', 'port', fallback=9090)\nsize = configuration.getint('hbase', 'connection_pool_size', fallback=10)\ntry:\n HBASE_CONNECTION_POOL = happybase.ConnectionPool(size, host=host, port=port)\nexcept (TTransportException, BrokenPipeError):\n logger.warning('Could not connect to HBase. Is it down?')\n\n\ndef is_alive() -> bool:\n \"\"\"Return boolean information if HBase is alive or not.\"\"\"\n try:\n happybase.ConnectionPool(1, host=host, port=port)\n return True\n except (TTransportException, BrokenPipeError):\n return False\n\n\nclass HBaseClient(object):\n \"\"\"Client for HBase.\n\n How to use this client?\n -----------------------\n This is a wrapper for HappyBase Connection. Client uses HappyBase's Connection Pool, so don't worry about closing\n connection, etc. This client should do everything inside below methods.\n\n WATCH OUT: Script that migrates HBase schema may not work properly if you want to change column names!\n In such case please run your migration manually!\n\n Example:\n\n >>> hbase_client = HBaseClient()\n >>> data = hbase_client.get('my_table_name', 'row_key')\n >>> ...\n\n \"\"\"\n\n ORIGINAL_SLICES_TABLE = 'original_slices'\n CONVERTED_SLICES_TABLE = 'converted_slices'\n LABEL_SELECTION_BINARY_MASK_TABLE = 'label_selection_binary_mask'\n\n HBASE_SCHEMA = {\n ORIGINAL_SLICES_TABLE: ['image'],\n CONVERTED_SLICES_TABLE: ['image'],\n LABEL_SELECTION_BINARY_MASK_TABLE: ['binary_mask'],\n }\n\n def __init__(self) -> None:\n \"\"\"Initialize client.\"\"\"\n pass\n\n @staticmethod\n @retry(stop_max_attempt_number=3, wait_random_min=200, wait_random_max=1000,\n retry_on_exception=lambda ex: isinstance(ex, (TTransportException, BrokenPipeError)))\n def get_all_keys(table_name: str, starts_with: str = None) -> Iterable[str]:\n \"\"\"Fetch all keys for given table.\n\n :param table_name: name of a table\n :param starts_with: prefix for keys\n :return: iterator for table keys\n \"\"\"\n with HBASE_CONNECTION_POOL.connection() as connection:\n row_prefix = str.encode(starts_with) if starts_with else None\n table = connection.table(table_name)\n for key, _ in table.scan(row_prefix=row_prefix, filter=str.encode('KeyOnlyFilter()')):\n yield key.decode('utf-8')\n\n @staticmethod\n @retry(stop_max_attempt_number=3, wait_random_min=200, wait_random_max=1000,\n retry_on_exception=lambda ex: isinstance(ex, (TTransportException, BrokenPipeError)))\n def get_all_rows(table_name: str, columns: List, starts_with: str = None) -> Iterable[Tuple[str, Any]]:\n \"\"\"Fetch all rows for given table.\n\n :param table_name: name of a table\n :param starts_with: prefix for keys\n :param columns: list of columns to fetch\n :return: iterator for table keys\n \"\"\"\n with HBASE_CONNECTION_POOL.connection() as connection:\n row_prefix = str.encode(starts_with) if starts_with else None\n table = connection.table(table_name)\n for key, value in table.scan(row_prefix=row_prefix, columns=columns):\n yield key.decode('utf-8'), value\n\n @staticmethod\n @retry(stop_max_attempt_number=3, wait_random_min=200, wait_random_max=1000,\n retry_on_exception=lambda ex: isinstance(ex, (TTransportException, BrokenPipeError)))\n def get(table_name: str, key: str, columns: List[str] = None) -> Mapping:\n \"\"\"Fetch a single row from HBase table.\n\n :param table_name: name of a table\n :param key: key representing a row\n :param columns: columns which should be loaded (by default all)\n :return: mapping returned by HBase\n \"\"\"\n hbase_key = str.encode(key)\n with HBASE_CONNECTION_POOL.connection() as connection:\n table = connection.table(table_name)\n return table.row(hbase_key, columns=columns)\n\n @staticmethod\n @retry(stop_max_attempt_number=3, wait_random_min=200, wait_random_max=1000,\n retry_on_exception=lambda ex: isinstance(ex, (TTransportException, BrokenPipeError)))\n def put(table_name: str, key: str, value: Any) -> None:\n \"\"\"Add new entry into HBase table.\n\n :param table_name: name of a table\n :param key: key under value should be stored\n :param value: value which should be stored\n \"\"\"\n hbase_key = str.encode(key)\n with HBASE_CONNECTION_POOL.connection() as connection:\n table = connection.table(table_name)\n table.put(hbase_key, value)\n\n @staticmethod\n @retry(stop_max_attempt_number=3, wait_random_min=200, wait_random_max=1000,\n retry_on_exception=lambda ex: isinstance(ex, (TTransportException, BrokenPipeError)))\n def check_if_exists(table_name: str, key: str) -> bool:\n \"\"\"Scan database and check if given key exists.\n\n :param table_name: name of a table\n :param key: HBase key\n :return: boolean information if such key exists or not\n \"\"\"\n hbase_key = str.encode(key)\n with HBASE_CONNECTION_POOL.connection() as connection:\n table = connection.table(table_name)\n results = table.scan(row_start=hbase_key, row_stop=hbase_key,\n filter=str.encode('KeyOnlyFilter() AND FirstKeyOnlyFilter()'), limit=1)\n return next(results, None) is not None\n", "path": "backend/medtagger/clients/hbase_client.py"}, {"content": "\"\"\"Storage for all utility functions.\"\"\"\nfrom starbase import Connection\nfrom medtagger.config import AppConfiguration\n\n\ndef get_connection_to_hbase() -> Connection:\n \"\"\"Fetch configuration data and create HBase connection.\n\n :return: connection to HBase using Starbase library\n \"\"\"\n configuration = AppConfiguration()\n host = configuration.get('hbase', 'host', fallback='localhost')\n port = configuration.getint('hbase', 'rest_port', fallback=8080)\n return Connection(host=host, port=port)\n\n\ndef user_agrees(prompt_message: str) -> bool:\n \"\"\"Ask user a question and ask him/her for True/False answer (default answer is False).\n\n :param prompt_message: message that will be prompted to user\n :return: boolean information if user agrees or not\n \"\"\"\n answer = input(prompt_message + ' [y/N] ')\n return answer.lower() in ['y', 'yes', 't', 'true']\n", "path": "backend/scripts/utils.py"}], "after_files": [{"content": "\"\"\"Module responsible for definition of client for HBase database.\"\"\"\nimport logging\nfrom typing import Iterable, List, Mapping, Tuple, Any\n\nimport happybase\nfrom retrying import retry\nfrom thriftpy.transport import TTransportException\n\nfrom medtagger.config import AppConfiguration\n\nlogger = logging.getLogger(__name__)\n\nconfiguration = AppConfiguration()\nhost = configuration.get('hbase', 'host', fallback='localhost')\nport = configuration.getint('hbase', 'port', fallback=9090)\nsize = configuration.getint('hbase', 'connection_pool_size', fallback=10)\ntry:\n HBASE_CONNECTION_POOL = happybase.ConnectionPool(size, host=host, port=port)\nexcept (TTransportException, BrokenPipeError):\n logger.warning('Could not connect to HBase. Is it down?')\n\n\ndef is_alive() -> bool:\n \"\"\"Return boolean information if HBase is alive or not.\"\"\"\n try:\n happybase.ConnectionPool(1, host=host, port=port)\n return True\n except (TTransportException, BrokenPipeError):\n return False\n\n\nclass HBaseClient(object):\n \"\"\"Client for HBase.\n\n How to use this client?\n -----------------------\n This is a wrapper for HappyBase Connection. Client uses HappyBase's Connection Pool, so don't worry about closing\n connection, etc. This client should do everything inside below methods.\n\n WATCH OUT: Script that migrates HBase schema may not work properly if you want to change column names!\n In such case please run your migration manually!\n\n Example:\n\n >>> hbase_client = HBaseClient()\n >>> data = hbase_client.get('my_table_name', 'row_key')\n >>> ...\n\n \"\"\"\n\n ORIGINAL_SLICES_TABLE = 'original_slices'\n CONVERTED_SLICES_TABLE = 'converted_slices'\n LABEL_SELECTION_BINARY_MASK_TABLE = 'label_selection_binary_mask'\n\n HBASE_SCHEMA = {\n ORIGINAL_SLICES_TABLE: ['image'],\n CONVERTED_SLICES_TABLE: ['image'],\n LABEL_SELECTION_BINARY_MASK_TABLE: ['binary_mask'],\n }\n\n def __init__(self) -> None:\n \"\"\"Initialize client.\"\"\"\n pass\n\n @staticmethod\n @retry(stop_max_attempt_number=3, wait_random_min=200, wait_random_max=1000,\n retry_on_exception=lambda ex: isinstance(ex, (TTransportException, BrokenPipeError)))\n def get_all_keys(table_name: str, starts_with: str = None) -> Iterable[str]:\n \"\"\"Fetch all keys for given table.\n\n :param table_name: name of a table\n :param starts_with: prefix for keys\n :return: iterator for table keys\n \"\"\"\n with HBASE_CONNECTION_POOL.connection() as connection:\n row_prefix = str.encode(starts_with) if starts_with else None\n table = connection.table(table_name)\n for key, _ in table.scan(row_prefix=row_prefix, filter=str.encode('KeyOnlyFilter()')):\n yield key.decode('utf-8')\n\n @staticmethod\n @retry(stop_max_attempt_number=3, wait_random_min=200, wait_random_max=1000,\n retry_on_exception=lambda ex: isinstance(ex, (TTransportException, BrokenPipeError)))\n def get_all_rows(table_name: str, columns: List, starts_with: str = None) -> Iterable[Tuple[str, Any]]:\n \"\"\"Fetch all rows for given table.\n\n :param table_name: name of a table\n :param starts_with: prefix for keys\n :param columns: list of columns to fetch\n :return: iterator for table keys\n \"\"\"\n with HBASE_CONNECTION_POOL.connection() as connection:\n row_prefix = str.encode(starts_with) if starts_with else None\n table = connection.table(table_name)\n for key, value in table.scan(row_prefix=row_prefix, columns=columns):\n yield key.decode('utf-8'), value\n\n @staticmethod\n @retry(stop_max_attempt_number=3, wait_random_min=200, wait_random_max=1000,\n retry_on_exception=lambda ex: isinstance(ex, (TTransportException, BrokenPipeError)))\n def get(table_name: str, key: str, columns: List[str] = None) -> Mapping:\n \"\"\"Fetch a single row from HBase table.\n\n :param table_name: name of a table\n :param key: key representing a row\n :param columns: columns which should be loaded (by default all)\n :return: mapping returned by HBase\n \"\"\"\n hbase_key = str.encode(key)\n with HBASE_CONNECTION_POOL.connection() as connection:\n table = connection.table(table_name)\n return table.row(hbase_key, columns=columns)\n\n @staticmethod\n @retry(stop_max_attempt_number=3, wait_random_min=200, wait_random_max=1000,\n retry_on_exception=lambda ex: isinstance(ex, (TTransportException, BrokenPipeError)))\n def delete(table_name: str, key: str, columns: List[str] = None) -> None:\n \"\"\"Delete a single row (or values from colums in given row) in HBase table.\n\n :param table_name: name of a table\n :param key: key representing a row\n :param columns: columns which should be cleared\n \"\"\"\n hbase_key = str.encode(key)\n with HBASE_CONNECTION_POOL.connection() as connection:\n table = connection.table(table_name)\n table.delete(hbase_key, columns=columns)\n\n @staticmethod\n @retry(stop_max_attempt_number=3, wait_random_min=200, wait_random_max=1000,\n retry_on_exception=lambda ex: isinstance(ex, (TTransportException, BrokenPipeError)))\n def put(table_name: str, key: str, value: Any) -> None:\n \"\"\"Add new entry into HBase table.\n\n :param table_name: name of a table\n :param key: key under value should be stored\n :param value: value which should be stored\n \"\"\"\n hbase_key = str.encode(key)\n with HBASE_CONNECTION_POOL.connection() as connection:\n table = connection.table(table_name)\n table.put(hbase_key, value)\n\n @staticmethod\n @retry(stop_max_attempt_number=3, wait_random_min=200, wait_random_max=1000,\n retry_on_exception=lambda ex: isinstance(ex, (TTransportException, BrokenPipeError)))\n def check_if_exists(table_name: str, key: str) -> bool:\n \"\"\"Scan database and check if given key exists.\n\n :param table_name: name of a table\n :param key: HBase key\n :return: boolean information if such key exists or not\n \"\"\"\n hbase_key = str.encode(key)\n with HBASE_CONNECTION_POOL.connection() as connection:\n table = connection.table(table_name)\n results = table.scan(row_start=hbase_key, row_stop=hbase_key,\n filter=str.encode('KeyOnlyFilter() AND FirstKeyOnlyFilter()'), limit=1)\n return next(results, None) is not None\n", "path": "backend/medtagger/clients/hbase_client.py"}, {"content": "\"\"\"Storage for all utility functions.\"\"\"\nfrom starbase import Connection\nfrom retrying import retry\nimport requests\n\nfrom medtagger.config import AppConfiguration\n\n\n@retry(stop_max_attempt_number=5, wait_random_min=200, wait_random_max=1000,\n retry_on_exception=lambda ex: isinstance(ex, requests.ConnectionError))\ndef get_connection_to_hbase() -> Connection:\n \"\"\"Fetch configuration data and create HBase connection.\n\n :return: connection to HBase using Starbase library\n \"\"\"\n configuration = AppConfiguration()\n host = configuration.get('hbase', 'host', fallback='localhost')\n port = configuration.getint('hbase', 'rest_port', fallback=8080)\n connection = Connection(host=host, port=port)\n connection.tables() # Test if the connection was properly set up\n return connection\n\n\ndef user_agrees(prompt_message: str) -> bool:\n \"\"\"Ask user a question and ask him/her for True/False answer (default answer is False).\n\n :param prompt_message: message that will be prompted to user\n :return: boolean information if user agrees or not\n \"\"\"\n answer = input(prompt_message + ' [y/N] ')\n return answer.lower() in ['y', 'yes', 't', 'true']\n", "path": "backend/scripts/utils.py"}]}
2,281
576
gh_patches_debug_41631
rasdani/github-patches
git_diff
acl-org__acl-anthology-3045
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Compact bibfile Overleaf has a 50 MB file size limit, and `anthology.bib` is now larger than this. We should create a compact BibTeX export using string substitution [as suggested here](https://twitter.com/daniel_hers/status/1744434842895294496). I'm not sure if this should just replace the current Anthology bib file, or become a new export, say `anthology-compact.bib`: * The advantage of encompactifying the current file is it would work for everyone without having to change anything. * The disadvantage is it complicates cutting-and-pasting. * However, we already have [https://aclanthology.org/anthology+abstracts.bib.gz](https://aclanthology.org/anthology+abstracts.bib.gz). I'm not sure how often people cut-and-paste individual entries from the complete file, anyway; it seems that it's main use is in Overleaf. I'm therefore include to simply replace `anthology.bib`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bin/create_bibtex.py` Content: ``` 1 #!/usr/bin/env python3 2 # -*- coding: utf-8 -*- 3 # 4 # Copyright 2019 Marcel Bollmann <[email protected]> 5 # 6 # Licensed under the Apache License, Version 2.0 (the "License"); 7 # you may not use this file except in compliance with the License. 8 # You may obtain a copy of the License at 9 # 10 # http://www.apache.org/licenses/LICENSE-2.0 11 # 12 # Unless required by applicable law or agreed to in writing, software 13 # distributed under the License is distributed on an "AS IS" BASIS, 14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 # See the License for the specific language governing permissions and 16 # limitations under the License. 17 18 """Usage: create_bibtex.py [--importdir=DIR] [--exportdir=DIR] [-c] [--debug] 19 20 Creates .bib files for all papers in the Hugo directory. 21 22 Options: 23 --importdir=DIR Directory to import XML files from. [default: {scriptdir}/../data/] 24 --exportdir=DIR Directory to write exported files to. [default: {scriptdir}/../build/data-export/] 25 --debug Output debug-level log messages. 26 -c, --clean Delete existing files in target directory before generation. 27 -h, --help Display this helpful text. 28 """ 29 30 from docopt import docopt 31 from tqdm import tqdm 32 import gzip 33 import logging as log 34 import os 35 36 from anthology import Anthology 37 from anthology.utils import SeverityTracker, deconstruct_anthology_id, infer_year 38 from create_hugo_pages import check_directory 39 40 41 def volume_sorter(volume_tuple): 42 """ 43 Extracts the year so that we can sort by the year and then 44 the collection ID. 45 """ 46 volume_id = volume_tuple[0] 47 collection_id, year, _ = deconstruct_anthology_id(volume_id) 48 year = infer_year(collection_id) 49 return year, volume_id 50 51 52 def create_bibtex(anthology, trgdir, limit=0, clean=False) -> None: 53 """Creates .bib files for all papers. 54 55 :param anthology: The Anthology object. 56 :param trgdir: The target directory to write to 57 :param limit: If nonzero, only generate {limit} entries per volume 58 :param clean: Clean the directory first 59 """ 60 if not check_directory("{}/papers".format(trgdir), clean=clean): 61 return 62 if not check_directory("{}/volumes".format(trgdir), clean=clean): 63 return 64 65 log.info("Creating BibTeX files for all papers...") 66 with open( 67 "{}/anthology.bib".format(trgdir), "wt", encoding="utf-8" 68 ) as file_anthology_raw, gzip.open( 69 "{}/anthology.bib.gz".format(trgdir), "wt", encoding="utf-8" 70 ) as file_anthology, gzip.open( 71 "{}/anthology+abstracts.bib.gz".format(trgdir), "wt", encoding="utf-8" 72 ) as file_anthology_with_abstracts: 73 for volume_id, volume in tqdm( 74 sorted(anthology.volumes.items(), key=volume_sorter, reverse=True) 75 ): 76 volume_dir = trgdir 77 if not os.path.exists(volume_dir): 78 os.makedirs(volume_dir) 79 with open("{}/volumes/{}.bib".format(trgdir, volume_id), "w") as file_volume: 80 for i, paper in enumerate(volume, 1): 81 if limit and i > limit: 82 break 83 84 with open( 85 "{}/{}.bib".format(volume_dir, paper.full_id), "w" 86 ) as file_paper: 87 contents = paper.as_bibtex() 88 print(contents, file=file_paper) 89 print(contents, file=file_anthology_with_abstracts) 90 91 concise_contents = paper.as_bibtex(concise=True) 92 print(concise_contents, file=file_volume) 93 print(concise_contents, file=file_anthology) 94 print(concise_contents, file=file_anthology_raw) 95 96 97 if __name__ == "__main__": 98 args = docopt(__doc__) 99 scriptdir = os.path.dirname(os.path.abspath(__file__)) 100 if "{scriptdir}" in args["--importdir"]: 101 args["--importdir"] = os.path.abspath( 102 args["--importdir"].format(scriptdir=scriptdir) 103 ) 104 if "{scriptdir}" in args["--exportdir"]: 105 args["--exportdir"] = os.path.abspath( 106 args["--exportdir"].format(scriptdir=scriptdir) 107 ) 108 109 log_level = log.DEBUG if args["--debug"] else log.INFO 110 log.basicConfig(format="%(levelname)-8s %(message)s", level=log_level) 111 tracker = SeverityTracker() 112 log.getLogger().addHandler(tracker) 113 114 # If NOBIB is set, generate only three bibs per volume 115 limit = 0 if os.environ.get("NOBIB", "false") == "false" else 3 116 log.info(f"NOBIB=true, generating only {limit} BibTEX files per volume") 117 118 anthology = Anthology(importdir=args["--importdir"], fast_load=True) 119 create_bibtex(anthology, args["--exportdir"], limit=limit, clean=args["--clean"]) 120 121 if tracker.highest >= log.ERROR: 122 exit(1) 123 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bin/create_bibtex.py b/bin/create_bibtex.py --- a/bin/create_bibtex.py +++ b/bin/create_bibtex.py @@ -27,6 +27,7 @@ -h, --help Display this helpful text. """ +import re from docopt import docopt from tqdm import tqdm import gzip @@ -70,9 +71,20 @@ ) as file_anthology, gzip.open( "{}/anthology+abstracts.bib.gz".format(trgdir), "wt", encoding="utf-8" ) as file_anthology_with_abstracts: + # Add some shortcuts to the consolidated bib file + print( + "@string{acl = {Association for Computational Linguistics}}", + file=file_anthology_raw, + ) + print("@string{anth = {https://aclanthology.org/}}", file=file_anthology_raw) + print(file=file_anthology_raw) + for volume_id, volume in tqdm( sorted(anthology.volumes.items(), key=volume_sorter, reverse=True) ): + # reset this each time + abbrev = None + volume_dir = trgdir if not os.path.exists(volume_dir): os.makedirs(volume_dir) @@ -91,6 +103,53 @@ concise_contents = paper.as_bibtex(concise=True) print(concise_contents, file=file_volume) print(concise_contents, file=file_anthology) + + # Space saver (https://github.com/acl-org/acl-anthology/issues/3016) + # Replace verbose text with abbreviations to get the file under 50 MB for Overleaf + + concise_contents = concise_contents.replace( + 'publisher = "Association for Computational Linguistics",', + "publisher = acl,", + ) + concise_contents = re.sub( + r'url = "https://aclanthology.org/(.*)"', + r"url = anth # {\1}", + concise_contents, + ) + + # Abbreviate the booktitle by extracting it and printing it before + # the first entry in each volume + if concise_contents.startswith("@proceedings"): + # Grab the title string and create the alias + abbrev = f"{volume.get_venues()[0].upper()}:{infer_year(volume.collection_id)}:{volume.volume_id}" + try: + booktitle = re.search( + r" title = \"(.*)\",", concise_contents + ).group(1) + print( + f"@string{{{abbrev} = {{{booktitle}}}}}", + file=file_anthology_raw, + ) + except AttributeError: + import sys + + print( + f"Could not find title for {volume_id}", + file=sys.stderr, + ) + abbrev = None + + if abbrev is not None and "booktitle" in concise_contents: + # substitute the alias for the booktitle + concise_contents = re.sub( + r" booktitle = (\".*\"),", + f" booktitle = {abbrev},", + concise_contents, + ) + + # Remove newlines, indentations, and double-spaces around author separators + concise_contents = re.sub(r"\s+", " ", concise_contents) + print(concise_contents, file=file_anthology_raw)
{"golden_diff": "diff --git a/bin/create_bibtex.py b/bin/create_bibtex.py\n--- a/bin/create_bibtex.py\n+++ b/bin/create_bibtex.py\n@@ -27,6 +27,7 @@\n -h, --help Display this helpful text.\n \"\"\"\n \n+import re\n from docopt import docopt\n from tqdm import tqdm\n import gzip\n@@ -70,9 +71,20 @@\n ) as file_anthology, gzip.open(\n \"{}/anthology+abstracts.bib.gz\".format(trgdir), \"wt\", encoding=\"utf-8\"\n ) as file_anthology_with_abstracts:\n+ # Add some shortcuts to the consolidated bib file\n+ print(\n+ \"@string{acl = {Association for Computational Linguistics}}\",\n+ file=file_anthology_raw,\n+ )\n+ print(\"@string{anth = {https://aclanthology.org/}}\", file=file_anthology_raw)\n+ print(file=file_anthology_raw)\n+\n for volume_id, volume in tqdm(\n sorted(anthology.volumes.items(), key=volume_sorter, reverse=True)\n ):\n+ # reset this each time\n+ abbrev = None\n+\n volume_dir = trgdir\n if not os.path.exists(volume_dir):\n os.makedirs(volume_dir)\n@@ -91,6 +103,53 @@\n concise_contents = paper.as_bibtex(concise=True)\n print(concise_contents, file=file_volume)\n print(concise_contents, file=file_anthology)\n+\n+ # Space saver (https://github.com/acl-org/acl-anthology/issues/3016)\n+ # Replace verbose text with abbreviations to get the file under 50 MB for Overleaf\n+\n+ concise_contents = concise_contents.replace(\n+ 'publisher = \"Association for Computational Linguistics\",',\n+ \"publisher = acl,\",\n+ )\n+ concise_contents = re.sub(\n+ r'url = \"https://aclanthology.org/(.*)\"',\n+ r\"url = anth # {\\1}\",\n+ concise_contents,\n+ )\n+\n+ # Abbreviate the booktitle by extracting it and printing it before\n+ # the first entry in each volume\n+ if concise_contents.startswith(\"@proceedings\"):\n+ # Grab the title string and create the alias\n+ abbrev = f\"{volume.get_venues()[0].upper()}:{infer_year(volume.collection_id)}:{volume.volume_id}\"\n+ try:\n+ booktitle = re.search(\n+ r\" title = \\\"(.*)\\\",\", concise_contents\n+ ).group(1)\n+ print(\n+ f\"@string{{{abbrev} = {{{booktitle}}}}}\",\n+ file=file_anthology_raw,\n+ )\n+ except AttributeError:\n+ import sys\n+\n+ print(\n+ f\"Could not find title for {volume_id}\",\n+ file=sys.stderr,\n+ )\n+ abbrev = None\n+\n+ if abbrev is not None and \"booktitle\" in concise_contents:\n+ # substitute the alias for the booktitle\n+ concise_contents = re.sub(\n+ r\" booktitle = (\\\".*\\\"),\",\n+ f\" booktitle = {abbrev},\",\n+ concise_contents,\n+ )\n+\n+ # Remove newlines, indentations, and double-spaces around author separators\n+ concise_contents = re.sub(r\"\\s+\", \" \", concise_contents)\n+\n print(concise_contents, file=file_anthology_raw)\n", "issue": "Compact bibfile\nOverleaf has a 50 MB file size limit, and `anthology.bib` is now larger than this. We should create a compact BibTeX export using string substitution [as suggested here](https://twitter.com/daniel_hers/status/1744434842895294496). I'm not sure if this should just replace the current Anthology bib file, or become a new export, say `anthology-compact.bib`:\r\n\r\n* The advantage of encompactifying the current file is it would work for everyone without having to change anything.\r\n* The disadvantage is it complicates cutting-and-pasting.\r\n* However, we already have [https://aclanthology.org/anthology+abstracts.bib.gz](https://aclanthology.org/anthology+abstracts.bib.gz). I'm not sure how often people cut-and-paste individual entries from the complete file, anyway; it seems that it's main use is in Overleaf.\r\n\r\nI'm therefore include to simply replace `anthology.bib`.\n", "before_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Marcel Bollmann <[email protected]>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Usage: create_bibtex.py [--importdir=DIR] [--exportdir=DIR] [-c] [--debug]\n\nCreates .bib files for all papers in the Hugo directory.\n\nOptions:\n --importdir=DIR Directory to import XML files from. [default: {scriptdir}/../data/]\n --exportdir=DIR Directory to write exported files to. [default: {scriptdir}/../build/data-export/]\n --debug Output debug-level log messages.\n -c, --clean Delete existing files in target directory before generation.\n -h, --help Display this helpful text.\n\"\"\"\n\nfrom docopt import docopt\nfrom tqdm import tqdm\nimport gzip\nimport logging as log\nimport os\n\nfrom anthology import Anthology\nfrom anthology.utils import SeverityTracker, deconstruct_anthology_id, infer_year\nfrom create_hugo_pages import check_directory\n\n\ndef volume_sorter(volume_tuple):\n \"\"\"\n Extracts the year so that we can sort by the year and then\n the collection ID.\n \"\"\"\n volume_id = volume_tuple[0]\n collection_id, year, _ = deconstruct_anthology_id(volume_id)\n year = infer_year(collection_id)\n return year, volume_id\n\n\ndef create_bibtex(anthology, trgdir, limit=0, clean=False) -> None:\n \"\"\"Creates .bib files for all papers.\n\n :param anthology: The Anthology object.\n :param trgdir: The target directory to write to\n :param limit: If nonzero, only generate {limit} entries per volume\n :param clean: Clean the directory first\n \"\"\"\n if not check_directory(\"{}/papers\".format(trgdir), clean=clean):\n return\n if not check_directory(\"{}/volumes\".format(trgdir), clean=clean):\n return\n\n log.info(\"Creating BibTeX files for all papers...\")\n with open(\n \"{}/anthology.bib\".format(trgdir), \"wt\", encoding=\"utf-8\"\n ) as file_anthology_raw, gzip.open(\n \"{}/anthology.bib.gz\".format(trgdir), \"wt\", encoding=\"utf-8\"\n ) as file_anthology, gzip.open(\n \"{}/anthology+abstracts.bib.gz\".format(trgdir), \"wt\", encoding=\"utf-8\"\n ) as file_anthology_with_abstracts:\n for volume_id, volume in tqdm(\n sorted(anthology.volumes.items(), key=volume_sorter, reverse=True)\n ):\n volume_dir = trgdir\n if not os.path.exists(volume_dir):\n os.makedirs(volume_dir)\n with open(\"{}/volumes/{}.bib\".format(trgdir, volume_id), \"w\") as file_volume:\n for i, paper in enumerate(volume, 1):\n if limit and i > limit:\n break\n\n with open(\n \"{}/{}.bib\".format(volume_dir, paper.full_id), \"w\"\n ) as file_paper:\n contents = paper.as_bibtex()\n print(contents, file=file_paper)\n print(contents, file=file_anthology_with_abstracts)\n\n concise_contents = paper.as_bibtex(concise=True)\n print(concise_contents, file=file_volume)\n print(concise_contents, file=file_anthology)\n print(concise_contents, file=file_anthology_raw)\n\n\nif __name__ == \"__main__\":\n args = docopt(__doc__)\n scriptdir = os.path.dirname(os.path.abspath(__file__))\n if \"{scriptdir}\" in args[\"--importdir\"]:\n args[\"--importdir\"] = os.path.abspath(\n args[\"--importdir\"].format(scriptdir=scriptdir)\n )\n if \"{scriptdir}\" in args[\"--exportdir\"]:\n args[\"--exportdir\"] = os.path.abspath(\n args[\"--exportdir\"].format(scriptdir=scriptdir)\n )\n\n log_level = log.DEBUG if args[\"--debug\"] else log.INFO\n log.basicConfig(format=\"%(levelname)-8s %(message)s\", level=log_level)\n tracker = SeverityTracker()\n log.getLogger().addHandler(tracker)\n\n # If NOBIB is set, generate only three bibs per volume\n limit = 0 if os.environ.get(\"NOBIB\", \"false\") == \"false\" else 3\n log.info(f\"NOBIB=true, generating only {limit} BibTEX files per volume\")\n\n anthology = Anthology(importdir=args[\"--importdir\"], fast_load=True)\n create_bibtex(anthology, args[\"--exportdir\"], limit=limit, clean=args[\"--clean\"])\n\n if tracker.highest >= log.ERROR:\n exit(1)\n", "path": "bin/create_bibtex.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Marcel Bollmann <[email protected]>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Usage: create_bibtex.py [--importdir=DIR] [--exportdir=DIR] [-c] [--debug]\n\nCreates .bib files for all papers in the Hugo directory.\n\nOptions:\n --importdir=DIR Directory to import XML files from. [default: {scriptdir}/../data/]\n --exportdir=DIR Directory to write exported files to. [default: {scriptdir}/../build/data-export/]\n --debug Output debug-level log messages.\n -c, --clean Delete existing files in target directory before generation.\n -h, --help Display this helpful text.\n\"\"\"\n\nimport re\nfrom docopt import docopt\nfrom tqdm import tqdm\nimport gzip\nimport logging as log\nimport os\n\nfrom anthology import Anthology\nfrom anthology.utils import SeverityTracker, deconstruct_anthology_id, infer_year\nfrom create_hugo_pages import check_directory\n\n\ndef volume_sorter(volume_tuple):\n \"\"\"\n Extracts the year so that we can sort by the year and then\n the collection ID.\n \"\"\"\n volume_id = volume_tuple[0]\n collection_id, year, _ = deconstruct_anthology_id(volume_id)\n year = infer_year(collection_id)\n return year, volume_id\n\n\ndef create_bibtex(anthology, trgdir, limit=0, clean=False) -> None:\n \"\"\"Creates .bib files for all papers.\n\n :param anthology: The Anthology object.\n :param trgdir: The target directory to write to\n :param limit: If nonzero, only generate {limit} entries per volume\n :param clean: Clean the directory first\n \"\"\"\n if not check_directory(\"{}/papers\".format(trgdir), clean=clean):\n return\n if not check_directory(\"{}/volumes\".format(trgdir), clean=clean):\n return\n\n log.info(\"Creating BibTeX files for all papers...\")\n with open(\n \"{}/anthology.bib\".format(trgdir), \"wt\", encoding=\"utf-8\"\n ) as file_anthology_raw, gzip.open(\n \"{}/anthology.bib.gz\".format(trgdir), \"wt\", encoding=\"utf-8\"\n ) as file_anthology, gzip.open(\n \"{}/anthology+abstracts.bib.gz\".format(trgdir), \"wt\", encoding=\"utf-8\"\n ) as file_anthology_with_abstracts:\n # Add some shortcuts to the consolidated bib file\n print(\n \"@string{acl = {Association for Computational Linguistics}}\",\n file=file_anthology_raw,\n )\n print(\"@string{anth = {https://aclanthology.org/}}\", file=file_anthology_raw)\n print(file=file_anthology_raw)\n\n for volume_id, volume in tqdm(\n sorted(anthology.volumes.items(), key=volume_sorter, reverse=True)\n ):\n # reset this each time\n abbrev = None\n\n volume_dir = trgdir\n if not os.path.exists(volume_dir):\n os.makedirs(volume_dir)\n with open(\"{}/volumes/{}.bib\".format(trgdir, volume_id), \"w\") as file_volume:\n for i, paper in enumerate(volume, 1):\n if limit and i > limit:\n break\n\n with open(\n \"{}/{}.bib\".format(volume_dir, paper.full_id), \"w\"\n ) as file_paper:\n contents = paper.as_bibtex()\n print(contents, file=file_paper)\n print(contents, file=file_anthology_with_abstracts)\n\n concise_contents = paper.as_bibtex(concise=True)\n print(concise_contents, file=file_volume)\n print(concise_contents, file=file_anthology)\n\n # Space saver (https://github.com/acl-org/acl-anthology/issues/3016)\n # Replace verbose text with abbreviations to get the file under 50 MB for Overleaf\n\n concise_contents = concise_contents.replace(\n 'publisher = \"Association for Computational Linguistics\",',\n \"publisher = acl,\",\n )\n concise_contents = re.sub(\n r'url = \"https://aclanthology.org/(.*)\"',\n r\"url = anth # {\\1}\",\n concise_contents,\n )\n\n # Abbreviate the booktitle by extracting it and printing it before\n # the first entry in each volume\n if concise_contents.startswith(\"@proceedings\"):\n # Grab the title string and create the alias\n abbrev = f\"{volume.get_venues()[0].upper()}:{infer_year(volume.collection_id)}:{volume.volume_id}\"\n try:\n booktitle = re.search(\n r\" title = \\\"(.*)\\\",\", concise_contents\n ).group(1)\n print(\n f\"@string{{{abbrev} = {{{booktitle}}}}}\",\n file=file_anthology_raw,\n )\n except AttributeError:\n import sys\n\n print(\n f\"Could not find title for {volume_id}\",\n file=sys.stderr,\n )\n abbrev = None\n\n if abbrev is not None and \"booktitle\" in concise_contents:\n # substitute the alias for the booktitle\n concise_contents = re.sub(\n r\" booktitle = (\\\".*\\\"),\",\n f\" booktitle = {abbrev},\",\n concise_contents,\n )\n\n # Remove newlines, indentations, and double-spaces around author separators\n concise_contents = re.sub(r\"\\s+\", \" \", concise_contents)\n\n print(concise_contents, file=file_anthology_raw)\n\n\nif __name__ == \"__main__\":\n args = docopt(__doc__)\n scriptdir = os.path.dirname(os.path.abspath(__file__))\n if \"{scriptdir}\" in args[\"--importdir\"]:\n args[\"--importdir\"] = os.path.abspath(\n args[\"--importdir\"].format(scriptdir=scriptdir)\n )\n if \"{scriptdir}\" in args[\"--exportdir\"]:\n args[\"--exportdir\"] = os.path.abspath(\n args[\"--exportdir\"].format(scriptdir=scriptdir)\n )\n\n log_level = log.DEBUG if args[\"--debug\"] else log.INFO\n log.basicConfig(format=\"%(levelname)-8s %(message)s\", level=log_level)\n tracker = SeverityTracker()\n log.getLogger().addHandler(tracker)\n\n # If NOBIB is set, generate only three bibs per volume\n limit = 0 if os.environ.get(\"NOBIB\", \"false\") == \"false\" else 3\n log.info(f\"NOBIB=true, generating only {limit} BibTEX files per volume\")\n\n anthology = Anthology(importdir=args[\"--importdir\"], fast_load=True)\n create_bibtex(anthology, args[\"--exportdir\"], limit=limit, clean=args[\"--clean\"])\n\n if tracker.highest >= log.ERROR:\n exit(1)\n", "path": "bin/create_bibtex.py"}]}
1,918
769
gh_patches_debug_6629
rasdani/github-patches
git_diff
readthedocs__readthedocs.org-8853
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- OSError: File does not exist: staticfiles.json - Dev Install ## Details * Read the Docs project URL: **N/A** * Build URL (if applicable): **N/A** * Read the Docs username (if applicable): **N/A** ## Expected Result Show the http://community.dev.readthedocs.io/ local site and allow you to login and generate docs. ## Actual Result ![Screenshot 2022-01-14 002309](https://user-images.githubusercontent.com/12134329/149456476-6e2d0d65-9d7b-4880-b798-1ee74985870c.png) This error also shows up when you try to go to http://community.dev.readthedocs.io/ but in the form of a nicely formatted django error with details about it. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `readthedocs/settings/docker_compose.py` Content: ``` 1 import os 2 import socket 3 4 from .dev import CommunityDevSettings 5 6 7 class DockerBaseSettings(CommunityDevSettings): 8 9 """Settings for local development with Docker""" 10 11 DOCKER_ENABLE = True 12 RTD_DOCKER_COMPOSE = True 13 RTD_DOCKER_COMPOSE_VOLUME = 'community_build-user-builds' 14 RTD_DOCKER_USER = f'{os.geteuid()}:{os.getegid()}' 15 DOCKER_LIMITS = {'memory': '1g', 'time': 900} 16 USE_SUBDOMAIN = True 17 18 PRODUCTION_DOMAIN = 'community.dev.readthedocs.io' 19 PUBLIC_DOMAIN = 'community.dev.readthedocs.io' 20 PUBLIC_API_URL = f'http://{PRODUCTION_DOMAIN}' 21 22 SLUMBER_API_HOST = 'http://web:8000' 23 SLUMBER_USERNAME = 'admin' 24 SLUMBER_PASSWORD = 'admin' 25 26 RTD_EXTERNAL_VERSION_DOMAIN = 'org.dev.readthedocs.build' 27 28 STATIC_URL = '/static/' 29 30 # In the local docker environment, nginx should be trusted to set the host correctly 31 USE_X_FORWARDED_HOST = True 32 33 MULTIPLE_BUILD_SERVERS = ['build'] 34 35 # https://docs.docker.com/engine/reference/commandline/run/#add-entries-to-container-hosts-file---add-host 36 # export HOSTIP=`ip -4 addr show scope global dev wlp4s0 | grep inet | awk '{print \$2}' | cut -d / -f 1` 37 HOSTIP = os.environ.get('HOSTIP') 38 39 # If the host IP is not specified, try to get it from the socket address list 40 _, __, ips = socket.gethostbyname_ex(socket.gethostname()) 41 if ips and not HOSTIP: 42 HOSTIP = ips[0][:-1] + "1" 43 44 # Turn this on to test ads 45 USE_PROMOS = False 46 ADSERVER_API_BASE = f'http://{HOSTIP}:5000' 47 # Create a Token for an admin User and set it here. 48 ADSERVER_API_KEY = None 49 ADSERVER_API_TIMEOUT = 2 # seconds - Docker for Mac is very slow 50 51 # New templates 52 @property 53 def RTD_EXT_THEME_DEV_SERVER_ENABLED(self): 54 return os.environ.get('RTD_EXT_THEME_DEV_SERVER_ENABLED') is not None 55 56 @property 57 def RTD_EXT_THEME_DEV_SERVER(self): 58 if self.RTD_EXT_THEME_DEV_SERVER_ENABLED: 59 return "http://assets.community.dev.readthedocs.io:10001" 60 61 # Enable auto syncing elasticsearch documents 62 ELASTICSEARCH_DSL_AUTOSYNC = 'SEARCH' in os.environ 63 64 RTD_CLEAN_AFTER_BUILD = True 65 66 @property 67 def RTD_EMBED_API_EXTERNAL_DOMAINS(self): 68 domains = super().RTD_EMBED_API_EXTERNAL_DOMAINS 69 domains.extend([ 70 r'.*\.readthedocs\.io', 71 r'.*\.org\.readthedocs\.build', 72 r'.*\.readthedocs-hosted\.com', 73 r'.*\.com\.readthedocs\.build', 74 ]) 75 return domains 76 77 @property 78 def LOGGING(self): 79 logging = super().LOGGING 80 logging['handlers']['console']['formatter'] = 'colored_console' 81 logging['loggers'].update({ 82 # Disable Django access requests logging (e.g. GET /path/to/url) 83 # https://github.com/django/django/blob/ca9872905559026af82000e46cde6f7dedc897b6/django/core/servers/basehttp.py#L24 84 'django.server': { 85 'handlers': ['null'], 86 'propagate': False, 87 }, 88 # Disable S3 logging 89 'boto3': { 90 'handlers': ['null'], 91 'propagate': False, 92 }, 93 'botocore': { 94 'handlers': ['null'], 95 'propagate': False, 96 }, 97 's3transfer': { 98 'handlers': ['null'], 99 'propagate': False, 100 }, 101 # Disable Docker API logging 102 'urllib3': { 103 'handlers': ['null'], 104 'propagate': False, 105 }, 106 # Disable gitpython logging 107 'git.cmd': { 108 'handlers': ['null'], 109 'propagate': False, 110 }, 111 }) 112 return logging 113 114 @property 115 def DATABASES(self): # noqa 116 return { 117 "default": { 118 "ENGINE": "django.db.backends.postgresql_psycopg2", 119 "NAME": "docs_db", 120 "USER": os.environ.get("DB_USER", "docs_user"), 121 "PASSWORD": os.environ.get("DB_PWD", "docs_pwd"), 122 "HOST": os.environ.get("DB_HOST", "database"), 123 "PORT": "", 124 } 125 } 126 127 def show_debug_toolbar(request): 128 from django.conf import settings 129 return settings.DEBUG 130 131 DEBUG_TOOLBAR_CONFIG = { 132 'SHOW_TOOLBAR_CALLBACK': show_debug_toolbar, 133 } 134 135 ACCOUNT_EMAIL_VERIFICATION = "none" 136 SESSION_COOKIE_DOMAIN = None 137 CACHES = { 138 'default': { 139 'BACKEND': 'redis_cache.RedisCache', 140 'LOCATION': 'cache:6379', 141 } 142 } 143 144 BROKER_URL = "redis://cache:6379/0" 145 CELERY_RESULT_BACKEND = "redis://cache:6379/0" 146 CELERY_RESULT_SERIALIZER = "json" 147 CELERY_ALWAYS_EAGER = False 148 CELERY_TASK_IGNORE_RESULT = False 149 150 EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend" 151 152 RTD_BUILD_MEDIA_STORAGE = 'readthedocs.storage.s3_storage.S3BuildMediaStorage' 153 # Storage backend for build cached environments 154 RTD_BUILD_ENVIRONMENT_STORAGE = 'readthedocs.storage.s3_storage.S3BuildEnvironmentStorage' 155 # Storage backend for build languages 156 RTD_BUILD_TOOLS_STORAGE = 'readthedocs.storage.s3_storage.S3BuildToolsStorage' 157 # Storage for static files (those collected with `collectstatic`) 158 STATICFILES_STORAGE = 'readthedocs.storage.s3_storage.S3StaticStorage' 159 160 AWS_ACCESS_KEY_ID = 'admin' 161 AWS_SECRET_ACCESS_KEY = 'password' 162 S3_MEDIA_STORAGE_BUCKET = 'media' 163 S3_BUILD_COMMANDS_STORAGE_BUCKET = 'builds' 164 S3_BUILD_ENVIRONMENT_STORAGE_BUCKET = 'envs' 165 S3_BUILD_TOOLS_STORAGE_BUCKET = 'build-tools' 166 S3_STATIC_STORAGE_BUCKET = 'static' 167 S3_STATIC_STORAGE_OVERRIDE_HOSTNAME = 'community.dev.readthedocs.io' 168 S3_MEDIA_STORAGE_OVERRIDE_HOSTNAME = 'community.dev.readthedocs.io' 169 170 AWS_AUTO_CREATE_BUCKET = True 171 AWS_DEFAULT_ACL = 'public-read' 172 AWS_BUCKET_ACL = 'public-read' 173 AWS_S3_ENCRYPTION = False 174 AWS_S3_SECURE_URLS = False 175 AWS_S3_USE_SSL = False 176 AWS_S3_ENDPOINT_URL = 'http://storage:9000/' 177 AWS_QUERYSTRING_AUTH = False 178 179 RTD_SAVE_BUILD_COMMANDS_TO_STORAGE = True 180 RTD_BUILD_COMMANDS_STORAGE = 'readthedocs.storage.s3_storage.S3BuildCommandsStorage' 181 BUILD_COLD_STORAGE_URL = 'http://storage:9000/builds' 182 183 STATICFILES_DIRS = [ 184 os.path.join(CommunityDevSettings.SITE_ROOT, 'readthedocs', 'static'), 185 os.path.join(CommunityDevSettings.SITE_ROOT, 'media'), 186 ] 187 188 # Remove the checks on the number of fields being submitted 189 # This limit is mostly hit on large forms in the Django admin 190 DATA_UPLOAD_MAX_NUMBER_FIELDS = None 191 192 # This allows us to have CORS work well in dev 193 CORS_ORIGIN_ALLOW_ALL = True 194 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/readthedocs/settings/docker_compose.py b/readthedocs/settings/docker_compose.py --- a/readthedocs/settings/docker_compose.py +++ b/readthedocs/settings/docker_compose.py @@ -167,9 +167,6 @@ S3_STATIC_STORAGE_OVERRIDE_HOSTNAME = 'community.dev.readthedocs.io' S3_MEDIA_STORAGE_OVERRIDE_HOSTNAME = 'community.dev.readthedocs.io' - AWS_AUTO_CREATE_BUCKET = True - AWS_DEFAULT_ACL = 'public-read' - AWS_BUCKET_ACL = 'public-read' AWS_S3_ENCRYPTION = False AWS_S3_SECURE_URLS = False AWS_S3_USE_SSL = False
{"golden_diff": "diff --git a/readthedocs/settings/docker_compose.py b/readthedocs/settings/docker_compose.py\n--- a/readthedocs/settings/docker_compose.py\n+++ b/readthedocs/settings/docker_compose.py\n@@ -167,9 +167,6 @@\n S3_STATIC_STORAGE_OVERRIDE_HOSTNAME = 'community.dev.readthedocs.io'\n S3_MEDIA_STORAGE_OVERRIDE_HOSTNAME = 'community.dev.readthedocs.io'\n \n- AWS_AUTO_CREATE_BUCKET = True\n- AWS_DEFAULT_ACL = 'public-read'\n- AWS_BUCKET_ACL = 'public-read'\n AWS_S3_ENCRYPTION = False\n AWS_S3_SECURE_URLS = False\n AWS_S3_USE_SSL = False\n", "issue": "OSError: File does not exist: staticfiles.json - Dev Install\n## Details\r\n\r\n* Read the Docs project URL: **N/A**\r\n* Build URL (if applicable): **N/A**\r\n* Read the Docs username (if applicable): **N/A**\r\n\r\n## Expected Result\r\n\r\nShow the http://community.dev.readthedocs.io/ local site and allow you to login and generate docs.\r\n\r\n## Actual Result\r\n\r\n![Screenshot 2022-01-14 002309](https://user-images.githubusercontent.com/12134329/149456476-6e2d0d65-9d7b-4880-b798-1ee74985870c.png)\r\n\r\nThis error also shows up when you try to go to http://community.dev.readthedocs.io/ but in the form of a nicely formatted django error with details about it.\r\n\n", "before_files": [{"content": "import os\nimport socket\n\nfrom .dev import CommunityDevSettings\n\n\nclass DockerBaseSettings(CommunityDevSettings):\n\n \"\"\"Settings for local development with Docker\"\"\"\n\n DOCKER_ENABLE = True\n RTD_DOCKER_COMPOSE = True\n RTD_DOCKER_COMPOSE_VOLUME = 'community_build-user-builds'\n RTD_DOCKER_USER = f'{os.geteuid()}:{os.getegid()}'\n DOCKER_LIMITS = {'memory': '1g', 'time': 900}\n USE_SUBDOMAIN = True\n\n PRODUCTION_DOMAIN = 'community.dev.readthedocs.io'\n PUBLIC_DOMAIN = 'community.dev.readthedocs.io'\n PUBLIC_API_URL = f'http://{PRODUCTION_DOMAIN}'\n\n SLUMBER_API_HOST = 'http://web:8000'\n SLUMBER_USERNAME = 'admin'\n SLUMBER_PASSWORD = 'admin'\n\n RTD_EXTERNAL_VERSION_DOMAIN = 'org.dev.readthedocs.build'\n\n STATIC_URL = '/static/'\n\n # In the local docker environment, nginx should be trusted to set the host correctly\n USE_X_FORWARDED_HOST = True\n\n MULTIPLE_BUILD_SERVERS = ['build']\n\n # https://docs.docker.com/engine/reference/commandline/run/#add-entries-to-container-hosts-file---add-host\n # export HOSTIP=`ip -4 addr show scope global dev wlp4s0 | grep inet | awk '{print \\$2}' | cut -d / -f 1`\n HOSTIP = os.environ.get('HOSTIP')\n\n # If the host IP is not specified, try to get it from the socket address list\n _, __, ips = socket.gethostbyname_ex(socket.gethostname())\n if ips and not HOSTIP:\n HOSTIP = ips[0][:-1] + \"1\"\n\n # Turn this on to test ads\n USE_PROMOS = False\n ADSERVER_API_BASE = f'http://{HOSTIP}:5000'\n # Create a Token for an admin User and set it here.\n ADSERVER_API_KEY = None\n ADSERVER_API_TIMEOUT = 2 # seconds - Docker for Mac is very slow\n\n # New templates\n @property\n def RTD_EXT_THEME_DEV_SERVER_ENABLED(self):\n return os.environ.get('RTD_EXT_THEME_DEV_SERVER_ENABLED') is not None\n\n @property\n def RTD_EXT_THEME_DEV_SERVER(self):\n if self.RTD_EXT_THEME_DEV_SERVER_ENABLED:\n return \"http://assets.community.dev.readthedocs.io:10001\"\n\n # Enable auto syncing elasticsearch documents\n ELASTICSEARCH_DSL_AUTOSYNC = 'SEARCH' in os.environ\n\n RTD_CLEAN_AFTER_BUILD = True\n\n @property\n def RTD_EMBED_API_EXTERNAL_DOMAINS(self):\n domains = super().RTD_EMBED_API_EXTERNAL_DOMAINS\n domains.extend([\n r'.*\\.readthedocs\\.io',\n r'.*\\.org\\.readthedocs\\.build',\n r'.*\\.readthedocs-hosted\\.com',\n r'.*\\.com\\.readthedocs\\.build',\n ])\n return domains\n\n @property\n def LOGGING(self):\n logging = super().LOGGING\n logging['handlers']['console']['formatter'] = 'colored_console'\n logging['loggers'].update({\n # Disable Django access requests logging (e.g. GET /path/to/url)\n # https://github.com/django/django/blob/ca9872905559026af82000e46cde6f7dedc897b6/django/core/servers/basehttp.py#L24\n 'django.server': {\n 'handlers': ['null'],\n 'propagate': False,\n },\n # Disable S3 logging\n 'boto3': {\n 'handlers': ['null'],\n 'propagate': False,\n },\n 'botocore': {\n 'handlers': ['null'],\n 'propagate': False,\n },\n 's3transfer': {\n 'handlers': ['null'],\n 'propagate': False,\n },\n # Disable Docker API logging\n 'urllib3': {\n 'handlers': ['null'],\n 'propagate': False,\n },\n # Disable gitpython logging\n 'git.cmd': {\n 'handlers': ['null'],\n 'propagate': False,\n },\n })\n return logging\n\n @property\n def DATABASES(self): # noqa\n return {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql_psycopg2\",\n \"NAME\": \"docs_db\",\n \"USER\": os.environ.get(\"DB_USER\", \"docs_user\"),\n \"PASSWORD\": os.environ.get(\"DB_PWD\", \"docs_pwd\"),\n \"HOST\": os.environ.get(\"DB_HOST\", \"database\"),\n \"PORT\": \"\",\n }\n }\n\n def show_debug_toolbar(request):\n from django.conf import settings\n return settings.DEBUG\n\n DEBUG_TOOLBAR_CONFIG = {\n 'SHOW_TOOLBAR_CALLBACK': show_debug_toolbar,\n }\n\n ACCOUNT_EMAIL_VERIFICATION = \"none\"\n SESSION_COOKIE_DOMAIN = None\n CACHES = {\n 'default': {\n 'BACKEND': 'redis_cache.RedisCache',\n 'LOCATION': 'cache:6379',\n }\n }\n\n BROKER_URL = \"redis://cache:6379/0\"\n CELERY_RESULT_BACKEND = \"redis://cache:6379/0\"\n CELERY_RESULT_SERIALIZER = \"json\"\n CELERY_ALWAYS_EAGER = False\n CELERY_TASK_IGNORE_RESULT = False\n\n EMAIL_BACKEND = \"django.core.mail.backends.console.EmailBackend\"\n\n RTD_BUILD_MEDIA_STORAGE = 'readthedocs.storage.s3_storage.S3BuildMediaStorage'\n # Storage backend for build cached environments\n RTD_BUILD_ENVIRONMENT_STORAGE = 'readthedocs.storage.s3_storage.S3BuildEnvironmentStorage'\n # Storage backend for build languages\n RTD_BUILD_TOOLS_STORAGE = 'readthedocs.storage.s3_storage.S3BuildToolsStorage'\n # Storage for static files (those collected with `collectstatic`)\n STATICFILES_STORAGE = 'readthedocs.storage.s3_storage.S3StaticStorage'\n\n AWS_ACCESS_KEY_ID = 'admin'\n AWS_SECRET_ACCESS_KEY = 'password'\n S3_MEDIA_STORAGE_BUCKET = 'media'\n S3_BUILD_COMMANDS_STORAGE_BUCKET = 'builds'\n S3_BUILD_ENVIRONMENT_STORAGE_BUCKET = 'envs'\n S3_BUILD_TOOLS_STORAGE_BUCKET = 'build-tools'\n S3_STATIC_STORAGE_BUCKET = 'static'\n S3_STATIC_STORAGE_OVERRIDE_HOSTNAME = 'community.dev.readthedocs.io'\n S3_MEDIA_STORAGE_OVERRIDE_HOSTNAME = 'community.dev.readthedocs.io'\n\n AWS_AUTO_CREATE_BUCKET = True\n AWS_DEFAULT_ACL = 'public-read'\n AWS_BUCKET_ACL = 'public-read'\n AWS_S3_ENCRYPTION = False\n AWS_S3_SECURE_URLS = False\n AWS_S3_USE_SSL = False\n AWS_S3_ENDPOINT_URL = 'http://storage:9000/'\n AWS_QUERYSTRING_AUTH = False\n\n RTD_SAVE_BUILD_COMMANDS_TO_STORAGE = True\n RTD_BUILD_COMMANDS_STORAGE = 'readthedocs.storage.s3_storage.S3BuildCommandsStorage'\n BUILD_COLD_STORAGE_URL = 'http://storage:9000/builds'\n\n STATICFILES_DIRS = [\n os.path.join(CommunityDevSettings.SITE_ROOT, 'readthedocs', 'static'),\n os.path.join(CommunityDevSettings.SITE_ROOT, 'media'),\n ]\n\n # Remove the checks on the number of fields being submitted\n # This limit is mostly hit on large forms in the Django admin\n DATA_UPLOAD_MAX_NUMBER_FIELDS = None\n\n # This allows us to have CORS work well in dev\n CORS_ORIGIN_ALLOW_ALL = True\n", "path": "readthedocs/settings/docker_compose.py"}], "after_files": [{"content": "import os\nimport socket\n\nfrom .dev import CommunityDevSettings\n\n\nclass DockerBaseSettings(CommunityDevSettings):\n\n \"\"\"Settings for local development with Docker\"\"\"\n\n DOCKER_ENABLE = True\n RTD_DOCKER_COMPOSE = True\n RTD_DOCKER_COMPOSE_VOLUME = 'community_build-user-builds'\n RTD_DOCKER_USER = f'{os.geteuid()}:{os.getegid()}'\n DOCKER_LIMITS = {'memory': '1g', 'time': 900}\n USE_SUBDOMAIN = True\n\n PRODUCTION_DOMAIN = 'community.dev.readthedocs.io'\n PUBLIC_DOMAIN = 'community.dev.readthedocs.io'\n PUBLIC_API_URL = f'http://{PRODUCTION_DOMAIN}'\n\n SLUMBER_API_HOST = 'http://web:8000'\n SLUMBER_USERNAME = 'admin'\n SLUMBER_PASSWORD = 'admin'\n\n RTD_EXTERNAL_VERSION_DOMAIN = 'org.dev.readthedocs.build'\n\n STATIC_URL = '/static/'\n\n # In the local docker environment, nginx should be trusted to set the host correctly\n USE_X_FORWARDED_HOST = True\n\n MULTIPLE_BUILD_SERVERS = ['build']\n\n # https://docs.docker.com/engine/reference/commandline/run/#add-entries-to-container-hosts-file---add-host\n # export HOSTIP=`ip -4 addr show scope global dev wlp4s0 | grep inet | awk '{print \\$2}' | cut -d / -f 1`\n HOSTIP = os.environ.get('HOSTIP')\n\n # If the host IP is not specified, try to get it from the socket address list\n _, __, ips = socket.gethostbyname_ex(socket.gethostname())\n if ips and not HOSTIP:\n HOSTIP = ips[0][:-1] + \"1\"\n\n # Turn this on to test ads\n USE_PROMOS = False\n ADSERVER_API_BASE = f'http://{HOSTIP}:5000'\n # Create a Token for an admin User and set it here.\n ADSERVER_API_KEY = None\n ADSERVER_API_TIMEOUT = 2 # seconds - Docker for Mac is very slow\n\n # New templates\n @property\n def RTD_EXT_THEME_DEV_SERVER_ENABLED(self):\n return os.environ.get('RTD_EXT_THEME_DEV_SERVER_ENABLED') is not None\n\n @property\n def RTD_EXT_THEME_DEV_SERVER(self):\n if self.RTD_EXT_THEME_DEV_SERVER_ENABLED:\n return \"http://assets.community.dev.readthedocs.io:10001\"\n\n # Enable auto syncing elasticsearch documents\n ELASTICSEARCH_DSL_AUTOSYNC = 'SEARCH' in os.environ\n\n RTD_CLEAN_AFTER_BUILD = True\n\n @property\n def RTD_EMBED_API_EXTERNAL_DOMAINS(self):\n domains = super().RTD_EMBED_API_EXTERNAL_DOMAINS\n domains.extend([\n r'.*\\.readthedocs\\.io',\n r'.*\\.org\\.readthedocs\\.build',\n r'.*\\.readthedocs-hosted\\.com',\n r'.*\\.com\\.readthedocs\\.build',\n ])\n return domains\n\n @property\n def LOGGING(self):\n logging = super().LOGGING\n logging['handlers']['console']['formatter'] = 'colored_console'\n logging['loggers'].update({\n # Disable Django access requests logging (e.g. GET /path/to/url)\n # https://github.com/django/django/blob/ca9872905559026af82000e46cde6f7dedc897b6/django/core/servers/basehttp.py#L24\n 'django.server': {\n 'handlers': ['null'],\n 'propagate': False,\n },\n # Disable S3 logging\n 'boto3': {\n 'handlers': ['null'],\n 'propagate': False,\n },\n 'botocore': {\n 'handlers': ['null'],\n 'propagate': False,\n },\n 's3transfer': {\n 'handlers': ['null'],\n 'propagate': False,\n },\n # Disable Docker API logging\n 'urllib3': {\n 'handlers': ['null'],\n 'propagate': False,\n },\n # Disable gitpython logging\n 'git.cmd': {\n 'handlers': ['null'],\n 'propagate': False,\n },\n })\n return logging\n\n @property\n def DATABASES(self): # noqa\n return {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql_psycopg2\",\n \"NAME\": \"docs_db\",\n \"USER\": os.environ.get(\"DB_USER\", \"docs_user\"),\n \"PASSWORD\": os.environ.get(\"DB_PWD\", \"docs_pwd\"),\n \"HOST\": os.environ.get(\"DB_HOST\", \"database\"),\n \"PORT\": \"\",\n }\n }\n\n def show_debug_toolbar(request):\n from django.conf import settings\n return settings.DEBUG\n\n DEBUG_TOOLBAR_CONFIG = {\n 'SHOW_TOOLBAR_CALLBACK': show_debug_toolbar,\n }\n\n ACCOUNT_EMAIL_VERIFICATION = \"none\"\n SESSION_COOKIE_DOMAIN = None\n CACHES = {\n 'default': {\n 'BACKEND': 'redis_cache.RedisCache',\n 'LOCATION': 'cache:6379',\n }\n }\n\n BROKER_URL = \"redis://cache:6379/0\"\n CELERY_RESULT_BACKEND = \"redis://cache:6379/0\"\n CELERY_RESULT_SERIALIZER = \"json\"\n CELERY_ALWAYS_EAGER = False\n CELERY_TASK_IGNORE_RESULT = False\n\n EMAIL_BACKEND = \"django.core.mail.backends.console.EmailBackend\"\n\n RTD_BUILD_MEDIA_STORAGE = 'readthedocs.storage.s3_storage.S3BuildMediaStorage'\n # Storage backend for build cached environments\n RTD_BUILD_ENVIRONMENT_STORAGE = 'readthedocs.storage.s3_storage.S3BuildEnvironmentStorage'\n # Storage backend for build languages\n RTD_BUILD_TOOLS_STORAGE = 'readthedocs.storage.s3_storage.S3BuildToolsStorage'\n # Storage for static files (those collected with `collectstatic`)\n STATICFILES_STORAGE = 'readthedocs.storage.s3_storage.S3StaticStorage'\n\n AWS_ACCESS_KEY_ID = 'admin'\n AWS_SECRET_ACCESS_KEY = 'password'\n S3_MEDIA_STORAGE_BUCKET = 'media'\n S3_BUILD_COMMANDS_STORAGE_BUCKET = 'builds'\n S3_BUILD_ENVIRONMENT_STORAGE_BUCKET = 'envs'\n S3_BUILD_TOOLS_STORAGE_BUCKET = 'build-tools'\n S3_STATIC_STORAGE_BUCKET = 'static'\n S3_STATIC_STORAGE_OVERRIDE_HOSTNAME = 'community.dev.readthedocs.io'\n S3_MEDIA_STORAGE_OVERRIDE_HOSTNAME = 'community.dev.readthedocs.io'\n\n AWS_S3_ENCRYPTION = False\n AWS_S3_SECURE_URLS = False\n AWS_S3_USE_SSL = False\n AWS_S3_ENDPOINT_URL = 'http://storage:9000/'\n AWS_QUERYSTRING_AUTH = False\n\n RTD_SAVE_BUILD_COMMANDS_TO_STORAGE = True\n RTD_BUILD_COMMANDS_STORAGE = 'readthedocs.storage.s3_storage.S3BuildCommandsStorage'\n BUILD_COLD_STORAGE_URL = 'http://storage:9000/builds'\n\n STATICFILES_DIRS = [\n os.path.join(CommunityDevSettings.SITE_ROOT, 'readthedocs', 'static'),\n os.path.join(CommunityDevSettings.SITE_ROOT, 'media'),\n ]\n\n # Remove the checks on the number of fields being submitted\n # This limit is mostly hit on large forms in the Django admin\n DATA_UPLOAD_MAX_NUMBER_FIELDS = None\n\n # This allows us to have CORS work well in dev\n CORS_ORIGIN_ALLOW_ALL = True\n", "path": "readthedocs/settings/docker_compose.py"}]}
2,643
151
gh_patches_debug_17455
rasdani/github-patches
git_diff
modin-project__modin-7188
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Change "master" branch to "main" We should update all refs to the new default branch. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scripts/release.py` Content: ``` 1 import argparse 2 import atexit 3 import collections 4 import json 5 import re 6 import sys 7 from pathlib import Path 8 9 import github 10 import pygit2 11 from packaging import version 12 13 14 class GithubUserResolver: 15 def __init__(self, email2commit, token): 16 self.__cache_file = Path(__file__).parent / "gh-users-cache.json" 17 self.__cache = ( 18 json.loads(self.__cache_file.read_text()) 19 if self.__cache_file.exists() 20 else {} 21 ) 22 # filter unknown users hoping we'd be able to find them this time 23 self.__cache = {key: value for key, value in self.__cache.items() if value} 24 # using anonymous access if token not specified 25 self.__github = github.Github(token or None) 26 self.__modin_repo = self.__github.get_repo("modin-project/modin") 27 self.__email2commit = email2commit 28 atexit.register(self.__save) 29 30 def __search_commits(self, term): 31 if commit := self.__email2commit.get(term): 32 gh_commit = self.__modin_repo.get_commit(str(commit)) 33 return gh_commit.author.login 34 return None 35 36 @staticmethod 37 def __is_email(term): 38 return re.match(r".*@.*\..*", term) 39 40 def __search_github(self, term): 41 search = f"in:email {term}" if self.__is_email(term) else f"fullname:{term}" 42 match = [user.login for user in self.__github.search_users(search)] 43 return match[0] if len(match) == 1 else None 44 45 def __try_user(self, term): 46 if self.__is_email(term): 47 return None 48 try: 49 return self.__github.get_user(term).login 50 except github.GithubException as ex: 51 if ex.status != 404: 52 raise 53 return None 54 55 def __resolve_single(self, term): 56 return ( 57 self.__search_commits(term) 58 or self.__search_github(term) 59 or self.__try_user(term) 60 ) 61 62 def __resolve_cache(self, name, email): 63 return self.__cache.get(f"{name} <{email}>", None) 64 65 def __register(self, name, email, match): 66 self.__cache[f"{name} <{email}>"] = match 67 68 def resolve(self, people): 69 logins, unknowns = set(), set() 70 71 for name, email in people: 72 if match := self.__resolve_cache(name, email): 73 logins.add(match) 74 elif match := self.__resolve_single(email): 75 self.__register(name, email, match) 76 logins.add(match) 77 else: 78 if match := self.__resolve_single(name): 79 logins.add(match) 80 else: 81 unknowns.add((name, email)) 82 self.__register(name, email, match) 83 84 return logins, unknowns 85 86 def resolve_by_reviews(self, unknowns, email2pr): 87 logins, new_unknowns = set(), set() 88 for name, email in unknowns: 89 commit = self.__modin_repo.get_commit(str(email2pr[email])) 90 found = set() 91 for pull in commit.get_pulls(): 92 for review in pull.get_reviews(): 93 user = review.user 94 if user.name == name and (not user.email or user.email == email): 95 found.add(user.login) 96 97 if len(found) == 1: 98 self.__register(name, email, list(found)[0]) 99 logins |= found 100 else: 101 new_unknowns.add((name, email)) 102 103 return logins, new_unknowns 104 105 def __save(self): 106 self.__cache_file.write_text(json.dumps(self.__cache, indent=4, sort_keys=True)) 107 108 109 class GitWrapper: 110 def __init__(self): 111 self.repo = pygit2.Repository(Path(__file__).parent) 112 113 def is_on_master(self): 114 return self.repo.references["refs/heads/master"] == self.repo.head 115 116 @staticmethod 117 def __get_tag_version(entry): 118 try: 119 return version.parse(entry.lstrip("refs/tags/")) 120 except version.InvalidVersion as ex: 121 return f'<bad version "{entry}": {ex}>' 122 123 def get_previous_release(self, rel_type): 124 tags = [ 125 (entry, self.__get_tag_version(entry)) 126 for entry in self.repo.references 127 if entry.startswith("refs/tags/") 128 ] 129 # filter away legacy versions (which aren't following the proper naming schema); 130 # also skip pre-releases 131 tags = [ 132 (entry, ver) 133 for entry, ver in tags 134 if isinstance(ver, version.Version) and not ver.pre 135 ] 136 if rel_type == "minor": 137 # leave only minor releases 138 tags = [(entry, ver) for entry, ver in tags if ver.micro == 0] 139 else: 140 assert rel_type == "patch" 141 prev_ref, prev_ver = max(tags, key=lambda pair: pair[1]) 142 return prev_ref, self.repo.references[prev_ref].peel(), prev_ver 143 144 def get_commits_upto(self, stop_commit): 145 history = [] 146 for obj in self.repo.walk(self.repo.head.target): 147 if obj.id == stop_commit.id: 148 break 149 history.append(obj) 150 else: 151 raise ValueError("Current HEAD is not derived from previous release") 152 return history 153 154 def ensure_title_link(self, obj: pygit2.Commit): 155 title = obj.message.splitlines()[0] 156 if not re.match(r".*\(#(\d+)\)$", title): 157 title += f" ({obj.short_id})" 158 return title 159 160 161 def make_notes(args): 162 wrapper = GitWrapper() 163 release_type = "minor" if wrapper.is_on_master() else "patch" 164 sys.stderr.write(f"Detected release type: {release_type}\n") 165 166 prev_ref, prev_commit, prev_ver = wrapper.get_previous_release(release_type) 167 sys.stderr.write(f"Previous {release_type} release: {prev_ref}\n") 168 169 next_major, next_minor, next_patch = prev_ver.release 170 if release_type == "minor": 171 next_minor += 1 172 elif release_type == "patch": 173 next_patch += 1 174 else: 175 raise ValueError(f"Unexpected release type: {release_type}") 176 next_ver = version.Version(f"{next_major}.{next_minor}.{next_patch}") 177 178 sys.stderr.write(f"Computing release notes for {prev_ver} -> {next_ver}...\n") 179 try: 180 history = wrapper.get_commits_upto(prev_commit) 181 except ValueError as ex: 182 sys.stderr.write( 183 f"{ex}: did you forget to checkout correct branch or pull tags?" 184 ) 185 return 1 186 if not history: 187 sys.stderr.write(f"No commits since {prev_ver} found, nothing to generate!\n") 188 return 1 189 190 titles = collections.defaultdict(list) 191 people = set() 192 email2commit, email2pr = {}, {} 193 for obj in history: 194 title = obj.message.splitlines()[0] 195 titles[title.split("-")[0]].append(obj) 196 new_people = set( 197 re.findall( 198 r"(?:(?:Signed-off-by|Co-authored-by):\s*)([\w\s,]+?)\s*<([^>]+)>", 199 obj.message, 200 ) 201 ) 202 for _, email in new_people: 203 email2pr[email] = obj.id 204 people |= new_people 205 email2commit[obj.author.email] = obj.id 206 sys.stderr.write(f"Found {len(history)} commit(s) since {prev_ref}\n") 207 208 sys.stderr.write("Resolving contributors...\n") 209 user_resolver = GithubUserResolver(email2commit, args.token) 210 logins, unknowns = user_resolver.resolve(people) 211 new_logins, unknowns = user_resolver.resolve_by_reviews(unknowns, email2pr) 212 logins |= new_logins 213 sys.stderr.write(f"Found {len(logins)} GitHub usernames.\n") 214 if unknowns: 215 sys.stderr.write( 216 f"Warning! Failed to resolve {len(unknowns)} usernames, please resolve them manually!\n" 217 ) 218 219 sections = [ 220 ("Stability and Bugfixes", "FIX"), 221 ("Performance enhancements", "PERF"), 222 ("Refactor Codebase", "REFACTOR"), 223 ("Update testing suite", "TEST"), 224 ("Documentation improvements", "DOCS"), 225 ("New Features", "FEAT"), 226 ] 227 228 notes = rf"""Modin {next_ver} 229 230 <Please fill in short release summary> 231 232 Key Features and Updates Since {prev_ver} 233 -------------------------------{'-' * len(str(prev_ver))} 234 """ 235 236 def _add_section(section, prs): 237 nonlocal notes 238 if prs: 239 notes += f"* {section}\n" 240 notes += "\n".join( 241 [ 242 f" * {wrapper.ensure_title_link(obj)}" 243 for obj in sorted(prs, key=lambda obj: obj.message) 244 ] 245 ) 246 notes += "\n" 247 248 for section, key in sections: 249 _add_section(section, titles.pop(key, None)) 250 251 uncategorized = sum(titles.values(), []) 252 _add_section("Uncategorized improvements", uncategorized) 253 254 notes += r""" 255 Contributors 256 ------------ 257 """ 258 notes += "\n".join(f"@{login}" for login in sorted(logins)) + "\n" 259 notes += ( 260 "\n".join( 261 f"<unknown-login> {name} <{email}>" for name, email in sorted(unknowns) 262 ) 263 + "\n" 264 ) 265 266 sys.stdout.write(notes) 267 268 269 def main(): 270 parse = argparse.ArgumentParser() 271 parse.add_argument( 272 "--token", 273 type=str, 274 default="", 275 help="GitHub token for queries (optional, bumps up rate limit)", 276 ) 277 parse.set_defaults(func=lambda _: parse.print_usage()) 278 subparsers = parse.add_subparsers() 279 280 notes = subparsers.add_parser("notes", help="Generate release notes") 281 notes.set_defaults(func=make_notes) 282 283 args = parse.parse_args() 284 sys.exit(args.func(args)) 285 286 287 if __name__ == "__main__": 288 main() 289 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scripts/release.py b/scripts/release.py --- a/scripts/release.py +++ b/scripts/release.py @@ -110,8 +110,8 @@ def __init__(self): self.repo = pygit2.Repository(Path(__file__).parent) - def is_on_master(self): - return self.repo.references["refs/heads/master"] == self.repo.head + def is_on_main(self): + return self.repo.references["refs/heads/main"] == self.repo.head @staticmethod def __get_tag_version(entry): @@ -160,7 +160,7 @@ def make_notes(args): wrapper = GitWrapper() - release_type = "minor" if wrapper.is_on_master() else "patch" + release_type = "minor" if wrapper.is_on_main() else "patch" sys.stderr.write(f"Detected release type: {release_type}\n") prev_ref, prev_commit, prev_ver = wrapper.get_previous_release(release_type)
{"golden_diff": "diff --git a/scripts/release.py b/scripts/release.py\n--- a/scripts/release.py\n+++ b/scripts/release.py\n@@ -110,8 +110,8 @@\n def __init__(self):\n self.repo = pygit2.Repository(Path(__file__).parent)\n \n- def is_on_master(self):\n- return self.repo.references[\"refs/heads/master\"] == self.repo.head\n+ def is_on_main(self):\n+ return self.repo.references[\"refs/heads/main\"] == self.repo.head\n \n @staticmethod\n def __get_tag_version(entry):\n@@ -160,7 +160,7 @@\n \n def make_notes(args):\n wrapper = GitWrapper()\n- release_type = \"minor\" if wrapper.is_on_master() else \"patch\"\n+ release_type = \"minor\" if wrapper.is_on_main() else \"patch\"\n sys.stderr.write(f\"Detected release type: {release_type}\\n\")\n \n prev_ref, prev_commit, prev_ver = wrapper.get_previous_release(release_type)\n", "issue": "Change \"master\" branch to \"main\"\nWe should update all refs to the new default branch.\n", "before_files": [{"content": "import argparse\nimport atexit\nimport collections\nimport json\nimport re\nimport sys\nfrom pathlib import Path\n\nimport github\nimport pygit2\nfrom packaging import version\n\n\nclass GithubUserResolver:\n def __init__(self, email2commit, token):\n self.__cache_file = Path(__file__).parent / \"gh-users-cache.json\"\n self.__cache = (\n json.loads(self.__cache_file.read_text())\n if self.__cache_file.exists()\n else {}\n )\n # filter unknown users hoping we'd be able to find them this time\n self.__cache = {key: value for key, value in self.__cache.items() if value}\n # using anonymous access if token not specified\n self.__github = github.Github(token or None)\n self.__modin_repo = self.__github.get_repo(\"modin-project/modin\")\n self.__email2commit = email2commit\n atexit.register(self.__save)\n\n def __search_commits(self, term):\n if commit := self.__email2commit.get(term):\n gh_commit = self.__modin_repo.get_commit(str(commit))\n return gh_commit.author.login\n return None\n\n @staticmethod\n def __is_email(term):\n return re.match(r\".*@.*\\..*\", term)\n\n def __search_github(self, term):\n search = f\"in:email {term}\" if self.__is_email(term) else f\"fullname:{term}\"\n match = [user.login for user in self.__github.search_users(search)]\n return match[0] if len(match) == 1 else None\n\n def __try_user(self, term):\n if self.__is_email(term):\n return None\n try:\n return self.__github.get_user(term).login\n except github.GithubException as ex:\n if ex.status != 404:\n raise\n return None\n\n def __resolve_single(self, term):\n return (\n self.__search_commits(term)\n or self.__search_github(term)\n or self.__try_user(term)\n )\n\n def __resolve_cache(self, name, email):\n return self.__cache.get(f\"{name} <{email}>\", None)\n\n def __register(self, name, email, match):\n self.__cache[f\"{name} <{email}>\"] = match\n\n def resolve(self, people):\n logins, unknowns = set(), set()\n\n for name, email in people:\n if match := self.__resolve_cache(name, email):\n logins.add(match)\n elif match := self.__resolve_single(email):\n self.__register(name, email, match)\n logins.add(match)\n else:\n if match := self.__resolve_single(name):\n logins.add(match)\n else:\n unknowns.add((name, email))\n self.__register(name, email, match)\n\n return logins, unknowns\n\n def resolve_by_reviews(self, unknowns, email2pr):\n logins, new_unknowns = set(), set()\n for name, email in unknowns:\n commit = self.__modin_repo.get_commit(str(email2pr[email]))\n found = set()\n for pull in commit.get_pulls():\n for review in pull.get_reviews():\n user = review.user\n if user.name == name and (not user.email or user.email == email):\n found.add(user.login)\n\n if len(found) == 1:\n self.__register(name, email, list(found)[0])\n logins |= found\n else:\n new_unknowns.add((name, email))\n\n return logins, new_unknowns\n\n def __save(self):\n self.__cache_file.write_text(json.dumps(self.__cache, indent=4, sort_keys=True))\n\n\nclass GitWrapper:\n def __init__(self):\n self.repo = pygit2.Repository(Path(__file__).parent)\n\n def is_on_master(self):\n return self.repo.references[\"refs/heads/master\"] == self.repo.head\n\n @staticmethod\n def __get_tag_version(entry):\n try:\n return version.parse(entry.lstrip(\"refs/tags/\"))\n except version.InvalidVersion as ex:\n return f'<bad version \"{entry}\": {ex}>'\n\n def get_previous_release(self, rel_type):\n tags = [\n (entry, self.__get_tag_version(entry))\n for entry in self.repo.references\n if entry.startswith(\"refs/tags/\")\n ]\n # filter away legacy versions (which aren't following the proper naming schema);\n # also skip pre-releases\n tags = [\n (entry, ver)\n for entry, ver in tags\n if isinstance(ver, version.Version) and not ver.pre\n ]\n if rel_type == \"minor\":\n # leave only minor releases\n tags = [(entry, ver) for entry, ver in tags if ver.micro == 0]\n else:\n assert rel_type == \"patch\"\n prev_ref, prev_ver = max(tags, key=lambda pair: pair[1])\n return prev_ref, self.repo.references[prev_ref].peel(), prev_ver\n\n def get_commits_upto(self, stop_commit):\n history = []\n for obj in self.repo.walk(self.repo.head.target):\n if obj.id == stop_commit.id:\n break\n history.append(obj)\n else:\n raise ValueError(\"Current HEAD is not derived from previous release\")\n return history\n\n def ensure_title_link(self, obj: pygit2.Commit):\n title = obj.message.splitlines()[0]\n if not re.match(r\".*\\(#(\\d+)\\)$\", title):\n title += f\" ({obj.short_id})\"\n return title\n\n\ndef make_notes(args):\n wrapper = GitWrapper()\n release_type = \"minor\" if wrapper.is_on_master() else \"patch\"\n sys.stderr.write(f\"Detected release type: {release_type}\\n\")\n\n prev_ref, prev_commit, prev_ver = wrapper.get_previous_release(release_type)\n sys.stderr.write(f\"Previous {release_type} release: {prev_ref}\\n\")\n\n next_major, next_minor, next_patch = prev_ver.release\n if release_type == \"minor\":\n next_minor += 1\n elif release_type == \"patch\":\n next_patch += 1\n else:\n raise ValueError(f\"Unexpected release type: {release_type}\")\n next_ver = version.Version(f\"{next_major}.{next_minor}.{next_patch}\")\n\n sys.stderr.write(f\"Computing release notes for {prev_ver} -> {next_ver}...\\n\")\n try:\n history = wrapper.get_commits_upto(prev_commit)\n except ValueError as ex:\n sys.stderr.write(\n f\"{ex}: did you forget to checkout correct branch or pull tags?\"\n )\n return 1\n if not history:\n sys.stderr.write(f\"No commits since {prev_ver} found, nothing to generate!\\n\")\n return 1\n\n titles = collections.defaultdict(list)\n people = set()\n email2commit, email2pr = {}, {}\n for obj in history:\n title = obj.message.splitlines()[0]\n titles[title.split(\"-\")[0]].append(obj)\n new_people = set(\n re.findall(\n r\"(?:(?:Signed-off-by|Co-authored-by):\\s*)([\\w\\s,]+?)\\s*<([^>]+)>\",\n obj.message,\n )\n )\n for _, email in new_people:\n email2pr[email] = obj.id\n people |= new_people\n email2commit[obj.author.email] = obj.id\n sys.stderr.write(f\"Found {len(history)} commit(s) since {prev_ref}\\n\")\n\n sys.stderr.write(\"Resolving contributors...\\n\")\n user_resolver = GithubUserResolver(email2commit, args.token)\n logins, unknowns = user_resolver.resolve(people)\n new_logins, unknowns = user_resolver.resolve_by_reviews(unknowns, email2pr)\n logins |= new_logins\n sys.stderr.write(f\"Found {len(logins)} GitHub usernames.\\n\")\n if unknowns:\n sys.stderr.write(\n f\"Warning! Failed to resolve {len(unknowns)} usernames, please resolve them manually!\\n\"\n )\n\n sections = [\n (\"Stability and Bugfixes\", \"FIX\"),\n (\"Performance enhancements\", \"PERF\"),\n (\"Refactor Codebase\", \"REFACTOR\"),\n (\"Update testing suite\", \"TEST\"),\n (\"Documentation improvements\", \"DOCS\"),\n (\"New Features\", \"FEAT\"),\n ]\n\n notes = rf\"\"\"Modin {next_ver}\n\n<Please fill in short release summary>\n\nKey Features and Updates Since {prev_ver}\n-------------------------------{'-' * len(str(prev_ver))}\n\"\"\"\n\n def _add_section(section, prs):\n nonlocal notes\n if prs:\n notes += f\"* {section}\\n\"\n notes += \"\\n\".join(\n [\n f\" * {wrapper.ensure_title_link(obj)}\"\n for obj in sorted(prs, key=lambda obj: obj.message)\n ]\n )\n notes += \"\\n\"\n\n for section, key in sections:\n _add_section(section, titles.pop(key, None))\n\n uncategorized = sum(titles.values(), [])\n _add_section(\"Uncategorized improvements\", uncategorized)\n\n notes += r\"\"\"\nContributors\n------------\n\"\"\"\n notes += \"\\n\".join(f\"@{login}\" for login in sorted(logins)) + \"\\n\"\n notes += (\n \"\\n\".join(\n f\"<unknown-login> {name} <{email}>\" for name, email in sorted(unknowns)\n )\n + \"\\n\"\n )\n\n sys.stdout.write(notes)\n\n\ndef main():\n parse = argparse.ArgumentParser()\n parse.add_argument(\n \"--token\",\n type=str,\n default=\"\",\n help=\"GitHub token for queries (optional, bumps up rate limit)\",\n )\n parse.set_defaults(func=lambda _: parse.print_usage())\n subparsers = parse.add_subparsers()\n\n notes = subparsers.add_parser(\"notes\", help=\"Generate release notes\")\n notes.set_defaults(func=make_notes)\n\n args = parse.parse_args()\n sys.exit(args.func(args))\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "scripts/release.py"}], "after_files": [{"content": "import argparse\nimport atexit\nimport collections\nimport json\nimport re\nimport sys\nfrom pathlib import Path\n\nimport github\nimport pygit2\nfrom packaging import version\n\n\nclass GithubUserResolver:\n def __init__(self, email2commit, token):\n self.__cache_file = Path(__file__).parent / \"gh-users-cache.json\"\n self.__cache = (\n json.loads(self.__cache_file.read_text())\n if self.__cache_file.exists()\n else {}\n )\n # filter unknown users hoping we'd be able to find them this time\n self.__cache = {key: value for key, value in self.__cache.items() if value}\n # using anonymous access if token not specified\n self.__github = github.Github(token or None)\n self.__modin_repo = self.__github.get_repo(\"modin-project/modin\")\n self.__email2commit = email2commit\n atexit.register(self.__save)\n\n def __search_commits(self, term):\n if commit := self.__email2commit.get(term):\n gh_commit = self.__modin_repo.get_commit(str(commit))\n return gh_commit.author.login\n return None\n\n @staticmethod\n def __is_email(term):\n return re.match(r\".*@.*\\..*\", term)\n\n def __search_github(self, term):\n search = f\"in:email {term}\" if self.__is_email(term) else f\"fullname:{term}\"\n match = [user.login for user in self.__github.search_users(search)]\n return match[0] if len(match) == 1 else None\n\n def __try_user(self, term):\n if self.__is_email(term):\n return None\n try:\n return self.__github.get_user(term).login\n except github.GithubException as ex:\n if ex.status != 404:\n raise\n return None\n\n def __resolve_single(self, term):\n return (\n self.__search_commits(term)\n or self.__search_github(term)\n or self.__try_user(term)\n )\n\n def __resolve_cache(self, name, email):\n return self.__cache.get(f\"{name} <{email}>\", None)\n\n def __register(self, name, email, match):\n self.__cache[f\"{name} <{email}>\"] = match\n\n def resolve(self, people):\n logins, unknowns = set(), set()\n\n for name, email in people:\n if match := self.__resolve_cache(name, email):\n logins.add(match)\n elif match := self.__resolve_single(email):\n self.__register(name, email, match)\n logins.add(match)\n else:\n if match := self.__resolve_single(name):\n logins.add(match)\n else:\n unknowns.add((name, email))\n self.__register(name, email, match)\n\n return logins, unknowns\n\n def resolve_by_reviews(self, unknowns, email2pr):\n logins, new_unknowns = set(), set()\n for name, email in unknowns:\n commit = self.__modin_repo.get_commit(str(email2pr[email]))\n found = set()\n for pull in commit.get_pulls():\n for review in pull.get_reviews():\n user = review.user\n if user.name == name and (not user.email or user.email == email):\n found.add(user.login)\n\n if len(found) == 1:\n self.__register(name, email, list(found)[0])\n logins |= found\n else:\n new_unknowns.add((name, email))\n\n return logins, new_unknowns\n\n def __save(self):\n self.__cache_file.write_text(json.dumps(self.__cache, indent=4, sort_keys=True))\n\n\nclass GitWrapper:\n def __init__(self):\n self.repo = pygit2.Repository(Path(__file__).parent)\n\n def is_on_main(self):\n return self.repo.references[\"refs/heads/main\"] == self.repo.head\n\n @staticmethod\n def __get_tag_version(entry):\n try:\n return version.parse(entry.lstrip(\"refs/tags/\"))\n except version.InvalidVersion as ex:\n return f'<bad version \"{entry}\": {ex}>'\n\n def get_previous_release(self, rel_type):\n tags = [\n (entry, self.__get_tag_version(entry))\n for entry in self.repo.references\n if entry.startswith(\"refs/tags/\")\n ]\n # filter away legacy versions (which aren't following the proper naming schema);\n # also skip pre-releases\n tags = [\n (entry, ver)\n for entry, ver in tags\n if isinstance(ver, version.Version) and not ver.pre\n ]\n if rel_type == \"minor\":\n # leave only minor releases\n tags = [(entry, ver) for entry, ver in tags if ver.micro == 0]\n else:\n assert rel_type == \"patch\"\n prev_ref, prev_ver = max(tags, key=lambda pair: pair[1])\n return prev_ref, self.repo.references[prev_ref].peel(), prev_ver\n\n def get_commits_upto(self, stop_commit):\n history = []\n for obj in self.repo.walk(self.repo.head.target):\n if obj.id == stop_commit.id:\n break\n history.append(obj)\n else:\n raise ValueError(\"Current HEAD is not derived from previous release\")\n return history\n\n def ensure_title_link(self, obj: pygit2.Commit):\n title = obj.message.splitlines()[0]\n if not re.match(r\".*\\(#(\\d+)\\)$\", title):\n title += f\" ({obj.short_id})\"\n return title\n\n\ndef make_notes(args):\n wrapper = GitWrapper()\n release_type = \"minor\" if wrapper.is_on_main() else \"patch\"\n sys.stderr.write(f\"Detected release type: {release_type}\\n\")\n\n prev_ref, prev_commit, prev_ver = wrapper.get_previous_release(release_type)\n sys.stderr.write(f\"Previous {release_type} release: {prev_ref}\\n\")\n\n next_major, next_minor, next_patch = prev_ver.release\n if release_type == \"minor\":\n next_minor += 1\n elif release_type == \"patch\":\n next_patch += 1\n else:\n raise ValueError(f\"Unexpected release type: {release_type}\")\n next_ver = version.Version(f\"{next_major}.{next_minor}.{next_patch}\")\n\n sys.stderr.write(f\"Computing release notes for {prev_ver} -> {next_ver}...\\n\")\n try:\n history = wrapper.get_commits_upto(prev_commit)\n except ValueError as ex:\n sys.stderr.write(\n f\"{ex}: did you forget to checkout correct branch or pull tags?\"\n )\n return 1\n if not history:\n sys.stderr.write(f\"No commits since {prev_ver} found, nothing to generate!\\n\")\n return 1\n\n titles = collections.defaultdict(list)\n people = set()\n email2commit, email2pr = {}, {}\n for obj in history:\n title = obj.message.splitlines()[0]\n titles[title.split(\"-\")[0]].append(obj)\n new_people = set(\n re.findall(\n r\"(?:(?:Signed-off-by|Co-authored-by):\\s*)([\\w\\s,]+?)\\s*<([^>]+)>\",\n obj.message,\n )\n )\n for _, email in new_people:\n email2pr[email] = obj.id\n people |= new_people\n email2commit[obj.author.email] = obj.id\n sys.stderr.write(f\"Found {len(history)} commit(s) since {prev_ref}\\n\")\n\n sys.stderr.write(\"Resolving contributors...\\n\")\n user_resolver = GithubUserResolver(email2commit, args.token)\n logins, unknowns = user_resolver.resolve(people)\n new_logins, unknowns = user_resolver.resolve_by_reviews(unknowns, email2pr)\n logins |= new_logins\n sys.stderr.write(f\"Found {len(logins)} GitHub usernames.\\n\")\n if unknowns:\n sys.stderr.write(\n f\"Warning! Failed to resolve {len(unknowns)} usernames, please resolve them manually!\\n\"\n )\n\n sections = [\n (\"Stability and Bugfixes\", \"FIX\"),\n (\"Performance enhancements\", \"PERF\"),\n (\"Refactor Codebase\", \"REFACTOR\"),\n (\"Update testing suite\", \"TEST\"),\n (\"Documentation improvements\", \"DOCS\"),\n (\"New Features\", \"FEAT\"),\n ]\n\n notes = rf\"\"\"Modin {next_ver}\n\n<Please fill in short release summary>\n\nKey Features and Updates Since {prev_ver}\n-------------------------------{'-' * len(str(prev_ver))}\n\"\"\"\n\n def _add_section(section, prs):\n nonlocal notes\n if prs:\n notes += f\"* {section}\\n\"\n notes += \"\\n\".join(\n [\n f\" * {wrapper.ensure_title_link(obj)}\"\n for obj in sorted(prs, key=lambda obj: obj.message)\n ]\n )\n notes += \"\\n\"\n\n for section, key in sections:\n _add_section(section, titles.pop(key, None))\n\n uncategorized = sum(titles.values(), [])\n _add_section(\"Uncategorized improvements\", uncategorized)\n\n notes += r\"\"\"\nContributors\n------------\n\"\"\"\n notes += \"\\n\".join(f\"@{login}\" for login in sorted(logins)) + \"\\n\"\n notes += (\n \"\\n\".join(\n f\"<unknown-login> {name} <{email}>\" for name, email in sorted(unknowns)\n )\n + \"\\n\"\n )\n\n sys.stdout.write(notes)\n\n\ndef main():\n parse = argparse.ArgumentParser()\n parse.add_argument(\n \"--token\",\n type=str,\n default=\"\",\n help=\"GitHub token for queries (optional, bumps up rate limit)\",\n )\n parse.set_defaults(func=lambda _: parse.print_usage())\n subparsers = parse.add_subparsers()\n\n notes = subparsers.add_parser(\"notes\", help=\"Generate release notes\")\n notes.set_defaults(func=make_notes)\n\n args = parse.parse_args()\n sys.exit(args.func(args))\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "scripts/release.py"}]}
3,230
220
gh_patches_debug_28332
rasdani/github-patches
git_diff
Kinto__kinto-1076
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- _since overflow error: long too big to convert When since is too big with the Postgresql backend it fails with an OverflowError: https://sentry.prod.mozaws.net/operations/kinto-prod-1/issues/382250/ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kinto/core/resource/schema.py` Content: ``` 1 import warnings 2 3 import colander 4 5 from kinto.core.schema import (Any, HeaderField, QueryField, HeaderQuotedInteger, 6 FieldList, TimeStamp, URL) 7 from kinto.core.utils import native_value 8 9 10 class TimeStamp(TimeStamp): 11 """This schema is deprecated, you shoud use `kinto.core.schema.TimeStamp` instead.""" 12 13 def __init__(self, *args, **kwargs): 14 message = ("`kinto.core.resource.schema.TimeStamp` is deprecated, " 15 "use `kinto.core.schema.TimeStamp` instead.") 16 warnings.warn(message, DeprecationWarning) 17 super(TimeStamp, self).__init__(*args, **kwargs) 18 19 20 class URL(URL): 21 """This schema is deprecated, you shoud use `kinto.core.schema.URL` instead.""" 22 23 def __init__(self, *args, **kwargs): 24 message = ("`kinto.core.resource.schema.URL` is deprecated, " 25 "use `kinto.core.schema.URL` instead.") 26 warnings.warn(message, DeprecationWarning) 27 super(URL, self).__init__(*args, **kwargs) 28 29 30 # Resource related schemas 31 32 33 class ResourceSchema(colander.MappingSchema): 34 """Base resource schema, with *Cliquet* specific built-in options.""" 35 36 class Options: 37 """ 38 Resource schema options. 39 40 This is meant to be overriden for changing values: 41 42 .. code-block:: python 43 44 class Product(ResourceSchema): 45 reference = colander.SchemaNode(colander.String()) 46 47 class Options: 48 readonly_fields = ('reference',) 49 """ 50 readonly_fields = tuple() 51 """Fields that cannot be updated. Values for fields will have to be 52 provided either during record creation, through default values using 53 ``missing`` attribute or implementing a custom logic in 54 :meth:`kinto.core.resource.UserResource.process_record`. 55 """ 56 57 preserve_unknown = True 58 """Define if unknown fields should be preserved or not. 59 60 The resource is schema-less by default. In other words, any field name 61 will be accepted on records. Set this to ``False`` in order to limit 62 the accepted fields to the ones defined in the schema. 63 """ 64 65 @classmethod 66 def get_option(cls, attr): 67 default_value = getattr(ResourceSchema.Options, attr) 68 return getattr(cls.Options, attr, default_value) 69 70 @classmethod 71 def is_readonly(cls, field): 72 """Return True if specified field name is read-only. 73 74 :param str field: the field name in the schema 75 :returns: ``True`` if the specified field is read-only, 76 ``False`` otherwise. 77 :rtype: bool 78 """ 79 return field in cls.get_option("readonly_fields") 80 81 def schema_type(self): 82 if self.get_option("preserve_unknown") is True: 83 unknown = 'preserve' 84 else: 85 unknown = 'ignore' 86 return colander.Mapping(unknown=unknown) 87 88 89 class PermissionsSchema(colander.SchemaNode): 90 """A permission mapping defines ACEs. 91 92 It has permission names as keys and principals as values. 93 94 :: 95 96 { 97 "write": ["fxa:af3e077eb9f5444a949ad65aa86e82ff"], 98 "groups:create": ["fxa:70a9335eecfe440fa445ba752a750f3d"] 99 } 100 101 """ 102 103 def __init__(self, *args, **kwargs): 104 self.known_perms = kwargs.pop('permissions', tuple()) 105 super(PermissionsSchema, self).__init__(*args, **kwargs) 106 107 for perm in self.known_perms: 108 self[perm] = self._get_node_principals(perm) 109 110 def schema_type(self): 111 if self.known_perms: 112 return colander.Mapping(unknown='raise') 113 else: 114 return colander.Mapping(unknown='preserve') 115 116 def deserialize(self, cstruct=colander.null): 117 118 # If permissions are not a mapping (e.g null or invalid), try deserializing 119 if not isinstance(cstruct, dict): 120 return super(PermissionsSchema, self).deserialize(cstruct) 121 122 # If permissions are listed, check fields and produce fancy error messages 123 if self.known_perms: 124 for perm in cstruct: 125 colander.OneOf(choices=self.known_perms)(self, perm) 126 return super(PermissionsSchema, self).deserialize(cstruct) 127 128 # Else deserialize the fields that are not on the schema 129 permissions = {} 130 perm_schema = colander.SequenceSchema(colander.SchemaNode(colander.String())) 131 for perm, principals in cstruct.items(): 132 permissions[perm] = perm_schema.deserialize(principals) 133 134 return permissions 135 136 def _get_node_principals(self, perm): 137 principal = colander.SchemaNode(colander.String()) 138 return colander.SchemaNode(colander.Sequence(), principal, name=perm, 139 missing=colander.drop) 140 141 142 # Header schemas 143 144 145 class HeaderSchema(colander.MappingSchema): 146 """Base schema used for validating and deserializing request headers. """ 147 148 missing = colander.drop 149 150 if_match = HeaderQuotedInteger(name='If-Match') 151 if_none_match = HeaderQuotedInteger(name='If-None-Match') 152 153 @staticmethod 154 def schema_type(): 155 return colander.Mapping(unknown='preserve') 156 157 158 class PatchHeaderSchema(HeaderSchema): 159 """Header schema used with PATCH requests.""" 160 161 def response_behavior_validator(): 162 return colander.OneOf(['full', 'light', 'diff']) 163 164 response_behaviour = HeaderField(colander.String(), name='Response-Behavior', 165 validator=response_behavior_validator()) 166 167 168 # Querystring schemas 169 170 171 class QuerySchema(colander.MappingSchema): 172 """ 173 Schema used for validating and deserializing querystrings. It will include 174 and try to guess the type of unknown fields (field filters) on deserialization. 175 """ 176 missing = colander.drop 177 178 @staticmethod 179 def schema_type(): 180 return colander.Mapping(unknown='ignore') 181 182 def deserialize(self, cstruct=colander.null): 183 """ 184 Deserialize and validate the QuerySchema fields and try to deserialize and 185 get the native value of additional filds (field filters) that may be present 186 on the cstruct. 187 188 e.g:: ?exclude_id=a,b&deleted=true -> {'exclude_id': ['a', 'b'], deleted: True} 189 """ 190 values = {} 191 192 schema_values = super(QuerySchema, self).deserialize(cstruct) 193 if schema_values is colander.drop: 194 return schema_values 195 196 # Deserialize querystring field filters (see docstring e.g) 197 for k, v in cstruct.items(): 198 # Deserialize lists used on in_ and exclude_ filters 199 if k.startswith('in_') or k.startswith('exclude_'): 200 as_list = FieldList().deserialize(v) 201 if isinstance(as_list, list): 202 values[k] = [native_value(v) for v in as_list] 203 else: 204 values[k] = native_value(v) 205 206 values.update(schema_values) 207 return values 208 209 210 class CollectionQuerySchema(QuerySchema): 211 """Querystring schema used with collections.""" 212 213 _limit = QueryField(colander.Integer()) 214 _sort = FieldList() 215 _token = QueryField(colander.String()) 216 _since = QueryField(colander.Integer()) 217 _to = QueryField(colander.Integer()) 218 _before = QueryField(colander.Integer()) 219 id = QueryField(colander.String()) 220 last_modified = QueryField(colander.Integer()) 221 222 223 class RecordGetQuerySchema(QuerySchema): 224 """Querystring schema for GET record requests.""" 225 226 _fields = FieldList() 227 228 229 class CollectionGetQuerySchema(CollectionQuerySchema): 230 """Querystring schema for GET collection requests.""" 231 232 _fields = FieldList() 233 234 235 # Body Schemas 236 237 238 class RecordSchema(colander.MappingSchema): 239 240 @colander.deferred 241 def data(node, kwargs): 242 data = kwargs.get('data') 243 if data: 244 # Check if empty record is allowed. 245 # (e.g every schema fields have defaults) 246 try: 247 data.deserialize({}) 248 except colander.Invalid: 249 pass 250 else: 251 data.default = {} 252 data.missing = colander.drop 253 return data 254 255 @colander.deferred 256 def permissions(node, kwargs): 257 def get_perms(node, kwargs): 258 return kwargs.get('permissions') 259 # Set if node is provided, else keep deferred. This allows binding the body 260 # on Resource first and bind permissions later if using SharableResource. 261 return get_perms(node, kwargs) or colander.deferred(get_perms) 262 263 @staticmethod 264 def schema_type(): 265 return colander.Mapping(unknown='raise') 266 267 268 class JsonPatchOperationSchema(colander.MappingSchema): 269 """Single JSON Patch Operation.""" 270 271 def op_validator(): 272 op_values = ['test', 'add', 'remove', 'replace', 'move', 'copy'] 273 return colander.OneOf(op_values) 274 275 def path_validator(): 276 return colander.Regex('(/\w*)+') 277 278 op = colander.SchemaNode(colander.String(), validator=op_validator()) 279 path = colander.SchemaNode(colander.String(), validator=path_validator()) 280 from_ = colander.SchemaNode(colander.String(), name='from', 281 validator=path_validator(), missing=colander.drop) 282 value = colander.SchemaNode(Any(), missing=colander.drop) 283 284 @staticmethod 285 def schema_type(): 286 return colander.Mapping(unknown='raise') 287 288 289 class JsonPatchBodySchema(colander.SequenceSchema): 290 """Body used with JSON Patch (application/json-patch+json) as in RFC 6902.""" 291 292 operations = JsonPatchOperationSchema(missing=colander.drop) 293 294 295 # Request schemas 296 297 298 class RequestSchema(colander.MappingSchema): 299 """Base schema for kinto requests.""" 300 301 @colander.deferred 302 def header(node, kwargs): 303 return kwargs.get('header') 304 305 @colander.deferred 306 def querystring(node, kwargs): 307 return kwargs.get('querystring') 308 309 def after_bind(self, node, kw): 310 # Set default bindings 311 if not self.get('header'): 312 self['header'] = HeaderSchema() 313 if not self.get('querystring'): 314 self['querystring'] = QuerySchema() 315 316 317 class PayloadRequestSchema(RequestSchema): 318 """Base schema for methods that use a JSON request body.""" 319 320 @colander.deferred 321 def body(node, kwargs): 322 def get_body(node, kwargs): 323 return kwargs.get('body') 324 # Set if node is provided, else keep deferred (and allow bindind later) 325 return get_body(node, kwargs) or colander.deferred(get_body) 326 327 328 class JsonPatchRequestSchema(RequestSchema): 329 """JSON Patch (application/json-patch+json) request schema.""" 330 331 body = JsonPatchBodySchema() 332 querystring = QuerySchema() 333 header = PatchHeaderSchema() 334 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kinto/core/resource/schema.py b/kinto/core/resource/schema.py --- a/kinto/core/resource/schema.py +++ b/kinto/core/resource/schema.py @@ -1,3 +1,4 @@ +from __future__ import division import warnings import colander @@ -6,6 +7,10 @@ FieldList, TimeStamp, URL) from kinto.core.utils import native_value +POSTGRESQL_MAX_INTEGER_VALUE = 2**64 // 2 + +positive_big_integer = colander.Range(min=0, max=POSTGRESQL_MAX_INTEGER_VALUE) + class TimeStamp(TimeStamp): """This schema is deprecated, you shoud use `kinto.core.schema.TimeStamp` instead.""" @@ -210,14 +215,14 @@ class CollectionQuerySchema(QuerySchema): """Querystring schema used with collections.""" - _limit = QueryField(colander.Integer()) + _limit = QueryField(colander.Integer(), validator=positive_big_integer) _sort = FieldList() _token = QueryField(colander.String()) - _since = QueryField(colander.Integer()) - _to = QueryField(colander.Integer()) - _before = QueryField(colander.Integer()) + _since = QueryField(colander.Integer(), validator=positive_big_integer) + _to = QueryField(colander.Integer(), validator=positive_big_integer) + _before = QueryField(colander.Integer(), validator=positive_big_integer) id = QueryField(colander.String()) - last_modified = QueryField(colander.Integer()) + last_modified = QueryField(colander.Integer(), validator=positive_big_integer) class RecordGetQuerySchema(QuerySchema):
{"golden_diff": "diff --git a/kinto/core/resource/schema.py b/kinto/core/resource/schema.py\n--- a/kinto/core/resource/schema.py\n+++ b/kinto/core/resource/schema.py\n@@ -1,3 +1,4 @@\n+from __future__ import division\n import warnings\n \n import colander\n@@ -6,6 +7,10 @@\n FieldList, TimeStamp, URL)\n from kinto.core.utils import native_value\n \n+POSTGRESQL_MAX_INTEGER_VALUE = 2**64 // 2\n+\n+positive_big_integer = colander.Range(min=0, max=POSTGRESQL_MAX_INTEGER_VALUE)\n+\n \n class TimeStamp(TimeStamp):\n \"\"\"This schema is deprecated, you shoud use `kinto.core.schema.TimeStamp` instead.\"\"\"\n@@ -210,14 +215,14 @@\n class CollectionQuerySchema(QuerySchema):\n \"\"\"Querystring schema used with collections.\"\"\"\n \n- _limit = QueryField(colander.Integer())\n+ _limit = QueryField(colander.Integer(), validator=positive_big_integer)\n _sort = FieldList()\n _token = QueryField(colander.String())\n- _since = QueryField(colander.Integer())\n- _to = QueryField(colander.Integer())\n- _before = QueryField(colander.Integer())\n+ _since = QueryField(colander.Integer(), validator=positive_big_integer)\n+ _to = QueryField(colander.Integer(), validator=positive_big_integer)\n+ _before = QueryField(colander.Integer(), validator=positive_big_integer)\n id = QueryField(colander.String())\n- last_modified = QueryField(colander.Integer())\n+ last_modified = QueryField(colander.Integer(), validator=positive_big_integer)\n \n \n class RecordGetQuerySchema(QuerySchema):\n", "issue": "_since overflow error: long too big to convert\nWhen since is too big with the Postgresql backend it fails with an OverflowError: https://sentry.prod.mozaws.net/operations/kinto-prod-1/issues/382250/\n", "before_files": [{"content": "import warnings\n\nimport colander\n\nfrom kinto.core.schema import (Any, HeaderField, QueryField, HeaderQuotedInteger,\n FieldList, TimeStamp, URL)\nfrom kinto.core.utils import native_value\n\n\nclass TimeStamp(TimeStamp):\n \"\"\"This schema is deprecated, you shoud use `kinto.core.schema.TimeStamp` instead.\"\"\"\n\n def __init__(self, *args, **kwargs):\n message = (\"`kinto.core.resource.schema.TimeStamp` is deprecated, \"\n \"use `kinto.core.schema.TimeStamp` instead.\")\n warnings.warn(message, DeprecationWarning)\n super(TimeStamp, self).__init__(*args, **kwargs)\n\n\nclass URL(URL):\n \"\"\"This schema is deprecated, you shoud use `kinto.core.schema.URL` instead.\"\"\"\n\n def __init__(self, *args, **kwargs):\n message = (\"`kinto.core.resource.schema.URL` is deprecated, \"\n \"use `kinto.core.schema.URL` instead.\")\n warnings.warn(message, DeprecationWarning)\n super(URL, self).__init__(*args, **kwargs)\n\n\n# Resource related schemas\n\n\nclass ResourceSchema(colander.MappingSchema):\n \"\"\"Base resource schema, with *Cliquet* specific built-in options.\"\"\"\n\n class Options:\n \"\"\"\n Resource schema options.\n\n This is meant to be overriden for changing values:\n\n .. code-block:: python\n\n class Product(ResourceSchema):\n reference = colander.SchemaNode(colander.String())\n\n class Options:\n readonly_fields = ('reference',)\n \"\"\"\n readonly_fields = tuple()\n \"\"\"Fields that cannot be updated. Values for fields will have to be\n provided either during record creation, through default values using\n ``missing`` attribute or implementing a custom logic in\n :meth:`kinto.core.resource.UserResource.process_record`.\n \"\"\"\n\n preserve_unknown = True\n \"\"\"Define if unknown fields should be preserved or not.\n\n The resource is schema-less by default. In other words, any field name\n will be accepted on records. Set this to ``False`` in order to limit\n the accepted fields to the ones defined in the schema.\n \"\"\"\n\n @classmethod\n def get_option(cls, attr):\n default_value = getattr(ResourceSchema.Options, attr)\n return getattr(cls.Options, attr, default_value)\n\n @classmethod\n def is_readonly(cls, field):\n \"\"\"Return True if specified field name is read-only.\n\n :param str field: the field name in the schema\n :returns: ``True`` if the specified field is read-only,\n ``False`` otherwise.\n :rtype: bool\n \"\"\"\n return field in cls.get_option(\"readonly_fields\")\n\n def schema_type(self):\n if self.get_option(\"preserve_unknown\") is True:\n unknown = 'preserve'\n else:\n unknown = 'ignore'\n return colander.Mapping(unknown=unknown)\n\n\nclass PermissionsSchema(colander.SchemaNode):\n \"\"\"A permission mapping defines ACEs.\n\n It has permission names as keys and principals as values.\n\n ::\n\n {\n \"write\": [\"fxa:af3e077eb9f5444a949ad65aa86e82ff\"],\n \"groups:create\": [\"fxa:70a9335eecfe440fa445ba752a750f3d\"]\n }\n\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self.known_perms = kwargs.pop('permissions', tuple())\n super(PermissionsSchema, self).__init__(*args, **kwargs)\n\n for perm in self.known_perms:\n self[perm] = self._get_node_principals(perm)\n\n def schema_type(self):\n if self.known_perms:\n return colander.Mapping(unknown='raise')\n else:\n return colander.Mapping(unknown='preserve')\n\n def deserialize(self, cstruct=colander.null):\n\n # If permissions are not a mapping (e.g null or invalid), try deserializing\n if not isinstance(cstruct, dict):\n return super(PermissionsSchema, self).deserialize(cstruct)\n\n # If permissions are listed, check fields and produce fancy error messages\n if self.known_perms:\n for perm in cstruct:\n colander.OneOf(choices=self.known_perms)(self, perm)\n return super(PermissionsSchema, self).deserialize(cstruct)\n\n # Else deserialize the fields that are not on the schema\n permissions = {}\n perm_schema = colander.SequenceSchema(colander.SchemaNode(colander.String()))\n for perm, principals in cstruct.items():\n permissions[perm] = perm_schema.deserialize(principals)\n\n return permissions\n\n def _get_node_principals(self, perm):\n principal = colander.SchemaNode(colander.String())\n return colander.SchemaNode(colander.Sequence(), principal, name=perm,\n missing=colander.drop)\n\n\n# Header schemas\n\n\nclass HeaderSchema(colander.MappingSchema):\n \"\"\"Base schema used for validating and deserializing request headers. \"\"\"\n\n missing = colander.drop\n\n if_match = HeaderQuotedInteger(name='If-Match')\n if_none_match = HeaderQuotedInteger(name='If-None-Match')\n\n @staticmethod\n def schema_type():\n return colander.Mapping(unknown='preserve')\n\n\nclass PatchHeaderSchema(HeaderSchema):\n \"\"\"Header schema used with PATCH requests.\"\"\"\n\n def response_behavior_validator():\n return colander.OneOf(['full', 'light', 'diff'])\n\n response_behaviour = HeaderField(colander.String(), name='Response-Behavior',\n validator=response_behavior_validator())\n\n\n# Querystring schemas\n\n\nclass QuerySchema(colander.MappingSchema):\n \"\"\"\n Schema used for validating and deserializing querystrings. It will include\n and try to guess the type of unknown fields (field filters) on deserialization.\n \"\"\"\n missing = colander.drop\n\n @staticmethod\n def schema_type():\n return colander.Mapping(unknown='ignore')\n\n def deserialize(self, cstruct=colander.null):\n \"\"\"\n Deserialize and validate the QuerySchema fields and try to deserialize and\n get the native value of additional filds (field filters) that may be present\n on the cstruct.\n\n e.g:: ?exclude_id=a,b&deleted=true -> {'exclude_id': ['a', 'b'], deleted: True}\n \"\"\"\n values = {}\n\n schema_values = super(QuerySchema, self).deserialize(cstruct)\n if schema_values is colander.drop:\n return schema_values\n\n # Deserialize querystring field filters (see docstring e.g)\n for k, v in cstruct.items():\n # Deserialize lists used on in_ and exclude_ filters\n if k.startswith('in_') or k.startswith('exclude_'):\n as_list = FieldList().deserialize(v)\n if isinstance(as_list, list):\n values[k] = [native_value(v) for v in as_list]\n else:\n values[k] = native_value(v)\n\n values.update(schema_values)\n return values\n\n\nclass CollectionQuerySchema(QuerySchema):\n \"\"\"Querystring schema used with collections.\"\"\"\n\n _limit = QueryField(colander.Integer())\n _sort = FieldList()\n _token = QueryField(colander.String())\n _since = QueryField(colander.Integer())\n _to = QueryField(colander.Integer())\n _before = QueryField(colander.Integer())\n id = QueryField(colander.String())\n last_modified = QueryField(colander.Integer())\n\n\nclass RecordGetQuerySchema(QuerySchema):\n \"\"\"Querystring schema for GET record requests.\"\"\"\n\n _fields = FieldList()\n\n\nclass CollectionGetQuerySchema(CollectionQuerySchema):\n \"\"\"Querystring schema for GET collection requests.\"\"\"\n\n _fields = FieldList()\n\n\n# Body Schemas\n\n\nclass RecordSchema(colander.MappingSchema):\n\n @colander.deferred\n def data(node, kwargs):\n data = kwargs.get('data')\n if data:\n # Check if empty record is allowed.\n # (e.g every schema fields have defaults)\n try:\n data.deserialize({})\n except colander.Invalid:\n pass\n else:\n data.default = {}\n data.missing = colander.drop\n return data\n\n @colander.deferred\n def permissions(node, kwargs):\n def get_perms(node, kwargs):\n return kwargs.get('permissions')\n # Set if node is provided, else keep deferred. This allows binding the body\n # on Resource first and bind permissions later if using SharableResource.\n return get_perms(node, kwargs) or colander.deferred(get_perms)\n\n @staticmethod\n def schema_type():\n return colander.Mapping(unknown='raise')\n\n\nclass JsonPatchOperationSchema(colander.MappingSchema):\n \"\"\"Single JSON Patch Operation.\"\"\"\n\n def op_validator():\n op_values = ['test', 'add', 'remove', 'replace', 'move', 'copy']\n return colander.OneOf(op_values)\n\n def path_validator():\n return colander.Regex('(/\\w*)+')\n\n op = colander.SchemaNode(colander.String(), validator=op_validator())\n path = colander.SchemaNode(colander.String(), validator=path_validator())\n from_ = colander.SchemaNode(colander.String(), name='from',\n validator=path_validator(), missing=colander.drop)\n value = colander.SchemaNode(Any(), missing=colander.drop)\n\n @staticmethod\n def schema_type():\n return colander.Mapping(unknown='raise')\n\n\nclass JsonPatchBodySchema(colander.SequenceSchema):\n \"\"\"Body used with JSON Patch (application/json-patch+json) as in RFC 6902.\"\"\"\n\n operations = JsonPatchOperationSchema(missing=colander.drop)\n\n\n# Request schemas\n\n\nclass RequestSchema(colander.MappingSchema):\n \"\"\"Base schema for kinto requests.\"\"\"\n\n @colander.deferred\n def header(node, kwargs):\n return kwargs.get('header')\n\n @colander.deferred\n def querystring(node, kwargs):\n return kwargs.get('querystring')\n\n def after_bind(self, node, kw):\n # Set default bindings\n if not self.get('header'):\n self['header'] = HeaderSchema()\n if not self.get('querystring'):\n self['querystring'] = QuerySchema()\n\n\nclass PayloadRequestSchema(RequestSchema):\n \"\"\"Base schema for methods that use a JSON request body.\"\"\"\n\n @colander.deferred\n def body(node, kwargs):\n def get_body(node, kwargs):\n return kwargs.get('body')\n # Set if node is provided, else keep deferred (and allow bindind later)\n return get_body(node, kwargs) or colander.deferred(get_body)\n\n\nclass JsonPatchRequestSchema(RequestSchema):\n \"\"\"JSON Patch (application/json-patch+json) request schema.\"\"\"\n\n body = JsonPatchBodySchema()\n querystring = QuerySchema()\n header = PatchHeaderSchema()\n", "path": "kinto/core/resource/schema.py"}], "after_files": [{"content": "from __future__ import division\nimport warnings\n\nimport colander\n\nfrom kinto.core.schema import (Any, HeaderField, QueryField, HeaderQuotedInteger,\n FieldList, TimeStamp, URL)\nfrom kinto.core.utils import native_value\n\nPOSTGRESQL_MAX_INTEGER_VALUE = 2**64 // 2\n\npositive_big_integer = colander.Range(min=0, max=POSTGRESQL_MAX_INTEGER_VALUE)\n\n\nclass TimeStamp(TimeStamp):\n \"\"\"This schema is deprecated, you shoud use `kinto.core.schema.TimeStamp` instead.\"\"\"\n\n def __init__(self, *args, **kwargs):\n message = (\"`kinto.core.resource.schema.TimeStamp` is deprecated, \"\n \"use `kinto.core.schema.TimeStamp` instead.\")\n warnings.warn(message, DeprecationWarning)\n super(TimeStamp, self).__init__(*args, **kwargs)\n\n\nclass URL(URL):\n \"\"\"This schema is deprecated, you shoud use `kinto.core.schema.URL` instead.\"\"\"\n\n def __init__(self, *args, **kwargs):\n message = (\"`kinto.core.resource.schema.URL` is deprecated, \"\n \"use `kinto.core.schema.URL` instead.\")\n warnings.warn(message, DeprecationWarning)\n super(URL, self).__init__(*args, **kwargs)\n\n\n# Resource related schemas\n\n\nclass ResourceSchema(colander.MappingSchema):\n \"\"\"Base resource schema, with *Cliquet* specific built-in options.\"\"\"\n\n class Options:\n \"\"\"\n Resource schema options.\n\n This is meant to be overriden for changing values:\n\n .. code-block:: python\n\n class Product(ResourceSchema):\n reference = colander.SchemaNode(colander.String())\n\n class Options:\n readonly_fields = ('reference',)\n \"\"\"\n readonly_fields = tuple()\n \"\"\"Fields that cannot be updated. Values for fields will have to be\n provided either during record creation, through default values using\n ``missing`` attribute or implementing a custom logic in\n :meth:`kinto.core.resource.UserResource.process_record`.\n \"\"\"\n\n preserve_unknown = True\n \"\"\"Define if unknown fields should be preserved or not.\n\n The resource is schema-less by default. In other words, any field name\n will be accepted on records. Set this to ``False`` in order to limit\n the accepted fields to the ones defined in the schema.\n \"\"\"\n\n @classmethod\n def get_option(cls, attr):\n default_value = getattr(ResourceSchema.Options, attr)\n return getattr(cls.Options, attr, default_value)\n\n @classmethod\n def is_readonly(cls, field):\n \"\"\"Return True if specified field name is read-only.\n\n :param str field: the field name in the schema\n :returns: ``True`` if the specified field is read-only,\n ``False`` otherwise.\n :rtype: bool\n \"\"\"\n return field in cls.get_option(\"readonly_fields\")\n\n def schema_type(self):\n if self.get_option(\"preserve_unknown\") is True:\n unknown = 'preserve'\n else:\n unknown = 'ignore'\n return colander.Mapping(unknown=unknown)\n\n\nclass PermissionsSchema(colander.SchemaNode):\n \"\"\"A permission mapping defines ACEs.\n\n It has permission names as keys and principals as values.\n\n ::\n\n {\n \"write\": [\"fxa:af3e077eb9f5444a949ad65aa86e82ff\"],\n \"groups:create\": [\"fxa:70a9335eecfe440fa445ba752a750f3d\"]\n }\n\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self.known_perms = kwargs.pop('permissions', tuple())\n super(PermissionsSchema, self).__init__(*args, **kwargs)\n\n for perm in self.known_perms:\n self[perm] = self._get_node_principals(perm)\n\n def schema_type(self):\n if self.known_perms:\n return colander.Mapping(unknown='raise')\n else:\n return colander.Mapping(unknown='preserve')\n\n def deserialize(self, cstruct=colander.null):\n\n # If permissions are not a mapping (e.g null or invalid), try deserializing\n if not isinstance(cstruct, dict):\n return super(PermissionsSchema, self).deserialize(cstruct)\n\n # If permissions are listed, check fields and produce fancy error messages\n if self.known_perms:\n for perm in cstruct:\n colander.OneOf(choices=self.known_perms)(self, perm)\n return super(PermissionsSchema, self).deserialize(cstruct)\n\n # Else deserialize the fields that are not on the schema\n permissions = {}\n perm_schema = colander.SequenceSchema(colander.SchemaNode(colander.String()))\n for perm, principals in cstruct.items():\n permissions[perm] = perm_schema.deserialize(principals)\n\n return permissions\n\n def _get_node_principals(self, perm):\n principal = colander.SchemaNode(colander.String())\n return colander.SchemaNode(colander.Sequence(), principal, name=perm,\n missing=colander.drop)\n\n\n# Header schemas\n\n\nclass HeaderSchema(colander.MappingSchema):\n \"\"\"Base schema used for validating and deserializing request headers. \"\"\"\n\n missing = colander.drop\n\n if_match = HeaderQuotedInteger(name='If-Match')\n if_none_match = HeaderQuotedInteger(name='If-None-Match')\n\n @staticmethod\n def schema_type():\n return colander.Mapping(unknown='preserve')\n\n\nclass PatchHeaderSchema(HeaderSchema):\n \"\"\"Header schema used with PATCH requests.\"\"\"\n\n def response_behavior_validator():\n return colander.OneOf(['full', 'light', 'diff'])\n\n response_behaviour = HeaderField(colander.String(), name='Response-Behavior',\n validator=response_behavior_validator())\n\n\n# Querystring schemas\n\n\nclass QuerySchema(colander.MappingSchema):\n \"\"\"\n Schema used for validating and deserializing querystrings. It will include\n and try to guess the type of unknown fields (field filters) on deserialization.\n \"\"\"\n missing = colander.drop\n\n @staticmethod\n def schema_type():\n return colander.Mapping(unknown='ignore')\n\n def deserialize(self, cstruct=colander.null):\n \"\"\"\n Deserialize and validate the QuerySchema fields and try to deserialize and\n get the native value of additional filds (field filters) that may be present\n on the cstruct.\n\n e.g:: ?exclude_id=a,b&deleted=true -> {'exclude_id': ['a', 'b'], deleted: True}\n \"\"\"\n values = {}\n\n schema_values = super(QuerySchema, self).deserialize(cstruct)\n if schema_values is colander.drop:\n return schema_values\n\n # Deserialize querystring field filters (see docstring e.g)\n for k, v in cstruct.items():\n # Deserialize lists used on in_ and exclude_ filters\n if k.startswith('in_') or k.startswith('exclude_'):\n as_list = FieldList().deserialize(v)\n if isinstance(as_list, list):\n values[k] = [native_value(v) for v in as_list]\n else:\n values[k] = native_value(v)\n\n values.update(schema_values)\n return values\n\n\nclass CollectionQuerySchema(QuerySchema):\n \"\"\"Querystring schema used with collections.\"\"\"\n\n _limit = QueryField(colander.Integer(), validator=positive_big_integer)\n _sort = FieldList()\n _token = QueryField(colander.String())\n _since = QueryField(colander.Integer(), validator=positive_big_integer)\n _to = QueryField(colander.Integer(), validator=positive_big_integer)\n _before = QueryField(colander.Integer(), validator=positive_big_integer)\n id = QueryField(colander.String())\n last_modified = QueryField(colander.Integer(), validator=positive_big_integer)\n\n\nclass RecordGetQuerySchema(QuerySchema):\n \"\"\"Querystring schema for GET record requests.\"\"\"\n\n _fields = FieldList()\n\n\nclass CollectionGetQuerySchema(CollectionQuerySchema):\n \"\"\"Querystring schema for GET collection requests.\"\"\"\n\n _fields = FieldList()\n\n\n# Body Schemas\n\n\nclass RecordSchema(colander.MappingSchema):\n\n @colander.deferred\n def data(node, kwargs):\n data = kwargs.get('data')\n if data:\n # Check if empty record is allowed.\n # (e.g every schema fields have defaults)\n try:\n data.deserialize({})\n except colander.Invalid:\n pass\n else:\n data.default = {}\n data.missing = colander.drop\n return data\n\n @colander.deferred\n def permissions(node, kwargs):\n def get_perms(node, kwargs):\n return kwargs.get('permissions')\n # Set if node is provided, else keep deferred. This allows binding the body\n # on Resource first and bind permissions later if using SharableResource.\n return get_perms(node, kwargs) or colander.deferred(get_perms)\n\n @staticmethod\n def schema_type():\n return colander.Mapping(unknown='raise')\n\n\nclass JsonPatchOperationSchema(colander.MappingSchema):\n \"\"\"Single JSON Patch Operation.\"\"\"\n\n def op_validator():\n op_values = ['test', 'add', 'remove', 'replace', 'move', 'copy']\n return colander.OneOf(op_values)\n\n def path_validator():\n return colander.Regex('(/\\w*)+')\n\n op = colander.SchemaNode(colander.String(), validator=op_validator())\n path = colander.SchemaNode(colander.String(), validator=path_validator())\n from_ = colander.SchemaNode(colander.String(), name='from',\n validator=path_validator(), missing=colander.drop)\n value = colander.SchemaNode(Any(), missing=colander.drop)\n\n @staticmethod\n def schema_type():\n return colander.Mapping(unknown='raise')\n\n\nclass JsonPatchBodySchema(colander.SequenceSchema):\n \"\"\"Body used with JSON Patch (application/json-patch+json) as in RFC 6902.\"\"\"\n\n operations = JsonPatchOperationSchema(missing=colander.drop)\n\n\n# Request schemas\n\n\nclass RequestSchema(colander.MappingSchema):\n \"\"\"Base schema for kinto requests.\"\"\"\n\n @colander.deferred\n def header(node, kwargs):\n return kwargs.get('header')\n\n @colander.deferred\n def querystring(node, kwargs):\n return kwargs.get('querystring')\n\n def after_bind(self, node, kw):\n # Set default bindings\n if not self.get('header'):\n self['header'] = HeaderSchema()\n if not self.get('querystring'):\n self['querystring'] = QuerySchema()\n\n\nclass PayloadRequestSchema(RequestSchema):\n \"\"\"Base schema for methods that use a JSON request body.\"\"\"\n\n @colander.deferred\n def body(node, kwargs):\n def get_body(node, kwargs):\n return kwargs.get('body')\n # Set if node is provided, else keep deferred (and allow bindind later)\n return get_body(node, kwargs) or colander.deferred(get_body)\n\n\nclass JsonPatchRequestSchema(RequestSchema):\n \"\"\"JSON Patch (application/json-patch+json) request schema.\"\"\"\n\n body = JsonPatchBodySchema()\n querystring = QuerySchema()\n header = PatchHeaderSchema()\n", "path": "kinto/core/resource/schema.py"}]}
3,571
366
gh_patches_debug_30939
rasdani/github-patches
git_diff
keras-team__keras-nlp-357
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Improve our continuous testing for model presets Opening an issue to track the changes proposed on https://github.com/keras-team/keras-nlp/pull/357, as it has gotten slightly larger in scope. I would like to propose the following changes to our "network_tests" for presets: - We collocate the preset testing within the model directory, and use test annotations to control how they are run. - We run the smallest available preset (per model) continuously on GCP, so we get some automated coverage for our preset code. - We actually test the output of our smallest available preset (with a relaxed float tolerance), so we can catch code updates that would break our checkpoints. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `keras_nlp/conftest.py` Content: ``` 1 # Copyright 2022 The KerasNLP Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # https://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 import sys 15 16 import pytest 17 18 19 def pytest_addoption(parser): 20 parser.addoption( 21 "--runslow", action="store_true", default=False, help="run slow tests" 22 ) 23 24 25 def pytest_configure(config): 26 config.addinivalue_line("markers", "slow: mark test as slow to run") 27 28 29 def pytest_collection_modifyitems(config, items): 30 if config.getoption("--runslow"): 31 # --runslow given in cli: do not skip slow tests 32 return 33 skip_slow = pytest.mark.skip(reason="need --runslow option to run") 34 skip_xla = pytest.mark.skipif( 35 sys.platform == "darwin", reason="XLA unsupported on MacOS." 36 ) 37 38 for item in items: 39 if "slow" in item.keywords: 40 item.add_marker(skip_slow) 41 if "jit_compile_true" in item.name: 42 item.add_marker(skip_xla) 43 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/keras_nlp/conftest.py b/keras_nlp/conftest.py --- a/keras_nlp/conftest.py +++ b/keras_nlp/conftest.py @@ -18,25 +18,48 @@ def pytest_addoption(parser): parser.addoption( - "--runslow", action="store_true", default=False, help="run slow tests" + "--run_large", + action="store_true", + default=False, + help="run large tests", + ) + parser.addoption( + "--run_extra_large", + action="store_true", + default=False, + help="run extra_large tests", ) def pytest_configure(config): - config.addinivalue_line("markers", "slow: mark test as slow to run") + config.addinivalue_line( + "markers", "large: mark test as being slow or requiring a network" + ) + config.addinivalue_line( + "markers", + "extra_large: mark test as being too large to run continuously", + ) def pytest_collection_modifyitems(config, items): - if config.getoption("--runslow"): - # --runslow given in cli: do not skip slow tests - return - skip_slow = pytest.mark.skip(reason="need --runslow option to run") + run_extra_large_tests = config.getoption("--run_extra_large") + # Run large tests for --run_extra_large or --run_large. + run_large_tests = config.getoption("--run_large") or run_extra_large_tests + + # Messages to annotate skipped tests with. skip_xla = pytest.mark.skipif( sys.platform == "darwin", reason="XLA unsupported on MacOS." ) - + skip_large = pytest.mark.skipif( + not run_large_tests, reason="need --run_large option to run" + ) + skip_extra_large = pytest.mark.skipif( + not run_extra_large_tests, reason="need --run_extra_large option to run" + ) for item in items: - if "slow" in item.keywords: - item.add_marker(skip_slow) if "jit_compile_true" in item.name: item.add_marker(skip_xla) + if "large" in item.keywords: + item.add_marker(skip_large) + if "extra_large" in item.keywords: + item.add_marker(skip_extra_large)
{"golden_diff": "diff --git a/keras_nlp/conftest.py b/keras_nlp/conftest.py\n--- a/keras_nlp/conftest.py\n+++ b/keras_nlp/conftest.py\n@@ -18,25 +18,48 @@\n \n def pytest_addoption(parser):\n parser.addoption(\n- \"--runslow\", action=\"store_true\", default=False, help=\"run slow tests\"\n+ \"--run_large\",\n+ action=\"store_true\",\n+ default=False,\n+ help=\"run large tests\",\n+ )\n+ parser.addoption(\n+ \"--run_extra_large\",\n+ action=\"store_true\",\n+ default=False,\n+ help=\"run extra_large tests\",\n )\n \n \n def pytest_configure(config):\n- config.addinivalue_line(\"markers\", \"slow: mark test as slow to run\")\n+ config.addinivalue_line(\n+ \"markers\", \"large: mark test as being slow or requiring a network\"\n+ )\n+ config.addinivalue_line(\n+ \"markers\",\n+ \"extra_large: mark test as being too large to run continuously\",\n+ )\n \n \n def pytest_collection_modifyitems(config, items):\n- if config.getoption(\"--runslow\"):\n- # --runslow given in cli: do not skip slow tests\n- return\n- skip_slow = pytest.mark.skip(reason=\"need --runslow option to run\")\n+ run_extra_large_tests = config.getoption(\"--run_extra_large\")\n+ # Run large tests for --run_extra_large or --run_large.\n+ run_large_tests = config.getoption(\"--run_large\") or run_extra_large_tests\n+\n+ # Messages to annotate skipped tests with.\n skip_xla = pytest.mark.skipif(\n sys.platform == \"darwin\", reason=\"XLA unsupported on MacOS.\"\n )\n-\n+ skip_large = pytest.mark.skipif(\n+ not run_large_tests, reason=\"need --run_large option to run\"\n+ )\n+ skip_extra_large = pytest.mark.skipif(\n+ not run_extra_large_tests, reason=\"need --run_extra_large option to run\"\n+ )\n for item in items:\n- if \"slow\" in item.keywords:\n- item.add_marker(skip_slow)\n if \"jit_compile_true\" in item.name:\n item.add_marker(skip_xla)\n+ if \"large\" in item.keywords:\n+ item.add_marker(skip_large)\n+ if \"extra_large\" in item.keywords:\n+ item.add_marker(skip_extra_large)\n", "issue": "Improve our continuous testing for model presets\nOpening an issue to track the changes proposed on https://github.com/keras-team/keras-nlp/pull/357, as it has gotten slightly larger in scope.\r\n\r\nI would like to propose the following changes to our \"network_tests\" for presets:\r\n\r\n - We collocate the preset testing within the model directory, and use test annotations to control how they are run.\r\n - We run the smallest available preset (per model) continuously on GCP, so we get some automated coverage for our preset code.\r\n - We actually test the output of our smallest available preset (with a relaxed float tolerance), so we can catch code updates that would break our checkpoints.\n", "before_files": [{"content": "# Copyright 2022 The KerasNLP Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport sys\n\nimport pytest\n\n\ndef pytest_addoption(parser):\n parser.addoption(\n \"--runslow\", action=\"store_true\", default=False, help=\"run slow tests\"\n )\n\n\ndef pytest_configure(config):\n config.addinivalue_line(\"markers\", \"slow: mark test as slow to run\")\n\n\ndef pytest_collection_modifyitems(config, items):\n if config.getoption(\"--runslow\"):\n # --runslow given in cli: do not skip slow tests\n return\n skip_slow = pytest.mark.skip(reason=\"need --runslow option to run\")\n skip_xla = pytest.mark.skipif(\n sys.platform == \"darwin\", reason=\"XLA unsupported on MacOS.\"\n )\n\n for item in items:\n if \"slow\" in item.keywords:\n item.add_marker(skip_slow)\n if \"jit_compile_true\" in item.name:\n item.add_marker(skip_xla)\n", "path": "keras_nlp/conftest.py"}], "after_files": [{"content": "# Copyright 2022 The KerasNLP Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport sys\n\nimport pytest\n\n\ndef pytest_addoption(parser):\n parser.addoption(\n \"--run_large\",\n action=\"store_true\",\n default=False,\n help=\"run large tests\",\n )\n parser.addoption(\n \"--run_extra_large\",\n action=\"store_true\",\n default=False,\n help=\"run extra_large tests\",\n )\n\n\ndef pytest_configure(config):\n config.addinivalue_line(\n \"markers\", \"large: mark test as being slow or requiring a network\"\n )\n config.addinivalue_line(\n \"markers\",\n \"extra_large: mark test as being too large to run continuously\",\n )\n\n\ndef pytest_collection_modifyitems(config, items):\n run_extra_large_tests = config.getoption(\"--run_extra_large\")\n # Run large tests for --run_extra_large or --run_large.\n run_large_tests = config.getoption(\"--run_large\") or run_extra_large_tests\n\n # Messages to annotate skipped tests with.\n skip_xla = pytest.mark.skipif(\n sys.platform == \"darwin\", reason=\"XLA unsupported on MacOS.\"\n )\n skip_large = pytest.mark.skipif(\n not run_large_tests, reason=\"need --run_large option to run\"\n )\n skip_extra_large = pytest.mark.skipif(\n not run_extra_large_tests, reason=\"need --run_extra_large option to run\"\n )\n for item in items:\n if \"jit_compile_true\" in item.name:\n item.add_marker(skip_xla)\n if \"large\" in item.keywords:\n item.add_marker(skip_large)\n if \"extra_large\" in item.keywords:\n item.add_marker(skip_extra_large)\n", "path": "keras_nlp/conftest.py"}]}
809
540
gh_patches_debug_59678
rasdani/github-patches
git_diff
mozilla__bugbug-31
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Create a classifier to detect bugs that need QA Needed for https://github.com/mozilla/relman-auto-nag/issues/227. To do this, we'll need to collect some labels. We can automatically create some positive labels by getting bugs that have the `qawanted` keyword or that have `qe-verify` flag. We can't automatically create negative labels because we can't be sure that QA was not needed when the keyword/flag was not set. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bugbug/models/qaneeded.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # This Source Code Form is subject to the terms of the Mozilla Public 3 # License, v. 2.0. If a copy of the MPL was not distributed with this file, 4 # You can obtain one at http://mozilla.org/MPL/2.0/. 5 6 import xgboost 7 from sklearn.feature_extraction import DictVectorizer 8 from sklearn.pipeline import FeatureUnion 9 from sklearn.pipeline import Pipeline 10 11 from bugbug import bug_features 12 from bugbug import labels 13 from bugbug.model import Model 14 from bugbug.utils import DictSelector 15 16 17 class QANeededModel(Model): 18 def __init__(self, lemmatization=False): 19 Model.__init__(self, lemmatization) 20 21 self.classes = labels.get_qa_needed_labels() 22 23 feature_extractors = [ 24 bug_features.has_str(), 25 bug_features.has_regression_range(), 26 bug_features.severity(), 27 bug_features.keywords(), 28 bug_features.is_coverity_issue(), 29 bug_features.has_crash_signature(), 30 bug_features.has_url(), 31 bug_features.has_w3c_url(), 32 bug_features.has_github_url(), 33 bug_features.whiteboard(), 34 bug_features.patches(), 35 bug_features.landings(), 36 bug_features.title(), 37 bug_features.comments(), 38 ] 39 40 self.extraction_pipeline = Pipeline([ 41 ('bug_extractor', bug_features.BugExtractor(feature_extractors)), 42 ('union', FeatureUnion( 43 transformer_list=[ 44 ('data', Pipeline([ 45 ('selector', DictSelector(key='data')), 46 ('vect', DictVectorizer()), 47 ])), 48 49 ('title', Pipeline([ 50 ('selector', DictSelector(key='title')), 51 ('tfidf', self.text_vectorizer(stop_words='english')), 52 ])), 53 54 ('comments', Pipeline([ 55 ('selector', DictSelector(key='comments')), 56 ('tfidf', self.text_vectorizer(stop_words='english')), 57 ])), 58 ], 59 )), 60 ]) 61 62 self.clf = xgboost.XGBClassifier(n_jobs=16) 63 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bugbug/models/qaneeded.py b/bugbug/models/qaneeded.py --- a/bugbug/models/qaneeded.py +++ b/bugbug/models/qaneeded.py @@ -24,7 +24,7 @@ bug_features.has_str(), bug_features.has_regression_range(), bug_features.severity(), - bug_features.keywords(), + bug_features.keywords(set(['qawanted'])), bug_features.is_coverity_issue(), bug_features.has_crash_signature(), bug_features.has_url(),
{"golden_diff": "diff --git a/bugbug/models/qaneeded.py b/bugbug/models/qaneeded.py\n--- a/bugbug/models/qaneeded.py\n+++ b/bugbug/models/qaneeded.py\n@@ -24,7 +24,7 @@\n bug_features.has_str(),\n bug_features.has_regression_range(),\n bug_features.severity(),\n- bug_features.keywords(),\n+ bug_features.keywords(set(['qawanted'])),\n bug_features.is_coverity_issue(),\n bug_features.has_crash_signature(),\n bug_features.has_url(),\n", "issue": "Create a classifier to detect bugs that need QA\nNeeded for https://github.com/mozilla/relman-auto-nag/issues/227.\r\n\r\nTo do this, we'll need to collect some labels.\r\nWe can automatically create some positive labels by getting bugs that have the `qawanted` keyword or that have `qe-verify` flag.\r\nWe can't automatically create negative labels because we can't be sure that QA was not needed when the keyword/flag was not set.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport xgboost\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.pipeline import FeatureUnion\nfrom sklearn.pipeline import Pipeline\n\nfrom bugbug import bug_features\nfrom bugbug import labels\nfrom bugbug.model import Model\nfrom bugbug.utils import DictSelector\n\n\nclass QANeededModel(Model):\n def __init__(self, lemmatization=False):\n Model.__init__(self, lemmatization)\n\n self.classes = labels.get_qa_needed_labels()\n\n feature_extractors = [\n bug_features.has_str(),\n bug_features.has_regression_range(),\n bug_features.severity(),\n bug_features.keywords(),\n bug_features.is_coverity_issue(),\n bug_features.has_crash_signature(),\n bug_features.has_url(),\n bug_features.has_w3c_url(),\n bug_features.has_github_url(),\n bug_features.whiteboard(),\n bug_features.patches(),\n bug_features.landings(),\n bug_features.title(),\n bug_features.comments(),\n ]\n\n self.extraction_pipeline = Pipeline([\n ('bug_extractor', bug_features.BugExtractor(feature_extractors)),\n ('union', FeatureUnion(\n transformer_list=[\n ('data', Pipeline([\n ('selector', DictSelector(key='data')),\n ('vect', DictVectorizer()),\n ])),\n\n ('title', Pipeline([\n ('selector', DictSelector(key='title')),\n ('tfidf', self.text_vectorizer(stop_words='english')),\n ])),\n\n ('comments', Pipeline([\n ('selector', DictSelector(key='comments')),\n ('tfidf', self.text_vectorizer(stop_words='english')),\n ])),\n ],\n )),\n ])\n\n self.clf = xgboost.XGBClassifier(n_jobs=16)\n", "path": "bugbug/models/qaneeded.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport xgboost\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.pipeline import FeatureUnion\nfrom sklearn.pipeline import Pipeline\n\nfrom bugbug import bug_features\nfrom bugbug import labels\nfrom bugbug.model import Model\nfrom bugbug.utils import DictSelector\n\n\nclass QANeededModel(Model):\n def __init__(self, lemmatization=False):\n Model.__init__(self, lemmatization)\n\n self.classes = labels.get_qa_needed_labels()\n\n feature_extractors = [\n bug_features.has_str(),\n bug_features.has_regression_range(),\n bug_features.severity(),\n bug_features.keywords(set(['qawanted'])),\n bug_features.is_coverity_issue(),\n bug_features.has_crash_signature(),\n bug_features.has_url(),\n bug_features.has_w3c_url(),\n bug_features.has_github_url(),\n bug_features.whiteboard(),\n bug_features.patches(),\n bug_features.landings(),\n bug_features.title(),\n bug_features.comments(),\n ]\n\n self.extraction_pipeline = Pipeline([\n ('bug_extractor', bug_features.BugExtractor(feature_extractors)),\n ('union', FeatureUnion(\n transformer_list=[\n ('data', Pipeline([\n ('selector', DictSelector(key='data')),\n ('vect', DictVectorizer()),\n ])),\n\n ('title', Pipeline([\n ('selector', DictSelector(key='title')),\n ('tfidf', self.text_vectorizer(stop_words='english')),\n ])),\n\n ('comments', Pipeline([\n ('selector', DictSelector(key='comments')),\n ('tfidf', self.text_vectorizer(stop_words='english')),\n ])),\n ],\n )),\n ])\n\n self.clf = xgboost.XGBClassifier(n_jobs=16)\n", "path": "bugbug/models/qaneeded.py"}]}
892
115
gh_patches_debug_6590
rasdani/github-patches
git_diff
napalm-automation__napalm-836
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- napalm validate is modifying the source dictionary with pop operations ```python def napalm_validate(napalm, file=None, source=None): complies = napalm.compliance_report(validation_file=file, validation_source=source)[ "complies" ] return complies ``` and i'm using it like this: ```python def whatever(task): napalm = task.host.get_connection("napalm") ping_gw = [ { "ping": { "_name": "ping_gw", "_kwargs": {"destination": "10.0.0.1", "source": "Vlan1000"}, "success": {"packet_loss": 0}, "_mode": "strict", } } ] print("1", task.host, ping_gw) ping = napalm_validate(napalm, source=ping_gw) print("2", task.host, ping_gw) ping = napalm_validate(napalm, source=ping_gw) ``` which results in ``` 1 host02 [{'ping': {'_name': 'ping_gw', '_kwargs': {'destination': '10.0.0.1', 'source': 'Vlan1000'}, 'success': {'packet_loss': 0}, '_mode': 'strict'}}] 2 host02 [{'ping': {'success': {'packet_loss': 0}}}] ``` is it because of `kwargs = expected_results.pop('_kwargs', {})` in `compliance_report`? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `napalm/base/validate.py` Content: ``` 1 """ 2 Validation methods for the NAPALM base. 3 4 See: https://napalm.readthedocs.io/en/latest/validate.html 5 """ 6 from __future__ import unicode_literals 7 8 import yaml 9 10 from napalm.base.exceptions import ValidationException 11 from napalm.base.utils import py23_compat 12 13 import copy 14 import re 15 16 17 # We put it here to compile it only once 18 numeric_compare_regex = re.compile(r"^(<|>|<=|>=|==|!=)(\d+(\.\d+){0,1})$") 19 20 21 def _get_validation_file(validation_file): 22 try: 23 with open(validation_file, 'r') as stream: 24 try: 25 validation_source = yaml.safe_load(stream) 26 except yaml.YAMLError as exc: 27 raise ValidationException(exc) 28 except IOError: 29 raise ValidationException("File {0} not found.".format(validation_file)) 30 return validation_source 31 32 33 def _mode(mode_string): 34 mode = {'strict': False} 35 36 for m in mode_string.split(): 37 if m not in mode.keys(): 38 raise ValidationException("mode '{}' not recognized".format(m)) 39 mode[m] = True 40 return mode 41 42 43 def _compare_getter_list(src, dst, mode): 44 result = {"complies": True, "present": [], "missing": [], "extra": []} 45 for src_element in src: 46 found = False 47 48 i = 0 49 while True: 50 try: 51 intermediate_match = compare(src_element, dst[i]) 52 if isinstance(intermediate_match, dict) and intermediate_match["complies"] or \ 53 not isinstance(intermediate_match, dict) and intermediate_match: 54 found = True 55 result["present"].append(src_element) 56 dst.pop(i) 57 break 58 else: 59 i += 1 60 except IndexError: 61 break 62 63 if not found: 64 result["complies"] = False 65 result["missing"].append(src_element) 66 67 if mode["strict"] and dst: 68 result["extra"] = dst 69 result["complies"] = False 70 71 return result 72 73 74 def _compare_getter_dict(src, dst, mode): 75 result = {"complies": True, "present": {}, "missing": [], "extra": []} 76 dst = copy.deepcopy(dst) # Otherwise we are going to modify a "live" object 77 78 for key, src_element in src.items(): 79 try: 80 dst_element = dst.pop(key) 81 result["present"][key] = {} 82 intermediate_result = compare(src_element, dst_element) 83 84 if isinstance(intermediate_result, dict): 85 nested = True 86 87 complies = intermediate_result["complies"] 88 89 if not complies: 90 result["present"][key]['diff'] = intermediate_result 91 else: 92 complies = intermediate_result 93 nested = False 94 if not complies: 95 result["present"][key]["expected_value"] = src_element 96 result["present"][key]["actual_value"] = dst_element 97 98 if not complies: 99 result["complies"] = False 100 101 result["present"][key]["complies"] = complies 102 result["present"][key]["nested"] = nested 103 except KeyError: 104 result["missing"].append(key) 105 result["complies"] = False 106 107 if mode["strict"] and dst: 108 result["extra"] = list(dst.keys()) 109 result["complies"] = False 110 111 return result 112 113 114 def compare(src, dst): 115 if isinstance(src, py23_compat.string_types): 116 src = py23_compat.text_type(src) 117 118 if isinstance(src, dict): 119 mode = _mode(src.pop('_mode', '')) 120 if 'list' in src.keys(): 121 if not isinstance(dst, list): 122 # This can happen with nested lists 123 return False 124 125 return _compare_getter_list(src['list'], dst, mode) 126 return _compare_getter_dict(src, dst, mode) 127 128 elif isinstance(src, py23_compat.string_types): 129 if src.startswith('<') or src.startswith('>'): 130 cmp_result = _compare_numeric(src, dst) 131 return cmp_result 132 else: 133 m = re.search(src, py23_compat.text_type(dst)) 134 if m: 135 return bool(m) 136 else: 137 return src == dst 138 139 elif(type(src) == type(dst) == list): 140 pairs = zip(src, dst) 141 diff_lists = [[(k, x[k], y[k]) 142 for k in x if not re.search(x[k], y[k])] 143 for x, y in pairs if x != y] 144 return empty_tree(diff_lists) 145 146 else: 147 return src == dst 148 149 150 def _compare_numeric(src_num, dst_num): 151 """Compare numerical values. You can use '<%d','>%d'.""" 152 dst_num = float(dst_num) 153 154 match = numeric_compare_regex.match(src_num) 155 if not match: 156 error = "Failed numeric comparison. Collected: {}. Expected: {}".format(dst_num, src_num) 157 raise ValueError(error) 158 159 operand = { 160 "<": "__lt__", 161 ">": "__gt__", 162 ">=": "__ge__", 163 "<=": "__le__", 164 "==": "__eq__", 165 "!=": "__ne__", 166 } 167 return getattr(dst_num, operand[match.group(1)])(float(match.group(2))) 168 169 170 def empty_tree(input_list): 171 """Recursively iterate through values in nested lists.""" 172 for item in input_list: 173 if not isinstance(item, list) or not empty_tree(item): 174 return False 175 return True 176 177 178 def compliance_report(cls, validation_file=None, validation_source=None): 179 report = {} 180 if validation_file: 181 validation_source = _get_validation_file(validation_file) 182 183 for validation_check in validation_source: 184 for getter, expected_results in validation_check.items(): 185 if getter == "get_config": 186 # TBD 187 pass 188 else: 189 key = expected_results.pop("_name", "") or getter 190 191 try: 192 kwargs = expected_results.pop('_kwargs', {}) 193 actual_results = getattr(cls, getter)(**kwargs) 194 report[key] = compare(expected_results, actual_results) 195 except NotImplementedError: 196 report[key] = {"skipped": True, "reason": "NotImplemented"} 197 198 complies = all([e.get("complies", True) for e in report.values()]) 199 report["skipped"] = [k for k, v in report.items() if v.get("skipped", False)] 200 report["complies"] = complies 201 return report 202 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/napalm/base/validate.py b/napalm/base/validate.py --- a/napalm/base/validate.py +++ b/napalm/base/validate.py @@ -180,6 +180,9 @@ if validation_file: validation_source = _get_validation_file(validation_file) + # Otherwise we are going to modify a "live" object + validation_source = copy.deepcopy(validation_source) + for validation_check in validation_source: for getter, expected_results in validation_check.items(): if getter == "get_config":
{"golden_diff": "diff --git a/napalm/base/validate.py b/napalm/base/validate.py\n--- a/napalm/base/validate.py\n+++ b/napalm/base/validate.py\n@@ -180,6 +180,9 @@\n if validation_file:\n validation_source = _get_validation_file(validation_file)\n \n+ # Otherwise we are going to modify a \"live\" object\n+ validation_source = copy.deepcopy(validation_source)\n+\n for validation_check in validation_source:\n for getter, expected_results in validation_check.items():\n if getter == \"get_config\":\n", "issue": "napalm validate is modifying the source dictionary with pop operations\n```python\r\ndef napalm_validate(napalm, file=None, source=None):\r\n complies = napalm.compliance_report(validation_file=file, validation_source=source)[\r\n \"complies\"\r\n ]\r\n return complies\r\n```\r\n\r\nand i'm using it like this:\r\n\r\n```python\r\ndef whatever(task):\r\n napalm = task.host.get_connection(\"napalm\")\r\n\r\n ping_gw = [\r\n {\r\n \"ping\": {\r\n \"_name\": \"ping_gw\",\r\n \"_kwargs\": {\"destination\": \"10.0.0.1\", \"source\": \"Vlan1000\"},\r\n \"success\": {\"packet_loss\": 0},\r\n \"_mode\": \"strict\",\r\n }\r\n }\r\n ]\r\n\r\n print(\"1\", task.host, ping_gw)\r\n ping = napalm_validate(napalm, source=ping_gw)\r\n\r\n print(\"2\", task.host, ping_gw)\r\n ping = napalm_validate(napalm, source=ping_gw)\r\n```\r\n\r\nwhich results in\r\n```\r\n1 host02 [{'ping': {'_name': 'ping_gw', '_kwargs': \r\n {'destination': '10.0.0.1', 'source': 'Vlan1000'}, \r\n 'success': {'packet_loss': 0}, '_mode': 'strict'}}]\r\n2 host02 [{'ping': {'success': {'packet_loss': 0}}}]\r\n```\r\n\r\nis it because of `kwargs = expected_results.pop('_kwargs', {})` in `compliance_report`?\n", "before_files": [{"content": "\"\"\"\nValidation methods for the NAPALM base.\n\nSee: https://napalm.readthedocs.io/en/latest/validate.html\n\"\"\"\nfrom __future__ import unicode_literals\n\nimport yaml\n\nfrom napalm.base.exceptions import ValidationException\nfrom napalm.base.utils import py23_compat\n\nimport copy\nimport re\n\n\n# We put it here to compile it only once\nnumeric_compare_regex = re.compile(r\"^(<|>|<=|>=|==|!=)(\\d+(\\.\\d+){0,1})$\")\n\n\ndef _get_validation_file(validation_file):\n try:\n with open(validation_file, 'r') as stream:\n try:\n validation_source = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n raise ValidationException(exc)\n except IOError:\n raise ValidationException(\"File {0} not found.\".format(validation_file))\n return validation_source\n\n\ndef _mode(mode_string):\n mode = {'strict': False}\n\n for m in mode_string.split():\n if m not in mode.keys():\n raise ValidationException(\"mode '{}' not recognized\".format(m))\n mode[m] = True\n return mode\n\n\ndef _compare_getter_list(src, dst, mode):\n result = {\"complies\": True, \"present\": [], \"missing\": [], \"extra\": []}\n for src_element in src:\n found = False\n\n i = 0\n while True:\n try:\n intermediate_match = compare(src_element, dst[i])\n if isinstance(intermediate_match, dict) and intermediate_match[\"complies\"] or \\\n not isinstance(intermediate_match, dict) and intermediate_match:\n found = True\n result[\"present\"].append(src_element)\n dst.pop(i)\n break\n else:\n i += 1\n except IndexError:\n break\n\n if not found:\n result[\"complies\"] = False\n result[\"missing\"].append(src_element)\n\n if mode[\"strict\"] and dst:\n result[\"extra\"] = dst\n result[\"complies\"] = False\n\n return result\n\n\ndef _compare_getter_dict(src, dst, mode):\n result = {\"complies\": True, \"present\": {}, \"missing\": [], \"extra\": []}\n dst = copy.deepcopy(dst) # Otherwise we are going to modify a \"live\" object\n\n for key, src_element in src.items():\n try:\n dst_element = dst.pop(key)\n result[\"present\"][key] = {}\n intermediate_result = compare(src_element, dst_element)\n\n if isinstance(intermediate_result, dict):\n nested = True\n\n complies = intermediate_result[\"complies\"]\n\n if not complies:\n result[\"present\"][key]['diff'] = intermediate_result\n else:\n complies = intermediate_result\n nested = False\n if not complies:\n result[\"present\"][key][\"expected_value\"] = src_element\n result[\"present\"][key][\"actual_value\"] = dst_element\n\n if not complies:\n result[\"complies\"] = False\n\n result[\"present\"][key][\"complies\"] = complies\n result[\"present\"][key][\"nested\"] = nested\n except KeyError:\n result[\"missing\"].append(key)\n result[\"complies\"] = False\n\n if mode[\"strict\"] and dst:\n result[\"extra\"] = list(dst.keys())\n result[\"complies\"] = False\n\n return result\n\n\ndef compare(src, dst):\n if isinstance(src, py23_compat.string_types):\n src = py23_compat.text_type(src)\n\n if isinstance(src, dict):\n mode = _mode(src.pop('_mode', ''))\n if 'list' in src.keys():\n if not isinstance(dst, list):\n # This can happen with nested lists\n return False\n\n return _compare_getter_list(src['list'], dst, mode)\n return _compare_getter_dict(src, dst, mode)\n\n elif isinstance(src, py23_compat.string_types):\n if src.startswith('<') or src.startswith('>'):\n cmp_result = _compare_numeric(src, dst)\n return cmp_result\n else:\n m = re.search(src, py23_compat.text_type(dst))\n if m:\n return bool(m)\n else:\n return src == dst\n\n elif(type(src) == type(dst) == list):\n pairs = zip(src, dst)\n diff_lists = [[(k, x[k], y[k])\n for k in x if not re.search(x[k], y[k])]\n for x, y in pairs if x != y]\n return empty_tree(diff_lists)\n\n else:\n return src == dst\n\n\ndef _compare_numeric(src_num, dst_num):\n \"\"\"Compare numerical values. You can use '<%d','>%d'.\"\"\"\n dst_num = float(dst_num)\n\n match = numeric_compare_regex.match(src_num)\n if not match:\n error = \"Failed numeric comparison. Collected: {}. Expected: {}\".format(dst_num, src_num)\n raise ValueError(error)\n\n operand = {\n \"<\": \"__lt__\",\n \">\": \"__gt__\",\n \">=\": \"__ge__\",\n \"<=\": \"__le__\",\n \"==\": \"__eq__\",\n \"!=\": \"__ne__\",\n }\n return getattr(dst_num, operand[match.group(1)])(float(match.group(2)))\n\n\ndef empty_tree(input_list):\n \"\"\"Recursively iterate through values in nested lists.\"\"\"\n for item in input_list:\n if not isinstance(item, list) or not empty_tree(item):\n return False\n return True\n\n\ndef compliance_report(cls, validation_file=None, validation_source=None):\n report = {}\n if validation_file:\n validation_source = _get_validation_file(validation_file)\n\n for validation_check in validation_source:\n for getter, expected_results in validation_check.items():\n if getter == \"get_config\":\n # TBD\n pass\n else:\n key = expected_results.pop(\"_name\", \"\") or getter\n\n try:\n kwargs = expected_results.pop('_kwargs', {})\n actual_results = getattr(cls, getter)(**kwargs)\n report[key] = compare(expected_results, actual_results)\n except NotImplementedError:\n report[key] = {\"skipped\": True, \"reason\": \"NotImplemented\"}\n\n complies = all([e.get(\"complies\", True) for e in report.values()])\n report[\"skipped\"] = [k for k, v in report.items() if v.get(\"skipped\", False)]\n report[\"complies\"] = complies\n return report\n", "path": "napalm/base/validate.py"}], "after_files": [{"content": "\"\"\"\nValidation methods for the NAPALM base.\n\nSee: https://napalm.readthedocs.io/en/latest/validate.html\n\"\"\"\nfrom __future__ import unicode_literals\n\nimport yaml\n\nfrom napalm.base.exceptions import ValidationException\nfrom napalm.base.utils import py23_compat\n\nimport copy\nimport re\n\n\n# We put it here to compile it only once\nnumeric_compare_regex = re.compile(r\"^(<|>|<=|>=|==|!=)(\\d+(\\.\\d+){0,1})$\")\n\n\ndef _get_validation_file(validation_file):\n try:\n with open(validation_file, 'r') as stream:\n try:\n validation_source = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n raise ValidationException(exc)\n except IOError:\n raise ValidationException(\"File {0} not found.\".format(validation_file))\n return validation_source\n\n\ndef _mode(mode_string):\n mode = {'strict': False}\n\n for m in mode_string.split():\n if m not in mode.keys():\n raise ValidationException(\"mode '{}' not recognized\".format(m))\n mode[m] = True\n return mode\n\n\ndef _compare_getter_list(src, dst, mode):\n result = {\"complies\": True, \"present\": [], \"missing\": [], \"extra\": []}\n for src_element in src:\n found = False\n\n i = 0\n while True:\n try:\n intermediate_match = compare(src_element, dst[i])\n if isinstance(intermediate_match, dict) and intermediate_match[\"complies\"] or \\\n not isinstance(intermediate_match, dict) and intermediate_match:\n found = True\n result[\"present\"].append(src_element)\n dst.pop(i)\n break\n else:\n i += 1\n except IndexError:\n break\n\n if not found:\n result[\"complies\"] = False\n result[\"missing\"].append(src_element)\n\n if mode[\"strict\"] and dst:\n result[\"extra\"] = dst\n result[\"complies\"] = False\n\n return result\n\n\ndef _compare_getter_dict(src, dst, mode):\n result = {\"complies\": True, \"present\": {}, \"missing\": [], \"extra\": []}\n dst = copy.deepcopy(dst) # Otherwise we are going to modify a \"live\" object\n\n for key, src_element in src.items():\n try:\n dst_element = dst.pop(key)\n result[\"present\"][key] = {}\n intermediate_result = compare(src_element, dst_element)\n\n if isinstance(intermediate_result, dict):\n nested = True\n\n complies = intermediate_result[\"complies\"]\n\n if not complies:\n result[\"present\"][key]['diff'] = intermediate_result\n else:\n complies = intermediate_result\n nested = False\n if not complies:\n result[\"present\"][key][\"expected_value\"] = src_element\n result[\"present\"][key][\"actual_value\"] = dst_element\n\n if not complies:\n result[\"complies\"] = False\n\n result[\"present\"][key][\"complies\"] = complies\n result[\"present\"][key][\"nested\"] = nested\n except KeyError:\n result[\"missing\"].append(key)\n result[\"complies\"] = False\n\n if mode[\"strict\"] and dst:\n result[\"extra\"] = list(dst.keys())\n result[\"complies\"] = False\n\n return result\n\n\ndef compare(src, dst):\n if isinstance(src, py23_compat.string_types):\n src = py23_compat.text_type(src)\n\n if isinstance(src, dict):\n mode = _mode(src.pop('_mode', ''))\n if 'list' in src.keys():\n if not isinstance(dst, list):\n # This can happen with nested lists\n return False\n\n return _compare_getter_list(src['list'], dst, mode)\n return _compare_getter_dict(src, dst, mode)\n\n elif isinstance(src, py23_compat.string_types):\n if src.startswith('<') or src.startswith('>'):\n cmp_result = _compare_numeric(src, dst)\n return cmp_result\n else:\n m = re.search(src, py23_compat.text_type(dst))\n if m:\n return bool(m)\n else:\n return src == dst\n\n elif(type(src) == type(dst) == list):\n pairs = zip(src, dst)\n diff_lists = [[(k, x[k], y[k])\n for k in x if not re.search(x[k], y[k])]\n for x, y in pairs if x != y]\n return empty_tree(diff_lists)\n\n else:\n return src == dst\n\n\ndef _compare_numeric(src_num, dst_num):\n \"\"\"Compare numerical values. You can use '<%d','>%d'.\"\"\"\n dst_num = float(dst_num)\n\n match = numeric_compare_regex.match(src_num)\n if not match:\n error = \"Failed numeric comparison. Collected: {}. Expected: {}\".format(dst_num, src_num)\n raise ValueError(error)\n\n operand = {\n \"<\": \"__lt__\",\n \">\": \"__gt__\",\n \">=\": \"__ge__\",\n \"<=\": \"__le__\",\n \"==\": \"__eq__\",\n \"!=\": \"__ne__\",\n }\n return getattr(dst_num, operand[match.group(1)])(float(match.group(2)))\n\n\ndef empty_tree(input_list):\n \"\"\"Recursively iterate through values in nested lists.\"\"\"\n for item in input_list:\n if not isinstance(item, list) or not empty_tree(item):\n return False\n return True\n\n\ndef compliance_report(cls, validation_file=None, validation_source=None):\n report = {}\n if validation_file:\n validation_source = _get_validation_file(validation_file)\n\n # Otherwise we are going to modify a \"live\" object\n validation_source = copy.deepcopy(validation_source)\n\n for validation_check in validation_source:\n for getter, expected_results in validation_check.items():\n if getter == \"get_config\":\n # TBD\n pass\n else:\n key = expected_results.pop(\"_name\", \"\") or getter\n\n try:\n kwargs = expected_results.pop('_kwargs', {})\n actual_results = getattr(cls, getter)(**kwargs)\n report[key] = compare(expected_results, actual_results)\n except NotImplementedError:\n report[key] = {\"skipped\": True, \"reason\": \"NotImplemented\"}\n\n complies = all([e.get(\"complies\", True) for e in report.values()])\n report[\"skipped\"] = [k for k, v in report.items() if v.get(\"skipped\", False)]\n report[\"complies\"] = complies\n return report\n", "path": "napalm/base/validate.py"}]}
2,502
124
gh_patches_debug_13990
rasdani/github-patches
git_diff
coala__coala-5853
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Result.py: Add docstring regarding the case that line or column is None The output generated by coala can have ```line``` being ```None``` or ```column``` being ```None```. ```column = None``` means there is an error with the whole line. ```line = None``` means the whole file. The reason behind it is that it's an unknown line means it can occur anywhere in the file. All combinations with None values and numbers for line and column are allowed, except ```line=None``` and ```column=<some number>``` This should be clarified in the documentation. (ref: http://api.coala.io/en/latest/coalib.results.html#module-coalib.results.Result) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `coalib/results/Result.py` Content: ``` 1 import uuid 2 from os.path import relpath 3 4 from coala_utils.decorators import ( 5 enforce_signature, generate_ordering, generate_repr, get_public_members) 6 from coalib.bearlib.aspects import aspectbase 7 from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY 8 from coalib.results.SourceRange import SourceRange 9 10 11 # Omit additional info, debug message and diffs for brevity 12 @generate_repr(('id', hex), 13 'origin', 14 'affected_code', 15 ('severity', RESULT_SEVERITY.reverse.get), 16 'confidence', 17 'message', 18 ('aspect', lambda aspect: type(aspect).__qualname__), 19 'applied_actions') 20 @generate_ordering('affected_code', 21 'severity', 22 'confidence', 23 'origin', 24 'message_base', 25 'message_arguments', 26 'aspect', 27 'additional_info', 28 'diffs', 29 'debug_msg', 30 'applied_actions') 31 class Result: 32 """ 33 A result is anything that has an origin and a message. 34 35 Optionally it might affect a file. 36 37 Result messages can also have arguments. The message is python 38 style formatted with these arguments. 39 40 >>> r = Result('origin','{arg1} and {arg2}', \ 41 message_arguments={'arg1': 'foo', 'arg2': 'bar'}) 42 >>> r.message 43 'foo and bar' 44 45 Message arguments may be changed later. The result message 46 will also reflect these changes. 47 48 >>> r.message_arguments = {'arg1': 'spam', 'arg2': 'eggs'} 49 >>> r.message 50 'spam and eggs' 51 52 """ 53 54 @enforce_signature 55 def __init__(self, 56 origin, 57 message: str, 58 affected_code: (tuple, list) = (), 59 severity: int = RESULT_SEVERITY.NORMAL, 60 additional_info: str = '', 61 debug_msg='', 62 diffs: (dict, None) = None, 63 confidence: int = 100, 64 aspect: (aspectbase, None) = None, 65 message_arguments: dict = {}, 66 applied_actions: dict = {}): 67 """ 68 :param origin: 69 Class name or creator object of this object. 70 :param message: 71 Base message to show with this result. 72 :param affected_code: 73 A tuple of ``SourceRange`` objects pointing to related positions 74 in the source code. 75 :param severity: 76 Severity of this result. 77 :param additional_info: 78 A long description holding additional information about the issue 79 and/or how to fix it. You can use this like a manual entry for a 80 category of issues. 81 :param debug_msg: 82 A message which may help the user find out why this result was 83 yielded. 84 :param diffs: 85 A dictionary with filename as key and ``Diff`` object 86 associated with it as value. 87 :param confidence: 88 A number between 0 and 100 describing the likelihood of this result 89 being a real issue. 90 :param aspect: 91 An aspectclass instance which this result is associated to. 92 Note that this should be a leaf of the aspect tree! 93 (If you have a node, spend some time figuring out which of 94 the leafs exactly your result belongs to.) 95 :param message_arguments: 96 Arguments to be provided to the base message. 97 :param applied_actions: 98 A dictionary that contains the result, file_dict, file_diff_dict and 99 the section for an action. 100 :raises ValueError: 101 Raised when confidence is not between 0 and 100. 102 :raises KeyError: 103 Raised when message_base can not be formatted with 104 message_arguments. 105 """ 106 origin = origin or '' 107 if not isinstance(origin, str): 108 origin = origin.__class__.__name__ 109 if severity not in RESULT_SEVERITY.reverse: 110 raise ValueError('severity is not a valid RESULT_SEVERITY') 111 112 self.origin = origin 113 self.message_base = message 114 self.message_arguments = message_arguments 115 self.applied_actions = applied_actions 116 if message_arguments: 117 self.message_base.format(**self.message_arguments) 118 self.debug_msg = debug_msg 119 self.additional_info = additional_info 120 # Sorting is important for tuple comparison 121 self.affected_code = tuple(sorted(affected_code)) 122 self.severity = severity 123 if confidence < 0 or confidence > 100: 124 raise ValueError('Value of confidence should be between 0 and 100.') 125 self.confidence = confidence 126 self.diffs = diffs 127 self.id = uuid.uuid4().int 128 self.aspect = aspect 129 if self.aspect and not self.additional_info: 130 self.additional_info = '{} {}'.format( 131 aspect.Docs.importance_reason, aspect.Docs.fix_suggestions) 132 133 @property 134 def message(self): 135 if not self.message_arguments: 136 return self.message_base 137 return self.message_base.format(**self.message_arguments) 138 139 @message.setter 140 def message(self, value: str): 141 self.message_base = value 142 143 def set_applied_actions(self, applied_actions): 144 self.applied_actions = applied_actions 145 146 def get_applied_actions(self): 147 return self.applied_actions 148 149 @classmethod 150 @enforce_signature 151 def from_values(cls, 152 origin, 153 message: str, 154 file: str, 155 line: (int, None) = None, 156 column: (int, None) = None, 157 end_line: (int, None) = None, 158 end_column: (int, None) = None, 159 severity: int = RESULT_SEVERITY.NORMAL, 160 additional_info: str = '', 161 debug_msg='', 162 diffs: (dict, None) = None, 163 confidence: int = 100, 164 aspect: (aspectbase, None) = None, 165 message_arguments: dict = {}): 166 """ 167 Creates a result with only one SourceRange with the given start and end 168 locations. 169 170 :param origin: 171 Class name or creator object of this object. 172 :param message: 173 Base message to show with this result. 174 :param message_arguments: 175 Arguments to be provided to the base message 176 :param file: 177 The related file. 178 :param line: 179 The first related line in the file. (First line is 1) 180 :param column: 181 The column indicating the first character. (First character is 1) 182 :param end_line: 183 The last related line in the file. 184 :param end_column: 185 The column indicating the last character. 186 :param severity: 187 Severity of this result. 188 :param additional_info: 189 A long description holding additional information about the issue 190 and/or how to fix it. You can use this like a manual entry for a 191 category of issues. 192 :param debug_msg: 193 A message which may help the user find out why this result was 194 yielded. 195 :param diffs: 196 A dictionary with filename as key and ``Diff`` object 197 associated with it as value. 198 :param confidence: 199 A number between 0 and 100 describing the likelihood of this result 200 being a real issue. 201 :param aspect: 202 An Aspect object which this result is associated to. Note that this 203 should be a leaf of the aspect tree! (If you have a node, spend 204 some time figuring out which of the leafs exactly your result 205 belongs to.) 206 """ 207 source_range = SourceRange.from_values(file, 208 line, 209 column, 210 end_line, 211 end_column) 212 213 return cls(origin=origin, 214 message=message, 215 affected_code=(source_range,), 216 severity=severity, 217 additional_info=additional_info, 218 debug_msg=debug_msg, 219 diffs=diffs, 220 confidence=confidence, 221 aspect=aspect, 222 message_arguments=message_arguments) 223 224 def to_string_dict(self): 225 """ 226 Makes a dictionary which has all keys and values as strings and 227 contains all the data that the base Result has. 228 229 FIXME: diffs are not serialized ATM. 230 FIXME: Only the first SourceRange of affected_code is serialized. If 231 there are more, this data is currently missing. 232 233 :return: Dictionary with keys and values as string. 234 """ 235 retval = {} 236 237 members = ['id', 238 'additional_info', 239 'debug_msg', 240 'message', 241 'message_base', 242 'message_arguments', 243 'origin', 244 'confidence'] 245 246 for member in members: 247 value = getattr(self, member) 248 retval[member] = '' if value is None else str(value) 249 250 retval['severity'] = str(RESULT_SEVERITY.reverse.get( 251 self.severity, '')) 252 if len(self.affected_code) > 0: 253 retval['file'] = self.affected_code[0].file 254 line = self.affected_code[0].start.line 255 retval['line_nr'] = '' if line is None else str(line) 256 else: 257 retval['file'], retval['line_nr'] = '', '' 258 259 return retval 260 261 @enforce_signature 262 def apply(self, file_dict: dict): 263 """ 264 Applies all contained diffs to the given file_dict. This operation will 265 be done in-place. 266 267 :param file_dict: A dictionary containing all files with filename as 268 key and all lines a value. Will be modified. 269 """ 270 for filename, diff in self.diffs.items(): 271 file_dict[filename] = diff.modified 272 273 def __add__(self, other): 274 """ 275 Joins those patches to one patch. 276 277 :param other: The other patch. 278 """ 279 assert isinstance(self.diffs, dict) 280 assert isinstance(other.diffs, dict) 281 282 for filename in other.diffs: 283 if filename in self.diffs: 284 self.diffs[filename] += other.diffs[filename] 285 else: 286 self.diffs[filename] = other.diffs[filename] 287 288 return self 289 290 def overlaps(self, ranges): 291 """ 292 Determines if the result overlaps with source ranges provided. 293 294 :param ranges: A list SourceRange objects to check for overlap. 295 :return: True if the ranges overlap with the result. 296 """ 297 if isinstance(ranges, SourceRange): 298 ranges = [ranges] 299 300 for range in ranges: 301 for self_range in self.affected_code: 302 if range.overlaps(self_range): 303 return True 304 305 return False 306 307 def location_repr(self): 308 """ 309 Retrieves a string, that briefly represents 310 the affected code of the result. 311 312 :return: A string containing all of the affected files 313 separated by a comma. 314 """ 315 316 if not self.affected_code: 317 return 'the whole project' 318 319 # Set important to exclude duplicate file names 320 range_paths = set(sourcerange.file 321 for sourcerange in self.affected_code) 322 323 return ', '.join(repr(relpath(range_path)) 324 for range_path in sorted(range_paths)) 325 326 def __json__(self, use_relpath=False): 327 _dict = get_public_members(self) 328 if use_relpath and _dict['diffs']: 329 _dict['diffs'] = {relpath(file): diff 330 for file, diff in _dict['diffs'].items()} 331 _dict['aspect'] = type(self.aspect).__qualname__ 332 return _dict 333 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/coalib/results/Result.py b/coalib/results/Result.py --- a/coalib/results/Result.py +++ b/coalib/results/Result.py @@ -177,8 +177,12 @@ The related file. :param line: The first related line in the file. (First line is 1) + line = None means an unknown line occurs anywhere in the file. :param column: The column indicating the first character. (First character is 1) + column = None means there is an error with the whole line. + All combinations of None values are allowed for line and column, + Except line = None and column = <some number> :param end_line: The last related line in the file. :param end_column:
{"golden_diff": "diff --git a/coalib/results/Result.py b/coalib/results/Result.py\n--- a/coalib/results/Result.py\n+++ b/coalib/results/Result.py\n@@ -177,8 +177,12 @@\n The related file.\n :param line:\n The first related line in the file. (First line is 1)\n+ line = None means an unknown line occurs anywhere in the file.\n :param column:\n The column indicating the first character. (First character is 1)\n+ column = None means there is an error with the whole line.\n+ All combinations of None values are allowed for line and column,\n+ Except line = None and column = <some number>\n :param end_line:\n The last related line in the file.\n :param end_column:\n", "issue": "Result.py: Add docstring regarding the case that line or column is None\nThe output generated by coala can have ```line``` being ```None``` or ```column``` being ```None```.\r\n\r\n```column = None``` means there is an error with the whole line.\r\n```line = None``` means the whole file. The reason behind it is that it's an unknown line means it can occur anywhere in the file.\r\n\r\nAll combinations with None values and numbers for line and column are allowed, except ```line=None``` and ```column=<some number>```\r\n\r\nThis should be clarified in the documentation.\r\n\r\n(ref: http://api.coala.io/en/latest/coalib.results.html#module-coalib.results.Result)\n", "before_files": [{"content": "import uuid\nfrom os.path import relpath\n\nfrom coala_utils.decorators import (\n enforce_signature, generate_ordering, generate_repr, get_public_members)\nfrom coalib.bearlib.aspects import aspectbase\nfrom coalib.results.RESULT_SEVERITY import RESULT_SEVERITY\nfrom coalib.results.SourceRange import SourceRange\n\n\n# Omit additional info, debug message and diffs for brevity\n@generate_repr(('id', hex),\n 'origin',\n 'affected_code',\n ('severity', RESULT_SEVERITY.reverse.get),\n 'confidence',\n 'message',\n ('aspect', lambda aspect: type(aspect).__qualname__),\n 'applied_actions')\n@generate_ordering('affected_code',\n 'severity',\n 'confidence',\n 'origin',\n 'message_base',\n 'message_arguments',\n 'aspect',\n 'additional_info',\n 'diffs',\n 'debug_msg',\n 'applied_actions')\nclass Result:\n \"\"\"\n A result is anything that has an origin and a message.\n\n Optionally it might affect a file.\n\n Result messages can also have arguments. The message is python\n style formatted with these arguments.\n\n >>> r = Result('origin','{arg1} and {arg2}', \\\n message_arguments={'arg1': 'foo', 'arg2': 'bar'})\n >>> r.message\n 'foo and bar'\n\n Message arguments may be changed later. The result message\n will also reflect these changes.\n\n >>> r.message_arguments = {'arg1': 'spam', 'arg2': 'eggs'}\n >>> r.message\n 'spam and eggs'\n\n \"\"\"\n\n @enforce_signature\n def __init__(self,\n origin,\n message: str,\n affected_code: (tuple, list) = (),\n severity: int = RESULT_SEVERITY.NORMAL,\n additional_info: str = '',\n debug_msg='',\n diffs: (dict, None) = None,\n confidence: int = 100,\n aspect: (aspectbase, None) = None,\n message_arguments: dict = {},\n applied_actions: dict = {}):\n \"\"\"\n :param origin:\n Class name or creator object of this object.\n :param message:\n Base message to show with this result.\n :param affected_code:\n A tuple of ``SourceRange`` objects pointing to related positions\n in the source code.\n :param severity:\n Severity of this result.\n :param additional_info:\n A long description holding additional information about the issue\n and/or how to fix it. You can use this like a manual entry for a\n category of issues.\n :param debug_msg:\n A message which may help the user find out why this result was\n yielded.\n :param diffs:\n A dictionary with filename as key and ``Diff`` object\n associated with it as value.\n :param confidence:\n A number between 0 and 100 describing the likelihood of this result\n being a real issue.\n :param aspect:\n An aspectclass instance which this result is associated to.\n Note that this should be a leaf of the aspect tree!\n (If you have a node, spend some time figuring out which of\n the leafs exactly your result belongs to.)\n :param message_arguments:\n Arguments to be provided to the base message.\n :param applied_actions:\n A dictionary that contains the result, file_dict, file_diff_dict and\n the section for an action.\n :raises ValueError:\n Raised when confidence is not between 0 and 100.\n :raises KeyError:\n Raised when message_base can not be formatted with\n message_arguments.\n \"\"\"\n origin = origin or ''\n if not isinstance(origin, str):\n origin = origin.__class__.__name__\n if severity not in RESULT_SEVERITY.reverse:\n raise ValueError('severity is not a valid RESULT_SEVERITY')\n\n self.origin = origin\n self.message_base = message\n self.message_arguments = message_arguments\n self.applied_actions = applied_actions\n if message_arguments:\n self.message_base.format(**self.message_arguments)\n self.debug_msg = debug_msg\n self.additional_info = additional_info\n # Sorting is important for tuple comparison\n self.affected_code = tuple(sorted(affected_code))\n self.severity = severity\n if confidence < 0 or confidence > 100:\n raise ValueError('Value of confidence should be between 0 and 100.')\n self.confidence = confidence\n self.diffs = diffs\n self.id = uuid.uuid4().int\n self.aspect = aspect\n if self.aspect and not self.additional_info:\n self.additional_info = '{} {}'.format(\n aspect.Docs.importance_reason, aspect.Docs.fix_suggestions)\n\n @property\n def message(self):\n if not self.message_arguments:\n return self.message_base\n return self.message_base.format(**self.message_arguments)\n\n @message.setter\n def message(self, value: str):\n self.message_base = value\n\n def set_applied_actions(self, applied_actions):\n self.applied_actions = applied_actions\n\n def get_applied_actions(self):\n return self.applied_actions\n\n @classmethod\n @enforce_signature\n def from_values(cls,\n origin,\n message: str,\n file: str,\n line: (int, None) = None,\n column: (int, None) = None,\n end_line: (int, None) = None,\n end_column: (int, None) = None,\n severity: int = RESULT_SEVERITY.NORMAL,\n additional_info: str = '',\n debug_msg='',\n diffs: (dict, None) = None,\n confidence: int = 100,\n aspect: (aspectbase, None) = None,\n message_arguments: dict = {}):\n \"\"\"\n Creates a result with only one SourceRange with the given start and end\n locations.\n\n :param origin:\n Class name or creator object of this object.\n :param message:\n Base message to show with this result.\n :param message_arguments:\n Arguments to be provided to the base message\n :param file:\n The related file.\n :param line:\n The first related line in the file. (First line is 1)\n :param column:\n The column indicating the first character. (First character is 1)\n :param end_line:\n The last related line in the file.\n :param end_column:\n The column indicating the last character.\n :param severity:\n Severity of this result.\n :param additional_info:\n A long description holding additional information about the issue\n and/or how to fix it. You can use this like a manual entry for a\n category of issues.\n :param debug_msg:\n A message which may help the user find out why this result was\n yielded.\n :param diffs:\n A dictionary with filename as key and ``Diff`` object\n associated with it as value.\n :param confidence:\n A number between 0 and 100 describing the likelihood of this result\n being a real issue.\n :param aspect:\n An Aspect object which this result is associated to. Note that this\n should be a leaf of the aspect tree! (If you have a node, spend\n some time figuring out which of the leafs exactly your result\n belongs to.)\n \"\"\"\n source_range = SourceRange.from_values(file,\n line,\n column,\n end_line,\n end_column)\n\n return cls(origin=origin,\n message=message,\n affected_code=(source_range,),\n severity=severity,\n additional_info=additional_info,\n debug_msg=debug_msg,\n diffs=diffs,\n confidence=confidence,\n aspect=aspect,\n message_arguments=message_arguments)\n\n def to_string_dict(self):\n \"\"\"\n Makes a dictionary which has all keys and values as strings and\n contains all the data that the base Result has.\n\n FIXME: diffs are not serialized ATM.\n FIXME: Only the first SourceRange of affected_code is serialized. If\n there are more, this data is currently missing.\n\n :return: Dictionary with keys and values as string.\n \"\"\"\n retval = {}\n\n members = ['id',\n 'additional_info',\n 'debug_msg',\n 'message',\n 'message_base',\n 'message_arguments',\n 'origin',\n 'confidence']\n\n for member in members:\n value = getattr(self, member)\n retval[member] = '' if value is None else str(value)\n\n retval['severity'] = str(RESULT_SEVERITY.reverse.get(\n self.severity, ''))\n if len(self.affected_code) > 0:\n retval['file'] = self.affected_code[0].file\n line = self.affected_code[0].start.line\n retval['line_nr'] = '' if line is None else str(line)\n else:\n retval['file'], retval['line_nr'] = '', ''\n\n return retval\n\n @enforce_signature\n def apply(self, file_dict: dict):\n \"\"\"\n Applies all contained diffs to the given file_dict. This operation will\n be done in-place.\n\n :param file_dict: A dictionary containing all files with filename as\n key and all lines a value. Will be modified.\n \"\"\"\n for filename, diff in self.diffs.items():\n file_dict[filename] = diff.modified\n\n def __add__(self, other):\n \"\"\"\n Joins those patches to one patch.\n\n :param other: The other patch.\n \"\"\"\n assert isinstance(self.diffs, dict)\n assert isinstance(other.diffs, dict)\n\n for filename in other.diffs:\n if filename in self.diffs:\n self.diffs[filename] += other.diffs[filename]\n else:\n self.diffs[filename] = other.diffs[filename]\n\n return self\n\n def overlaps(self, ranges):\n \"\"\"\n Determines if the result overlaps with source ranges provided.\n\n :param ranges: A list SourceRange objects to check for overlap.\n :return: True if the ranges overlap with the result.\n \"\"\"\n if isinstance(ranges, SourceRange):\n ranges = [ranges]\n\n for range in ranges:\n for self_range in self.affected_code:\n if range.overlaps(self_range):\n return True\n\n return False\n\n def location_repr(self):\n \"\"\"\n Retrieves a string, that briefly represents\n the affected code of the result.\n\n :return: A string containing all of the affected files\n separated by a comma.\n \"\"\"\n\n if not self.affected_code:\n return 'the whole project'\n\n # Set important to exclude duplicate file names\n range_paths = set(sourcerange.file\n for sourcerange in self.affected_code)\n\n return ', '.join(repr(relpath(range_path))\n for range_path in sorted(range_paths))\n\n def __json__(self, use_relpath=False):\n _dict = get_public_members(self)\n if use_relpath and _dict['diffs']:\n _dict['diffs'] = {relpath(file): diff\n for file, diff in _dict['diffs'].items()}\n _dict['aspect'] = type(self.aspect).__qualname__\n return _dict\n", "path": "coalib/results/Result.py"}], "after_files": [{"content": "import uuid\nfrom os.path import relpath\n\nfrom coala_utils.decorators import (\n enforce_signature, generate_ordering, generate_repr, get_public_members)\nfrom coalib.bearlib.aspects import aspectbase\nfrom coalib.results.RESULT_SEVERITY import RESULT_SEVERITY\nfrom coalib.results.SourceRange import SourceRange\n\n\n# Omit additional info, debug message and diffs for brevity\n@generate_repr(('id', hex),\n 'origin',\n 'affected_code',\n ('severity', RESULT_SEVERITY.reverse.get),\n 'confidence',\n 'message',\n ('aspect', lambda aspect: type(aspect).__qualname__),\n 'applied_actions')\n@generate_ordering('affected_code',\n 'severity',\n 'confidence',\n 'origin',\n 'message_base',\n 'message_arguments',\n 'aspect',\n 'additional_info',\n 'diffs',\n 'debug_msg',\n 'applied_actions')\nclass Result:\n \"\"\"\n A result is anything that has an origin and a message.\n\n Optionally it might affect a file.\n\n Result messages can also have arguments. The message is python\n style formatted with these arguments.\n\n >>> r = Result('origin','{arg1} and {arg2}', \\\n message_arguments={'arg1': 'foo', 'arg2': 'bar'})\n >>> r.message\n 'foo and bar'\n\n Message arguments may be changed later. The result message\n will also reflect these changes.\n\n >>> r.message_arguments = {'arg1': 'spam', 'arg2': 'eggs'}\n >>> r.message\n 'spam and eggs'\n\n \"\"\"\n\n @enforce_signature\n def __init__(self,\n origin,\n message: str,\n affected_code: (tuple, list) = (),\n severity: int = RESULT_SEVERITY.NORMAL,\n additional_info: str = '',\n debug_msg='',\n diffs: (dict, None) = None,\n confidence: int = 100,\n aspect: (aspectbase, None) = None,\n message_arguments: dict = {},\n applied_actions: dict = {}):\n \"\"\"\n :param origin:\n Class name or creator object of this object.\n :param message:\n Base message to show with this result.\n :param affected_code:\n A tuple of ``SourceRange`` objects pointing to related positions\n in the source code.\n :param severity:\n Severity of this result.\n :param additional_info:\n A long description holding additional information about the issue\n and/or how to fix it. You can use this like a manual entry for a\n category of issues.\n :param debug_msg:\n A message which may help the user find out why this result was\n yielded.\n :param diffs:\n A dictionary with filename as key and ``Diff`` object\n associated with it as value.\n :param confidence:\n A number between 0 and 100 describing the likelihood of this result\n being a real issue.\n :param aspect:\n An aspectclass instance which this result is associated to.\n Note that this should be a leaf of the aspect tree!\n (If you have a node, spend some time figuring out which of\n the leafs exactly your result belongs to.)\n :param message_arguments:\n Arguments to be provided to the base message.\n :param applied_actions:\n A dictionary that contains the result, file_dict, file_diff_dict and\n the section for an action.\n :raises ValueError:\n Raised when confidence is not between 0 and 100.\n :raises KeyError:\n Raised when message_base can not be formatted with\n message_arguments.\n \"\"\"\n origin = origin or ''\n if not isinstance(origin, str):\n origin = origin.__class__.__name__\n if severity not in RESULT_SEVERITY.reverse:\n raise ValueError('severity is not a valid RESULT_SEVERITY')\n\n self.origin = origin\n self.message_base = message\n self.message_arguments = message_arguments\n self.applied_actions = applied_actions\n if message_arguments:\n self.message_base.format(**self.message_arguments)\n self.debug_msg = debug_msg\n self.additional_info = additional_info\n # Sorting is important for tuple comparison\n self.affected_code = tuple(sorted(affected_code))\n self.severity = severity\n if confidence < 0 or confidence > 100:\n raise ValueError('Value of confidence should be between 0 and 100.')\n self.confidence = confidence\n self.diffs = diffs\n self.id = uuid.uuid4().int\n self.aspect = aspect\n if self.aspect and not self.additional_info:\n self.additional_info = '{} {}'.format(\n aspect.Docs.importance_reason, aspect.Docs.fix_suggestions)\n\n @property\n def message(self):\n if not self.message_arguments:\n return self.message_base\n return self.message_base.format(**self.message_arguments)\n\n @message.setter\n def message(self, value: str):\n self.message_base = value\n\n def set_applied_actions(self, applied_actions):\n self.applied_actions = applied_actions\n\n def get_applied_actions(self):\n return self.applied_actions\n\n @classmethod\n @enforce_signature\n def from_values(cls,\n origin,\n message: str,\n file: str,\n line: (int, None) = None,\n column: (int, None) = None,\n end_line: (int, None) = None,\n end_column: (int, None) = None,\n severity: int = RESULT_SEVERITY.NORMAL,\n additional_info: str = '',\n debug_msg='',\n diffs: (dict, None) = None,\n confidence: int = 100,\n aspect: (aspectbase, None) = None,\n message_arguments: dict = {}):\n \"\"\"\n Creates a result with only one SourceRange with the given start and end\n locations.\n\n :param origin:\n Class name or creator object of this object.\n :param message:\n Base message to show with this result.\n :param message_arguments:\n Arguments to be provided to the base message\n :param file:\n The related file.\n :param line:\n The first related line in the file. (First line is 1)\n line = None means an unknown line occurs anywhere in the file.\n :param column:\n The column indicating the first character. (First character is 1)\n column = None means there is an error with the whole line.\n All combinations of None values are allowed for line and column,\n Except line = None and column = <some number>\n :param end_line:\n The last related line in the file.\n :param end_column:\n The column indicating the last character.\n :param severity:\n Severity of this result.\n :param additional_info:\n A long description holding additional information about the issue\n and/or how to fix it. You can use this like a manual entry for a\n category of issues.\n :param debug_msg:\n A message which may help the user find out why this result was\n yielded.\n :param diffs:\n A dictionary with filename as key and ``Diff`` object\n associated with it as value.\n :param confidence:\n A number between 0 and 100 describing the likelihood of this result\n being a real issue.\n :param aspect:\n An Aspect object which this result is associated to. Note that this\n should be a leaf of the aspect tree! (If you have a node, spend\n some time figuring out which of the leafs exactly your result\n belongs to.)\n \"\"\"\n source_range = SourceRange.from_values(file,\n line,\n column,\n end_line,\n end_column)\n\n return cls(origin=origin,\n message=message,\n affected_code=(source_range,),\n severity=severity,\n additional_info=additional_info,\n debug_msg=debug_msg,\n diffs=diffs,\n confidence=confidence,\n aspect=aspect,\n message_arguments=message_arguments)\n\n def to_string_dict(self):\n \"\"\"\n Makes a dictionary which has all keys and values as strings and\n contains all the data that the base Result has.\n\n FIXME: diffs are not serialized ATM.\n FIXME: Only the first SourceRange of affected_code is serialized. If\n there are more, this data is currently missing.\n\n :return: Dictionary with keys and values as string.\n \"\"\"\n retval = {}\n\n members = ['id',\n 'additional_info',\n 'debug_msg',\n 'message',\n 'message_base',\n 'message_arguments',\n 'origin',\n 'confidence']\n\n for member in members:\n value = getattr(self, member)\n retval[member] = '' if value is None else str(value)\n\n retval['severity'] = str(RESULT_SEVERITY.reverse.get(\n self.severity, ''))\n if len(self.affected_code) > 0:\n retval['file'] = self.affected_code[0].file\n line = self.affected_code[0].start.line\n retval['line_nr'] = '' if line is None else str(line)\n else:\n retval['file'], retval['line_nr'] = '', ''\n\n return retval\n\n @enforce_signature\n def apply(self, file_dict: dict):\n \"\"\"\n Applies all contained diffs to the given file_dict. This operation will\n be done in-place.\n\n :param file_dict: A dictionary containing all files with filename as\n key and all lines a value. Will be modified.\n \"\"\"\n for filename, diff in self.diffs.items():\n file_dict[filename] = diff.modified\n\n def __add__(self, other):\n \"\"\"\n Joins those patches to one patch.\n\n :param other: The other patch.\n \"\"\"\n assert isinstance(self.diffs, dict)\n assert isinstance(other.diffs, dict)\n\n for filename in other.diffs:\n if filename in self.diffs:\n self.diffs[filename] += other.diffs[filename]\n else:\n self.diffs[filename] = other.diffs[filename]\n\n return self\n\n def overlaps(self, ranges):\n \"\"\"\n Determines if the result overlaps with source ranges provided.\n\n :param ranges: A list SourceRange objects to check for overlap.\n :return: True if the ranges overlap with the result.\n \"\"\"\n if isinstance(ranges, SourceRange):\n ranges = [ranges]\n\n for range in ranges:\n for self_range in self.affected_code:\n if range.overlaps(self_range):\n return True\n\n return False\n\n def location_repr(self):\n \"\"\"\n Retrieves a string, that briefly represents\n the affected code of the result.\n\n :return: A string containing all of the affected files\n separated by a comma.\n \"\"\"\n\n if not self.affected_code:\n return 'the whole project'\n\n # Set important to exclude duplicate file names\n range_paths = set(sourcerange.file\n for sourcerange in self.affected_code)\n\n return ', '.join(repr(relpath(range_path))\n for range_path in sorted(range_paths))\n\n def __json__(self, use_relpath=False):\n _dict = get_public_members(self)\n if use_relpath and _dict['diffs']:\n _dict['diffs'] = {relpath(file): diff\n for file, diff in _dict['diffs'].items()}\n _dict['aspect'] = type(self.aspect).__qualname__\n return _dict\n", "path": "coalib/results/Result.py"}]}
3,721
179