Dataset columns (name, type, observed range):

  problem_id         string, lengths 18 to 22
  source             string (categorical), 1 distinct value
  task_type          string (categorical), 1 distinct value
  in_source_id       string, lengths 13 to 58
  prompt             string, lengths 1.1k to 25.4k
  golden_diff        string, lengths 145 to 5.13k
  verification_info  string, lengths 582 to 39.1k
  num_tokens         int64, 271 to 4.1k
  num_tokens_diff    int64, 47 to 1.02k
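The examples that follow are individual rows in this schema. As a rough illustration of how rows of this shape can be inspected programmatically, here is a minimal sketch using the `datasets` library. The repository id is an assumption taken from the `source` field and may not be where this exact dataset is hosted; the field names simply mirror the columns listed above.

```python
# Minimal sketch: load rows with the schema above and inspect one of them.
# ASSUMPTION: the repository id below is a guess taken from the `source` field;
# replace it with wherever this dataset is actually hosted.
import json
from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")  # hypothetical id/split

row = ds[0]
print(row["problem_id"], row["in_source_id"])
print("prompt tokens:", row["num_tokens"], "| diff tokens:", row["num_tokens_diff"])

# In the examples below, verification_info is a JSON string bundling the issue
# text, the pre-patch files, the post-patch files, and the golden diff.
info = json.loads(row["verification_info"])
print(sorted(info))  # expected keys: after_files, before_files, golden_diff, issue
```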

problem_id: gh_patches_debug_38351
source: rasdani/github-patches
task_type: git_diff
in_source_id: meltano__meltano-6534
prompt:
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Meltano lock fails on migrating to >2.0.0 when variant didnt previously exist We should update the log message and add a section to the migration docs to help users get variants added for plugins that didnt have a variant previously. For example Airflow locking fails because a variant isnt set for all installations prior to 2.0.0. The error message isnt totally clear in saying this and users will need to manually update their meltano.yml. Originally discussed in https://github.com/meltano/meltano/issues/6359#issuecomment-1175432513 > Additionally I'm getting Orchestrator 'airflow' variant 'original' is not known to Meltano. Variants: ['apache (default)'] from a default installation of Airflow. I know that its hard because when I originally installed it there wasnt a variant name for airflow but now there is. Is there a way for us to resolve that its the default variant? Maybe that wont work because the default could change, maybe using the discovery.yml when we think were migrating to >2.0.0? > > Or we could log it better and say something like "I notice youre migrating and you dont have a variant, go to the Migration guide and learn how to define a variant" and we write up the steps for locking post migration and adding a variant name: > > remove executable/namespace if defined > add variant using one available on MeltanoHub. We can list the mapping like airflow -> apache, dbt -> dbt-labs, etc. for plugins that just got variant names > run meltano lock --all cc @edgarrmondragon --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/meltano/core/hub/client.py` Content: ``` 1 """Meltano Hub Client.""" 2 3 from __future__ import annotations 4 5 from typing import Any 6 7 import requests 8 from structlog.stdlib import get_logger 9 10 import meltano 11 from meltano.core.plugin import ( 12 BasePlugin, 13 PluginDefinition, 14 PluginRef, 15 PluginType, 16 Variant, 17 ) 18 from meltano.core.plugin.error import PluginNotFoundError 19 from meltano.core.plugin.factory import base_plugin_factory 20 from meltano.core.plugin_discovery_service import PluginRepository 21 from meltano.core.project import Project 22 from meltano.core.project_settings_service import ProjectSettingsService 23 24 from .schema import IndexedPlugin, VariantRef 25 26 logger = get_logger(__name__) 27 28 29 class HubPluginTypeNotFound(Exception): 30 """Raised when a Hub plugin type is not found.""" 31 32 def __init__(self, plugin_type: PluginType): 33 """Create a new HubPluginVariantNotFound. 34 35 Args: 36 plugin_type: The type of the plugin. 37 """ 38 self.plugin_type = plugin_type 39 40 def __str__(self) -> str: 41 """Return a string representation of the error. 42 43 Returns: 44 The string representation of the error. 45 """ 46 return "{type} is not supported in Meltano Hub. Available plugin types: {types}".format( 47 type=self.plugin_type.descriptor.capitalize(), 48 types=list(PluginType), 49 ) 50 51 52 class HubPluginVariantNotFound(Exception): 53 """Raised when a Hub plugin variant is not found.""" 54 55 def __init__( 56 self, 57 plugin_type: PluginType, 58 plugin: IndexedPlugin, 59 variant_name: str, 60 ): 61 """Create a new HubPluginVariantNotFound. 62 63 Args: 64 plugin_type: The type of the plugin. 65 plugin: The indexed plugin. 66 variant_name: The name of the variant that was not found. 
67 """ 68 self.plugin_type = plugin_type 69 self.plugin = plugin 70 self.variant_name = variant_name 71 72 def __str__(self) -> str: 73 """Return a string representation of the error. 74 75 Returns: 76 The string representation of the error. 77 """ 78 return "{type} '{name}' variant '{variant}' is not known to Meltano. Variants: {variant_labels}".format( 79 type=self.plugin_type.descriptor.capitalize(), 80 name=self.plugin.name, 81 variant=self.variant_name, 82 variant_labels=self.plugin.variant_labels, 83 ) 84 85 86 class MeltanoHubService(PluginRepository): 87 """PluginRepository implementation for the Meltano Hub.""" 88 89 def __init__(self, project: Project) -> None: 90 """Initialize the service. 91 92 Args: 93 project: The Meltano project. 94 """ 95 self.project = project 96 self.session = requests.Session() 97 self.session.headers.update( 98 { 99 "Accept": "application/json", 100 "User-Agent": f"Meltano/{meltano.__version__}", 101 } 102 ) 103 104 self.settings_service = ProjectSettingsService(self.project) 105 106 if self.settings_service.get("send_anonymous_usage_stats"): 107 project_id = self.settings_service.get("project_id") 108 109 self.session.headers["X-Project-ID"] = project_id 110 111 @property 112 def hub_api_url(self): 113 """Return the URL of the Hub API. 114 115 Returns: 116 The URL of the Hub API. 117 """ 118 hub_url = self.settings_service.get("hub_url") 119 return f"{hub_url}/meltano/api/v1" 120 121 def plugin_type_endpoint(self, plugin_type: PluginType) -> str: 122 """Return the list endpoint for the given plugin type. 123 124 Args: 125 plugin_type: The plugin type. 126 127 Returns: 128 The endpoint for the given plugin type. 129 """ 130 return f"{self.hub_api_url}/plugins/{plugin_type.value}/index" 131 132 def plugin_endpoint( 133 self, 134 plugin_type: PluginType, 135 plugin_name: str, 136 variant_name: str | None = None, 137 ) -> str: 138 """Return the resource endpoint for the given plugin. 139 140 Args: 141 plugin_type: The plugin type. 142 plugin_name: The plugin name. 143 variant_name: The plugin variant name. 144 145 Returns: 146 The endpoint for the given plugin type. 147 """ 148 url = f"{self.hub_api_url}/plugins/{plugin_type.value}/{plugin_name}" 149 if variant_name: 150 url = f"{url}--{variant_name}" 151 152 return url 153 154 def find_definition( 155 self, 156 plugin_type: PluginType, 157 plugin_name: str, 158 variant_name: str | None = None, 159 ) -> PluginDefinition: 160 """Find a locked plugin definition. 161 162 Args: 163 plugin_type: The plugin type. 164 plugin_name: The plugin name. 165 variant_name: The plugin variant name. 166 167 Returns: 168 The plugin definition. 169 170 Raises: 171 PluginNotFoundError: If the plugin definition could not be found. 172 HubPluginVariantNotFound: If the plugin variant could not be found. 
173 """ 174 plugins = self.get_plugins_of_type(plugin_type) 175 176 try: 177 plugin = plugins[plugin_name] 178 except KeyError as plugins_key_err: 179 raise PluginNotFoundError( 180 PluginRef(plugin_type, plugin_name) 181 ) from plugins_key_err 182 183 if variant_name is None or variant_name == Variant.DEFAULT_NAME: 184 variant_name = plugin.default_variant 185 186 try: 187 url = plugin.variants[variant_name].ref 188 except KeyError as variant_key_err: 189 raise HubPluginVariantNotFound( 190 plugin_type, plugin, variant_name 191 ) from variant_key_err 192 193 response = self.session.get(url) 194 195 try: 196 response.raise_for_status() 197 except requests.HTTPError as http_err: 198 logger.error( 199 "Can not retrieve plugin", 200 status_code=http_err.response.status_code, 201 error=http_err, 202 ) 203 raise PluginNotFoundError(PluginRef(plugin_type, plugin_name)) from http_err 204 205 return PluginDefinition(**response.json(), plugin_type=plugin_type) 206 207 def find_base_plugin( 208 self, 209 plugin_type: PluginType, 210 plugin_name: str, 211 variant: str | None = None, 212 ) -> BasePlugin: 213 """Get the base plugin for a project plugin. 214 215 Args: 216 plugin_type: The plugin type. 217 plugin_name: The plugin name. 218 variant: The plugin variant. 219 220 Returns: 221 The base plugin. 222 """ 223 plugin = self.find_definition( 224 plugin_type, 225 plugin_name, 226 variant_name=variant, 227 ) 228 229 return base_plugin_factory(plugin, plugin.variants[0]) 230 231 def get_plugins_of_type(self, plugin_type: PluginType) -> dict[str, IndexedPlugin]: 232 """Get all plugins of a given type. 233 234 Args: 235 plugin_type: The plugin type. 236 237 Returns: 238 The plugin definitions. 239 240 Raises: 241 HubPluginTypeNotFound: If the plugin type is not supported. 242 """ 243 if not plugin_type.discoverable: 244 return {} 245 246 url = self.plugin_type_endpoint(plugin_type) 247 response = self.session.get(url) 248 249 try: 250 response.raise_for_status() 251 except requests.HTTPError as err: 252 logger.error( 253 "Can not retrieve plugin type", 254 status_code=err.response.status_code, 255 error=err, 256 ) 257 raise HubPluginTypeNotFound(plugin_type) from err 258 259 plugins: dict[str, dict[str, Any]] = response.json() 260 return { 261 name: IndexedPlugin( 262 name, 263 logo_url=plugin["logo_url"], 264 default_variant=plugin["default_variant"], 265 variants={ 266 variant_name: VariantRef(variant_name, ref=variant["ref"]) 267 for variant_name, variant in plugin["variants"].items() 268 }, 269 ) 270 for name, plugin in plugins.items() 271 } 272 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/meltano/core/hub/client.py b/src/meltano/core/hub/client.py --- a/src/meltano/core/hub/client.py +++ b/src/meltano/core/hub/client.py @@ -26,7 +26,7 @@ logger = get_logger(__name__) -class HubPluginTypeNotFound(Exception): +class HubPluginTypeNotFoundError(Exception): """Raised when a Hub plugin type is not found.""" def __init__(self, plugin_type: PluginType): @@ -49,7 +49,7 @@ ) -class HubPluginVariantNotFound(Exception): +class HubPluginVariantNotFoundError(Exception): """Raised when a Hub plugin variant is not found.""" def __init__( @@ -169,7 +169,7 @@ Raises: PluginNotFoundError: If the plugin definition could not be found. - HubPluginVariantNotFound: If the plugin variant could not be found. + HubPluginVariantNotFoundError: If the plugin variant could not be found. """ plugins = self.get_plugins_of_type(plugin_type) @@ -180,13 +180,16 @@ PluginRef(plugin_type, plugin_name) ) from plugins_key_err - if variant_name is None or variant_name == Variant.DEFAULT_NAME: + if variant_name is None or variant_name in { + Variant.DEFAULT_NAME, + Variant.ORIGINAL_NAME, + }: variant_name = plugin.default_variant try: url = plugin.variants[variant_name].ref except KeyError as variant_key_err: - raise HubPluginVariantNotFound( + raise HubPluginVariantNotFoundError( plugin_type, plugin, variant_name ) from variant_key_err @@ -238,7 +241,7 @@ The plugin definitions. Raises: - HubPluginTypeNotFound: If the plugin type is not supported. + HubPluginTypeNotFoundError: If the plugin type is not supported. """ if not plugin_type.discoverable: return {} @@ -254,7 +257,7 @@ status_code=err.response.status_code, error=err, ) - raise HubPluginTypeNotFound(plugin_type) from err + raise HubPluginTypeNotFoundError(plugin_type) from err plugins: dict[str, dict[str, Any]] = response.json() return {
{"golden_diff": "diff --git a/src/meltano/core/hub/client.py b/src/meltano/core/hub/client.py\n--- a/src/meltano/core/hub/client.py\n+++ b/src/meltano/core/hub/client.py\n@@ -26,7 +26,7 @@\n logger = get_logger(__name__)\n \n \n-class HubPluginTypeNotFound(Exception):\n+class HubPluginTypeNotFoundError(Exception):\n \"\"\"Raised when a Hub plugin type is not found.\"\"\"\n \n def __init__(self, plugin_type: PluginType):\n@@ -49,7 +49,7 @@\n )\n \n \n-class HubPluginVariantNotFound(Exception):\n+class HubPluginVariantNotFoundError(Exception):\n \"\"\"Raised when a Hub plugin variant is not found.\"\"\"\n \n def __init__(\n@@ -169,7 +169,7 @@\n \n Raises:\n PluginNotFoundError: If the plugin definition could not be found.\n- HubPluginVariantNotFound: If the plugin variant could not be found.\n+ HubPluginVariantNotFoundError: If the plugin variant could not be found.\n \"\"\"\n plugins = self.get_plugins_of_type(plugin_type)\n \n@@ -180,13 +180,16 @@\n PluginRef(plugin_type, plugin_name)\n ) from plugins_key_err\n \n- if variant_name is None or variant_name == Variant.DEFAULT_NAME:\n+ if variant_name is None or variant_name in {\n+ Variant.DEFAULT_NAME,\n+ Variant.ORIGINAL_NAME,\n+ }:\n variant_name = plugin.default_variant\n \n try:\n url = plugin.variants[variant_name].ref\n except KeyError as variant_key_err:\n- raise HubPluginVariantNotFound(\n+ raise HubPluginVariantNotFoundError(\n plugin_type, plugin, variant_name\n ) from variant_key_err\n \n@@ -238,7 +241,7 @@\n The plugin definitions.\n \n Raises:\n- HubPluginTypeNotFound: If the plugin type is not supported.\n+ HubPluginTypeNotFoundError: If the plugin type is not supported.\n \"\"\"\n if not plugin_type.discoverable:\n return {}\n@@ -254,7 +257,7 @@\n status_code=err.response.status_code,\n error=err,\n )\n- raise HubPluginTypeNotFound(plugin_type) from err\n+ raise HubPluginTypeNotFoundError(plugin_type) from err\n \n plugins: dict[str, dict[str, Any]] = response.json()\n return {\n", "issue": "Meltano lock fails on migrating to >2.0.0 when variant didnt previously exist\nWe should update the log message and add a section to the migration docs to help users get variants added for plugins that didnt have a variant previously. For example Airflow locking fails because a variant isnt set for all installations prior to 2.0.0. The error message isnt totally clear in saying this and users will need to manually update their meltano.yml.\r\n\r\nOriginally discussed in https://github.com/meltano/meltano/issues/6359#issuecomment-1175432513\r\n\r\n> Additionally I'm getting Orchestrator 'airflow' variant 'original' is not known to Meltano. Variants: ['apache (default)'] from a default installation of Airflow. I know that its hard because when I originally installed it there wasnt a variant name for airflow but now there is. Is there a way for us to resolve that its the default variant? Maybe that wont work because the default could change, maybe using the discovery.yml when we think were migrating to >2.0.0?\r\n> \r\n> Or we could log it better and say something like \"I notice youre migrating and you dont have a variant, go to the Migration guide and learn how to define a variant\" and we write up the steps for locking post migration and adding a variant name:\r\n> \r\n> remove executable/namespace if defined\r\n> add variant using one available on MeltanoHub. We can list the mapping like airflow -> apache, dbt -> dbt-labs, etc. 
for plugins that just got variant names\r\n> run meltano lock --all\r\n\r\ncc @edgarrmondragon \n", "before_files": [{"content": "\"\"\"Meltano Hub Client.\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import Any\n\nimport requests\nfrom structlog.stdlib import get_logger\n\nimport meltano\nfrom meltano.core.plugin import (\n BasePlugin,\n PluginDefinition,\n PluginRef,\n PluginType,\n Variant,\n)\nfrom meltano.core.plugin.error import PluginNotFoundError\nfrom meltano.core.plugin.factory import base_plugin_factory\nfrom meltano.core.plugin_discovery_service import PluginRepository\nfrom meltano.core.project import Project\nfrom meltano.core.project_settings_service import ProjectSettingsService\n\nfrom .schema import IndexedPlugin, VariantRef\n\nlogger = get_logger(__name__)\n\n\nclass HubPluginTypeNotFound(Exception):\n \"\"\"Raised when a Hub plugin type is not found.\"\"\"\n\n def __init__(self, plugin_type: PluginType):\n \"\"\"Create a new HubPluginVariantNotFound.\n\n Args:\n plugin_type: The type of the plugin.\n \"\"\"\n self.plugin_type = plugin_type\n\n def __str__(self) -> str:\n \"\"\"Return a string representation of the error.\n\n Returns:\n The string representation of the error.\n \"\"\"\n return \"{type} is not supported in Meltano Hub. Available plugin types: {types}\".format(\n type=self.plugin_type.descriptor.capitalize(),\n types=list(PluginType),\n )\n\n\nclass HubPluginVariantNotFound(Exception):\n \"\"\"Raised when a Hub plugin variant is not found.\"\"\"\n\n def __init__(\n self,\n plugin_type: PluginType,\n plugin: IndexedPlugin,\n variant_name: str,\n ):\n \"\"\"Create a new HubPluginVariantNotFound.\n\n Args:\n plugin_type: The type of the plugin.\n plugin: The indexed plugin.\n variant_name: The name of the variant that was not found.\n \"\"\"\n self.plugin_type = plugin_type\n self.plugin = plugin\n self.variant_name = variant_name\n\n def __str__(self) -> str:\n \"\"\"Return a string representation of the error.\n\n Returns:\n The string representation of the error.\n \"\"\"\n return \"{type} '{name}' variant '{variant}' is not known to Meltano. 
Variants: {variant_labels}\".format(\n type=self.plugin_type.descriptor.capitalize(),\n name=self.plugin.name,\n variant=self.variant_name,\n variant_labels=self.plugin.variant_labels,\n )\n\n\nclass MeltanoHubService(PluginRepository):\n \"\"\"PluginRepository implementation for the Meltano Hub.\"\"\"\n\n def __init__(self, project: Project) -> None:\n \"\"\"Initialize the service.\n\n Args:\n project: The Meltano project.\n \"\"\"\n self.project = project\n self.session = requests.Session()\n self.session.headers.update(\n {\n \"Accept\": \"application/json\",\n \"User-Agent\": f\"Meltano/{meltano.__version__}\",\n }\n )\n\n self.settings_service = ProjectSettingsService(self.project)\n\n if self.settings_service.get(\"send_anonymous_usage_stats\"):\n project_id = self.settings_service.get(\"project_id\")\n\n self.session.headers[\"X-Project-ID\"] = project_id\n\n @property\n def hub_api_url(self):\n \"\"\"Return the URL of the Hub API.\n\n Returns:\n The URL of the Hub API.\n \"\"\"\n hub_url = self.settings_service.get(\"hub_url\")\n return f\"{hub_url}/meltano/api/v1\"\n\n def plugin_type_endpoint(self, plugin_type: PluginType) -> str:\n \"\"\"Return the list endpoint for the given plugin type.\n\n Args:\n plugin_type: The plugin type.\n\n Returns:\n The endpoint for the given plugin type.\n \"\"\"\n return f\"{self.hub_api_url}/plugins/{plugin_type.value}/index\"\n\n def plugin_endpoint(\n self,\n plugin_type: PluginType,\n plugin_name: str,\n variant_name: str | None = None,\n ) -> str:\n \"\"\"Return the resource endpoint for the given plugin.\n\n Args:\n plugin_type: The plugin type.\n plugin_name: The plugin name.\n variant_name: The plugin variant name.\n\n Returns:\n The endpoint for the given plugin type.\n \"\"\"\n url = f\"{self.hub_api_url}/plugins/{plugin_type.value}/{plugin_name}\"\n if variant_name:\n url = f\"{url}--{variant_name}\"\n\n return url\n\n def find_definition(\n self,\n plugin_type: PluginType,\n plugin_name: str,\n variant_name: str | None = None,\n ) -> PluginDefinition:\n \"\"\"Find a locked plugin definition.\n\n Args:\n plugin_type: The plugin type.\n plugin_name: The plugin name.\n variant_name: The plugin variant name.\n\n Returns:\n The plugin definition.\n\n Raises:\n PluginNotFoundError: If the plugin definition could not be found.\n HubPluginVariantNotFound: If the plugin variant could not be found.\n \"\"\"\n plugins = self.get_plugins_of_type(plugin_type)\n\n try:\n plugin = plugins[plugin_name]\n except KeyError as plugins_key_err:\n raise PluginNotFoundError(\n PluginRef(plugin_type, plugin_name)\n ) from plugins_key_err\n\n if variant_name is None or variant_name == Variant.DEFAULT_NAME:\n variant_name = plugin.default_variant\n\n try:\n url = plugin.variants[variant_name].ref\n except KeyError as variant_key_err:\n raise HubPluginVariantNotFound(\n plugin_type, plugin, variant_name\n ) from variant_key_err\n\n response = self.session.get(url)\n\n try:\n response.raise_for_status()\n except requests.HTTPError as http_err:\n logger.error(\n \"Can not retrieve plugin\",\n status_code=http_err.response.status_code,\n error=http_err,\n )\n raise PluginNotFoundError(PluginRef(plugin_type, plugin_name)) from http_err\n\n return PluginDefinition(**response.json(), plugin_type=plugin_type)\n\n def find_base_plugin(\n self,\n plugin_type: PluginType,\n plugin_name: str,\n variant: str | None = None,\n ) -> BasePlugin:\n \"\"\"Get the base plugin for a project plugin.\n\n Args:\n plugin_type: The plugin type.\n plugin_name: The plugin name.\n variant: 
The plugin variant.\n\n Returns:\n The base plugin.\n \"\"\"\n plugin = self.find_definition(\n plugin_type,\n plugin_name,\n variant_name=variant,\n )\n\n return base_plugin_factory(plugin, plugin.variants[0])\n\n def get_plugins_of_type(self, plugin_type: PluginType) -> dict[str, IndexedPlugin]:\n \"\"\"Get all plugins of a given type.\n\n Args:\n plugin_type: The plugin type.\n\n Returns:\n The plugin definitions.\n\n Raises:\n HubPluginTypeNotFound: If the plugin type is not supported.\n \"\"\"\n if not plugin_type.discoverable:\n return {}\n\n url = self.plugin_type_endpoint(plugin_type)\n response = self.session.get(url)\n\n try:\n response.raise_for_status()\n except requests.HTTPError as err:\n logger.error(\n \"Can not retrieve plugin type\",\n status_code=err.response.status_code,\n error=err,\n )\n raise HubPluginTypeNotFound(plugin_type) from err\n\n plugins: dict[str, dict[str, Any]] = response.json()\n return {\n name: IndexedPlugin(\n name,\n logo_url=plugin[\"logo_url\"],\n default_variant=plugin[\"default_variant\"],\n variants={\n variant_name: VariantRef(variant_name, ref=variant[\"ref\"])\n for variant_name, variant in plugin[\"variants\"].items()\n },\n )\n for name, plugin in plugins.items()\n }\n", "path": "src/meltano/core/hub/client.py"}], "after_files": [{"content": "\"\"\"Meltano Hub Client.\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import Any\n\nimport requests\nfrom structlog.stdlib import get_logger\n\nimport meltano\nfrom meltano.core.plugin import (\n BasePlugin,\n PluginDefinition,\n PluginRef,\n PluginType,\n Variant,\n)\nfrom meltano.core.plugin.error import PluginNotFoundError\nfrom meltano.core.plugin.factory import base_plugin_factory\nfrom meltano.core.plugin_discovery_service import PluginRepository\nfrom meltano.core.project import Project\nfrom meltano.core.project_settings_service import ProjectSettingsService\n\nfrom .schema import IndexedPlugin, VariantRef\n\nlogger = get_logger(__name__)\n\n\nclass HubPluginTypeNotFoundError(Exception):\n \"\"\"Raised when a Hub plugin type is not found.\"\"\"\n\n def __init__(self, plugin_type: PluginType):\n \"\"\"Create a new HubPluginVariantNotFound.\n\n Args:\n plugin_type: The type of the plugin.\n \"\"\"\n self.plugin_type = plugin_type\n\n def __str__(self) -> str:\n \"\"\"Return a string representation of the error.\n\n Returns:\n The string representation of the error.\n \"\"\"\n return \"{type} is not supported in Meltano Hub. Available plugin types: {types}\".format(\n type=self.plugin_type.descriptor.capitalize(),\n types=list(PluginType),\n )\n\n\nclass HubPluginVariantNotFoundError(Exception):\n \"\"\"Raised when a Hub plugin variant is not found.\"\"\"\n\n def __init__(\n self,\n plugin_type: PluginType,\n plugin: IndexedPlugin,\n variant_name: str,\n ):\n \"\"\"Create a new HubPluginVariantNotFound.\n\n Args:\n plugin_type: The type of the plugin.\n plugin: The indexed plugin.\n variant_name: The name of the variant that was not found.\n \"\"\"\n self.plugin_type = plugin_type\n self.plugin = plugin\n self.variant_name = variant_name\n\n def __str__(self) -> str:\n \"\"\"Return a string representation of the error.\n\n Returns:\n The string representation of the error.\n \"\"\"\n return \"{type} '{name}' variant '{variant}' is not known to Meltano. 
Variants: {variant_labels}\".format(\n type=self.plugin_type.descriptor.capitalize(),\n name=self.plugin.name,\n variant=self.variant_name,\n variant_labels=self.plugin.variant_labels,\n )\n\n\nclass MeltanoHubService(PluginRepository):\n \"\"\"PluginRepository implementation for the Meltano Hub.\"\"\"\n\n def __init__(self, project: Project) -> None:\n \"\"\"Initialize the service.\n\n Args:\n project: The Meltano project.\n \"\"\"\n self.project = project\n self.session = requests.Session()\n self.session.headers.update(\n {\n \"Accept\": \"application/json\",\n \"User-Agent\": f\"Meltano/{meltano.__version__}\",\n }\n )\n\n self.settings_service = ProjectSettingsService(self.project)\n\n if self.settings_service.get(\"send_anonymous_usage_stats\"):\n project_id = self.settings_service.get(\"project_id\")\n\n self.session.headers[\"X-Project-ID\"] = project_id\n\n @property\n def hub_api_url(self):\n \"\"\"Return the URL of the Hub API.\n\n Returns:\n The URL of the Hub API.\n \"\"\"\n hub_url = self.settings_service.get(\"hub_url\")\n return f\"{hub_url}/meltano/api/v1\"\n\n def plugin_type_endpoint(self, plugin_type: PluginType) -> str:\n \"\"\"Return the list endpoint for the given plugin type.\n\n Args:\n plugin_type: The plugin type.\n\n Returns:\n The endpoint for the given plugin type.\n \"\"\"\n return f\"{self.hub_api_url}/plugins/{plugin_type.value}/index\"\n\n def plugin_endpoint(\n self,\n plugin_type: PluginType,\n plugin_name: str,\n variant_name: str | None = None,\n ) -> str:\n \"\"\"Return the resource endpoint for the given plugin.\n\n Args:\n plugin_type: The plugin type.\n plugin_name: The plugin name.\n variant_name: The plugin variant name.\n\n Returns:\n The endpoint for the given plugin type.\n \"\"\"\n url = f\"{self.hub_api_url}/plugins/{plugin_type.value}/{plugin_name}\"\n if variant_name:\n url = f\"{url}--{variant_name}\"\n\n return url\n\n def find_definition(\n self,\n plugin_type: PluginType,\n plugin_name: str,\n variant_name: str | None = None,\n ) -> PluginDefinition:\n \"\"\"Find a locked plugin definition.\n\n Args:\n plugin_type: The plugin type.\n plugin_name: The plugin name.\n variant_name: The plugin variant name.\n\n Returns:\n The plugin definition.\n\n Raises:\n PluginNotFoundError: If the plugin definition could not be found.\n HubPluginVariantNotFoundError: If the plugin variant could not be found.\n \"\"\"\n plugins = self.get_plugins_of_type(plugin_type)\n\n try:\n plugin = plugins[plugin_name]\n except KeyError as plugins_key_err:\n raise PluginNotFoundError(\n PluginRef(plugin_type, plugin_name)\n ) from plugins_key_err\n\n if variant_name is None or variant_name in {\n Variant.DEFAULT_NAME,\n Variant.ORIGINAL_NAME,\n }:\n variant_name = plugin.default_variant\n\n try:\n url = plugin.variants[variant_name].ref\n except KeyError as variant_key_err:\n raise HubPluginVariantNotFoundError(\n plugin_type, plugin, variant_name\n ) from variant_key_err\n\n response = self.session.get(url)\n\n try:\n response.raise_for_status()\n except requests.HTTPError as http_err:\n logger.error(\n \"Can not retrieve plugin\",\n status_code=http_err.response.status_code,\n error=http_err,\n )\n raise PluginNotFoundError(PluginRef(plugin_type, plugin_name)) from http_err\n\n return PluginDefinition(**response.json(), plugin_type=plugin_type)\n\n def find_base_plugin(\n self,\n plugin_type: PluginType,\n plugin_name: str,\n variant: str | None = None,\n ) -> BasePlugin:\n \"\"\"Get the base plugin for a project plugin.\n\n Args:\n plugin_type: The plugin 
type.\n plugin_name: The plugin name.\n variant: The plugin variant.\n\n Returns:\n The base plugin.\n \"\"\"\n plugin = self.find_definition(\n plugin_type,\n plugin_name,\n variant_name=variant,\n )\n\n return base_plugin_factory(plugin, plugin.variants[0])\n\n def get_plugins_of_type(self, plugin_type: PluginType) -> dict[str, IndexedPlugin]:\n \"\"\"Get all plugins of a given type.\n\n Args:\n plugin_type: The plugin type.\n\n Returns:\n The plugin definitions.\n\n Raises:\n HubPluginTypeNotFoundError: If the plugin type is not supported.\n \"\"\"\n if not plugin_type.discoverable:\n return {}\n\n url = self.plugin_type_endpoint(plugin_type)\n response = self.session.get(url)\n\n try:\n response.raise_for_status()\n except requests.HTTPError as err:\n logger.error(\n \"Can not retrieve plugin type\",\n status_code=err.response.status_code,\n error=err,\n )\n raise HubPluginTypeNotFoundError(plugin_type) from err\n\n plugins: dict[str, dict[str, Any]] = response.json()\n return {\n name: IndexedPlugin(\n name,\n logo_url=plugin[\"logo_url\"],\n default_variant=plugin[\"default_variant\"],\n variants={\n variant_name: VariantRef(variant_name, ref=variant[\"ref\"])\n for variant_name, variant in plugin[\"variants\"].items()\n },\n )\n for name, plugin in plugins.items()\n }\n", "path": "src/meltano/core/hub/client.py"}]}
num_tokens: 2,960
num_tokens_diff: 514
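Because each row carries both the pre-patch and post-patch file contents, its golden diff can be sanity-checked offline. The sketch below shows one assumed way to do that; it relies on `git` being available on PATH and on the diff using the usual a/ and b/ path prefixes (both hold for the example above), and it is not necessarily the harness used to build or score this dataset.

```python
# Sketch: replay a row's golden_diff onto before_files and compare with after_files.
# ASSUMPTION: `git` is installed and the diff paths use the standard a/ b/ prefixes.
import json
import subprocess
import tempfile
from pathlib import Path

def check_row(row: dict) -> bool:
    info = json.loads(row["verification_info"])
    with tempfile.TemporaryDirectory() as tmp:
        workdir = Path(tmp)
        # Materialize the pre-patch files.
        for f in info["before_files"]:
            target = workdir / f["path"]
            target.parent.mkdir(parents=True, exist_ok=True)
            target.write_text(f["content"])
        # Apply the golden diff from stdin (git apply also works outside a repo).
        subprocess.run(
            ["git", "apply", "-"],
            cwd=workdir,
            input=info["golden_diff"],
            text=True,
            check=True,
        )
        # Every post-patch file should now match exactly.
        return all(
            (workdir / f["path"]).read_text() == f["content"]
            for f in info["after_files"]
        )
```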

problem_id: gh_patches_debug_23166
source: rasdani/github-patches
task_type: git_diff
in_source_id: Kinto__kinto-1219
prompt:
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Crash on DELETE /buckets when quota plugin is enabled Another collateral damage of #945 A `http DELETE /v1/buckets` when the quota plugin is enabled leads to: ``` File "/home/mathieu/Code/Mozilla/kinto/.venv/lib/python3.5/site-packages/zope/interface/adapter.py", line 598, in subscribers subscription(*objects) File "/home/mathieu/Code/Mozilla/kinto/.venv/lib/python3.5/site-packages/pyramid/config/adapters.py", line 130, in subscriber_wrapper return derived_subscriber(*arg) File "/home/mathieu/Code/Mozilla/kinto/.venv/lib/python3.5/site-packages/pyramid/config/adapters.py", line 103, in derived_subscriber return subscriber(arg[0]) File "/home/mathieu/Code/Mozilla/kinto/kinto/plugins/quotas/listener.py", line 47, in on_resource_changed bucket_id = payload['bucket_id'] KeyError: 'bucket_id' ``` Crash on DELETE /buckets when quota plugin is enabled Another collateral damage of #945 A `http DELETE /v1/buckets` when the quota plugin is enabled leads to: ``` File "/home/mathieu/Code/Mozilla/kinto/.venv/lib/python3.5/site-packages/zope/interface/adapter.py", line 598, in subscribers subscription(*objects) File "/home/mathieu/Code/Mozilla/kinto/.venv/lib/python3.5/site-packages/pyramid/config/adapters.py", line 130, in subscriber_wrapper return derived_subscriber(*arg) File "/home/mathieu/Code/Mozilla/kinto/.venv/lib/python3.5/site-packages/pyramid/config/adapters.py", line 103, in derived_subscriber return subscriber(arg[0]) File "/home/mathieu/Code/Mozilla/kinto/kinto/plugins/quotas/listener.py", line 47, in on_resource_changed bucket_id = payload['bucket_id'] KeyError: 'bucket_id' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kinto/plugins/quotas/listener.py` Content: ``` 1 import copy 2 3 from pyramid.httpexceptions import HTTPInsufficientStorage 4 from kinto.core.errors import http_error, ERRORS 5 from kinto.core.storage.exceptions import RecordNotFoundError 6 from kinto.core.utils import instance_uri 7 8 from .utils import record_size 9 10 11 QUOTA_RESOURCE_NAME = 'quota' 12 BUCKET_QUOTA_OBJECT_ID = 'bucket_info' 13 COLLECTION_QUOTA_OBJECT_ID = 'collection_info' 14 15 16 def get_bucket_settings(settings, bucket_id, name): 17 return settings.get( 18 # Bucket specific 19 'quotas.bucket_{}_{}'.format(bucket_id, name), 20 # Global to all buckets 21 settings.get('quotas.bucket_{}'.format(name), None)) 22 23 24 def get_collection_settings(settings, bucket_id, collection_id, name): 25 return settings.get( 26 # Specific for a given bucket collection 27 'quotas.collection_{}_{}_{}'.format(bucket_id, collection_id, name), 28 # Specific to given bucket collections 29 settings.get('quotas.collection_{}_{}'.format(bucket_id, name), 30 # Global to all buckets collections 31 settings.get('quotas.collection_{}'.format(name), None))) 32 33 34 def on_resource_changed(event): 35 """ 36 Everytime an object is created/changed/deleted, we update the 37 bucket counters. 38 39 If a new object exceeds the quotas, we reject the request. 
40 """ 41 payload = event.payload 42 action = payload['action'] 43 resource_name = payload['resource_name'] 44 event_uri = payload['uri'] 45 46 settings = event.request.registry.settings 47 48 bucket_id = payload['bucket_id'] 49 bucket_uri = instance_uri(event.request, 'bucket', id=bucket_id) 50 collection_id = None 51 collection_uri = None 52 if 'collection_id' in payload: 53 collection_id = payload['collection_id'] 54 collection_uri = instance_uri(event.request, 55 'collection', 56 bucket_id=bucket_id, 57 id=collection_id) 58 59 bucket_max_bytes = get_bucket_settings(settings, bucket_id, 'max_bytes') 60 bucket_max_items = get_bucket_settings(settings, bucket_id, 'max_items') 61 bucket_max_bytes_per_item = get_bucket_settings(settings, bucket_id, 62 'max_bytes_per_item') 63 collection_max_bytes = get_collection_settings(settings, bucket_id, 64 collection_id, 'max_bytes') 65 collection_max_items = get_collection_settings(settings, bucket_id, 66 collection_id, 'max_items') 67 collection_max_bytes_per_item = get_collection_settings( 68 settings, bucket_id, collection_id, 'max_bytes_per_item') 69 70 max_bytes_per_item = (collection_max_bytes_per_item or 71 bucket_max_bytes_per_item) 72 73 storage = event.request.registry.storage 74 75 if action == 'delete' and resource_name == 'bucket': 76 # Deleting a bucket already deletes everything underneath (including 77 # quotas info). See kinto/views/bucket. 78 return 79 80 targets = [] 81 for impacted in event.impacted_records: 82 target = impacted['new' if action != 'delete' else 'old'] 83 # On POST .../records, the URI does not contain the newly created 84 # record id. 85 obj_id = target['id'] 86 parts = event_uri.split('/') 87 if resource_name in parts[-1]: 88 parts.append(obj_id) 89 else: 90 # Make sure the id is correct on grouped events. 91 parts[-1] = obj_id 92 uri = '/'.join(parts) 93 94 old = impacted.get('old', {}) 95 new = impacted.get('new', {}) 96 97 targets.append((uri, obj_id, old, new)) 98 99 try: 100 bucket_info = copy.deepcopy( 101 storage.get(parent_id=bucket_uri, 102 collection_id=QUOTA_RESOURCE_NAME, 103 object_id=BUCKET_QUOTA_OBJECT_ID)) 104 except RecordNotFoundError: 105 bucket_info = { 106 "collection_count": 0, 107 "record_count": 0, 108 "storage_size": 0, 109 } 110 111 collection_info = { 112 "record_count": 0, 113 "storage_size": 0, 114 } 115 if collection_id: 116 try: 117 collection_info = copy.deepcopy( 118 storage.get(parent_id=collection_uri, 119 collection_id=QUOTA_RESOURCE_NAME, 120 object_id=COLLECTION_QUOTA_OBJECT_ID)) 121 except RecordNotFoundError: 122 pass 123 124 # Update the bucket quotas values for each impacted record. 
125 for (uri, obj_id, old, new) in targets: 126 old_size = record_size(old) 127 new_size = record_size(new) 128 129 if max_bytes_per_item is not None and action != "delete": 130 if new_size > max_bytes_per_item: 131 message = ("Maximum bytes per object exceeded " 132 "({} > {} Bytes.".format(new_size, max_bytes_per_item)) 133 raise http_error(HTTPInsufficientStorage(), 134 errno=ERRORS.FORBIDDEN.value, 135 message=message) 136 137 if action == 'create': 138 bucket_info['storage_size'] += new_size 139 if resource_name == 'collection': 140 bucket_info['collection_count'] += 1 141 collection_info['storage_size'] += new_size 142 if resource_name == 'record': 143 bucket_info['record_count'] += 1 144 collection_info['record_count'] += 1 145 collection_info['storage_size'] += new_size 146 elif action == 'update': 147 bucket_info['storage_size'] -= old_size 148 bucket_info['storage_size'] += new_size 149 if resource_name in ('collection', 'record'): 150 collection_info['storage_size'] -= old_size 151 collection_info['storage_size'] += new_size 152 else: # action == 'delete': 153 bucket_info['storage_size'] -= old_size 154 if resource_name == 'collection': 155 collection_uri = uri 156 bucket_info['collection_count'] -= 1 157 # When we delete the collection all the records in it 158 # are deleted without notification. 159 collection_records, _ = storage.get_all( 160 collection_id='record', 161 parent_id=collection_uri) 162 for r in collection_records: 163 old_record_size = record_size(r) 164 bucket_info['record_count'] -= 1 165 bucket_info['storage_size'] -= old_record_size 166 collection_info['record_count'] -= 1 167 collection_info['storage_size'] -= old_record_size 168 collection_info['storage_size'] -= old_size 169 170 if resource_name == 'record': 171 bucket_info['record_count'] -= 1 172 collection_info['record_count'] -= 1 173 collection_info['storage_size'] -= old_size 174 175 if bucket_max_bytes is not None: 176 if bucket_info['storage_size'] > bucket_max_bytes: 177 message = ("Bucket maximum total size exceeded " 178 "({} > {} Bytes). 
".format(bucket_info['storage_size'], 179 bucket_max_bytes)) 180 raise http_error(HTTPInsufficientStorage(), 181 errno=ERRORS.FORBIDDEN.value, 182 message=message) 183 184 if bucket_max_items is not None: 185 if bucket_info['record_count'] > bucket_max_items: 186 message = ("Bucket maximum number of objects exceeded " 187 "({} > {} objects).".format(bucket_info['record_count'], 188 bucket_max_items)) 189 raise http_error(HTTPInsufficientStorage(), 190 errno=ERRORS.FORBIDDEN.value, 191 message=message) 192 193 if collection_max_bytes is not None: 194 if collection_info['storage_size'] > collection_max_bytes: 195 message = ("Collection maximum size exceeded " 196 "({} > {} Bytes).".format(collection_info['storage_size'], 197 collection_max_bytes)) 198 raise http_error(HTTPInsufficientStorage(), 199 errno=ERRORS.FORBIDDEN.value, 200 message=message) 201 202 if collection_max_items is not None: 203 if collection_info['record_count'] > collection_max_items: 204 message = ("Collection maximum number of objects exceeded " 205 "({} > {} objects).".format(collection_info['record_count'], 206 collection_max_items)) 207 raise http_error(HTTPInsufficientStorage(), 208 errno=ERRORS.FORBIDDEN.value, 209 message=message) 210 211 storage.update(parent_id=bucket_uri, 212 collection_id=QUOTA_RESOURCE_NAME, 213 object_id=BUCKET_QUOTA_OBJECT_ID, 214 record=bucket_info) 215 216 if collection_id: 217 if action == 'delete' and resource_name == 'collection': 218 # Deleting a collection already deletes everything underneath 219 # (including quotas info). See kinto/views/collection. 220 return 221 else: 222 storage.update(parent_id=collection_uri, 223 collection_id=QUOTA_RESOURCE_NAME, 224 object_id=COLLECTION_QUOTA_OBJECT_ID, 225 record=collection_info) 226 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kinto/plugins/quotas/listener.py b/kinto/plugins/quotas/listener.py --- a/kinto/plugins/quotas/listener.py +++ b/kinto/plugins/quotas/listener.py @@ -41,10 +41,15 @@ payload = event.payload action = payload['action'] resource_name = payload['resource_name'] - event_uri = payload['uri'] + + if action == 'delete' and resource_name == 'bucket': + # Deleting a bucket already deletes everything underneath (including + # quotas info). See kinto/views/bucket. + return settings = event.request.registry.settings + event_uri = payload['uri'] bucket_id = payload['bucket_id'] bucket_uri = instance_uri(event.request, 'bucket', id=bucket_id) collection_id = None @@ -72,11 +77,6 @@ storage = event.request.registry.storage - if action == 'delete' and resource_name == 'bucket': - # Deleting a bucket already deletes everything underneath (including - # quotas info). See kinto/views/bucket. - return - targets = [] for impacted in event.impacted_records: target = impacted['new' if action != 'delete' else 'old']
{"golden_diff": "diff --git a/kinto/plugins/quotas/listener.py b/kinto/plugins/quotas/listener.py\n--- a/kinto/plugins/quotas/listener.py\n+++ b/kinto/plugins/quotas/listener.py\n@@ -41,10 +41,15 @@\n payload = event.payload\n action = payload['action']\n resource_name = payload['resource_name']\n- event_uri = payload['uri']\n+\n+ if action == 'delete' and resource_name == 'bucket':\n+ # Deleting a bucket already deletes everything underneath (including\n+ # quotas info). See kinto/views/bucket.\n+ return\n \n settings = event.request.registry.settings\n \n+ event_uri = payload['uri']\n bucket_id = payload['bucket_id']\n bucket_uri = instance_uri(event.request, 'bucket', id=bucket_id)\n collection_id = None\n@@ -72,11 +77,6 @@\n \n storage = event.request.registry.storage\n \n- if action == 'delete' and resource_name == 'bucket':\n- # Deleting a bucket already deletes everything underneath (including\n- # quotas info). See kinto/views/bucket.\n- return\n-\n targets = []\n for impacted in event.impacted_records:\n target = impacted['new' if action != 'delete' else 'old']\n", "issue": "Crash on DELETE /buckets when quota plugin is enabled\nAnother collateral damage of #945 \r\n\r\nA `http DELETE /v1/buckets` when the quota plugin is enabled leads to:\r\n\r\n```\r\n File \"/home/mathieu/Code/Mozilla/kinto/.venv/lib/python3.5/site-packages/zope/interface/adapter.py\", line 598, in subscribers\r\n subscription(*objects)\r\n File \"/home/mathieu/Code/Mozilla/kinto/.venv/lib/python3.5/site-packages/pyramid/config/adapters.py\", line 130, in subscriber_wrapper\r\n return derived_subscriber(*arg)\r\n File \"/home/mathieu/Code/Mozilla/kinto/.venv/lib/python3.5/site-packages/pyramid/config/adapters.py\", line 103, in derived_subscriber\r\n return subscriber(arg[0])\r\n File \"/home/mathieu/Code/Mozilla/kinto/kinto/plugins/quotas/listener.py\", line 47, in on_resource_changed\r\n bucket_id = payload['bucket_id']\r\nKeyError: 'bucket_id'\r\n\r\n```\nCrash on DELETE /buckets when quota plugin is enabled\nAnother collateral damage of #945 \r\n\r\nA `http DELETE /v1/buckets` when the quota plugin is enabled leads to:\r\n\r\n```\r\n File \"/home/mathieu/Code/Mozilla/kinto/.venv/lib/python3.5/site-packages/zope/interface/adapter.py\", line 598, in subscribers\r\n subscription(*objects)\r\n File \"/home/mathieu/Code/Mozilla/kinto/.venv/lib/python3.5/site-packages/pyramid/config/adapters.py\", line 130, in subscriber_wrapper\r\n return derived_subscriber(*arg)\r\n File \"/home/mathieu/Code/Mozilla/kinto/.venv/lib/python3.5/site-packages/pyramid/config/adapters.py\", line 103, in derived_subscriber\r\n return subscriber(arg[0])\r\n File \"/home/mathieu/Code/Mozilla/kinto/kinto/plugins/quotas/listener.py\", line 47, in on_resource_changed\r\n bucket_id = payload['bucket_id']\r\nKeyError: 'bucket_id'\r\n\r\n```\n", "before_files": [{"content": "import copy\n\nfrom pyramid.httpexceptions import HTTPInsufficientStorage\nfrom kinto.core.errors import http_error, ERRORS\nfrom kinto.core.storage.exceptions import RecordNotFoundError\nfrom kinto.core.utils import instance_uri\n\nfrom .utils import record_size\n\n\nQUOTA_RESOURCE_NAME = 'quota'\nBUCKET_QUOTA_OBJECT_ID = 'bucket_info'\nCOLLECTION_QUOTA_OBJECT_ID = 'collection_info'\n\n\ndef get_bucket_settings(settings, bucket_id, name):\n return settings.get(\n # Bucket specific\n 'quotas.bucket_{}_{}'.format(bucket_id, name),\n # Global to all buckets\n settings.get('quotas.bucket_{}'.format(name), None))\n\n\ndef get_collection_settings(settings, bucket_id, 
collection_id, name):\n return settings.get(\n # Specific for a given bucket collection\n 'quotas.collection_{}_{}_{}'.format(bucket_id, collection_id, name),\n # Specific to given bucket collections\n settings.get('quotas.collection_{}_{}'.format(bucket_id, name),\n # Global to all buckets collections\n settings.get('quotas.collection_{}'.format(name), None)))\n\n\ndef on_resource_changed(event):\n \"\"\"\n Everytime an object is created/changed/deleted, we update the\n bucket counters.\n\n If a new object exceeds the quotas, we reject the request.\n \"\"\"\n payload = event.payload\n action = payload['action']\n resource_name = payload['resource_name']\n event_uri = payload['uri']\n\n settings = event.request.registry.settings\n\n bucket_id = payload['bucket_id']\n bucket_uri = instance_uri(event.request, 'bucket', id=bucket_id)\n collection_id = None\n collection_uri = None\n if 'collection_id' in payload:\n collection_id = payload['collection_id']\n collection_uri = instance_uri(event.request,\n 'collection',\n bucket_id=bucket_id,\n id=collection_id)\n\n bucket_max_bytes = get_bucket_settings(settings, bucket_id, 'max_bytes')\n bucket_max_items = get_bucket_settings(settings, bucket_id, 'max_items')\n bucket_max_bytes_per_item = get_bucket_settings(settings, bucket_id,\n 'max_bytes_per_item')\n collection_max_bytes = get_collection_settings(settings, bucket_id,\n collection_id, 'max_bytes')\n collection_max_items = get_collection_settings(settings, bucket_id,\n collection_id, 'max_items')\n collection_max_bytes_per_item = get_collection_settings(\n settings, bucket_id, collection_id, 'max_bytes_per_item')\n\n max_bytes_per_item = (collection_max_bytes_per_item or\n bucket_max_bytes_per_item)\n\n storage = event.request.registry.storage\n\n if action == 'delete' and resource_name == 'bucket':\n # Deleting a bucket already deletes everything underneath (including\n # quotas info). 
See kinto/views/bucket.\n return\n\n targets = []\n for impacted in event.impacted_records:\n target = impacted['new' if action != 'delete' else 'old']\n # On POST .../records, the URI does not contain the newly created\n # record id.\n obj_id = target['id']\n parts = event_uri.split('/')\n if resource_name in parts[-1]:\n parts.append(obj_id)\n else:\n # Make sure the id is correct on grouped events.\n parts[-1] = obj_id\n uri = '/'.join(parts)\n\n old = impacted.get('old', {})\n new = impacted.get('new', {})\n\n targets.append((uri, obj_id, old, new))\n\n try:\n bucket_info = copy.deepcopy(\n storage.get(parent_id=bucket_uri,\n collection_id=QUOTA_RESOURCE_NAME,\n object_id=BUCKET_QUOTA_OBJECT_ID))\n except RecordNotFoundError:\n bucket_info = {\n \"collection_count\": 0,\n \"record_count\": 0,\n \"storage_size\": 0,\n }\n\n collection_info = {\n \"record_count\": 0,\n \"storage_size\": 0,\n }\n if collection_id:\n try:\n collection_info = copy.deepcopy(\n storage.get(parent_id=collection_uri,\n collection_id=QUOTA_RESOURCE_NAME,\n object_id=COLLECTION_QUOTA_OBJECT_ID))\n except RecordNotFoundError:\n pass\n\n # Update the bucket quotas values for each impacted record.\n for (uri, obj_id, old, new) in targets:\n old_size = record_size(old)\n new_size = record_size(new)\n\n if max_bytes_per_item is not None and action != \"delete\":\n if new_size > max_bytes_per_item:\n message = (\"Maximum bytes per object exceeded \"\n \"({} > {} Bytes.\".format(new_size, max_bytes_per_item))\n raise http_error(HTTPInsufficientStorage(),\n errno=ERRORS.FORBIDDEN.value,\n message=message)\n\n if action == 'create':\n bucket_info['storage_size'] += new_size\n if resource_name == 'collection':\n bucket_info['collection_count'] += 1\n collection_info['storage_size'] += new_size\n if resource_name == 'record':\n bucket_info['record_count'] += 1\n collection_info['record_count'] += 1\n collection_info['storage_size'] += new_size\n elif action == 'update':\n bucket_info['storage_size'] -= old_size\n bucket_info['storage_size'] += new_size\n if resource_name in ('collection', 'record'):\n collection_info['storage_size'] -= old_size\n collection_info['storage_size'] += new_size\n else: # action == 'delete':\n bucket_info['storage_size'] -= old_size\n if resource_name == 'collection':\n collection_uri = uri\n bucket_info['collection_count'] -= 1\n # When we delete the collection all the records in it\n # are deleted without notification.\n collection_records, _ = storage.get_all(\n collection_id='record',\n parent_id=collection_uri)\n for r in collection_records:\n old_record_size = record_size(r)\n bucket_info['record_count'] -= 1\n bucket_info['storage_size'] -= old_record_size\n collection_info['record_count'] -= 1\n collection_info['storage_size'] -= old_record_size\n collection_info['storage_size'] -= old_size\n\n if resource_name == 'record':\n bucket_info['record_count'] -= 1\n collection_info['record_count'] -= 1\n collection_info['storage_size'] -= old_size\n\n if bucket_max_bytes is not None:\n if bucket_info['storage_size'] > bucket_max_bytes:\n message = (\"Bucket maximum total size exceeded \"\n \"({} > {} Bytes). 
\".format(bucket_info['storage_size'],\n bucket_max_bytes))\n raise http_error(HTTPInsufficientStorage(),\n errno=ERRORS.FORBIDDEN.value,\n message=message)\n\n if bucket_max_items is not None:\n if bucket_info['record_count'] > bucket_max_items:\n message = (\"Bucket maximum number of objects exceeded \"\n \"({} > {} objects).\".format(bucket_info['record_count'],\n bucket_max_items))\n raise http_error(HTTPInsufficientStorage(),\n errno=ERRORS.FORBIDDEN.value,\n message=message)\n\n if collection_max_bytes is not None:\n if collection_info['storage_size'] > collection_max_bytes:\n message = (\"Collection maximum size exceeded \"\n \"({} > {} Bytes).\".format(collection_info['storage_size'],\n collection_max_bytes))\n raise http_error(HTTPInsufficientStorage(),\n errno=ERRORS.FORBIDDEN.value,\n message=message)\n\n if collection_max_items is not None:\n if collection_info['record_count'] > collection_max_items:\n message = (\"Collection maximum number of objects exceeded \"\n \"({} > {} objects).\".format(collection_info['record_count'],\n collection_max_items))\n raise http_error(HTTPInsufficientStorage(),\n errno=ERRORS.FORBIDDEN.value,\n message=message)\n\n storage.update(parent_id=bucket_uri,\n collection_id=QUOTA_RESOURCE_NAME,\n object_id=BUCKET_QUOTA_OBJECT_ID,\n record=bucket_info)\n\n if collection_id:\n if action == 'delete' and resource_name == 'collection':\n # Deleting a collection already deletes everything underneath\n # (including quotas info). See kinto/views/collection.\n return\n else:\n storage.update(parent_id=collection_uri,\n collection_id=QUOTA_RESOURCE_NAME,\n object_id=COLLECTION_QUOTA_OBJECT_ID,\n record=collection_info)\n", "path": "kinto/plugins/quotas/listener.py"}], "after_files": [{"content": "import copy\n\nfrom pyramid.httpexceptions import HTTPInsufficientStorage\nfrom kinto.core.errors import http_error, ERRORS\nfrom kinto.core.storage.exceptions import RecordNotFoundError\nfrom kinto.core.utils import instance_uri\n\nfrom .utils import record_size\n\n\nQUOTA_RESOURCE_NAME = 'quota'\nBUCKET_QUOTA_OBJECT_ID = 'bucket_info'\nCOLLECTION_QUOTA_OBJECT_ID = 'collection_info'\n\n\ndef get_bucket_settings(settings, bucket_id, name):\n return settings.get(\n # Bucket specific\n 'quotas.bucket_{}_{}'.format(bucket_id, name),\n # Global to all buckets\n settings.get('quotas.bucket_{}'.format(name), None))\n\n\ndef get_collection_settings(settings, bucket_id, collection_id, name):\n return settings.get(\n # Specific for a given bucket collection\n 'quotas.collection_{}_{}_{}'.format(bucket_id, collection_id, name),\n # Specific to given bucket collections\n settings.get('quotas.collection_{}_{}'.format(bucket_id, name),\n # Global to all buckets collections\n settings.get('quotas.collection_{}'.format(name), None)))\n\n\ndef on_resource_changed(event):\n \"\"\"\n Everytime an object is created/changed/deleted, we update the\n bucket counters.\n\n If a new object exceeds the quotas, we reject the request.\n \"\"\"\n payload = event.payload\n action = payload['action']\n resource_name = payload['resource_name']\n\n if action == 'delete' and resource_name == 'bucket':\n # Deleting a bucket already deletes everything underneath (including\n # quotas info). 
See kinto/views/bucket.\n return\n\n settings = event.request.registry.settings\n\n event_uri = payload['uri']\n bucket_id = payload['bucket_id']\n bucket_uri = instance_uri(event.request, 'bucket', id=bucket_id)\n collection_id = None\n collection_uri = None\n if 'collection_id' in payload:\n collection_id = payload['collection_id']\n collection_uri = instance_uri(event.request,\n 'collection',\n bucket_id=bucket_id,\n id=collection_id)\n\n bucket_max_bytes = get_bucket_settings(settings, bucket_id, 'max_bytes')\n bucket_max_items = get_bucket_settings(settings, bucket_id, 'max_items')\n bucket_max_bytes_per_item = get_bucket_settings(settings, bucket_id,\n 'max_bytes_per_item')\n collection_max_bytes = get_collection_settings(settings, bucket_id,\n collection_id, 'max_bytes')\n collection_max_items = get_collection_settings(settings, bucket_id,\n collection_id, 'max_items')\n collection_max_bytes_per_item = get_collection_settings(\n settings, bucket_id, collection_id, 'max_bytes_per_item')\n\n max_bytes_per_item = (collection_max_bytes_per_item or\n bucket_max_bytes_per_item)\n\n storage = event.request.registry.storage\n\n targets = []\n for impacted in event.impacted_records:\n target = impacted['new' if action != 'delete' else 'old']\n # On POST .../records, the URI does not contain the newly created\n # record id.\n obj_id = target['id']\n parts = event_uri.split('/')\n if resource_name in parts[-1]:\n parts.append(obj_id)\n else:\n # Make sure the id is correct on grouped events.\n parts[-1] = obj_id\n uri = '/'.join(parts)\n\n old = impacted.get('old', {})\n new = impacted.get('new', {})\n\n targets.append((uri, obj_id, old, new))\n\n try:\n bucket_info = copy.deepcopy(\n storage.get(parent_id=bucket_uri,\n collection_id=QUOTA_RESOURCE_NAME,\n object_id=BUCKET_QUOTA_OBJECT_ID))\n except RecordNotFoundError:\n bucket_info = {\n \"collection_count\": 0,\n \"record_count\": 0,\n \"storage_size\": 0,\n }\n\n collection_info = {\n \"record_count\": 0,\n \"storage_size\": 0,\n }\n if collection_id:\n try:\n collection_info = copy.deepcopy(\n storage.get(parent_id=collection_uri,\n collection_id=QUOTA_RESOURCE_NAME,\n object_id=COLLECTION_QUOTA_OBJECT_ID))\n except RecordNotFoundError:\n pass\n\n # Update the bucket quotas values for each impacted record.\n for (uri, obj_id, old, new) in targets:\n old_size = record_size(old)\n new_size = record_size(new)\n\n if max_bytes_per_item is not None and action != \"delete\":\n if new_size > max_bytes_per_item:\n message = (\"Maximum bytes per object exceeded \"\n \"({} > {} Bytes.\".format(new_size, max_bytes_per_item))\n raise http_error(HTTPInsufficientStorage(),\n errno=ERRORS.FORBIDDEN.value,\n message=message)\n\n if action == 'create':\n bucket_info['storage_size'] += new_size\n if resource_name == 'collection':\n bucket_info['collection_count'] += 1\n collection_info['storage_size'] += new_size\n if resource_name == 'record':\n bucket_info['record_count'] += 1\n collection_info['record_count'] += 1\n collection_info['storage_size'] += new_size\n elif action == 'update':\n bucket_info['storage_size'] -= old_size\n bucket_info['storage_size'] += new_size\n if resource_name in ('collection', 'record'):\n collection_info['storage_size'] -= old_size\n collection_info['storage_size'] += new_size\n else: # action == 'delete':\n bucket_info['storage_size'] -= old_size\n if resource_name == 'collection':\n collection_uri = uri\n bucket_info['collection_count'] -= 1\n # When we delete the collection all the records in it\n # are deleted without 
notification.\n collection_records, _ = storage.get_all(\n collection_id='record',\n parent_id=collection_uri)\n for r in collection_records:\n old_record_size = record_size(r)\n bucket_info['record_count'] -= 1\n bucket_info['storage_size'] -= old_record_size\n collection_info['record_count'] -= 1\n collection_info['storage_size'] -= old_record_size\n collection_info['storage_size'] -= old_size\n\n if resource_name == 'record':\n bucket_info['record_count'] -= 1\n collection_info['record_count'] -= 1\n collection_info['storage_size'] -= old_size\n\n if bucket_max_bytes is not None:\n if bucket_info['storage_size'] > bucket_max_bytes:\n message = (\"Bucket maximum total size exceeded \"\n \"({} > {} Bytes). \".format(bucket_info['storage_size'],\n bucket_max_bytes))\n raise http_error(HTTPInsufficientStorage(),\n errno=ERRORS.FORBIDDEN.value,\n message=message)\n\n if bucket_max_items is not None:\n if bucket_info['record_count'] > bucket_max_items:\n message = (\"Bucket maximum number of objects exceeded \"\n \"({} > {} objects).\".format(bucket_info['record_count'],\n bucket_max_items))\n raise http_error(HTTPInsufficientStorage(),\n errno=ERRORS.FORBIDDEN.value,\n message=message)\n\n if collection_max_bytes is not None:\n if collection_info['storage_size'] > collection_max_bytes:\n message = (\"Collection maximum size exceeded \"\n \"({} > {} Bytes).\".format(collection_info['storage_size'],\n collection_max_bytes))\n raise http_error(HTTPInsufficientStorage(),\n errno=ERRORS.FORBIDDEN.value,\n message=message)\n\n if collection_max_items is not None:\n if collection_info['record_count'] > collection_max_items:\n message = (\"Collection maximum number of objects exceeded \"\n \"({} > {} objects).\".format(collection_info['record_count'],\n collection_max_items))\n raise http_error(HTTPInsufficientStorage(),\n errno=ERRORS.FORBIDDEN.value,\n message=message)\n\n storage.update(parent_id=bucket_uri,\n collection_id=QUOTA_RESOURCE_NAME,\n object_id=BUCKET_QUOTA_OBJECT_ID,\n record=bucket_info)\n\n if collection_id:\n if action == 'delete' and resource_name == 'collection':\n # Deleting a collection already deletes everything underneath\n # (including quotas info). See kinto/views/collection.\n return\n else:\n storage.update(parent_id=collection_uri,\n collection_id=QUOTA_RESOURCE_NAME,\n object_id=COLLECTION_QUOTA_OBJECT_ID,\n record=collection_info)\n", "path": "kinto/plugins/quotas/listener.py"}]}
3,100
286
gh_patches_debug_228
rasdani/github-patches
git_diff
mlcommons__GaNDLF-766
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `gdown` does not seem to be working **Describe the bug** Current CI seems to be broken. **To Reproduce** Steps to reproduce the behavior: 1. Run any CI test 2. See error: ```python-traceback [SNIP!] if gdrive_file_id and is_gdrive_download_link: content_disposition = six.moves.urllib_parse.unquote( res.headers["Content-Disposition"] ) m = re.search(r"filename\*=UTF-8''(.*)", content_disposition) > filename_from_url = m.groups()[0] E AttributeError: 'NoneType' object has no attribute 'groups' ``` Example: https://github.com/mlcommons/GaNDLF/actions/runs/7489779631/job/20387346791?pr=764#step:9:219 **Expected behavior** The sample data file download should work. **Screenshots** N.A. **GaNDLF Version** Current master **Desktop (please complete the following information):** N.A. **Additional context** Basically, it is this error: https://github.com/wkentaro/gdown/issues/291 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 3 """The setup script.""" 4 5 6 import sys, re, os 7 from setuptools import setup, find_packages 8 from setuptools.command.install import install 9 from setuptools.command.develop import develop 10 from setuptools.command.egg_info import egg_info 11 12 try: 13 with open("README.md") as readme_file: 14 readme = readme_file.read() 15 except Exception as error: 16 readme = "No README information found." 17 sys.stderr.write( 18 "Warning: Could not open '%s' due %s\n" % ("README.md", error) 19 ) 20 21 22 class CustomInstallCommand(install): 23 def run(self): 24 install.run(self) 25 26 27 class CustomDevelopCommand(develop): 28 def run(self): 29 develop.run(self) 30 31 32 class CustomEggInfoCommand(egg_info): 33 def run(self): 34 egg_info.run(self) 35 36 37 try: 38 filepath = "GANDLF/version.py" 39 version_file = open(filepath) 40 (__version__,) = re.findall('__version__ = "(.*)"', version_file.read()) 41 42 except Exception as error: 43 __version__ = "0.0.1" 44 sys.stderr.write( 45 "Warning: Could not open '%s' due %s\n" % (filepath, error) 46 ) 47 48 # Handle cases where specific files need to be bundled into the final package as installed via PyPI 49 dockerfiles = [ 50 item 51 for item in os.listdir(os.path.dirname(os.path.abspath(__file__))) 52 if (os.path.isfile(item) and item.startswith("Dockerfile-")) 53 ] 54 entrypoint_files = [ 55 item 56 for item in os.listdir(os.path.dirname(os.path.abspath(__file__))) 57 if (os.path.isfile(item) and item.startswith("gandlf_")) 58 ] 59 setup_files = ["setup.py", ".dockerignore", "pyproject.toml", "MANIFEST.in"] 60 all_extra_files = dockerfiles + entrypoint_files + setup_files 61 all_extra_files_pathcorrected = [ 62 os.path.join("../", item) for item in all_extra_files 63 ] 64 # find_packages should only ever find these as subpackages of gandlf, not as top-level packages 65 # generate this dynamically? 
66 # GANDLF.GANDLF is needed to prevent recursion madness in deployments 67 toplevel_package_excludes = [ 68 "GANDLF.GANDLF", 69 "anonymize", 70 "cli", 71 "compute", 72 "data", 73 "grad_clipping", 74 "losses", 75 "metrics", 76 "models", 77 "optimizers", 78 "schedulers", 79 "utils", 80 ] 81 82 83 requirements = [ 84 "torch==2.1.0", 85 "black==23.11.0", 86 "numpy==1.25.0", 87 "scipy", 88 "SimpleITK!=2.0.*", 89 "SimpleITK!=2.2.1", # https://github.com/mlcommons/GaNDLF/issues/536 90 "torchvision", 91 "tqdm", 92 "torchio==0.19.3", 93 "pandas>=2.0.0", 94 "scikit-learn>=0.23.2", 95 "scikit-image>=0.19.1", 96 "setuptools", 97 "seaborn", 98 "pyyaml", 99 "tiffslide", 100 "matplotlib", 101 "gdown", 102 "pytest", 103 "coverage", 104 "pytest-cov", 105 "psutil", 106 "medcam", 107 "opencv-python", 108 "torchmetrics==1.1.2", 109 "zarr==2.10.3", 110 "pydicom", 111 "onnx", 112 "torchinfo==1.7.0", 113 "segmentation-models-pytorch==0.3.3", 114 "ACSConv==0.1.1", 115 "docker", 116 "dicom-anonymizer", 117 "twine", 118 "zarr", 119 "keyring", 120 ] 121 122 if __name__ == "__main__": 123 setup( 124 name="GANDLF", 125 version=__version__, 126 author="MLCommons", 127 author_email="[email protected]", 128 python_requires=">3.8, <3.12", 129 packages=find_packages( 130 where=os.path.dirname(os.path.abspath(__file__)), 131 exclude=toplevel_package_excludes, 132 ), 133 cmdclass={ 134 "install": CustomInstallCommand, 135 "develop": CustomDevelopCommand, 136 "egg_info": CustomEggInfoCommand, 137 }, 138 scripts=[ 139 "gandlf_run", 140 "gandlf_constructCSV", 141 "gandlf_collectStats", 142 "gandlf_patchMiner", 143 "gandlf_preprocess", 144 "gandlf_anonymizer", 145 "gandlf_verifyInstall", 146 "gandlf_configGenerator", 147 "gandlf_recoverConfig", 148 "gandlf_deploy", 149 "gandlf_optimizeModel", 150 "gandlf_generateMetrics", 151 ], 152 classifiers=[ 153 "Development Status :: 3 - Alpha", 154 "Intended Audience :: Science/Research", 155 "License :: OSI Approved :: Apache Software License", 156 "Natural Language :: English", 157 "Operating System :: OS Independent", 158 "Programming Language :: Python :: 3.9", 159 "Programming Language :: Python :: 3.10", 160 "Programming Language :: Python :: 3.11", 161 "Topic :: Scientific/Engineering :: Medical Science Apps.", 162 ], 163 description=( 164 "PyTorch-based framework that handles segmentation/regression/classification using various DL architectures for medical imaging." 165 ), 166 install_requires=requirements, 167 license="Apache-2.0", 168 long_description=readme, 169 long_description_content_type="text/markdown", 170 include_package_data=True, 171 package_data={"GANDLF": all_extra_files_pathcorrected}, 172 keywords="semantic, segmentation, regression, classification, data-augmentation, medical-imaging, clinical-workflows, deep-learning, pytorch", 173 zip_safe=False, 174 ) 175 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -98,7 +98,7 @@ "pyyaml", "tiffslide", "matplotlib", - "gdown", + "gdown==4.6.3", "pytest", "coverage", "pytest-cov",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -98,7 +98,7 @@\n \"pyyaml\",\n \"tiffslide\",\n \"matplotlib\",\n- \"gdown\",\n+ \"gdown==4.6.3\",\n \"pytest\",\n \"coverage\",\n \"pytest-cov\",\n", "issue": "`gdown` does not seem to be working\n**Describe the bug**\r\nCurrent CI seems to be broken.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Run any CI test\r\n2. See error:\r\n```python-traceback\r\n[SNIP!]\r\n if gdrive_file_id and is_gdrive_download_link:\r\n content_disposition = six.moves.urllib_parse.unquote(\r\n res.headers[\"Content-Disposition\"]\r\n )\r\n m = re.search(r\"filename\\*=UTF-8''(.*)\", content_disposition)\r\n> filename_from_url = m.groups()[0]\r\nE AttributeError: 'NoneType' object has no attribute 'groups'\r\n```\r\nExample: https://github.com/mlcommons/GaNDLF/actions/runs/7489779631/job/20387346791?pr=764#step:9:219\r\n\r\n**Expected behavior**\r\nThe sample data file download should work.\r\n\r\n**Screenshots**\r\nN.A.\r\n\r\n**GaNDLF Version**\r\nCurrent master\r\n\r\n**Desktop (please complete the following information):**\r\nN.A.\r\n\r\n**Additional context**\r\nBasically, it is this error: https://github.com/wkentaro/gdown/issues/291\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"The setup script.\"\"\"\n\n\nimport sys, re, os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.install import install\nfrom setuptools.command.develop import develop\nfrom setuptools.command.egg_info import egg_info\n\ntry:\n with open(\"README.md\") as readme_file:\n readme = readme_file.read()\nexcept Exception as error:\n readme = \"No README information found.\"\n sys.stderr.write(\n \"Warning: Could not open '%s' due %s\\n\" % (\"README.md\", error)\n )\n\n\nclass CustomInstallCommand(install):\n def run(self):\n install.run(self)\n\n\nclass CustomDevelopCommand(develop):\n def run(self):\n develop.run(self)\n\n\nclass CustomEggInfoCommand(egg_info):\n def run(self):\n egg_info.run(self)\n\n\ntry:\n filepath = \"GANDLF/version.py\"\n version_file = open(filepath)\n (__version__,) = re.findall('__version__ = \"(.*)\"', version_file.read())\n\nexcept Exception as error:\n __version__ = \"0.0.1\"\n sys.stderr.write(\n \"Warning: Could not open '%s' due %s\\n\" % (filepath, error)\n )\n\n# Handle cases where specific files need to be bundled into the final package as installed via PyPI\ndockerfiles = [\n item\n for item in os.listdir(os.path.dirname(os.path.abspath(__file__)))\n if (os.path.isfile(item) and item.startswith(\"Dockerfile-\"))\n]\nentrypoint_files = [\n item\n for item in os.listdir(os.path.dirname(os.path.abspath(__file__)))\n if (os.path.isfile(item) and item.startswith(\"gandlf_\"))\n]\nsetup_files = [\"setup.py\", \".dockerignore\", \"pyproject.toml\", \"MANIFEST.in\"]\nall_extra_files = dockerfiles + entrypoint_files + setup_files\nall_extra_files_pathcorrected = [\n os.path.join(\"../\", item) for item in all_extra_files\n]\n# find_packages should only ever find these as subpackages of gandlf, not as top-level packages\n# generate this dynamically?\n# GANDLF.GANDLF is needed to prevent recursion madness in deployments\ntoplevel_package_excludes = [\n \"GANDLF.GANDLF\",\n \"anonymize\",\n \"cli\",\n \"compute\",\n \"data\",\n \"grad_clipping\",\n \"losses\",\n \"metrics\",\n \"models\",\n \"optimizers\",\n \"schedulers\",\n \"utils\",\n]\n\n\nrequirements = [\n \"torch==2.1.0\",\n \"black==23.11.0\",\n \"numpy==1.25.0\",\n \"scipy\",\n \"SimpleITK!=2.0.*\",\n 
\"SimpleITK!=2.2.1\", # https://github.com/mlcommons/GaNDLF/issues/536\n \"torchvision\",\n \"tqdm\",\n \"torchio==0.19.3\",\n \"pandas>=2.0.0\",\n \"scikit-learn>=0.23.2\",\n \"scikit-image>=0.19.1\",\n \"setuptools\",\n \"seaborn\",\n \"pyyaml\",\n \"tiffslide\",\n \"matplotlib\",\n \"gdown\",\n \"pytest\",\n \"coverage\",\n \"pytest-cov\",\n \"psutil\",\n \"medcam\",\n \"opencv-python\",\n \"torchmetrics==1.1.2\",\n \"zarr==2.10.3\",\n \"pydicom\",\n \"onnx\",\n \"torchinfo==1.7.0\",\n \"segmentation-models-pytorch==0.3.3\",\n \"ACSConv==0.1.1\",\n \"docker\",\n \"dicom-anonymizer\",\n \"twine\",\n \"zarr\",\n \"keyring\",\n]\n\nif __name__ == \"__main__\":\n setup(\n name=\"GANDLF\",\n version=__version__,\n author=\"MLCommons\",\n author_email=\"[email protected]\",\n python_requires=\">3.8, <3.12\",\n packages=find_packages(\n where=os.path.dirname(os.path.abspath(__file__)),\n exclude=toplevel_package_excludes,\n ),\n cmdclass={\n \"install\": CustomInstallCommand,\n \"develop\": CustomDevelopCommand,\n \"egg_info\": CustomEggInfoCommand,\n },\n scripts=[\n \"gandlf_run\",\n \"gandlf_constructCSV\",\n \"gandlf_collectStats\",\n \"gandlf_patchMiner\",\n \"gandlf_preprocess\",\n \"gandlf_anonymizer\",\n \"gandlf_verifyInstall\",\n \"gandlf_configGenerator\",\n \"gandlf_recoverConfig\",\n \"gandlf_deploy\",\n \"gandlf_optimizeModel\",\n \"gandlf_generateMetrics\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Topic :: Scientific/Engineering :: Medical Science Apps.\",\n ],\n description=(\n \"PyTorch-based framework that handles segmentation/regression/classification using various DL architectures for medical imaging.\"\n ),\n install_requires=requirements,\n license=\"Apache-2.0\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n include_package_data=True,\n package_data={\"GANDLF\": all_extra_files_pathcorrected},\n keywords=\"semantic, segmentation, regression, classification, data-augmentation, medical-imaging, clinical-workflows, deep-learning, pytorch\",\n zip_safe=False,\n )\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"The setup script.\"\"\"\n\n\nimport sys, re, os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.install import install\nfrom setuptools.command.develop import develop\nfrom setuptools.command.egg_info import egg_info\n\ntry:\n with open(\"README.md\") as readme_file:\n readme = readme_file.read()\nexcept Exception as error:\n readme = \"No README information found.\"\n sys.stderr.write(\n \"Warning: Could not open '%s' due %s\\n\" % (\"README.md\", error)\n )\n\n\nclass CustomInstallCommand(install):\n def run(self):\n install.run(self)\n\n\nclass CustomDevelopCommand(develop):\n def run(self):\n develop.run(self)\n\n\nclass CustomEggInfoCommand(egg_info):\n def run(self):\n egg_info.run(self)\n\n\ntry:\n filepath = \"GANDLF/version.py\"\n version_file = open(filepath)\n (__version__,) = re.findall('__version__ = \"(.*)\"', version_file.read())\n\nexcept Exception as error:\n __version__ = \"0.0.1\"\n sys.stderr.write(\n \"Warning: Could not open '%s' due %s\\n\" % (filepath, error)\n )\n\n# Handle cases where specific files need to be bundled 
into the final package as installed via PyPI\ndockerfiles = [\n item\n for item in os.listdir(os.path.dirname(os.path.abspath(__file__)))\n if (os.path.isfile(item) and item.startswith(\"Dockerfile-\"))\n]\nentrypoint_files = [\n item\n for item in os.listdir(os.path.dirname(os.path.abspath(__file__)))\n if (os.path.isfile(item) and item.startswith(\"gandlf_\"))\n]\nsetup_files = [\"setup.py\", \".dockerignore\", \"pyproject.toml\", \"MANIFEST.in\"]\nall_extra_files = dockerfiles + entrypoint_files + setup_files\nall_extra_files_pathcorrected = [\n os.path.join(\"../\", item) for item in all_extra_files\n]\n# find_packages should only ever find these as subpackages of gandlf, not as top-level packages\n# generate this dynamically?\n# GANDLF.GANDLF is needed to prevent recursion madness in deployments\ntoplevel_package_excludes = [\n \"GANDLF.GANDLF\",\n \"anonymize\",\n \"cli\",\n \"compute\",\n \"data\",\n \"grad_clipping\",\n \"losses\",\n \"metrics\",\n \"models\",\n \"optimizers\",\n \"schedulers\",\n \"utils\",\n]\n\n\nrequirements = [\n \"torch==2.1.0\",\n \"black==23.11.0\",\n \"numpy==1.25.0\",\n \"scipy\",\n \"SimpleITK!=2.0.*\",\n \"SimpleITK!=2.2.1\", # https://github.com/mlcommons/GaNDLF/issues/536\n \"torchvision\",\n \"tqdm\",\n \"torchio==0.19.3\",\n \"pandas>=2.0.0\",\n \"scikit-learn>=0.23.2\",\n \"scikit-image>=0.19.1\",\n \"setuptools\",\n \"seaborn\",\n \"pyyaml\",\n \"tiffslide\",\n \"matplotlib\",\n \"gdown==4.6.3\",\n \"pytest\",\n \"coverage\",\n \"pytest-cov\",\n \"psutil\",\n \"medcam\",\n \"opencv-python\",\n \"torchmetrics==1.1.2\",\n \"zarr==2.10.3\",\n \"pydicom\",\n \"onnx\",\n \"torchinfo==1.7.0\",\n \"segmentation-models-pytorch==0.3.3\",\n \"ACSConv==0.1.1\",\n \"docker\",\n \"dicom-anonymizer\",\n \"twine\",\n \"zarr\",\n \"keyring\",\n]\n\nif __name__ == \"__main__\":\n setup(\n name=\"GANDLF\",\n version=__version__,\n author=\"MLCommons\",\n author_email=\"[email protected]\",\n python_requires=\">3.8, <3.12\",\n packages=find_packages(\n where=os.path.dirname(os.path.abspath(__file__)),\n exclude=toplevel_package_excludes,\n ),\n cmdclass={\n \"install\": CustomInstallCommand,\n \"develop\": CustomDevelopCommand,\n \"egg_info\": CustomEggInfoCommand,\n },\n scripts=[\n \"gandlf_run\",\n \"gandlf_constructCSV\",\n \"gandlf_collectStats\",\n \"gandlf_patchMiner\",\n \"gandlf_preprocess\",\n \"gandlf_anonymizer\",\n \"gandlf_verifyInstall\",\n \"gandlf_configGenerator\",\n \"gandlf_recoverConfig\",\n \"gandlf_deploy\",\n \"gandlf_optimizeModel\",\n \"gandlf_generateMetrics\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Topic :: Scientific/Engineering :: Medical Science Apps.\",\n ],\n description=(\n \"PyTorch-based framework that handles segmentation/regression/classification using various DL architectures for medical imaging.\"\n ),\n install_requires=requirements,\n license=\"Apache-2.0\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n include_package_data=True,\n package_data={\"GANDLF\": all_extra_files_pathcorrected},\n keywords=\"semantic, segmentation, regression, classification, data-augmentation, medical-imaging, clinical-workflows, deep-learning, pytorch\",\n zip_safe=False,\n 
)\n", "path": "setup.py"}]}
2,221
79
gh_patches_debug_14956
rasdani/github-patches
git_diff
holoviz__panel-3803
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Enable or document how to render options in a List Parameter as Literal elements ## Request Enable or document how to render options in a List Parameter as Literal elements ## Motivation I'm trying to show a user how to create a custom dropdown component using ReactiveHTML in https://discourse.holoviz.org/t/customize-panel-components-widgets-design/4187. There is already an example of a *child* template/ dropdown here https://panel.holoviz.org/user_guide/Custom_Components.html#child-templates Unfortunately the drop down options are not rendered as Literal values but instead as panels, i.e. wrapping the literal value in `bk` `div`s. In my example it makes the dropdown not look nice because a lot of margin is added. ![image](https://user-images.githubusercontent.com/42288570/188283341-7364227c-03d1-4aaf-ab07-b349acf4c214.png) ```python from panel.reactive import ReactiveHTML import param class ShoelaceSelect(ReactiveHTML): value = param.Parameter() options = param.List() _template = """ <sl-dropdown> <sl-button slot="trigger" caret>Dropdown</sl-button> <sl-menu> {% for obj in options %} <sl-menu-item id="option">${obj}</sl-menu-item> {% endfor %} </sl-menu> </sl-dropdown> """ __javascript_modules__=["https://cdn.jsdelivr.net/npm/@shoelace-style/[email protected]/dist/shoelace.js"] __css__ = ["https://cdn.jsdelivr.net/npm/@shoelace-style/[email protected]/dist/themes/light.css"] # _child_config = {'obj': 'literal'} import panel as pn pn.extension() select = ShoelaceSelect(options=["apple", "pear", "banana"]) pn.Column(select).servable() ``` I simple cannot find any documentation or example telling me how to provide these a literal values. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `panel/models/reactive_html.py` Content: ``` 1 import difflib 2 import re 3 4 from collections import defaultdict 5 from html.parser import HTMLParser 6 7 import bokeh.core.properties as bp 8 9 from bokeh.events import ModelEvent 10 from bokeh.model import DataModel 11 from bokeh.models import HTMLBox, LayoutDOM 12 13 endfor = '{%-? endfor -?%}' 14 list_iter_re = r'{%-? for (\s*[A-Za-z_]\w*\s*) in (\s*[A-Za-z_]\w*\s*) -?%}' 15 items_iter_re = r'{%-? for \s*[A-Za-z_]\w*\s*, (\s*[A-Za-z_]\w*\s*) in (\s*[A-Za-z_]\w*\s*)\.items\(\) -?%}' 16 values_iter_re = r'{%-? 
for (\s*[A-Za-z_]\w*\s*) in (\s*[A-Za-z_]\w*\s*)\.values\(\) -?%}' 17 18 19 class ReactiveHTMLParser(HTMLParser): 20 21 def __init__(self, cls, template=True): 22 super().__init__() 23 self.template = template 24 self.cls = cls 25 self.attrs = defaultdict(list) 26 self.children = {} 27 self.nodes = [] 28 self.looped = [] 29 self._template_re = re.compile(r'\$\{[^}]+\}') 30 self._literal_re = re.compile(r'\{\{[^}]+\}\}') 31 self._current_node = None 32 self._node_stack = [] 33 self._open_for = False 34 self.loop_map = {} 35 self.loop_var_map = defaultdict(list) 36 37 def handle_starttag(self, tag, attrs): 38 attrs = dict(attrs) 39 dom_id = attrs.pop('id', None) 40 self._current_node = None 41 self._node_stack.append((tag, dom_id)) 42 43 if not dom_id: 44 for attr, value in attrs.items(): 45 if value is None: 46 continue 47 params, methods = [], [] 48 for match in self._template_re.findall(value): 49 match = match[2:-1] 50 if match.startswith('model.'): 51 continue 52 if match in self.cls.param: 53 params.append(match) 54 elif hasattr(self.cls, match): 55 methods.append(match) 56 if methods: 57 raise ValueError( 58 "DOM nodes with an attached callback must declare " 59 f"an id. Found <{tag}> node with the `{attr}` callback " 60 f"referencing the `{methods[0]}` method. Add an id " 61 "attribute like this: " 62 f"<{tag} id=\"{tag}\" {attr}=\"${{{methods[0]}}}>...</{tag}>." 63 ) 64 elif params: 65 literal = value.replace(f'${{{params[0]}}}', f'{{{{{params[0]}}}}}') 66 raise ValueError( 67 "DOM node with a linked parameter declaration " 68 f"must declare an id. Found <{tag}> node with " 69 f"the `{attr}` attribute referencing the `{params[0]}` " 70 "parameter. Either declare an id on the node, " 71 f"i.e. <{tag} id=\"{tag}\" {attr}=\"{value}\">...</{tag}>, " 72 "or insert the value as a literal: " 73 f"<{tag} {attr}=\"{literal}\">...</{tag}>." 
74 ) 75 return 76 77 if dom_id in self.nodes: 78 raise ValueError(f'Multiple DOM nodes with id="{dom_id}" found.') 79 self._current_node = dom_id 80 self.nodes.append(dom_id) 81 for attr, value in attrs.items(): 82 if value is None: 83 continue 84 matches = [] 85 for match in self._template_re.findall(value): 86 if not match[2:-1].startswith('model.'): 87 matches.append(match[2:-1]) 88 if matches: 89 self.attrs[dom_id].append((attr, matches, value.replace('${', '{'))) 90 91 def handle_endtag(self, tag): 92 self._node_stack.pop() 93 self._current_node = self._node_stack[-1][1] if self._node_stack else None 94 95 def handle_data(self, data): 96 if not self.template: 97 return 98 99 dom_id = self._current_node 100 matches = [] 101 for match in self._template_re.findall(data): 102 var = match[2:-1].strip() 103 if match[2:-1] not in self.loop_var_map[var]: 104 self.loop_var_map[var].append(match[2:-1]) 105 if var.endswith('.index0'): 106 matches.append('${%s }}]}' % var) 107 else: 108 matches.append('${%s}' % var) 109 110 literal_matches = [] 111 for match in self._literal_re.findall(data): 112 match = match[2:-2].strip() 113 if match.endswith('.index0'): 114 literal_matches.append('{{%s }}]}' % match) 115 else: 116 literal_matches.append('{{ %s }}' % match) 117 118 # Detect templating for loops 119 list_loop = re.findall(list_iter_re, data) 120 values_loop = re.findall(values_iter_re, data) 121 items_loop = re.findall(items_iter_re, data) 122 nloops = len(list_loop) + len(values_loop) + len(items_loop) 123 if nloops > 1 and nloops and self._open_for: 124 raise ValueError('Nested for loops currently not supported in templates.') 125 elif nloops: 126 loop = [loop for loop in (list_loop, values_loop, items_loop) if loop][0] 127 var, obj = loop[0] 128 if var in self.cls.param: 129 raise ValueError( 130 f'Loop variable {var} clashes with parameter name. ' 131 'Ensure loop variables have a unique name. Relevant ' 132 f'template section:\n\n{data}' 133 ) 134 self.loop_map[var] = obj 135 136 open_for = re.search(r'{%-? for', data) 137 end_for = re.search(endfor, data) 138 if open_for: 139 if self._current_node is None: 140 node = self._node_stack[-1][0] 141 raise ValueError( 142 'Loops may only be used inside a DOM node with an assigned ID. 
' 143 f'The following loop could not be expanded because the <{node}> node ' 144 f'did not have an assigned id:\n\n {data.strip()}' 145 ) 146 self._open_for = True 147 if end_for and (not nloops or end_for.start() > open_for.start()): 148 self._open_for = False 149 150 if self._current_node and literal_matches: 151 if len(literal_matches) == 1: 152 literal_match = literal_matches[0][2:-2].strip() 153 else: 154 literal_match = None 155 156 if literal_match and (literal_match in self.loop_map) and self._open_for: 157 literal_match = self.loop_map[literal_match] 158 self.looped.append((dom_id, literal_match)) 159 160 if not (self._current_node and matches): 161 return 162 163 if len(matches) == 1: 164 match = matches[0][2:-1].strip() 165 else: 166 for match in matches: 167 mode = self.cls._child_config.get(match, 'model') 168 if mode != 'template': 169 raise ValueError(f"Cannot match multiple variables in '{mode}' mode.") 170 match = None 171 172 # Handle looped variables 173 if match and (match in self.loop_map or '[' in match) and self._open_for: 174 if match in self.loop_map: 175 matches[matches.index('${%s}' % match)] = '${%s}' % self.loop_map[match] 176 match = self.loop_map[match] 177 elif '[' in match: 178 match, _ = match.split('[') 179 dom_id = dom_id.replace('-{{ loop.index0 }}', '') 180 self.looped.append((dom_id, match)) 181 182 mode = self.cls._child_config.get(match, 'model') 183 if match in self.cls.param and mode != 'template': 184 self.children[dom_id] = match 185 return 186 187 templates = [] 188 for match in matches: 189 match = match[2:-1] 190 if match.startswith('model.'): 191 continue 192 if match not in self.cls.param and '.' not in match: 193 params = difflib.get_close_matches(match, list(self.cls.param)) 194 raise ValueError(f"{self.cls.__name__} HTML template references " 195 f"unknown parameter '{match}', similar parameters " 196 f"include {params}.") 197 templates.append(match) 198 self.attrs[dom_id].append(('children', templates, data.replace('${', '{'))) 199 200 201 202 def find_attrs(html): 203 p = ReactiveHTMLParser() 204 p.feed(html) 205 return p.attrs 206 207 208 209 class DOMEvent(ModelEvent): 210 211 event_name = 'dom_event' 212 213 def __init__(self, model, node=None, data=None): 214 self.data = data 215 self.node = node 216 super().__init__(model=model) 217 218 219 class ReactiveHTML(HTMLBox): 220 221 attrs = bp.Dict(bp.String, bp.List(bp.Tuple(bp.String, bp.List(bp.String), bp.String))) 222 223 callbacks = bp.Dict(bp.String, bp.List(bp.Tuple(bp.String, bp.String))) 224 225 children = bp.Dict(bp.String, bp.Either(bp.List(bp.Either(bp.Instance(LayoutDOM), bp.String)), bp.String)) 226 227 data = bp.Instance(DataModel) 228 229 events = bp.Dict(bp.String, bp.Dict(bp.String, bp.Bool)) 230 231 html = bp.String() 232 233 looped = bp.List(bp.String) 234 235 nodes = bp.List(bp.String) 236 237 scripts = bp.Dict(bp.String, bp.List(bp.String)) 238 239 def __init__(self, **props): 240 if 'attrs' not in props and 'html' in props: 241 props['attrs'] = find_attrs(props['html']) 242 super().__init__(**props) 243 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/panel/models/reactive_html.py b/panel/models/reactive_html.py --- a/panel/models/reactive_html.py +++ b/panel/models/reactive_html.py @@ -136,10 +136,11 @@ match = None # Handle looped variables - if match and (match in self.loop_map or '[' in match) and self._open_for: - if match in self.loop_map: - matches[matches.index('${%s}' % match)] = '${%s}' % self.loop_map[match] - match = self.loop_map[match] + if match and (match.strip() in self.loop_map or '[' in match) and self._open_for: + if match.strip() in self.loop_map: + loop_match = self.loop_map[match.strip()] + matches[matches.index('${%s}' % match)] = '${%s}' % loop_match + match = loop_match elif '[' in match: match, _ = match.split('[') dom_id = dom_id.replace('-{{ loop.index0 }}', '')
{"golden_diff": "diff --git a/panel/models/reactive_html.py b/panel/models/reactive_html.py\n--- a/panel/models/reactive_html.py\n+++ b/panel/models/reactive_html.py\n@@ -136,10 +136,11 @@\n match = None\n \n # Handle looped variables\n- if match and (match in self.loop_map or '[' in match) and self._open_for:\n- if match in self.loop_map:\n- matches[matches.index('${%s}' % match)] = '${%s}' % self.loop_map[match]\n- match = self.loop_map[match]\n+ if match and (match.strip() in self.loop_map or '[' in match) and self._open_for:\n+ if match.strip() in self.loop_map:\n+ loop_match = self.loop_map[match.strip()]\n+ matches[matches.index('${%s}' % match)] = '${%s}' % loop_match\n+ match = loop_match\n elif '[' in match:\n match, _ = match.split('[')\n dom_id = dom_id.replace('-{{ loop.index0 }}', '')\n", "issue": "Enable or document how to render options in a List Parameter as Literal elements\n## Request\r\n\r\nEnable or document how to render options in a List Parameter as Literal elements\r\n\r\n## Motivation\r\n\r\nI'm trying to show a user how to create a custom dropdown component using ReactiveHTML in https://discourse.holoviz.org/t/customize-panel-components-widgets-design/4187.\r\n\r\nThere is already an example of a *child* template/ dropdown here https://panel.holoviz.org/user_guide/Custom_Components.html#child-templates\r\n\r\nUnfortunately the drop down options are not rendered as Literal values but instead as panels, i.e. wrapping the literal value in `bk` `div`s.\r\n\r\nIn my example it makes the dropdown not look nice because a lot of margin is added.\r\n\r\n![image](https://user-images.githubusercontent.com/42288570/188283341-7364227c-03d1-4aaf-ab07-b349acf4c214.png)\r\n\r\n```python\r\nfrom panel.reactive import ReactiveHTML\r\nimport param\r\n\r\nclass ShoelaceSelect(ReactiveHTML):\r\n value = param.Parameter()\r\n options = param.List()\r\n _template = \"\"\"\r\n<sl-dropdown>\r\n <sl-button slot=\"trigger\" caret>Dropdown</sl-button>\r\n <sl-menu>\r\n {% for obj in options %}\r\n <sl-menu-item id=\"option\">${obj}</sl-menu-item>\r\n {% endfor %}\r\n </sl-menu>\r\n</sl-dropdown>\r\n\"\"\"\r\n\r\n __javascript_modules__=[\"https://cdn.jsdelivr.net/npm/@shoelace-style/[email protected]/dist/shoelace.js\"]\r\n __css__ = [\"https://cdn.jsdelivr.net/npm/@shoelace-style/[email protected]/dist/themes/light.css\"]\r\n # _child_config = {'obj': 'literal'}\r\n\r\nimport panel as pn\r\n\r\npn.extension()\r\n\r\nselect = ShoelaceSelect(options=[\"apple\", \"pear\", \"banana\"])\r\n\r\npn.Column(select).servable()\r\n```\r\n\r\nI simple cannot find any documentation or example telling me how to provide these a literal values.\n", "before_files": [{"content": "import difflib\nimport re\n\nfrom collections import defaultdict\nfrom html.parser import HTMLParser\n\nimport bokeh.core.properties as bp\n\nfrom bokeh.events import ModelEvent\nfrom bokeh.model import DataModel\nfrom bokeh.models import HTMLBox, LayoutDOM\n\nendfor = '{%-? endfor -?%}'\nlist_iter_re = r'{%-? for (\\s*[A-Za-z_]\\w*\\s*) in (\\s*[A-Za-z_]\\w*\\s*) -?%}'\nitems_iter_re = r'{%-? for \\s*[A-Za-z_]\\w*\\s*, (\\s*[A-Za-z_]\\w*\\s*) in (\\s*[A-Za-z_]\\w*\\s*)\\.items\\(\\) -?%}'\nvalues_iter_re = r'{%-? 
for (\\s*[A-Za-z_]\\w*\\s*) in (\\s*[A-Za-z_]\\w*\\s*)\\.values\\(\\) -?%}'\n\n\nclass ReactiveHTMLParser(HTMLParser):\n\n def __init__(self, cls, template=True):\n super().__init__()\n self.template = template\n self.cls = cls\n self.attrs = defaultdict(list)\n self.children = {}\n self.nodes = []\n self.looped = []\n self._template_re = re.compile(r'\\$\\{[^}]+\\}')\n self._literal_re = re.compile(r'\\{\\{[^}]+\\}\\}')\n self._current_node = None\n self._node_stack = []\n self._open_for = False\n self.loop_map = {}\n self.loop_var_map = defaultdict(list)\n\n def handle_starttag(self, tag, attrs):\n attrs = dict(attrs)\n dom_id = attrs.pop('id', None)\n self._current_node = None\n self._node_stack.append((tag, dom_id))\n\n if not dom_id:\n for attr, value in attrs.items():\n if value is None:\n continue\n params, methods = [], []\n for match in self._template_re.findall(value):\n match = match[2:-1]\n if match.startswith('model.'):\n continue\n if match in self.cls.param:\n params.append(match)\n elif hasattr(self.cls, match):\n methods.append(match)\n if methods:\n raise ValueError(\n \"DOM nodes with an attached callback must declare \"\n f\"an id. Found <{tag}> node with the `{attr}` callback \"\n f\"referencing the `{methods[0]}` method. Add an id \"\n \"attribute like this: \"\n f\"<{tag} id=\\\"{tag}\\\" {attr}=\\\"${{{methods[0]}}}>...</{tag}>.\"\n )\n elif params:\n literal = value.replace(f'${{{params[0]}}}', f'{{{{{params[0]}}}}}')\n raise ValueError(\n \"DOM node with a linked parameter declaration \"\n f\"must declare an id. Found <{tag}> node with \"\n f\"the `{attr}` attribute referencing the `{params[0]}` \"\n \"parameter. Either declare an id on the node, \"\n f\"i.e. <{tag} id=\\\"{tag}\\\" {attr}=\\\"{value}\\\">...</{tag}>, \"\n \"or insert the value as a literal: \"\n f\"<{tag} {attr}=\\\"{literal}\\\">...</{tag}>.\"\n )\n return\n\n if dom_id in self.nodes:\n raise ValueError(f'Multiple DOM nodes with id=\"{dom_id}\" found.')\n self._current_node = dom_id\n self.nodes.append(dom_id)\n for attr, value in attrs.items():\n if value is None:\n continue\n matches = []\n for match in self._template_re.findall(value):\n if not match[2:-1].startswith('model.'):\n matches.append(match[2:-1])\n if matches:\n self.attrs[dom_id].append((attr, matches, value.replace('${', '{')))\n\n def handle_endtag(self, tag):\n self._node_stack.pop()\n self._current_node = self._node_stack[-1][1] if self._node_stack else None\n\n def handle_data(self, data):\n if not self.template:\n return\n\n dom_id = self._current_node\n matches = []\n for match in self._template_re.findall(data):\n var = match[2:-1].strip()\n if match[2:-1] not in self.loop_var_map[var]:\n self.loop_var_map[var].append(match[2:-1])\n if var.endswith('.index0'):\n matches.append('${%s }}]}' % var)\n else:\n matches.append('${%s}' % var)\n\n literal_matches = []\n for match in self._literal_re.findall(data):\n match = match[2:-2].strip()\n if match.endswith('.index0'):\n literal_matches.append('{{%s }}]}' % match)\n else:\n literal_matches.append('{{ %s }}' % match)\n\n # Detect templating for loops\n list_loop = re.findall(list_iter_re, data)\n values_loop = re.findall(values_iter_re, data)\n items_loop = re.findall(items_iter_re, data)\n nloops = len(list_loop) + len(values_loop) + len(items_loop)\n if nloops > 1 and nloops and self._open_for:\n raise ValueError('Nested for loops currently not supported in templates.')\n elif nloops:\n loop = [loop for loop in (list_loop, values_loop, items_loop) if loop][0]\n var, obj = 
loop[0]\n if var in self.cls.param:\n raise ValueError(\n f'Loop variable {var} clashes with parameter name. '\n 'Ensure loop variables have a unique name. Relevant '\n f'template section:\\n\\n{data}'\n )\n self.loop_map[var] = obj\n\n open_for = re.search(r'{%-? for', data)\n end_for = re.search(endfor, data)\n if open_for:\n if self._current_node is None:\n node = self._node_stack[-1][0]\n raise ValueError(\n 'Loops may only be used inside a DOM node with an assigned ID. '\n f'The following loop could not be expanded because the <{node}> node '\n f'did not have an assigned id:\\n\\n {data.strip()}'\n )\n self._open_for = True\n if end_for and (not nloops or end_for.start() > open_for.start()):\n self._open_for = False\n\n if self._current_node and literal_matches:\n if len(literal_matches) == 1:\n literal_match = literal_matches[0][2:-2].strip()\n else:\n literal_match = None\n\n if literal_match and (literal_match in self.loop_map) and self._open_for:\n literal_match = self.loop_map[literal_match]\n self.looped.append((dom_id, literal_match))\n\n if not (self._current_node and matches):\n return\n\n if len(matches) == 1:\n match = matches[0][2:-1].strip()\n else:\n for match in matches:\n mode = self.cls._child_config.get(match, 'model')\n if mode != 'template':\n raise ValueError(f\"Cannot match multiple variables in '{mode}' mode.\")\n match = None\n\n # Handle looped variables\n if match and (match in self.loop_map or '[' in match) and self._open_for:\n if match in self.loop_map:\n matches[matches.index('${%s}' % match)] = '${%s}' % self.loop_map[match]\n match = self.loop_map[match]\n elif '[' in match:\n match, _ = match.split('[')\n dom_id = dom_id.replace('-{{ loop.index0 }}', '')\n self.looped.append((dom_id, match))\n\n mode = self.cls._child_config.get(match, 'model')\n if match in self.cls.param and mode != 'template':\n self.children[dom_id] = match\n return\n\n templates = []\n for match in matches:\n match = match[2:-1]\n if match.startswith('model.'):\n continue\n if match not in self.cls.param and '.' 
not in match:\n params = difflib.get_close_matches(match, list(self.cls.param))\n raise ValueError(f\"{self.cls.__name__} HTML template references \"\n f\"unknown parameter '{match}', similar parameters \"\n f\"include {params}.\")\n templates.append(match)\n self.attrs[dom_id].append(('children', templates, data.replace('${', '{')))\n\n\n\ndef find_attrs(html):\n p = ReactiveHTMLParser()\n p.feed(html)\n return p.attrs\n\n\n\nclass DOMEvent(ModelEvent):\n\n event_name = 'dom_event'\n\n def __init__(self, model, node=None, data=None):\n self.data = data\n self.node = node\n super().__init__(model=model)\n\n\nclass ReactiveHTML(HTMLBox):\n\n attrs = bp.Dict(bp.String, bp.List(bp.Tuple(bp.String, bp.List(bp.String), bp.String)))\n\n callbacks = bp.Dict(bp.String, bp.List(bp.Tuple(bp.String, bp.String)))\n\n children = bp.Dict(bp.String, bp.Either(bp.List(bp.Either(bp.Instance(LayoutDOM), bp.String)), bp.String))\n\n data = bp.Instance(DataModel)\n\n events = bp.Dict(bp.String, bp.Dict(bp.String, bp.Bool))\n\n html = bp.String()\n\n looped = bp.List(bp.String)\n\n nodes = bp.List(bp.String)\n\n scripts = bp.Dict(bp.String, bp.List(bp.String))\n\n def __init__(self, **props):\n if 'attrs' not in props and 'html' in props:\n props['attrs'] = find_attrs(props['html'])\n super().__init__(**props)\n", "path": "panel/models/reactive_html.py"}], "after_files": [{"content": "import difflib\nimport re\n\nfrom collections import defaultdict\nfrom html.parser import HTMLParser\n\nimport bokeh.core.properties as bp\n\nfrom bokeh.events import ModelEvent\nfrom bokeh.model import DataModel\nfrom bokeh.models import HTMLBox, LayoutDOM\n\nendfor = '{%-? endfor -?%}'\nlist_iter_re = r'{%-? for (\\s*[A-Za-z_]\\w*\\s*) in (\\s*[A-Za-z_]\\w*\\s*) -?%}'\nitems_iter_re = r'{%-? for \\s*[A-Za-z_]\\w*\\s*, (\\s*[A-Za-z_]\\w*\\s*) in (\\s*[A-Za-z_]\\w*\\s*)\\.items\\(\\) -?%}'\nvalues_iter_re = r'{%-? for (\\s*[A-Za-z_]\\w*\\s*) in (\\s*[A-Za-z_]\\w*\\s*)\\.values\\(\\) -?%}'\n\n\nclass ReactiveHTMLParser(HTMLParser):\n\n def __init__(self, cls, template=True):\n super().__init__()\n self.template = template\n self.cls = cls\n self.attrs = defaultdict(list)\n self.children = {}\n self.nodes = []\n self.looped = []\n self._template_re = re.compile(r'\\$\\{[^}]+\\}')\n self._current_node = None\n self._node_stack = []\n self._open_for = False\n self.loop_map = {}\n\n def handle_starttag(self, tag, attrs):\n attrs = dict(attrs)\n dom_id = attrs.pop('id', None)\n self._current_node = None\n self._node_stack.append((tag, dom_id))\n\n if not dom_id:\n for attr, value in attrs.items():\n if value is None:\n continue\n params, methods = [], []\n for match in self._template_re.findall(value):\n match = match[2:-1]\n if match.startswith('model.'):\n continue\n if match in self.cls.param:\n params.append(match)\n elif hasattr(self.cls, match):\n methods.append(match)\n if methods:\n raise ValueError(\n \"DOM nodes with an attached callback must declare \"\n f\"an id. Found <{tag}> node with the `{attr}` callback \"\n f\"referencing the `{methods[0]}` method. Add an id \"\n \"attribute like this: \"\n f\"<{tag} id=\\\"{tag}\\\" {attr}=\\\"${{{methods[0]}}}>...</{tag}>.\"\n )\n elif params:\n literal = value.replace(f'${{{params[0]}}}', f'{{{{{params[0]}}}}}')\n raise ValueError(\n \"DOM node with a linked parameter declaration \"\n f\"must declare an id. Found <{tag}> node with \"\n f\"the `{attr}` attribute referencing the `{params[0]}` \"\n \"parameter. Either declare an id on the node, \"\n f\"i.e. 
<{tag} id=\\\"{tag}\\\" {attr}=\\\"{value}\\\">...</{tag}>, \"\n \"or insert the value as a literal: \"\n f\"<{tag} {attr}=\\\"{literal}\\\">...</{tag}>.\"\n )\n return\n\n if dom_id in self.nodes:\n raise ValueError(f'Multiple DOM nodes with id=\"{dom_id}\" found.')\n self._current_node = dom_id\n self.nodes.append(dom_id)\n for attr, value in attrs.items():\n if value is None:\n continue\n matches = []\n for match in self._template_re.findall(value):\n if not match[2:-1].startswith('model.'):\n matches.append(match[2:-1])\n if matches:\n self.attrs[dom_id].append((attr, matches, value.replace('${', '{')))\n\n def handle_endtag(self, tag):\n self._node_stack.pop()\n self._current_node = self._node_stack[-1][1] if self._node_stack else None\n\n def handle_data(self, data):\n if not self.template:\n return\n\n dom_id = self._current_node\n matches = [\n '%s}]}' % match if match.endswith('.index0 }') else match\n for match in self._template_re.findall(data)\n ]\n\n # Detect templating for loops\n list_loop = re.findall(list_iter_re, data)\n values_loop = re.findall(values_iter_re, data)\n items_loop = re.findall(items_iter_re, data)\n nloops = len(list_loop) + len(values_loop) + len(items_loop)\n if nloops > 1 and nloops and self._open_for:\n raise ValueError('Nested for loops currently not supported in templates.')\n elif nloops:\n loop = [loop for loop in (list_loop, values_loop, items_loop) if loop][0]\n var, obj = loop[0]\n if var in self.cls.param:\n raise ValueError(f'Loop variable {var} clashes with parameter name. '\n 'Ensure loop variables have a unique name. Relevant '\n f'template section:\\n\\n{data}')\n self.loop_map[var] = obj\n\n open_for = re.search(r'{%-? for', data)\n end_for = re.search(endfor, data)\n if open_for:\n self._open_for = True\n if end_for and (not nloops or end_for.start() > open_for.start()):\n self._open_for = False\n\n if not (self._current_node and matches):\n return\n\n if len(matches) == 1:\n match = matches[0][2:-1]\n else:\n for match in matches:\n mode = self.cls._child_config.get(match, 'model')\n if mode != 'template':\n raise ValueError(f\"Cannot match multiple variables in '{mode}' mode.\")\n match = None\n\n # Handle looped variables\n if match and (match.strip() in self.loop_map or '[' in match) and self._open_for:\n if match.strip() in self.loop_map:\n loop_match = self.loop_map[match.strip()]\n matches[matches.index('${%s}' % match)] = '${%s}' % loop_match\n match = loop_match\n elif '[' in match:\n match, _ = match.split('[')\n dom_id = dom_id.replace('-{{ loop.index0 }}', '')\n self.looped.append((dom_id, match))\n\n mode = self.cls._child_config.get(match, 'model')\n if match in self.cls.param and mode != 'template':\n self.children[dom_id] = match\n return\n\n templates = []\n for match in matches:\n match = match[2:-1]\n if match.startswith('model.'):\n continue\n if match not in self.cls.param and '.' 
not in match:\n params = difflib.get_close_matches(match, list(self.cls.param))\n raise ValueError(f\"{self.cls.__name__} HTML template references \"\n f\"unknown parameter '{match}', similar parameters \"\n f\"include {params}.\")\n templates.append(match)\n self.attrs[dom_id].append(('children', templates, data.replace('${', '{')))\n\n\n\ndef find_attrs(html):\n p = ReactiveHTMLParser()\n p.feed(html)\n return p.attrs\n\n\n\nclass DOMEvent(ModelEvent):\n\n event_name = 'dom_event'\n\n def __init__(self, model, node=None, data=None):\n self.data = data\n self.node = node\n super().__init__(model=model)\n\n\nclass ReactiveHTML(HTMLBox):\n\n attrs = bp.Dict(bp.String, bp.List(bp.Tuple(bp.String, bp.List(bp.String), bp.String)))\n\n callbacks = bp.Dict(bp.String, bp.List(bp.Tuple(bp.String, bp.String)))\n\n children = bp.Dict(bp.String, bp.Either(bp.List(bp.Either(bp.Instance(LayoutDOM), bp.String)), bp.String))\n\n data = bp.Instance(DataModel)\n\n events = bp.Dict(bp.String, bp.Dict(bp.String, bp.Bool))\n\n html = bp.String()\n\n looped = bp.List(bp.String)\n\n nodes = bp.List(bp.String)\n\n scripts = bp.Dict(bp.String, bp.List(bp.String))\n\n def __init__(self, **props):\n if 'attrs' not in props and 'html' in props:\n props['attrs'] = find_attrs(props['html'])\n super().__init__(**props)\n", "path": "panel/models/reactive_html.py"}]}
3,472
241
gh_patches_debug_19668
rasdani/github-patches
git_diff
docker__docker-py-1050
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- login failed with default registry I am using docker-py (1.8.0) and trying to using login API. If I don't input `registry='https://index.docker.io/v1/'` . It will raise exception as following: ``` docker.errors.APIError: 500 Server Error: Internal Server Error ("Unexpected status code [301] :") ``` But I saw https://github.com/docker/docker-py/blob/81edb398ebf7ce5c7ef14aa0739de5329589aabe/docker/api/daemon.py#L52 in source code. Should work with default registry. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docker/api/daemon.py` Content: ``` 1 import os 2 import warnings 3 from datetime import datetime 4 5 from ..auth import auth 6 from ..constants import INSECURE_REGISTRY_DEPRECATION_WARNING 7 from ..utils import utils 8 9 10 class DaemonApiMixin(object): 11 def events(self, since=None, until=None, filters=None, decode=None): 12 if isinstance(since, datetime): 13 since = utils.datetime_to_timestamp(since) 14 15 if isinstance(until, datetime): 16 until = utils.datetime_to_timestamp(until) 17 18 if filters: 19 filters = utils.convert_filters(filters) 20 21 params = { 22 'since': since, 23 'until': until, 24 'filters': filters 25 } 26 27 return self._stream_helper( 28 self.get(self._url('/events'), params=params, stream=True), 29 decode=decode 30 ) 31 32 def info(self): 33 return self._result(self._get(self._url("/info")), True) 34 35 def login(self, username, password=None, email=None, registry=None, 36 reauth=False, insecure_registry=False, dockercfg_path=None): 37 if insecure_registry: 38 warnings.warn( 39 INSECURE_REGISTRY_DEPRECATION_WARNING.format('login()'), 40 DeprecationWarning 41 ) 42 43 # If we don't have any auth data so far, try reloading the config file 44 # one more time in case anything showed up in there. 45 # If dockercfg_path is passed check to see if the config file exists, 46 # if so load that config. 47 if dockercfg_path and os.path.exists(dockercfg_path): 48 self._auth_configs = auth.load_config(dockercfg_path) 49 elif not self._auth_configs: 50 self._auth_configs = auth.load_config() 51 52 registry = registry or auth.INDEX_URL 53 54 authcfg = auth.resolve_authconfig(self._auth_configs, registry) 55 # If we found an existing auth config for this registry and username 56 # combination, we can return it immediately unless reauth is requested. 57 if authcfg and authcfg.get('username', None) == username \ 58 and not reauth: 59 return authcfg 60 61 req_data = { 62 'username': username, 63 'password': password, 64 'email': email, 65 'serveraddress': registry, 66 } 67 68 response = self._post_json(self._url('/auth'), data=req_data) 69 if response.status_code == 200: 70 self._auth_configs[registry] = req_data 71 return self._result(response, json=True) 72 73 def ping(self): 74 return self._result(self._get(self._url('/_ping'))) 75 76 def version(self, api_version=True): 77 url = self._url("/version", versioned_api=api_version) 78 return self._result(self._get(url), json=True) 79 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docker/api/daemon.py b/docker/api/daemon.py --- a/docker/api/daemon.py +++ b/docker/api/daemon.py @@ -49,8 +49,6 @@ elif not self._auth_configs: self._auth_configs = auth.load_config() - registry = registry or auth.INDEX_URL - authcfg = auth.resolve_authconfig(self._auth_configs, registry) # If we found an existing auth config for this registry and username # combination, we can return it immediately unless reauth is requested. @@ -67,7 +65,7 @@ response = self._post_json(self._url('/auth'), data=req_data) if response.status_code == 200: - self._auth_configs[registry] = req_data + self._auth_configs[registry or auth.INDEX_NAME] = req_data return self._result(response, json=True) def ping(self):
{"golden_diff": "diff --git a/docker/api/daemon.py b/docker/api/daemon.py\n--- a/docker/api/daemon.py\n+++ b/docker/api/daemon.py\n@@ -49,8 +49,6 @@\n elif not self._auth_configs:\n self._auth_configs = auth.load_config()\n \n- registry = registry or auth.INDEX_URL\n-\n authcfg = auth.resolve_authconfig(self._auth_configs, registry)\n # If we found an existing auth config for this registry and username\n # combination, we can return it immediately unless reauth is requested.\n@@ -67,7 +65,7 @@\n \n response = self._post_json(self._url('/auth'), data=req_data)\n if response.status_code == 200:\n- self._auth_configs[registry] = req_data\n+ self._auth_configs[registry or auth.INDEX_NAME] = req_data\n return self._result(response, json=True)\n \n def ping(self):\n", "issue": "login failed with default registry\nI am using docker-py (1.8.0) and trying to using login API.\n\nIf I don't input `registry='https://index.docker.io/v1/'` .\nIt will raise exception as following:\n\n```\ndocker.errors.APIError: 500 Server Error: Internal Server Error (\"Unexpected status code [301] :\")\n```\n\nBut I saw https://github.com/docker/docker-py/blob/81edb398ebf7ce5c7ef14aa0739de5329589aabe/docker/api/daemon.py#L52 in source code. Should work with default registry.\n\n", "before_files": [{"content": "import os\nimport warnings\nfrom datetime import datetime\n\nfrom ..auth import auth\nfrom ..constants import INSECURE_REGISTRY_DEPRECATION_WARNING\nfrom ..utils import utils\n\n\nclass DaemonApiMixin(object):\n def events(self, since=None, until=None, filters=None, decode=None):\n if isinstance(since, datetime):\n since = utils.datetime_to_timestamp(since)\n\n if isinstance(until, datetime):\n until = utils.datetime_to_timestamp(until)\n\n if filters:\n filters = utils.convert_filters(filters)\n\n params = {\n 'since': since,\n 'until': until,\n 'filters': filters\n }\n\n return self._stream_helper(\n self.get(self._url('/events'), params=params, stream=True),\n decode=decode\n )\n\n def info(self):\n return self._result(self._get(self._url(\"/info\")), True)\n\n def login(self, username, password=None, email=None, registry=None,\n reauth=False, insecure_registry=False, dockercfg_path=None):\n if insecure_registry:\n warnings.warn(\n INSECURE_REGISTRY_DEPRECATION_WARNING.format('login()'),\n DeprecationWarning\n )\n\n # If we don't have any auth data so far, try reloading the config file\n # one more time in case anything showed up in there.\n # If dockercfg_path is passed check to see if the config file exists,\n # if so load that config.\n if dockercfg_path and os.path.exists(dockercfg_path):\n self._auth_configs = auth.load_config(dockercfg_path)\n elif not self._auth_configs:\n self._auth_configs = auth.load_config()\n\n registry = registry or auth.INDEX_URL\n\n authcfg = auth.resolve_authconfig(self._auth_configs, registry)\n # If we found an existing auth config for this registry and username\n # combination, we can return it immediately unless reauth is requested.\n if authcfg and authcfg.get('username', None) == username \\\n and not reauth:\n return authcfg\n\n req_data = {\n 'username': username,\n 'password': password,\n 'email': email,\n 'serveraddress': registry,\n }\n\n response = self._post_json(self._url('/auth'), data=req_data)\n if response.status_code == 200:\n self._auth_configs[registry] = req_data\n return self._result(response, json=True)\n\n def ping(self):\n return self._result(self._get(self._url('/_ping')))\n\n def version(self, api_version=True):\n url = self._url(\"/version\", 
versioned_api=api_version)\n return self._result(self._get(url), json=True)\n", "path": "docker/api/daemon.py"}], "after_files": [{"content": "import os\nimport warnings\nfrom datetime import datetime\n\nfrom ..auth import auth\nfrom ..constants import INSECURE_REGISTRY_DEPRECATION_WARNING\nfrom ..utils import utils\n\n\nclass DaemonApiMixin(object):\n def events(self, since=None, until=None, filters=None, decode=None):\n if isinstance(since, datetime):\n since = utils.datetime_to_timestamp(since)\n\n if isinstance(until, datetime):\n until = utils.datetime_to_timestamp(until)\n\n if filters:\n filters = utils.convert_filters(filters)\n\n params = {\n 'since': since,\n 'until': until,\n 'filters': filters\n }\n\n return self._stream_helper(\n self.get(self._url('/events'), params=params, stream=True),\n decode=decode\n )\n\n def info(self):\n return self._result(self._get(self._url(\"/info\")), True)\n\n def login(self, username, password=None, email=None, registry=None,\n reauth=False, insecure_registry=False, dockercfg_path=None):\n if insecure_registry:\n warnings.warn(\n INSECURE_REGISTRY_DEPRECATION_WARNING.format('login()'),\n DeprecationWarning\n )\n\n # If we don't have any auth data so far, try reloading the config file\n # one more time in case anything showed up in there.\n # If dockercfg_path is passed check to see if the config file exists,\n # if so load that config.\n if dockercfg_path and os.path.exists(dockercfg_path):\n self._auth_configs = auth.load_config(dockercfg_path)\n elif not self._auth_configs:\n self._auth_configs = auth.load_config()\n\n authcfg = auth.resolve_authconfig(self._auth_configs, registry)\n # If we found an existing auth config for this registry and username\n # combination, we can return it immediately unless reauth is requested.\n if authcfg and authcfg.get('username', None) == username \\\n and not reauth:\n return authcfg\n\n req_data = {\n 'username': username,\n 'password': password,\n 'email': email,\n 'serveraddress': registry,\n }\n\n response = self._post_json(self._url('/auth'), data=req_data)\n if response.status_code == 200:\n self._auth_configs[registry or auth.INDEX_NAME] = req_data\n return self._result(response, json=True)\n\n def ping(self):\n return self._result(self._get(self._url('/_ping')))\n\n def version(self, api_version=True):\n url = self._url(\"/version\", versioned_api=api_version)\n return self._result(self._get(url), json=True)\n", "path": "docker/api/daemon.py"}]}
1,140
208
gh_patches_debug_8290
rasdani/github-patches
git_diff
ipython__ipython-13377
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- %rerun doesn't handle integer parsing errors ## Steps to reproduce * Execute `%rerun -l one` ## Expected result An error message is printed, explaining the usage of the `-l` option. ## Actual result An uncaught `ValueError` with the traceback that's exposing the internals of the implementation and not being too helpful. ## System info Manjaro Linux, Python 3.9.4, IPython 7.22.0 (reproduces on IPython 8.0.0.dev) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `IPython/core/magics/history.py` Content: ``` 1 """Implementation of magic functions related to History. 2 """ 3 #----------------------------------------------------------------------------- 4 # Copyright (c) 2012, IPython Development Team. 5 # 6 # Distributed under the terms of the Modified BSD License. 7 # 8 # The full license is in the file COPYING.txt, distributed with this software. 9 #----------------------------------------------------------------------------- 10 11 #----------------------------------------------------------------------------- 12 # Imports 13 #----------------------------------------------------------------------------- 14 15 # Stdlib 16 import os 17 import sys 18 from io import open as io_open 19 import fnmatch 20 21 # Our own packages 22 from IPython.core.error import StdinNotImplementedError 23 from IPython.core.magic import Magics, magics_class, line_magic 24 from IPython.core.magic_arguments import (argument, magic_arguments, 25 parse_argstring) 26 from IPython.testing.skipdoctest import skip_doctest 27 from IPython.utils import io 28 29 #----------------------------------------------------------------------------- 30 # Magics class implementation 31 #----------------------------------------------------------------------------- 32 33 34 _unspecified = object() 35 36 37 @magics_class 38 class HistoryMagics(Magics): 39 40 @magic_arguments() 41 @argument( 42 '-n', dest='print_nums', action='store_true', default=False, 43 help=""" 44 print line numbers for each input. 45 This feature is only available if numbered prompts are in use. 46 """) 47 @argument( 48 '-o', dest='get_output', action='store_true', default=False, 49 help="also print outputs for each input.") 50 @argument( 51 '-p', dest='pyprompts', action='store_true', default=False, 52 help=""" 53 print classic '>>>' python prompts before each input. 54 This is useful for making documentation, and in conjunction 55 with -o, for producing doctest-ready output. 56 """) 57 @argument( 58 '-t', dest='raw', action='store_false', default=True, 59 help=""" 60 print the 'translated' history, as IPython understands it. 61 IPython filters your input and converts it all into valid Python 62 source before executing it (things like magics or aliases are turned 63 into function calls, for example). With this option, you'll see the 64 native history instead of the user-entered version: '%%cd /' will be 65 seen as 'get_ipython().run_line_magic("cd", "/")' instead of '%%cd /'. 66 """) 67 @argument( 68 '-f', dest='filename', 69 help=""" 70 FILENAME: instead of printing the output to the screen, redirect 71 it to the given file. The file is always overwritten, though *when 72 it can*, IPython asks for confirmation first. 
In particular, running 73 the command 'history -f FILENAME' from the IPython Notebook 74 interface will replace FILENAME even if it already exists *without* 75 confirmation. 76 """) 77 @argument( 78 '-g', dest='pattern', nargs='*', default=None, 79 help=""" 80 treat the arg as a glob pattern to search for in (full) history. 81 This includes the saved history (almost all commands ever written). 82 The pattern may contain '?' to match one unknown character and '*' 83 to match any number of unknown characters. Use '%%hist -g' to show 84 full saved history (may be very long). 85 """) 86 @argument( 87 '-l', dest='limit', type=int, nargs='?', default=_unspecified, 88 help=""" 89 get the last n lines from all sessions. Specify n as a single 90 arg, or the default is the last 10 lines. 91 """) 92 @argument( 93 '-u', dest='unique', action='store_true', 94 help=""" 95 when searching history using `-g`, show only unique history. 96 """) 97 @argument('range', nargs='*') 98 @skip_doctest 99 @line_magic 100 def history(self, parameter_s = ''): 101 """Print input history (_i<n> variables), with most recent last. 102 103 By default, input history is printed without line numbers so it can be 104 directly pasted into an editor. Use -n to show them. 105 106 By default, all input history from the current session is displayed. 107 Ranges of history can be indicated using the syntax: 108 109 ``4`` 110 Line 4, current session 111 ``4-6`` 112 Lines 4-6, current session 113 ``243/1-5`` 114 Lines 1-5, session 243 115 ``~2/7`` 116 Line 7, session 2 before current 117 ``~8/1-~6/5`` 118 From the first line of 8 sessions ago, to the fifth line of 6 119 sessions ago. 120 121 Multiple ranges can be entered, separated by spaces 122 123 The same syntax is used by %macro, %save, %edit, %rerun 124 125 Examples 126 -------- 127 :: 128 129 In [6]: %history -n 4-6 130 4:a = 12 131 5:print a**2 132 6:%history -n 4-6 133 134 """ 135 136 args = parse_argstring(self.history, parameter_s) 137 138 # For brevity 139 history_manager = self.shell.history_manager 140 141 def _format_lineno(session, line): 142 """Helper function to format line numbers properly.""" 143 if session in (0, history_manager.session_number): 144 return str(line) 145 return "%s/%s" % (session, line) 146 147 # Check if output to specific file was requested. 148 outfname = args.filename 149 if not outfname: 150 outfile = sys.stdout # default 151 # We don't want to close stdout at the end! 152 close_at_end = False 153 else: 154 if os.path.exists(outfname): 155 try: 156 ans = io.ask_yes_no("File %r exists. Overwrite?" 
% outfname) 157 except StdinNotImplementedError: 158 ans = True 159 if not ans: 160 print('Aborting.') 161 return 162 print("Overwriting file.") 163 outfile = io_open(outfname, 'w', encoding='utf-8') 164 close_at_end = True 165 166 print_nums = args.print_nums 167 get_output = args.get_output 168 pyprompts = args.pyprompts 169 raw = args.raw 170 171 pattern = None 172 limit = None if args.limit is _unspecified else args.limit 173 174 range_pattern = False 175 if args.pattern is not None and not args.range: 176 if args.pattern: 177 pattern = "*" + " ".join(args.pattern) + "*" 178 else: 179 pattern = "*" 180 hist = history_manager.search(pattern, raw=raw, output=get_output, 181 n=limit, unique=args.unique) 182 print_nums = True 183 elif args.limit is not _unspecified: 184 n = 10 if limit is None else limit 185 hist = history_manager.get_tail(n, raw=raw, output=get_output) 186 else: 187 if args.pattern: 188 range_pattern = "*" + " ".join(args.pattern) + "*" 189 print_nums = True 190 hist = history_manager.get_range_by_str( 191 " ".join(args.range), raw, get_output 192 ) 193 194 # We could be displaying the entire history, so let's not try to pull 195 # it into a list in memory. Anything that needs more space will just 196 # misalign. 197 width = 4 198 199 for session, lineno, inline in hist: 200 # Print user history with tabs expanded to 4 spaces. The GUI 201 # clients use hard tabs for easier usability in auto-indented code, 202 # but we want to produce PEP-8 compliant history for safe pasting 203 # into an editor. 204 if get_output: 205 inline, output = inline 206 if range_pattern: 207 if not fnmatch.fnmatch(inline, range_pattern): 208 continue 209 inline = inline.expandtabs(4).rstrip() 210 211 multiline = "\n" in inline 212 line_sep = '\n' if multiline else ' ' 213 if print_nums: 214 print(u'%s:%s' % (_format_lineno(session, lineno).rjust(width), 215 line_sep), file=outfile, end=u'') 216 if pyprompts: 217 print(u">>> ", end=u"", file=outfile) 218 if multiline: 219 inline = "\n... ".join(inline.splitlines()) + "\n..." 220 print(inline, file=outfile) 221 if get_output and output: 222 print(output, file=outfile) 223 224 if close_at_end: 225 outfile.close() 226 227 @line_magic 228 def recall(self, arg): 229 r"""Repeat a command, or get command to input line for editing. 230 231 %recall and %rep are equivalent. 232 233 - %recall (no arguments): 234 235 Place a string version of last computation result (stored in the 236 special '_' variable) to the next input prompt. Allows you to create 237 elaborate command lines without using copy-paste:: 238 239 In[1]: l = ["hei", "vaan"] 240 In[2]: "".join(l) 241 Out[2]: heivaan 242 In[3]: %recall 243 In[4]: heivaan_ <== cursor blinking 244 245 %recall 45 246 247 Place history line 45 on the next input prompt. Use %hist to find 248 out the number. 249 250 %recall 1-4 251 252 Combine the specified lines into one cell, and place it on the next 253 input prompt. See %history for the slice syntax. 254 255 %recall foo+bar 256 257 If foo+bar can be evaluated in the user namespace, the result is 258 placed at the next input prompt. Otherwise, the history is searched 259 for lines which contain that substring, and the most recent one is 260 placed at the next input prompt. 
261 """ 262 if not arg: # Last output 263 self.shell.set_next_input(str(self.shell.user_ns["_"])) 264 return 265 # Get history range 266 histlines = self.shell.history_manager.get_range_by_str(arg) 267 cmd = "\n".join(x[2] for x in histlines) 268 if cmd: 269 self.shell.set_next_input(cmd.rstrip()) 270 return 271 272 try: # Variable in user namespace 273 cmd = str(eval(arg, self.shell.user_ns)) 274 except Exception: # Search for term in history 275 histlines = self.shell.history_manager.search("*"+arg+"*") 276 for h in reversed([x[2] for x in histlines]): 277 if 'recall' in h or 'rep' in h: 278 continue 279 self.shell.set_next_input(h.rstrip()) 280 return 281 else: 282 self.shell.set_next_input(cmd.rstrip()) 283 return 284 print("Couldn't evaluate or find in history:", arg) 285 286 @line_magic 287 def rerun(self, parameter_s=''): 288 """Re-run previous input 289 290 By default, you can specify ranges of input history to be repeated 291 (as with %history). With no arguments, it will repeat the last line. 292 293 Options: 294 295 -l <n> : Repeat the last n lines of input, not including the 296 current command. 297 298 -g foo : Repeat the most recent line which contains foo 299 """ 300 opts, args = self.parse_options(parameter_s, 'l:g:', mode='string') 301 if "l" in opts: # Last n lines 302 n = int(opts['l']) 303 304 if n == 0: 305 print("Requested 0 last lines - nothing to run") 306 return 307 elif n < 0: 308 print("Number of lines to rerun cannot be negative") 309 return 310 311 hist = self.shell.history_manager.get_tail(n) 312 elif "g" in opts: # Search 313 p = "*"+opts['g']+"*" 314 hist = list(self.shell.history_manager.search(p)) 315 for l in reversed(hist): 316 if "rerun" not in l[2]: 317 hist = [l] # The last match which isn't a %rerun 318 break 319 else: 320 hist = [] # No matches except %rerun 321 elif args: # Specify history ranges 322 hist = self.shell.history_manager.get_range_by_str(args) 323 else: # Last line 324 hist = self.shell.history_manager.get_tail(1) 325 hist = [x[2] for x in hist] 326 if not hist: 327 print("No lines in history match specification") 328 return 329 histlines = "\n".join(hist) 330 print("=== Executing: ===") 331 print(histlines) 332 print("=== Output: ===") 333 self.shell.run_cell("\n".join(hist), store_history=False) 334 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/IPython/core/magics/history.py b/IPython/core/magics/history.py --- a/IPython/core/magics/history.py +++ b/IPython/core/magics/history.py @@ -299,7 +299,11 @@ """ opts, args = self.parse_options(parameter_s, 'l:g:', mode='string') if "l" in opts: # Last n lines - n = int(opts['l']) + try: + n = int(opts["l"]) + except ValueError: + print("Number of lines must be an integer") + return if n == 0: print("Requested 0 last lines - nothing to run")
{"golden_diff": "diff --git a/IPython/core/magics/history.py b/IPython/core/magics/history.py\n--- a/IPython/core/magics/history.py\n+++ b/IPython/core/magics/history.py\n@@ -299,7 +299,11 @@\n \"\"\"\n opts, args = self.parse_options(parameter_s, 'l:g:', mode='string')\n if \"l\" in opts: # Last n lines\n- n = int(opts['l'])\n+ try:\n+ n = int(opts[\"l\"])\n+ except ValueError:\n+ print(\"Number of lines must be an integer\")\n+ return\n \n if n == 0:\n print(\"Requested 0 last lines - nothing to run\")\n", "issue": "%rerun doesn't handle integer parsing errors\n## Steps to reproduce\r\n\r\n* Execute `%rerun -l one`\r\n\r\n## Expected result\r\nAn error message is printed, explaining the usage of the `-l` option.\r\n\r\n## Actual result\r\nAn uncaught `ValueError` with the traceback that's exposing the internals of the implementation and not being too helpful.\r\n\r\n## System info\r\nManjaro Linux, Python 3.9.4, IPython 7.22.0 (reproduces on IPython 8.0.0.dev)\n", "before_files": [{"content": "\"\"\"Implementation of magic functions related to History.\n\"\"\"\n#-----------------------------------------------------------------------------\n# Copyright (c) 2012, IPython Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# Stdlib\nimport os\nimport sys\nfrom io import open as io_open\nimport fnmatch\n\n# Our own packages\nfrom IPython.core.error import StdinNotImplementedError\nfrom IPython.core.magic import Magics, magics_class, line_magic\nfrom IPython.core.magic_arguments import (argument, magic_arguments,\n parse_argstring)\nfrom IPython.testing.skipdoctest import skip_doctest\nfrom IPython.utils import io\n\n#-----------------------------------------------------------------------------\n# Magics class implementation\n#-----------------------------------------------------------------------------\n\n\n_unspecified = object()\n\n\n@magics_class\nclass HistoryMagics(Magics):\n\n @magic_arguments()\n @argument(\n '-n', dest='print_nums', action='store_true', default=False,\n help=\"\"\"\n print line numbers for each input.\n This feature is only available if numbered prompts are in use.\n \"\"\")\n @argument(\n '-o', dest='get_output', action='store_true', default=False,\n help=\"also print outputs for each input.\")\n @argument(\n '-p', dest='pyprompts', action='store_true', default=False,\n help=\"\"\"\n print classic '>>>' python prompts before each input.\n This is useful for making documentation, and in conjunction\n with -o, for producing doctest-ready output.\n \"\"\")\n @argument(\n '-t', dest='raw', action='store_false', default=True,\n help=\"\"\"\n print the 'translated' history, as IPython understands it.\n IPython filters your input and converts it all into valid Python\n source before executing it (things like magics or aliases are turned\n into function calls, for example). With this option, you'll see the\n native history instead of the user-entered version: '%%cd /' will be\n seen as 'get_ipython().run_line_magic(\"cd\", \"/\")' instead of '%%cd /'.\n \"\"\")\n @argument(\n '-f', dest='filename',\n help=\"\"\"\n FILENAME: instead of printing the output to the screen, redirect\n it to the given file. 
The file is always overwritten, though *when\n it can*, IPython asks for confirmation first. In particular, running\n the command 'history -f FILENAME' from the IPython Notebook\n interface will replace FILENAME even if it already exists *without*\n confirmation.\n \"\"\")\n @argument(\n '-g', dest='pattern', nargs='*', default=None,\n help=\"\"\"\n treat the arg as a glob pattern to search for in (full) history.\n This includes the saved history (almost all commands ever written).\n The pattern may contain '?' to match one unknown character and '*'\n to match any number of unknown characters. Use '%%hist -g' to show\n full saved history (may be very long).\n \"\"\")\n @argument(\n '-l', dest='limit', type=int, nargs='?', default=_unspecified,\n help=\"\"\"\n get the last n lines from all sessions. Specify n as a single\n arg, or the default is the last 10 lines.\n \"\"\")\n @argument(\n '-u', dest='unique', action='store_true',\n help=\"\"\"\n when searching history using `-g`, show only unique history.\n \"\"\")\n @argument('range', nargs='*')\n @skip_doctest\n @line_magic\n def history(self, parameter_s = ''):\n \"\"\"Print input history (_i<n> variables), with most recent last.\n\n By default, input history is printed without line numbers so it can be\n directly pasted into an editor. Use -n to show them.\n\n By default, all input history from the current session is displayed.\n Ranges of history can be indicated using the syntax:\n \n ``4``\n Line 4, current session\n ``4-6``\n Lines 4-6, current session\n ``243/1-5``\n Lines 1-5, session 243\n ``~2/7``\n Line 7, session 2 before current\n ``~8/1-~6/5``\n From the first line of 8 sessions ago, to the fifth line of 6\n sessions ago.\n \n Multiple ranges can be entered, separated by spaces\n\n The same syntax is used by %macro, %save, %edit, %rerun\n\n Examples\n --------\n ::\n\n In [6]: %history -n 4-6\n 4:a = 12\n 5:print a**2\n 6:%history -n 4-6\n\n \"\"\"\n\n args = parse_argstring(self.history, parameter_s)\n\n # For brevity\n history_manager = self.shell.history_manager\n\n def _format_lineno(session, line):\n \"\"\"Helper function to format line numbers properly.\"\"\"\n if session in (0, history_manager.session_number):\n return str(line)\n return \"%s/%s\" % (session, line)\n\n # Check if output to specific file was requested.\n outfname = args.filename\n if not outfname:\n outfile = sys.stdout # default\n # We don't want to close stdout at the end!\n close_at_end = False\n else:\n if os.path.exists(outfname):\n try:\n ans = io.ask_yes_no(\"File %r exists. 
Overwrite?\" % outfname)\n except StdinNotImplementedError:\n ans = True\n if not ans:\n print('Aborting.')\n return\n print(\"Overwriting file.\")\n outfile = io_open(outfname, 'w', encoding='utf-8')\n close_at_end = True\n\n print_nums = args.print_nums\n get_output = args.get_output\n pyprompts = args.pyprompts\n raw = args.raw\n\n pattern = None\n limit = None if args.limit is _unspecified else args.limit\n\n range_pattern = False\n if args.pattern is not None and not args.range:\n if args.pattern:\n pattern = \"*\" + \" \".join(args.pattern) + \"*\"\n else:\n pattern = \"*\"\n hist = history_manager.search(pattern, raw=raw, output=get_output,\n n=limit, unique=args.unique)\n print_nums = True\n elif args.limit is not _unspecified:\n n = 10 if limit is None else limit\n hist = history_manager.get_tail(n, raw=raw, output=get_output)\n else:\n if args.pattern:\n range_pattern = \"*\" + \" \".join(args.pattern) + \"*\"\n print_nums = True\n hist = history_manager.get_range_by_str(\n \" \".join(args.range), raw, get_output\n )\n\n # We could be displaying the entire history, so let's not try to pull\n # it into a list in memory. Anything that needs more space will just\n # misalign.\n width = 4\n\n for session, lineno, inline in hist:\n # Print user history with tabs expanded to 4 spaces. The GUI\n # clients use hard tabs for easier usability in auto-indented code,\n # but we want to produce PEP-8 compliant history for safe pasting\n # into an editor.\n if get_output:\n inline, output = inline\n if range_pattern:\n if not fnmatch.fnmatch(inline, range_pattern):\n continue\n inline = inline.expandtabs(4).rstrip()\n\n multiline = \"\\n\" in inline\n line_sep = '\\n' if multiline else ' '\n if print_nums:\n print(u'%s:%s' % (_format_lineno(session, lineno).rjust(width),\n line_sep), file=outfile, end=u'')\n if pyprompts:\n print(u\">>> \", end=u\"\", file=outfile)\n if multiline:\n inline = \"\\n... \".join(inline.splitlines()) + \"\\n...\"\n print(inline, file=outfile)\n if get_output and output:\n print(output, file=outfile)\n\n if close_at_end:\n outfile.close()\n\n @line_magic\n def recall(self, arg):\n r\"\"\"Repeat a command, or get command to input line for editing.\n\n %recall and %rep are equivalent.\n\n - %recall (no arguments):\n\n Place a string version of last computation result (stored in the\n special '_' variable) to the next input prompt. Allows you to create\n elaborate command lines without using copy-paste::\n\n In[1]: l = [\"hei\", \"vaan\"]\n In[2]: \"\".join(l)\n Out[2]: heivaan\n In[3]: %recall\n In[4]: heivaan_ <== cursor blinking\n\n %recall 45\n\n Place history line 45 on the next input prompt. Use %hist to find\n out the number.\n\n %recall 1-4\n\n Combine the specified lines into one cell, and place it on the next\n input prompt. See %history for the slice syntax.\n\n %recall foo+bar\n\n If foo+bar can be evaluated in the user namespace, the result is\n placed at the next input prompt. 
Otherwise, the history is searched\n for lines which contain that substring, and the most recent one is\n placed at the next input prompt.\n \"\"\"\n if not arg: # Last output\n self.shell.set_next_input(str(self.shell.user_ns[\"_\"]))\n return\n # Get history range\n histlines = self.shell.history_manager.get_range_by_str(arg)\n cmd = \"\\n\".join(x[2] for x in histlines)\n if cmd:\n self.shell.set_next_input(cmd.rstrip())\n return\n\n try: # Variable in user namespace\n cmd = str(eval(arg, self.shell.user_ns))\n except Exception: # Search for term in history\n histlines = self.shell.history_manager.search(\"*\"+arg+\"*\")\n for h in reversed([x[2] for x in histlines]):\n if 'recall' in h or 'rep' in h:\n continue\n self.shell.set_next_input(h.rstrip())\n return\n else:\n self.shell.set_next_input(cmd.rstrip())\n return\n print(\"Couldn't evaluate or find in history:\", arg)\n\n @line_magic\n def rerun(self, parameter_s=''):\n \"\"\"Re-run previous input\n\n By default, you can specify ranges of input history to be repeated\n (as with %history). With no arguments, it will repeat the last line.\n\n Options:\n\n -l <n> : Repeat the last n lines of input, not including the\n current command.\n\n -g foo : Repeat the most recent line which contains foo\n \"\"\"\n opts, args = self.parse_options(parameter_s, 'l:g:', mode='string')\n if \"l\" in opts: # Last n lines\n n = int(opts['l'])\n\n if n == 0:\n print(\"Requested 0 last lines - nothing to run\")\n return\n elif n < 0:\n print(\"Number of lines to rerun cannot be negative\")\n return\n\n hist = self.shell.history_manager.get_tail(n)\n elif \"g\" in opts: # Search\n p = \"*\"+opts['g']+\"*\"\n hist = list(self.shell.history_manager.search(p))\n for l in reversed(hist):\n if \"rerun\" not in l[2]:\n hist = [l] # The last match which isn't a %rerun\n break\n else:\n hist = [] # No matches except %rerun\n elif args: # Specify history ranges\n hist = self.shell.history_manager.get_range_by_str(args)\n else: # Last line\n hist = self.shell.history_manager.get_tail(1)\n hist = [x[2] for x in hist]\n if not hist:\n print(\"No lines in history match specification\")\n return\n histlines = \"\\n\".join(hist)\n print(\"=== Executing: ===\")\n print(histlines)\n print(\"=== Output: ===\")\n self.shell.run_cell(\"\\n\".join(hist), store_history=False)\n", "path": "IPython/core/magics/history.py"}], "after_files": [{"content": "\"\"\"Implementation of magic functions related to History.\n\"\"\"\n#-----------------------------------------------------------------------------\n# Copyright (c) 2012, IPython Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# Stdlib\nimport os\nimport sys\nfrom io import open as io_open\nimport fnmatch\n\n# Our own packages\nfrom IPython.core.error import StdinNotImplementedError\nfrom IPython.core.magic import Magics, magics_class, line_magic\nfrom IPython.core.magic_arguments import (argument, magic_arguments,\n parse_argstring)\nfrom IPython.testing.skipdoctest import skip_doctest\nfrom IPython.utils import io\n\n#-----------------------------------------------------------------------------\n# Magics class 
implementation\n#-----------------------------------------------------------------------------\n\n\n_unspecified = object()\n\n\n@magics_class\nclass HistoryMagics(Magics):\n\n @magic_arguments()\n @argument(\n '-n', dest='print_nums', action='store_true', default=False,\n help=\"\"\"\n print line numbers for each input.\n This feature is only available if numbered prompts are in use.\n \"\"\")\n @argument(\n '-o', dest='get_output', action='store_true', default=False,\n help=\"also print outputs for each input.\")\n @argument(\n '-p', dest='pyprompts', action='store_true', default=False,\n help=\"\"\"\n print classic '>>>' python prompts before each input.\n This is useful for making documentation, and in conjunction\n with -o, for producing doctest-ready output.\n \"\"\")\n @argument(\n '-t', dest='raw', action='store_false', default=True,\n help=\"\"\"\n print the 'translated' history, as IPython understands it.\n IPython filters your input and converts it all into valid Python\n source before executing it (things like magics or aliases are turned\n into function calls, for example). With this option, you'll see the\n native history instead of the user-entered version: '%%cd /' will be\n seen as 'get_ipython().run_line_magic(\"cd\", \"/\")' instead of '%%cd /'.\n \"\"\")\n @argument(\n '-f', dest='filename',\n help=\"\"\"\n FILENAME: instead of printing the output to the screen, redirect\n it to the given file. The file is always overwritten, though *when\n it can*, IPython asks for confirmation first. In particular, running\n the command 'history -f FILENAME' from the IPython Notebook\n interface will replace FILENAME even if it already exists *without*\n confirmation.\n \"\"\")\n @argument(\n '-g', dest='pattern', nargs='*', default=None,\n help=\"\"\"\n treat the arg as a glob pattern to search for in (full) history.\n This includes the saved history (almost all commands ever written).\n The pattern may contain '?' to match one unknown character and '*'\n to match any number of unknown characters. Use '%%hist -g' to show\n full saved history (may be very long).\n \"\"\")\n @argument(\n '-l', dest='limit', type=int, nargs='?', default=_unspecified,\n help=\"\"\"\n get the last n lines from all sessions. Specify n as a single\n arg, or the default is the last 10 lines.\n \"\"\")\n @argument(\n '-u', dest='unique', action='store_true',\n help=\"\"\"\n when searching history using `-g`, show only unique history.\n \"\"\")\n @argument('range', nargs='*')\n @skip_doctest\n @line_magic\n def history(self, parameter_s = ''):\n \"\"\"Print input history (_i<n> variables), with most recent last.\n\n By default, input history is printed without line numbers so it can be\n directly pasted into an editor. 
Use -n to show them.\n\n By default, all input history from the current session is displayed.\n Ranges of history can be indicated using the syntax:\n \n ``4``\n Line 4, current session\n ``4-6``\n Lines 4-6, current session\n ``243/1-5``\n Lines 1-5, session 243\n ``~2/7``\n Line 7, session 2 before current\n ``~8/1-~6/5``\n From the first line of 8 sessions ago, to the fifth line of 6\n sessions ago.\n \n Multiple ranges can be entered, separated by spaces\n\n The same syntax is used by %macro, %save, %edit, %rerun\n\n Examples\n --------\n ::\n\n In [6]: %history -n 4-6\n 4:a = 12\n 5:print a**2\n 6:%history -n 4-6\n\n \"\"\"\n\n args = parse_argstring(self.history, parameter_s)\n\n # For brevity\n history_manager = self.shell.history_manager\n\n def _format_lineno(session, line):\n \"\"\"Helper function to format line numbers properly.\"\"\"\n if session in (0, history_manager.session_number):\n return str(line)\n return \"%s/%s\" % (session, line)\n\n # Check if output to specific file was requested.\n outfname = args.filename\n if not outfname:\n outfile = sys.stdout # default\n # We don't want to close stdout at the end!\n close_at_end = False\n else:\n if os.path.exists(outfname):\n try:\n ans = io.ask_yes_no(\"File %r exists. Overwrite?\" % outfname)\n except StdinNotImplementedError:\n ans = True\n if not ans:\n print('Aborting.')\n return\n print(\"Overwriting file.\")\n outfile = io_open(outfname, 'w', encoding='utf-8')\n close_at_end = True\n\n print_nums = args.print_nums\n get_output = args.get_output\n pyprompts = args.pyprompts\n raw = args.raw\n\n pattern = None\n limit = None if args.limit is _unspecified else args.limit\n\n range_pattern = False\n if args.pattern is not None and not args.range:\n if args.pattern:\n pattern = \"*\" + \" \".join(args.pattern) + \"*\"\n else:\n pattern = \"*\"\n hist = history_manager.search(pattern, raw=raw, output=get_output,\n n=limit, unique=args.unique)\n print_nums = True\n elif args.limit is not _unspecified:\n n = 10 if limit is None else limit\n hist = history_manager.get_tail(n, raw=raw, output=get_output)\n else:\n if args.pattern:\n range_pattern = \"*\" + \" \".join(args.pattern) + \"*\"\n print_nums = True\n hist = history_manager.get_range_by_str(\n \" \".join(args.range), raw, get_output\n )\n\n # We could be displaying the entire history, so let's not try to pull\n # it into a list in memory. Anything that needs more space will just\n # misalign.\n width = 4\n\n for session, lineno, inline in hist:\n # Print user history with tabs expanded to 4 spaces. The GUI\n # clients use hard tabs for easier usability in auto-indented code,\n # but we want to produce PEP-8 compliant history for safe pasting\n # into an editor.\n if get_output:\n inline, output = inline\n if range_pattern:\n if not fnmatch.fnmatch(inline, range_pattern):\n continue\n inline = inline.expandtabs(4).rstrip()\n\n multiline = \"\\n\" in inline\n line_sep = '\\n' if multiline else ' '\n if print_nums:\n print(u'%s:%s' % (_format_lineno(session, lineno).rjust(width),\n line_sep), file=outfile, end=u'')\n if pyprompts:\n print(u\">>> \", end=u\"\", file=outfile)\n if multiline:\n inline = \"\\n... 
\".join(inline.splitlines()) + \"\\n...\"\n print(inline, file=outfile)\n if get_output and output:\n print(output, file=outfile)\n\n if close_at_end:\n outfile.close()\n\n @line_magic\n def recall(self, arg):\n r\"\"\"Repeat a command, or get command to input line for editing.\n\n %recall and %rep are equivalent.\n\n - %recall (no arguments):\n\n Place a string version of last computation result (stored in the\n special '_' variable) to the next input prompt. Allows you to create\n elaborate command lines without using copy-paste::\n\n In[1]: l = [\"hei\", \"vaan\"]\n In[2]: \"\".join(l)\n Out[2]: heivaan\n In[3]: %recall\n In[4]: heivaan_ <== cursor blinking\n\n %recall 45\n\n Place history line 45 on the next input prompt. Use %hist to find\n out the number.\n\n %recall 1-4\n\n Combine the specified lines into one cell, and place it on the next\n input prompt. See %history for the slice syntax.\n\n %recall foo+bar\n\n If foo+bar can be evaluated in the user namespace, the result is\n placed at the next input prompt. Otherwise, the history is searched\n for lines which contain that substring, and the most recent one is\n placed at the next input prompt.\n \"\"\"\n if not arg: # Last output\n self.shell.set_next_input(str(self.shell.user_ns[\"_\"]))\n return\n # Get history range\n histlines = self.shell.history_manager.get_range_by_str(arg)\n cmd = \"\\n\".join(x[2] for x in histlines)\n if cmd:\n self.shell.set_next_input(cmd.rstrip())\n return\n\n try: # Variable in user namespace\n cmd = str(eval(arg, self.shell.user_ns))\n except Exception: # Search for term in history\n histlines = self.shell.history_manager.search(\"*\"+arg+\"*\")\n for h in reversed([x[2] for x in histlines]):\n if 'recall' in h or 'rep' in h:\n continue\n self.shell.set_next_input(h.rstrip())\n return\n else:\n self.shell.set_next_input(cmd.rstrip())\n return\n print(\"Couldn't evaluate or find in history:\", arg)\n\n @line_magic\n def rerun(self, parameter_s=''):\n \"\"\"Re-run previous input\n\n By default, you can specify ranges of input history to be repeated\n (as with %history). With no arguments, it will repeat the last line.\n\n Options:\n\n -l <n> : Repeat the last n lines of input, not including the\n current command.\n\n -g foo : Repeat the most recent line which contains foo\n \"\"\"\n opts, args = self.parse_options(parameter_s, 'l:g:', mode='string')\n if \"l\" in opts: # Last n lines\n try:\n n = int(opts[\"l\"])\n except ValueError:\n print(\"Number of lines must be an integer\")\n return\n\n if n == 0:\n print(\"Requested 0 last lines - nothing to run\")\n return\n elif n < 0:\n print(\"Number of lines to rerun cannot be negative\")\n return\n\n hist = self.shell.history_manager.get_tail(n)\n elif \"g\" in opts: # Search\n p = \"*\"+opts['g']+\"*\"\n hist = list(self.shell.history_manager.search(p))\n for l in reversed(hist):\n if \"rerun\" not in l[2]:\n hist = [l] # The last match which isn't a %rerun\n break\n else:\n hist = [] # No matches except %rerun\n elif args: # Specify history ranges\n hist = self.shell.history_manager.get_range_by_str(args)\n else: # Last line\n hist = self.shell.history_manager.get_tail(1)\n hist = [x[2] for x in hist]\n if not hist:\n print(\"No lines in history match specification\")\n return\n histlines = \"\\n\".join(hist)\n print(\"=== Executing: ===\")\n print(histlines)\n print(\"=== Output: ===\")\n self.shell.run_cell(\"\\n\".join(hist), store_history=False)\n", "path": "IPython/core/magics/history.py"}]}
3,995
157
gh_patches_debug_29586
rasdani/github-patches
git_diff
blaze__blaze-1114
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- dask test failure it seems `atop` is using an older dask API ``` ================================================================================= FAILURES ================================================================================== ____________________________________________________________________________ test_compute[expr0] ____________________________________________________________________________ blaze/compute/tests/test_dask.py:69: in test_compute result = compute(expr, dask_ns) ../../../../miniconda/envs/py27/lib/python2.7/site-packages/multipledispatch/dispatcher.py:163: in __call__ return func(*args, **kwargs) blaze/compute/core.py:470: in compute result = top_then_bottom_then_top_again_etc(expr3, d4, **kwargs) blaze/compute/core.py:164: in top_then_bottom_then_top_again_etc expr2, scope2 = bottom_up_until_type_break(expr, scope, **kwargs) blaze/compute/core.py:371: in bottom_up_until_type_break **kwargs)} ../../../../miniconda/envs/py27/lib/python2.7/site-packages/multipledispatch/dispatcher.py:163: in __call__ return func(*args, **kwargs) blaze/compute/dask.py:40: in compute_broadcast *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data)) ../../../../code/py/dask/dask/array/core.py:1099: in atop numblocks = dict([(a.name, a.numblocks) for a, ind in arginds]) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `blaze/compute/dask.py` Content: ``` 1 from __future__ import absolute_import, division, print_function 2 3 from numbers import Number 4 from toolz import concat, first, curry, compose 5 from datashape import DataShape 6 7 from blaze import compute, ndim 8 from blaze.dispatch import dispatch 9 from blaze.compute.core import compute_up, optimize 10 from blaze.expr import (ElemWise, symbol, Reduction, Transpose, TensorDot, 11 Expr, Slice, Broadcast) 12 from blaze.expr.split import split 13 14 from dask.array.core import (_concatenate2, Array, atop, names, transpose, 15 tensordot) 16 17 18 def compute_it(expr, leaves, *data, **kwargs): 19 kwargs.pop('scope') 20 return compute(expr, dict(zip(leaves, data)), **kwargs) 21 22 23 def elemwise_array(expr, *data, **kwargs): 24 leaves = expr._inputs 25 expr_inds = tuple(range(ndim(expr)))[::-1] 26 return atop(curry(compute_it, expr, leaves, **kwargs), 27 next(names), expr_inds, 28 *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data)) 29 30 31 try: 32 from blaze.compute.numba import (get_numba_ufunc, broadcast_collect, 33 Broadcastable) 34 35 def compute_broadcast(expr, *data, **kwargs): 36 expr_inds = tuple(range(ndim(expr)))[::-1] 37 func = get_numba_ufunc(expr) 38 return atop(func, 39 next(names), expr_inds, 40 *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data)) 41 42 def optimize_array(expr, *data): 43 return broadcast_collect(expr, Broadcastable=Broadcastable, 44 WantToBroadcast=Broadcastable) 45 46 for i in range(5): 47 compute_up.register(Broadcast, *([(Array, Number)] * i))(compute_broadcast) 48 optimize.register(Expr, *([(Array, Number)] * i))(optimize_array) 49 50 except ImportError: 51 pass 52 53 54 for i in range(5): 55 compute_up.register(ElemWise, *([Array] * i))(elemwise_array) 56 57 58 @dispatch(Reduction, Array) 59 def compute_up(expr, data, **kwargs): 60 leaf = expr._leaves()[0] 61 chunk = symbol('chunk', DataShape(*(tuple(map(first, data.chunks)) + 62 
(leaf.dshape.measure,)))) 63 (chunk, chunk_expr), (agg, agg_expr) = split(expr._child, expr, 64 chunk=chunk) 65 66 inds = tuple(range(ndim(leaf))) 67 tmp = atop(curry(compute_it, chunk_expr, [chunk], **kwargs), 68 next(names), inds, 69 data, inds) 70 71 return atop(compose(curry(compute_it, agg_expr, [agg], **kwargs), 72 curry(_concatenate2, axes=expr.axis)), 73 next(names), tuple(i for i in inds if i not in expr.axis), 74 tmp, inds) 75 76 77 @dispatch(Transpose, Array) 78 def compute_up(expr, data, **kwargs): 79 return transpose(data, expr.axes) 80 81 82 @dispatch(TensorDot, Array, Array) 83 def compute_up(expr, lhs, rhs, **kwargs): 84 return tensordot(lhs, rhs, (expr._left_axes, expr._right_axes)) 85 86 87 @dispatch(Slice, Array) 88 def compute_up(expr, data, **kwargs): 89 return data[expr.index] 90 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/blaze/compute/dask.py b/blaze/compute/dask.py --- a/blaze/compute/dask.py +++ b/blaze/compute/dask.py @@ -24,7 +24,7 @@ leaves = expr._inputs expr_inds = tuple(range(ndim(expr)))[::-1] return atop(curry(compute_it, expr, leaves, **kwargs), - next(names), expr_inds, + expr_inds, *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data)) @@ -36,7 +36,7 @@ expr_inds = tuple(range(ndim(expr)))[::-1] func = get_numba_ufunc(expr) return atop(func, - next(names), expr_inds, + expr_inds, *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data)) def optimize_array(expr, *data): @@ -64,13 +64,12 @@ chunk=chunk) inds = tuple(range(ndim(leaf))) - tmp = atop(curry(compute_it, chunk_expr, [chunk], **kwargs), - next(names), inds, - data, inds) + tmp = atop(curry(compute_it, chunk_expr, [chunk], **kwargs), inds, data, + inds) return atop(compose(curry(compute_it, agg_expr, [agg], **kwargs), curry(_concatenate2, axes=expr.axis)), - next(names), tuple(i for i in inds if i not in expr.axis), + tuple(i for i in inds if i not in expr.axis), tmp, inds)
{"golden_diff": "diff --git a/blaze/compute/dask.py b/blaze/compute/dask.py\n--- a/blaze/compute/dask.py\n+++ b/blaze/compute/dask.py\n@@ -24,7 +24,7 @@\n leaves = expr._inputs\n expr_inds = tuple(range(ndim(expr)))[::-1]\n return atop(curry(compute_it, expr, leaves, **kwargs),\n- next(names), expr_inds,\n+ expr_inds,\n *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))\n \n \n@@ -36,7 +36,7 @@\n expr_inds = tuple(range(ndim(expr)))[::-1]\n func = get_numba_ufunc(expr)\n return atop(func,\n- next(names), expr_inds,\n+ expr_inds,\n *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))\n \n def optimize_array(expr, *data):\n@@ -64,13 +64,12 @@\n chunk=chunk)\n \n inds = tuple(range(ndim(leaf)))\n- tmp = atop(curry(compute_it, chunk_expr, [chunk], **kwargs),\n- next(names), inds,\n- data, inds)\n+ tmp = atop(curry(compute_it, chunk_expr, [chunk], **kwargs), inds, data,\n+ inds)\n \n return atop(compose(curry(compute_it, agg_expr, [agg], **kwargs),\n curry(_concatenate2, axes=expr.axis)),\n- next(names), tuple(i for i in inds if i not in expr.axis),\n+ tuple(i for i in inds if i not in expr.axis),\n tmp, inds)\n", "issue": "dask test failure\nit seems `atop` is using an older dask API\n\n```\n================================================================================= FAILURES ==================================================================================\n____________________________________________________________________________ test_compute[expr0] ____________________________________________________________________________\nblaze/compute/tests/test_dask.py:69: in test_compute\n result = compute(expr, dask_ns)\n../../../../miniconda/envs/py27/lib/python2.7/site-packages/multipledispatch/dispatcher.py:163: in __call__\n return func(*args, **kwargs)\nblaze/compute/core.py:470: in compute\n result = top_then_bottom_then_top_again_etc(expr3, d4, **kwargs)\nblaze/compute/core.py:164: in top_then_bottom_then_top_again_etc\n expr2, scope2 = bottom_up_until_type_break(expr, scope, **kwargs)\nblaze/compute/core.py:371: in bottom_up_until_type_break\n **kwargs)}\n../../../../miniconda/envs/py27/lib/python2.7/site-packages/multipledispatch/dispatcher.py:163: in __call__\n return func(*args, **kwargs)\nblaze/compute/dask.py:40: in compute_broadcast\n *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))\n../../../../code/py/dask/dask/array/core.py:1099: in atop\n numblocks = dict([(a.name, a.numblocks) for a, ind in arginds])\n```\n\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nfrom numbers import Number\nfrom toolz import concat, first, curry, compose\nfrom datashape import DataShape\n\nfrom blaze import compute, ndim\nfrom blaze.dispatch import dispatch\nfrom blaze.compute.core import compute_up, optimize\nfrom blaze.expr import (ElemWise, symbol, Reduction, Transpose, TensorDot,\n Expr, Slice, Broadcast)\nfrom blaze.expr.split import split\n\nfrom dask.array.core import (_concatenate2, Array, atop, names, transpose,\n tensordot)\n\n\ndef compute_it(expr, leaves, *data, **kwargs):\n kwargs.pop('scope')\n return compute(expr, dict(zip(leaves, data)), **kwargs)\n\n\ndef elemwise_array(expr, *data, **kwargs):\n leaves = expr._inputs\n expr_inds = tuple(range(ndim(expr)))[::-1]\n return atop(curry(compute_it, expr, leaves, **kwargs),\n next(names), expr_inds,\n *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))\n\n\ntry:\n from blaze.compute.numba import (get_numba_ufunc, broadcast_collect,\n 
Broadcastable)\n\n def compute_broadcast(expr, *data, **kwargs):\n expr_inds = tuple(range(ndim(expr)))[::-1]\n func = get_numba_ufunc(expr)\n return atop(func,\n next(names), expr_inds,\n *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))\n\n def optimize_array(expr, *data):\n return broadcast_collect(expr, Broadcastable=Broadcastable,\n WantToBroadcast=Broadcastable)\n\n for i in range(5):\n compute_up.register(Broadcast, *([(Array, Number)] * i))(compute_broadcast)\n optimize.register(Expr, *([(Array, Number)] * i))(optimize_array)\n\nexcept ImportError:\n pass\n\n\nfor i in range(5):\n compute_up.register(ElemWise, *([Array] * i))(elemwise_array)\n\n\n@dispatch(Reduction, Array)\ndef compute_up(expr, data, **kwargs):\n leaf = expr._leaves()[0]\n chunk = symbol('chunk', DataShape(*(tuple(map(first, data.chunks)) +\n (leaf.dshape.measure,))))\n (chunk, chunk_expr), (agg, agg_expr) = split(expr._child, expr,\n chunk=chunk)\n\n inds = tuple(range(ndim(leaf)))\n tmp = atop(curry(compute_it, chunk_expr, [chunk], **kwargs),\n next(names), inds,\n data, inds)\n\n return atop(compose(curry(compute_it, agg_expr, [agg], **kwargs),\n curry(_concatenate2, axes=expr.axis)),\n next(names), tuple(i for i in inds if i not in expr.axis),\n tmp, inds)\n\n\n@dispatch(Transpose, Array)\ndef compute_up(expr, data, **kwargs):\n return transpose(data, expr.axes)\n\n\n@dispatch(TensorDot, Array, Array)\ndef compute_up(expr, lhs, rhs, **kwargs):\n return tensordot(lhs, rhs, (expr._left_axes, expr._right_axes))\n\n\n@dispatch(Slice, Array)\ndef compute_up(expr, data, **kwargs):\n return data[expr.index]\n", "path": "blaze/compute/dask.py"}], "after_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nfrom numbers import Number\nfrom toolz import concat, first, curry, compose\nfrom datashape import DataShape\n\nfrom blaze import compute, ndim\nfrom blaze.dispatch import dispatch\nfrom blaze.compute.core import compute_up, optimize\nfrom blaze.expr import (ElemWise, symbol, Reduction, Transpose, TensorDot,\n Expr, Slice, Broadcast)\nfrom blaze.expr.split import split\n\nfrom dask.array.core import (_concatenate2, Array, atop, names, transpose,\n tensordot)\n\n\ndef compute_it(expr, leaves, *data, **kwargs):\n kwargs.pop('scope')\n return compute(expr, dict(zip(leaves, data)), **kwargs)\n\n\ndef elemwise_array(expr, *data, **kwargs):\n leaves = expr._inputs\n expr_inds = tuple(range(ndim(expr)))[::-1]\n return atop(curry(compute_it, expr, leaves, **kwargs),\n expr_inds,\n *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))\n\n\ntry:\n from blaze.compute.numba import (get_numba_ufunc, broadcast_collect,\n Broadcastable)\n\n def compute_broadcast(expr, *data, **kwargs):\n expr_inds = tuple(range(ndim(expr)))[::-1]\n func = get_numba_ufunc(expr)\n return atop(func,\n expr_inds,\n *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))\n\n def optimize_array(expr, *data):\n return broadcast_collect(expr, Broadcastable=Broadcastable,\n WantToBroadcast=Broadcastable)\n\n for i in range(5):\n compute_up.register(Broadcast, *([(Array, Number)] * i))(compute_broadcast)\n optimize.register(Expr, *([(Array, Number)] * i))(optimize_array)\n\nexcept ImportError:\n pass\n\n\nfor i in range(5):\n compute_up.register(ElemWise, *([Array] * i))(elemwise_array)\n\n\n@dispatch(Reduction, Array)\ndef compute_up(expr, data, **kwargs):\n leaf = expr._leaves()[0]\n chunk = symbol('chunk', DataShape(*(tuple(map(first, data.chunks)) +\n (leaf.dshape.measure,))))\n (chunk, 
chunk_expr), (agg, agg_expr) = split(expr._child, expr,\n chunk=chunk)\n\n inds = tuple(range(ndim(leaf)))\n tmp = atop(curry(compute_it, chunk_expr, [chunk], **kwargs), inds, data,\n inds)\n\n return atop(compose(curry(compute_it, agg_expr, [agg], **kwargs),\n curry(_concatenate2, axes=expr.axis)),\n tuple(i for i in inds if i not in expr.axis),\n tmp, inds)\n\n\n@dispatch(Transpose, Array)\ndef compute_up(expr, data, **kwargs):\n return transpose(data, expr.axes)\n\n\n@dispatch(TensorDot, Array, Array)\ndef compute_up(expr, lhs, rhs, **kwargs):\n return tensordot(lhs, rhs, (expr._left_axes, expr._right_axes))\n\n\n@dispatch(Slice, Array)\ndef compute_up(expr, data, **kwargs):\n return data[expr.index]\n", "path": "blaze/compute/dask.py"}]}
1,478
370
gh_patches_debug_42806
rasdani/github-patches
git_diff
saleor__saleor-2738
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Create menu items in menuCreate mutation ### What I'm trying to achieve It would be very handy if API user would be able to create the first level of menu items at the menu creation moment. This way we could reduce the number of database queries needed to create a menu and speed up the whole process. ### Describe a proposed solution Change `MenuInput` signature to ``` type MenuInput { name: String! items: [MenuItemCreate!] } ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `saleor/graphql/menu/mutations.py` Content: ``` 1 import graphene 2 from graphql_jwt.decorators import permission_required 3 4 from ...menu import models 5 from ..core.mutations import BaseMutation, ModelDeleteMutation, ModelMutation 6 from .types import Menu 7 8 9 class MenuInput(graphene.InputObjectType): 10 name = graphene.String(description='Name of the menu.') 11 12 13 class MenuItemInput(graphene.InputObjectType): 14 menu = graphene.ID( 15 description='Menu to which item belongs to.', name='menu') 16 name = graphene.String(description='Name of the menu item.') 17 parent = graphene.ID( 18 description=''' 19 ID of the parent menu. If empty, menu will be top level 20 menu.''', 21 name='parent') 22 url = graphene.String(description='URL of the pointed item.') 23 category = graphene.ID( 24 description='Category to which item points.', name='category') 25 collection = graphene.ID( 26 description='Collection to which item points.', name='collection') 27 page = graphene.ID( 28 description='Page to which item points.', name='page') 29 30 31 class MenuCreate(ModelMutation): 32 class Arguments: 33 input = MenuInput( 34 required=True, 35 description='Fields required to create a menu.') 36 37 class Meta: 38 description = 'Creates a new Menu' 39 model = models.Menu 40 41 @classmethod 42 def user_is_allowed(cls, user, input): 43 return user.has_perm('menu.manage_menus') 44 45 46 class MenuUpdate(ModelMutation): 47 class Arguments: 48 id = graphene.ID( 49 required=True, description='ID of a menu to update.') 50 input = MenuInput( 51 required=True, 52 description='Fields required to update a menu.') 53 54 class Meta: 55 description = 'Updates a menu.' 56 model = models.Menu 57 58 @classmethod 59 def user_is_allowed(cls, user, input): 60 return user.has_perm('menu.manage_menus') 61 62 63 class MenuDelete(ModelDeleteMutation): 64 class Arguments: 65 id = graphene.ID( 66 required=True, description='ID of a menu to delete.') 67 68 class Meta: 69 description = 'Deletes a menu.' 70 model = models.Menu 71 72 @classmethod 73 def user_is_allowed(cls, user, input): 74 return user.has_perm('menu.manage_menus') 75 76 77 class MenuItemCreate(ModelMutation): 78 class Arguments: 79 input = MenuItemInput( 80 required=True, 81 description="""Fields required to update a menu item. 
82 Only one of 'url', 'category', 'page', 'collection' is allowed 83 per item""") 84 85 class Meta: 86 description = 'Creates a new Menu' 87 model = models.MenuItem 88 89 @classmethod 90 def user_is_allowed(cls, user, input): 91 return user.has_perm('menu.manage_menus') 92 93 @classmethod 94 def clean_input(cls, info, instance, input, errors): 95 cleaned_input = super().clean_input(info, instance, input, errors) 96 items = [ 97 cleaned_input.get('page'), cleaned_input.get('collection'), 98 cleaned_input.get('url'), cleaned_input.get('category')] 99 items = [item for item in items if item is not None] 100 if len(items) > 1: 101 cls.add_error( 102 errors=errors, 103 field='items', message='More than one item provided.') 104 return cleaned_input 105 106 107 class MenuItemUpdate(MenuItemCreate): 108 class Arguments: 109 id = graphene.ID( 110 required=True, description='ID of a menu item to update.') 111 input = MenuItemInput( 112 required=True, 113 description="""Fields required to update a menu item. 114 Only one of 'url', 'category', 'page', 'collection' is allowed 115 per item""") 116 117 class Meta: 118 description = 'Updates a menu item.' 119 model = models.MenuItem 120 121 @classmethod 122 def user_is_allowed(cls, user, input): 123 return user.has_perm('menu.manage_menus') 124 125 @classmethod 126 def construct_instance(cls, instance, cleaned_data): 127 # Only one item can be assigned per menu item 128 instance.page = None 129 instance.collection = None 130 instance.category = None 131 instance.url = None 132 return super().construct_instance(instance, cleaned_data) 133 134 135 class MenuItemDelete(ModelDeleteMutation): 136 class Arguments: 137 id = graphene.ID( 138 required=True, description='ID of a menu item to delete.') 139 140 class Meta: 141 description = 'Deletes a menu item.' 142 model = models.MenuItem 143 144 @classmethod 145 def user_is_allowed(cls, user, input): 146 return user.has_perm('menu.manage_menus') 147 148 149 class NavigationType(graphene.Enum): 150 MAIN = 'main' 151 SECONDARY = 'secondary' 152 153 @property 154 def description(self): 155 if self == NavigationType.MAIN: 156 return 'Main storefront\'s navigation.' 157 return 'Secondary storefront\'s navigation.' 158 159 160 class AssignNavigation(BaseMutation): 161 menu = graphene.Field(Menu, description='Assigned navigation menu.') 162 163 class Arguments: 164 menu = graphene.ID( 165 description='ID of the menu.') 166 navigation_type = NavigationType( 167 description='Type of the navigation bar to assign the menu to.', 168 required=True) 169 170 class Meta: 171 description = 'Assigns storefront\'s navigation menus.' 
172 173 @classmethod 174 @permission_required(['menu.manage_menus', 'site.manage_settings']) 175 def mutate(cls, root, info, navigation_type, menu=None): 176 errors = [] 177 site_settings = info.context.site.settings 178 if menu is not None: 179 menu = cls.get_node_or_error( 180 info, menu, errors=errors, field='menu') 181 if not errors: 182 if navigation_type == NavigationType.MAIN: 183 site_settings.top_menu = menu 184 site_settings.save(update_fields=['top_menu']) 185 elif navigation_type == NavigationType.SECONDARY: 186 site_settings.bottom_menu = menu 187 site_settings.save(update_fields=['bottom_menu']) 188 else: 189 raise AssertionError( 190 'Unknown navigation type: %s' % navigation_type) 191 return AssignNavigation(menu=menu, errors=errors) 192 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/saleor/graphql/menu/mutations.py b/saleor/graphql/menu/mutations.py --- a/saleor/graphql/menu/mutations.py +++ b/saleor/graphql/menu/mutations.py @@ -3,22 +3,13 @@ from ...menu import models from ..core.mutations import BaseMutation, ModelDeleteMutation, ModelMutation +from ..product.types import Category, Collection +from ..page.types import Page from .types import Menu -class MenuInput(graphene.InputObjectType): - name = graphene.String(description='Name of the menu.') - - class MenuItemInput(graphene.InputObjectType): - menu = graphene.ID( - description='Menu to which item belongs to.', name='menu') name = graphene.String(description='Name of the menu item.') - parent = graphene.ID( - description=''' - ID of the parent menu. If empty, menu will be top level - menu.''', - name='parent') url = graphene.String(description='URL of the pointed item.') category = graphene.ID( description='Category to which item points.', name='category') @@ -28,9 +19,28 @@ description='Page to which item points.', name='page') +class MenuItemCreateInput(MenuItemInput): + menu = graphene.ID( + description='Menu to which item belongs to.', name='menu') + parent = graphene.ID( + description=''' + ID of the parent menu. If empty, menu will be top level + menu.''', + name='parent') + + +class MenuInput(graphene.InputObjectType): + name = graphene.String(description='Name of the menu.') + + +class MenuCreateInput(MenuInput): + items = graphene.List( + MenuItemInput, description='List of menu items.') + + class MenuCreate(ModelMutation): class Arguments: - input = MenuInput( + input = MenuCreateInput( required=True, description='Fields required to create a menu.') @@ -42,6 +52,45 @@ def user_is_allowed(cls, user, input): return user.has_perm('menu.manage_menus') + @classmethod + def clean_input(cls, info, instance, input, errors): + cleaned_input = super().clean_input(info, instance, input, errors) + items = [] + for item in cleaned_input.get('items', []): + category = item.get('category') + collection = item.get('collection') + page = item.get('page') + url = item.get('url') + if len([i for i in [category, collection, page, url] if i]) > 1: + cls.add_error( + errors, 'items', 'More than one item provided.') + else: + if category: + category = cls.get_node_or_error( + info, category, errors, 'items', only_type=Category) + item['category'] = category + elif collection: + collection = cls.get_node_or_error( + info, collection, errors, 'items', + only_type=Collection) + item['collection'] = collection + elif page: + page = cls.get_node_or_error( + info, page, errors, 'items', only_type=Page) + item['page'] = page + elif not url: + cls.add_error(errors, 'items', 'No menu item provided.') + items.append(item) + cleaned_input['items'] = items + return cleaned_input + + @classmethod + def _save_m2m(cls, info, instance, cleaned_data): + super()._save_m2m(info, instance, cleaned_data) + items = cleaned_data.get('items', []) + for item in items: + instance.items.create(**item) + class MenuUpdate(ModelMutation): class Arguments: @@ -76,7 +125,7 @@ class MenuItemCreate(ModelMutation): class Arguments: - input = MenuItemInput( + input = MenuItemCreateInput( required=True, description="""Fields required to update a menu item. Only one of 'url', 'category', 'page', 'collection' is allowed
{"golden_diff": "diff --git a/saleor/graphql/menu/mutations.py b/saleor/graphql/menu/mutations.py\n--- a/saleor/graphql/menu/mutations.py\n+++ b/saleor/graphql/menu/mutations.py\n@@ -3,22 +3,13 @@\n \n from ...menu import models\n from ..core.mutations import BaseMutation, ModelDeleteMutation, ModelMutation\n+from ..product.types import Category, Collection\n+from ..page.types import Page\n from .types import Menu\n \n \n-class MenuInput(graphene.InputObjectType):\n- name = graphene.String(description='Name of the menu.')\n-\n-\n class MenuItemInput(graphene.InputObjectType):\n- menu = graphene.ID(\n- description='Menu to which item belongs to.', name='menu')\n name = graphene.String(description='Name of the menu item.')\n- parent = graphene.ID(\n- description='''\n- ID of the parent menu. If empty, menu will be top level\n- menu.''',\n- name='parent')\n url = graphene.String(description='URL of the pointed item.')\n category = graphene.ID(\n description='Category to which item points.', name='category')\n@@ -28,9 +19,28 @@\n description='Page to which item points.', name='page')\n \n \n+class MenuItemCreateInput(MenuItemInput):\n+ menu = graphene.ID(\n+ description='Menu to which item belongs to.', name='menu')\n+ parent = graphene.ID(\n+ description='''\n+ ID of the parent menu. If empty, menu will be top level\n+ menu.''',\n+ name='parent')\n+\n+\n+class MenuInput(graphene.InputObjectType):\n+ name = graphene.String(description='Name of the menu.')\n+\n+\n+class MenuCreateInput(MenuInput):\n+ items = graphene.List(\n+ MenuItemInput, description='List of menu items.')\n+\n+\n class MenuCreate(ModelMutation):\n class Arguments:\n- input = MenuInput(\n+ input = MenuCreateInput(\n required=True,\n description='Fields required to create a menu.')\n \n@@ -42,6 +52,45 @@\n def user_is_allowed(cls, user, input):\n return user.has_perm('menu.manage_menus')\n \n+ @classmethod\n+ def clean_input(cls, info, instance, input, errors):\n+ cleaned_input = super().clean_input(info, instance, input, errors)\n+ items = []\n+ for item in cleaned_input.get('items', []):\n+ category = item.get('category')\n+ collection = item.get('collection')\n+ page = item.get('page')\n+ url = item.get('url')\n+ if len([i for i in [category, collection, page, url] if i]) > 1:\n+ cls.add_error(\n+ errors, 'items', 'More than one item provided.')\n+ else:\n+ if category:\n+ category = cls.get_node_or_error(\n+ info, category, errors, 'items', only_type=Category)\n+ item['category'] = category\n+ elif collection:\n+ collection = cls.get_node_or_error(\n+ info, collection, errors, 'items',\n+ only_type=Collection)\n+ item['collection'] = collection\n+ elif page:\n+ page = cls.get_node_or_error(\n+ info, page, errors, 'items', only_type=Page)\n+ item['page'] = page\n+ elif not url:\n+ cls.add_error(errors, 'items', 'No menu item provided.')\n+ items.append(item)\n+ cleaned_input['items'] = items\n+ return cleaned_input\n+\n+ @classmethod\n+ def _save_m2m(cls, info, instance, cleaned_data):\n+ super()._save_m2m(info, instance, cleaned_data)\n+ items = cleaned_data.get('items', [])\n+ for item in items:\n+ instance.items.create(**item)\n+\n \n class MenuUpdate(ModelMutation):\n class Arguments:\n@@ -76,7 +125,7 @@\n \n class MenuItemCreate(ModelMutation):\n class Arguments:\n- input = MenuItemInput(\n+ input = MenuItemCreateInput(\n required=True,\n description=\"\"\"Fields required to update a menu item.\n Only one of 'url', 'category', 'page', 'collection' is allowed\n", "issue": "Create menu items in menuCreate mutation\n### 
What I'm trying to achieve\r\nIt would be very handy if API user would be able to create the first level of menu items at the menu creation moment. This way we could reduce the number of database queries needed to create a menu and speed up the whole process. \r\n\r\n### Describe a proposed solution\r\nChange `MenuInput` signature to\r\n```\r\ntype MenuInput {\r\n name: String!\r\n items: [MenuItemCreate!]\r\n}\r\n```\r\n\n", "before_files": [{"content": "import graphene\nfrom graphql_jwt.decorators import permission_required\n\nfrom ...menu import models\nfrom ..core.mutations import BaseMutation, ModelDeleteMutation, ModelMutation\nfrom .types import Menu\n\n\nclass MenuInput(graphene.InputObjectType):\n name = graphene.String(description='Name of the menu.')\n\n\nclass MenuItemInput(graphene.InputObjectType):\n menu = graphene.ID(\n description='Menu to which item belongs to.', name='menu')\n name = graphene.String(description='Name of the menu item.')\n parent = graphene.ID(\n description='''\n ID of the parent menu. If empty, menu will be top level\n menu.''',\n name='parent')\n url = graphene.String(description='URL of the pointed item.')\n category = graphene.ID(\n description='Category to which item points.', name='category')\n collection = graphene.ID(\n description='Collection to which item points.', name='collection')\n page = graphene.ID(\n description='Page to which item points.', name='page')\n\n\nclass MenuCreate(ModelMutation):\n class Arguments:\n input = MenuInput(\n required=True,\n description='Fields required to create a menu.')\n\n class Meta:\n description = 'Creates a new Menu'\n model = models.Menu\n\n @classmethod\n def user_is_allowed(cls, user, input):\n return user.has_perm('menu.manage_menus')\n\n\nclass MenuUpdate(ModelMutation):\n class Arguments:\n id = graphene.ID(\n required=True, description='ID of a menu to update.')\n input = MenuInput(\n required=True,\n description='Fields required to update a menu.')\n\n class Meta:\n description = 'Updates a menu.'\n model = models.Menu\n\n @classmethod\n def user_is_allowed(cls, user, input):\n return user.has_perm('menu.manage_menus')\n\n\nclass MenuDelete(ModelDeleteMutation):\n class Arguments:\n id = graphene.ID(\n required=True, description='ID of a menu to delete.')\n\n class Meta:\n description = 'Deletes a menu.'\n model = models.Menu\n\n @classmethod\n def user_is_allowed(cls, user, input):\n return user.has_perm('menu.manage_menus')\n\n\nclass MenuItemCreate(ModelMutation):\n class Arguments:\n input = MenuItemInput(\n required=True,\n description=\"\"\"Fields required to update a menu item.\n Only one of 'url', 'category', 'page', 'collection' is allowed\n per item\"\"\")\n\n class Meta:\n description = 'Creates a new Menu'\n model = models.MenuItem\n\n @classmethod\n def user_is_allowed(cls, user, input):\n return user.has_perm('menu.manage_menus')\n\n @classmethod\n def clean_input(cls, info, instance, input, errors):\n cleaned_input = super().clean_input(info, instance, input, errors)\n items = [\n cleaned_input.get('page'), cleaned_input.get('collection'),\n cleaned_input.get('url'), cleaned_input.get('category')]\n items = [item for item in items if item is not None]\n if len(items) > 1:\n cls.add_error(\n errors=errors,\n field='items', message='More than one item provided.')\n return cleaned_input\n\n\nclass MenuItemUpdate(MenuItemCreate):\n class Arguments:\n id = graphene.ID(\n required=True, description='ID of a menu item to update.')\n input = MenuItemInput(\n required=True,\n 
description=\"\"\"Fields required to update a menu item.\n Only one of 'url', 'category', 'page', 'collection' is allowed\n per item\"\"\")\n\n class Meta:\n description = 'Updates a menu item.'\n model = models.MenuItem\n\n @classmethod\n def user_is_allowed(cls, user, input):\n return user.has_perm('menu.manage_menus')\n\n @classmethod\n def construct_instance(cls, instance, cleaned_data):\n # Only one item can be assigned per menu item\n instance.page = None\n instance.collection = None\n instance.category = None\n instance.url = None\n return super().construct_instance(instance, cleaned_data)\n\n\nclass MenuItemDelete(ModelDeleteMutation):\n class Arguments:\n id = graphene.ID(\n required=True, description='ID of a menu item to delete.')\n\n class Meta:\n description = 'Deletes a menu item.'\n model = models.MenuItem\n\n @classmethod\n def user_is_allowed(cls, user, input):\n return user.has_perm('menu.manage_menus')\n\n\nclass NavigationType(graphene.Enum):\n MAIN = 'main'\n SECONDARY = 'secondary'\n\n @property\n def description(self):\n if self == NavigationType.MAIN:\n return 'Main storefront\\'s navigation.'\n return 'Secondary storefront\\'s navigation.'\n\n\nclass AssignNavigation(BaseMutation):\n menu = graphene.Field(Menu, description='Assigned navigation menu.')\n\n class Arguments:\n menu = graphene.ID(\n description='ID of the menu.')\n navigation_type = NavigationType(\n description='Type of the navigation bar to assign the menu to.',\n required=True)\n\n class Meta:\n description = 'Assigns storefront\\'s navigation menus.'\n\n @classmethod\n @permission_required(['menu.manage_menus', 'site.manage_settings'])\n def mutate(cls, root, info, navigation_type, menu=None):\n errors = []\n site_settings = info.context.site.settings\n if menu is not None:\n menu = cls.get_node_or_error(\n info, menu, errors=errors, field='menu')\n if not errors:\n if navigation_type == NavigationType.MAIN:\n site_settings.top_menu = menu\n site_settings.save(update_fields=['top_menu'])\n elif navigation_type == NavigationType.SECONDARY:\n site_settings.bottom_menu = menu\n site_settings.save(update_fields=['bottom_menu'])\n else:\n raise AssertionError(\n 'Unknown navigation type: %s' % navigation_type)\n return AssignNavigation(menu=menu, errors=errors)\n", "path": "saleor/graphql/menu/mutations.py"}], "after_files": [{"content": "import graphene\nfrom graphql_jwt.decorators import permission_required\n\nfrom ...menu import models\nfrom ..core.mutations import BaseMutation, ModelDeleteMutation, ModelMutation\nfrom ..product.types import Category, Collection\nfrom ..page.types import Page\nfrom .types import Menu\n\n\nclass MenuItemInput(graphene.InputObjectType):\n name = graphene.String(description='Name of the menu item.')\n url = graphene.String(description='URL of the pointed item.')\n category = graphene.ID(\n description='Category to which item points.', name='category')\n collection = graphene.ID(\n description='Collection to which item points.', name='collection')\n page = graphene.ID(\n description='Page to which item points.', name='page')\n\n\nclass MenuItemCreateInput(MenuItemInput):\n menu = graphene.ID(\n description='Menu to which item belongs to.', name='menu')\n parent = graphene.ID(\n description='''\n ID of the parent menu. 
If empty, menu will be top level\n menu.''',\n name='parent')\n\n\nclass MenuInput(graphene.InputObjectType):\n name = graphene.String(description='Name of the menu.')\n\n\nclass MenuCreateInput(MenuInput):\n items = graphene.List(\n MenuItemInput, description='List of menu items.')\n\n\nclass MenuCreate(ModelMutation):\n class Arguments:\n input = MenuCreateInput(\n required=True,\n description='Fields required to create a menu.')\n\n class Meta:\n description = 'Creates a new Menu'\n model = models.Menu\n\n @classmethod\n def user_is_allowed(cls, user, input):\n return user.has_perm('menu.manage_menus')\n\n @classmethod\n def clean_input(cls, info, instance, input, errors):\n cleaned_input = super().clean_input(info, instance, input, errors)\n items = []\n for item in cleaned_input.get('items', []):\n category = item.get('category')\n collection = item.get('collection')\n page = item.get('page')\n url = item.get('url')\n if len([i for i in [category, collection, page, url] if i]) > 1:\n cls.add_error(\n errors, 'items', 'More than one item provided.')\n else:\n if category:\n category = cls.get_node_or_error(\n info, category, errors, 'items', only_type=Category)\n item['category'] = category\n elif collection:\n collection = cls.get_node_or_error(\n info, collection, errors, 'items',\n only_type=Collection)\n item['collection'] = collection\n elif page:\n page = cls.get_node_or_error(\n info, page, errors, 'items', only_type=Page)\n item['page'] = page\n elif not url:\n cls.add_error(errors, 'items', 'No menu item provided.')\n items.append(item)\n cleaned_input['items'] = items\n return cleaned_input\n\n @classmethod\n def _save_m2m(cls, info, instance, cleaned_data):\n super()._save_m2m(info, instance, cleaned_data)\n items = cleaned_data.get('items', [])\n for item in items:\n instance.items.create(**item)\n\n\nclass MenuUpdate(ModelMutation):\n class Arguments:\n id = graphene.ID(\n required=True, description='ID of a menu to update.')\n input = MenuInput(\n required=True,\n description='Fields required to update a menu.')\n\n class Meta:\n description = 'Updates a menu.'\n model = models.Menu\n\n @classmethod\n def user_is_allowed(cls, user, input):\n return user.has_perm('menu.manage_menus')\n\n\nclass MenuDelete(ModelDeleteMutation):\n class Arguments:\n id = graphene.ID(\n required=True, description='ID of a menu to delete.')\n\n class Meta:\n description = 'Deletes a menu.'\n model = models.Menu\n\n @classmethod\n def user_is_allowed(cls, user, input):\n return user.has_perm('menu.manage_menus')\n\n\nclass MenuItemCreate(ModelMutation):\n class Arguments:\n input = MenuItemCreateInput(\n required=True,\n description=\"\"\"Fields required to update a menu item.\n Only one of 'url', 'category', 'page', 'collection' is allowed\n per item\"\"\")\n\n class Meta:\n description = 'Creates a new Menu'\n model = models.MenuItem\n\n @classmethod\n def user_is_allowed(cls, user, input):\n return user.has_perm('menu.manage_menus')\n\n @classmethod\n def clean_input(cls, info, instance, input, errors):\n cleaned_input = super().clean_input(info, instance, input, errors)\n items = [\n cleaned_input.get('page'), cleaned_input.get('collection'),\n cleaned_input.get('url'), cleaned_input.get('category')]\n items = [item for item in items if item is not None]\n if len(items) > 1:\n cls.add_error(\n errors=errors,\n field='items', message='More than one item provided.')\n return cleaned_input\n\n\nclass MenuItemUpdate(MenuItemCreate):\n class Arguments:\n id = graphene.ID(\n required=True, 
description='ID of a menu item to update.')\n input = MenuItemInput(\n required=True,\n description=\"\"\"Fields required to update a menu item.\n Only one of 'url', 'category', 'page', 'collection' is allowed\n per item\"\"\")\n\n class Meta:\n description = 'Updates a menu item.'\n model = models.MenuItem\n\n @classmethod\n def user_is_allowed(cls, user, input):\n return user.has_perm('menu.manage_menus')\n\n @classmethod\n def construct_instance(cls, instance, cleaned_data):\n # Only one item can be assigned per menu item\n instance.page = None\n instance.collection = None\n instance.category = None\n instance.url = None\n return super().construct_instance(instance, cleaned_data)\n\n\nclass MenuItemDelete(ModelDeleteMutation):\n class Arguments:\n id = graphene.ID(\n required=True, description='ID of a menu item to delete.')\n\n class Meta:\n description = 'Deletes a menu item.'\n model = models.MenuItem\n\n @classmethod\n def user_is_allowed(cls, user, input):\n return user.has_perm('menu.manage_menus')\n\n\nclass NavigationType(graphene.Enum):\n MAIN = 'main'\n SECONDARY = 'secondary'\n\n @property\n def description(self):\n if self == NavigationType.MAIN:\n return 'Main storefront\\'s navigation.'\n return 'Secondary storefront\\'s navigation.'\n\n\nclass AssignNavigation(BaseMutation):\n menu = graphene.Field(Menu, description='Assigned navigation menu.')\n\n class Arguments:\n menu = graphene.ID(\n description='ID of the menu.')\n navigation_type = NavigationType(\n description='Type of the navigation bar to assign the menu to.',\n required=True)\n\n class Meta:\n description = 'Assigns storefront\\'s navigation menus.'\n\n @classmethod\n @permission_required(['menu.manage_menus', 'site.manage_settings'])\n def mutate(cls, root, info, navigation_type, menu=None):\n errors = []\n site_settings = info.context.site.settings\n if menu is not None:\n menu = cls.get_node_or_error(\n info, menu, errors=errors, field='menu')\n if not errors:\n if navigation_type == NavigationType.MAIN:\n site_settings.top_menu = menu\n site_settings.save(update_fields=['top_menu'])\n elif navigation_type == NavigationType.SECONDARY:\n site_settings.bottom_menu = menu\n site_settings.save(update_fields=['bottom_menu'])\n else:\n raise AssertionError(\n 'Unknown navigation type: %s' % navigation_type)\n return AssignNavigation(menu=menu, errors=errors)\n", "path": "saleor/graphql/menu/mutations.py"}]}
2,083
924
gh_patches_debug_22377
rasdani/github-patches
git_diff
svthalia__concrexit-1741
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Cannot renew membership as benefactor, expects length field ### Describe the bug Right now, when trying to renew a membership as benefactor from http://localhost:8000/user/membership/ I can't send the form. I get a javascript error: `An invalid form control with name='length' is not focusable.` So the problem is that the length field is expected to have a value while this isn't the case for benefactors. ### How to reproduce Steps to reproduce the behaviour: 1. Go to http://localhost:8000/user/membership/ 2. Add a benefactor membership renewal 3. The form will not respond when trying to send 4. You will get a javascript error, indicating that the form can't focus on the length field (as it is hidden for benefactors), ### Expected behaviour It should ignore the length field for benefactorship renewals, as for benefactors it is always 1 year --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `website/registrations/views.py` Content: ``` 1 """Views provided by the registrations package.""" 2 from django.conf import settings 3 from django.contrib import messages 4 from django.contrib.admin.utils import model_ngettext 5 from django.contrib.admin.views.decorators import staff_member_required 6 from django.contrib.auth.decorators import login_required, permission_required 7 from django.contrib.contenttypes.models import ContentType 8 from django.core.exceptions import ValidationError 9 from django.db.models import Q 10 from django.http import Http404 11 from django.shortcuts import redirect, get_object_or_404 12 from django.template.defaultfilters import floatformat 13 from django.urls import reverse 14 from django.utils import timezone 15 from django.utils.decorators import method_decorator 16 from django.utils.translation import gettext_lazy as _ 17 from django.views import View 18 from django.views.generic import FormView, CreateView 19 from django.views.generic.base import TemplateResponseMixin, TemplateView 20 21 from members.decorators import membership_required 22 from members.models import Membership 23 from . import emails, forms, services 24 from .models import Entry, Registration, Renewal, Reference 25 26 27 class BecomeAMemberView(TemplateView): 28 """View that render a HTML template with context data.""" 29 30 template_name = "registrations/become_a_member.html" 31 32 def get_context_data(self, **kwargs): 33 context = super().get_context_data(**kwargs) 34 context["year_fees"] = floatformat( 35 settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_YEAR], 2 36 ) 37 context["study_fees"] = floatformat( 38 settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_STUDY], 2 39 ) 40 return context 41 42 43 @method_decorator(staff_member_required, name="dispatch") 44 @method_decorator( 45 permission_required("registrations.review_entries"), name="dispatch", 46 ) 47 class EntryAdminView(View): 48 """View that handles the processing of entries.""" 49 50 def post(self, request, *args, **kwargs): 51 action = request.POST.get("action") 52 entry_qs = Entry.objects.filter(pk=kwargs["pk"]) 53 try: 54 entry = entry_qs.get() 55 except Entry.DoesNotExist: 56 return redirect("admin:index") 57 58 if action == "accept": 59 if not services.check_unique_user(entry): 60 messages.error( 61 request, 62 _("Could not accept %s. 
Username is not unique.") 63 % model_ngettext(entry, 1), 64 ) 65 elif services.accept_entries(request.user.pk, entry_qs) > 0: 66 messages.success( 67 request, _("Successfully accepted %s.") % model_ngettext(entry, 1) 68 ) 69 else: 70 messages.error( 71 request, _("Could not accept %s.") % model_ngettext(entry, 1) 72 ) 73 elif action == "reject": 74 if services.reject_entries(request.user.pk, entry_qs) > 0: 75 messages.success( 76 request, _("Successfully rejected %s.") % model_ngettext(entry, 1) 77 ) 78 else: 79 messages.error( 80 request, _("Could not reject %s.") % model_ngettext(entry, 1) 81 ) 82 elif action == "resend": 83 try: 84 emails.send_registration_email_confirmation(entry.registration) 85 except Registration.DoesNotExist: 86 pass 87 elif action == "revert": 88 services.revert_entry(request.user.pk, entry) 89 90 if entry_qs.filter(renewal=None).exists(): 91 content_type = ContentType.objects.get_for_model(Registration) 92 else: 93 content_type = ContentType.objects.get_for_model(Renewal) 94 95 return redirect( 96 f"admin:{content_type.app_label}_{content_type.model}_change", kwargs["pk"], 97 ) 98 99 100 class ConfirmEmailView(View, TemplateResponseMixin): 101 """View that renders an HTML template and confirms the email address of the provided registration.""" 102 103 template_name = "registrations/confirm_email.html" 104 105 def get(self, request, *args, **kwargs): 106 queryset = Registration.objects.filter(pk=kwargs["pk"]) 107 108 processed = 0 109 try: 110 processed = services.confirm_entry(queryset) 111 except ValidationError: 112 pass 113 114 if processed == 0: 115 return redirect("registrations:register-member") 116 117 registration = queryset.get() 118 119 if ( 120 registration.membership_type == Membership.BENEFACTOR 121 and not registration.no_references 122 ): 123 emails.send_references_information_message(registration) 124 125 emails.send_new_registration_board_message(registration) 126 127 return self.render_to_response({}) 128 129 130 class BaseRegistrationFormView(FormView): 131 """View that renders a membership registration form.""" 132 133 form_class = forms.MemberRegistrationForm 134 template_name = "registrations/register_member.html" 135 136 def get_context_data(self, **kwargs): 137 context = super().get_context_data(**kwargs) 138 context["google_api_key"] = settings.GOOGLE_PLACES_API_KEY 139 context["year_fees"] = floatformat( 140 settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_YEAR], 2 141 ) 142 context["study_fees"] = floatformat( 143 settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_STUDY], 2 144 ) 145 return context 146 147 def get(self, request, *args, **kwargs): 148 if request.user.is_authenticated: 149 return redirect("registrations:renew") 150 return super().get(request, args, kwargs) 151 152 def form_valid(self, form): 153 form.save() 154 emails.send_registration_email_confirmation(form.instance) 155 return redirect("registrations:register-success") 156 157 158 class MemberRegistrationFormView(BaseRegistrationFormView): 159 """View that renders the `member` membership registration form.""" 160 161 form_class = forms.MemberRegistrationForm 162 template_name = "registrations/register_member.html" 163 164 def get_context_data(self, **kwargs): 165 context = super().get_context_data(**kwargs) 166 context["tpay_enabled"] = ( 167 settings.THALIA_PAY_ENABLED_PAYMENT_METHOD 168 and settings.THALIA_PAY_FOR_NEW_MEMBERS 169 ) 170 return context 171 172 def post(self, request, *args, **kwargs): 173 request.POST = request.POST.dict() 174 request.POST["language"] = 
request.LANGUAGE_CODE 175 request.POST["membership_type"] = Membership.MEMBER 176 return super().post(request, *args, **kwargs) 177 178 179 class BenefactorRegistrationFormView(BaseRegistrationFormView): 180 """View that renders the `benefactor` membership registration form.""" 181 182 form_class = forms.BenefactorRegistrationForm 183 template_name = "registrations/register_benefactor.html" 184 185 def get_context_data(self, **kwargs): 186 context = super().get_context_data(**kwargs) 187 context["tpay_enabled"] = ( 188 settings.THALIA_PAY_ENABLED_PAYMENT_METHOD 189 and settings.THALIA_PAY_FOR_NEW_MEMBERS 190 ) 191 return context 192 193 def post(self, request, *args, **kwargs): 194 request.POST = request.POST.dict() 195 request.POST["language"] = request.LANGUAGE_CODE 196 request.POST["membership_type"] = Membership.BENEFACTOR 197 request.POST["length"] = Entry.MEMBERSHIP_YEAR 198 request.POST["remarks"] = ( 199 "Registered as iCIS employee" if "icis_employee" in request.POST else "" 200 ) 201 request.POST["no_references"] = "icis_employee" in request.POST 202 return super().post(request, *args, **kwargs) 203 204 205 @method_decorator(login_required, name="dispatch") 206 class RenewalFormView(FormView): 207 """View that renders the membership renewal form.""" 208 209 form_class = forms.RenewalForm 210 template_name = "registrations/renewal.html" 211 212 def get_context_data(self, **kwargs): 213 context = super().get_context_data(**kwargs) 214 context["year_fees"] = floatformat( 215 settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_YEAR], 2 216 ) 217 context["study_fees"] = floatformat( 218 settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_STUDY], 2 219 ) 220 context["latest_membership"] = self.request.member.latest_membership 221 context["latest_renewal"] = Renewal.objects.filter( 222 Q(member=self.request.member) 223 & ( 224 Q(status=Registration.STATUS_ACCEPTED) 225 | Q(status=Registration.STATUS_REVIEW) 226 ) 227 ).last() 228 context["was_member"] = Membership.objects.filter( 229 user=self.request.member, type=Membership.MEMBER 230 ).exists() 231 context["benefactor_type"] = Membership.BENEFACTOR 232 return context 233 234 def get_form(self, form_class=None): 235 form = super().get_form(form_class) 236 member = self.request.member 237 if member is not None and member.latest_membership is not None: 238 latest_membership = member.latest_membership 239 # If latest membership has not ended or does not ends 240 # within 1 month: do not show 'year' length 241 hide_year_choice = not ( 242 latest_membership is not None 243 and latest_membership.until is not None 244 and (latest_membership.until - timezone.now().date()).days <= 31 245 ) 246 247 if hide_year_choice: 248 form.fields["length"].choices = [ 249 c 250 for c in form.fields["length"].choices 251 if c[0] != Entry.MEMBERSHIP_YEAR 252 ] 253 254 return form 255 256 def post(self, request, *args, **kwargs): 257 request.POST = request.POST.dict() 258 if request.member.latest_membership.type == Membership.BENEFACTOR: 259 request.POST["membership_type"] = Membership.BENEFACTOR 260 request.POST["length"] = Entry.MEMBERSHIP_YEAR 261 request.POST["member"] = request.member.pk 262 request.POST["remarks"] = "" 263 request.POST["no_references"] = True 264 265 if request.POST["membership_type"] == Membership.BENEFACTOR: 266 request.POST["no_references"] = False 267 if Membership.objects.filter( 268 user=request.member, type=Membership.MEMBER 269 ).exists(): 270 request.POST["remarks"] = "Was a Thalia member in the past." 
271 request.POST["no_references"] = True 272 if "icis_employee" in request.POST: 273 request.POST["remarks"] = "Registered as iCIS employee." 274 request.POST["no_references"] = True 275 276 return super().post(request, *args, **kwargs) 277 278 def form_valid(self, form): 279 renewal = form.save() 280 if not renewal.no_references: 281 emails.send_references_information_message(renewal) 282 emails.send_new_renewal_board_message(renewal) 283 return redirect("registrations:renew-success") 284 285 286 @method_decorator(login_required, name="dispatch") 287 @method_decorator(membership_required, name="dispatch") 288 class ReferenceCreateView(CreateView): 289 """View that renders a reference creation form.""" 290 291 model = Reference 292 form_class = forms.ReferenceForm 293 template_name = "registrations/reference.html" 294 entry = None 295 success = False 296 297 def get_success_url(self): 298 return reverse("registrations:reference-success", args=(self.entry.pk,)) 299 300 def get_context_data(self, **kwargs): 301 context = super().get_context_data(**kwargs) 302 303 context["success"] = self.success 304 try: 305 context["name"] = self.entry.registration.get_full_name() 306 except Registration.DoesNotExist: 307 context["name"] = self.entry.renewal.member.get_full_name() 308 309 return context 310 311 def dispatch(self, request, *args, **kwargs): 312 self.entry = get_object_or_404(Entry, pk=kwargs.get("pk")) 313 314 if ( 315 self.entry.no_references 316 or self.entry.membership_type != Membership.BENEFACTOR 317 ): 318 raise Http404 319 320 return super().dispatch(request, *args, **kwargs) 321 322 def post(self, request, *args, **kwargs): 323 request.POST = request.POST.dict() 324 request.POST["member"] = request.member.pk 325 request.POST["entry"] = kwargs["pk"] 326 return super().post(request, *args, **kwargs) 327 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/website/registrations/views.py b/website/registrations/views.py --- a/website/registrations/views.py +++ b/website/registrations/views.py @@ -237,7 +237,7 @@ if member is not None and member.latest_membership is not None: latest_membership = member.latest_membership # If latest membership has not ended or does not ends - # within 1 month: do not show 'year' length + # within 1 month: do not show 'year' length and disable benefactor option hide_year_choice = not ( latest_membership is not None and latest_membership.until is not None @@ -250,6 +250,11 @@ for c in form.fields["length"].choices if c[0] != Entry.MEMBERSHIP_YEAR ] + form.fields["membership_type"].choices = [ + c + for c in form.fields["membership_type"].choices + if c[0] != Membership.BENEFACTOR + ] return form
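The diff above addresses the unfocusable-`length` problem indirectly: whenever the one-year length is hidden, the benefactor option is filtered out of `membership_type` as well, so the form never renders a benefactor choice next to a required-but-hidden length field. The snippet below is a generic, standalone illustration of that choice-filtering pattern; it is not concrexit code, and the field names, choice values, and minimal settings call are made up for the example.

```python
# Standalone sketch of trimming a ChoiceField's options before rendering.
from django.conf import settings

settings.configure()  # assumption: no real project settings needed for this demo

from django import forms


class RenewalForm(forms.Form):
    membership_type = forms.ChoiceField(
        choices=[("member", "Member"), ("benefactor", "Benefactor")]
    )
    length = forms.ChoiceField(
        choices=[("year", "One year"), ("study", "Until graduation")]
    )


form = RenewalForm()
# Same pattern as the patch: drop choices the current member may not pick,
# so no required control ends up hidden on the client side.
form.fields["length"].choices = [
    c for c in form.fields["length"].choices if c[0] != "year"
]
form.fields["membership_type"].choices = [
    c for c in form.fields["membership_type"].choices if c[0] != "benefactor"
]
print(form.fields["membership_type"].choices)  # only ("member", "Member") remains
```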
{"golden_diff": "diff --git a/website/registrations/views.py b/website/registrations/views.py\n--- a/website/registrations/views.py\n+++ b/website/registrations/views.py\n@@ -237,7 +237,7 @@\n if member is not None and member.latest_membership is not None:\n latest_membership = member.latest_membership\n # If latest membership has not ended or does not ends\n- # within 1 month: do not show 'year' length\n+ # within 1 month: do not show 'year' length and disable benefactor option\n hide_year_choice = not (\n latest_membership is not None\n and latest_membership.until is not None\n@@ -250,6 +250,11 @@\n for c in form.fields[\"length\"].choices\n if c[0] != Entry.MEMBERSHIP_YEAR\n ]\n+ form.fields[\"membership_type\"].choices = [\n+ c\n+ for c in form.fields[\"membership_type\"].choices\n+ if c[0] != Membership.BENEFACTOR\n+ ]\n \n return form\n", "issue": "Cannot renew membership as benefactor, expects length field\n### Describe the bug\r\nRight now, when trying to renew a membership as benefactor from http://localhost:8000/user/membership/ I can't send the form. I get a javascript error: `An invalid form control with name='length' is not focusable.` \r\nSo the problem is that the length field is expected to have a value while this isn't the case for benefactors.\r\n\r\n### How to reproduce\r\nSteps to reproduce the behaviour:\r\n1. Go to http://localhost:8000/user/membership/\r\n2. Add a benefactor membership renewal \r\n3. The form will not respond when trying to send\r\n4. You will get a javascript error, indicating that the form can't focus on the length field (as it is hidden for benefactors), \r\n\r\n### Expected behaviour\r\nIt should ignore the length field for benefactorship renewals, as for benefactors it is always 1 year\r\n\n", "before_files": [{"content": "\"\"\"Views provided by the registrations package.\"\"\"\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.admin.utils import model_ngettext\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import ValidationError\nfrom django.db.models import Q\nfrom django.http import Http404\nfrom django.shortcuts import redirect, get_object_or_404\nfrom django.template.defaultfilters import floatformat\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import View\nfrom django.views.generic import FormView, CreateView\nfrom django.views.generic.base import TemplateResponseMixin, TemplateView\n\nfrom members.decorators import membership_required\nfrom members.models import Membership\nfrom . 
import emails, forms, services\nfrom .models import Entry, Registration, Renewal, Reference\n\n\nclass BecomeAMemberView(TemplateView):\n \"\"\"View that render a HTML template with context data.\"\"\"\n\n template_name = \"registrations/become_a_member.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"year_fees\"] = floatformat(\n settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_YEAR], 2\n )\n context[\"study_fees\"] = floatformat(\n settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_STUDY], 2\n )\n return context\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"registrations.review_entries\"), name=\"dispatch\",\n)\nclass EntryAdminView(View):\n \"\"\"View that handles the processing of entries.\"\"\"\n\n def post(self, request, *args, **kwargs):\n action = request.POST.get(\"action\")\n entry_qs = Entry.objects.filter(pk=kwargs[\"pk\"])\n try:\n entry = entry_qs.get()\n except Entry.DoesNotExist:\n return redirect(\"admin:index\")\n\n if action == \"accept\":\n if not services.check_unique_user(entry):\n messages.error(\n request,\n _(\"Could not accept %s. Username is not unique.\")\n % model_ngettext(entry, 1),\n )\n elif services.accept_entries(request.user.pk, entry_qs) > 0:\n messages.success(\n request, _(\"Successfully accepted %s.\") % model_ngettext(entry, 1)\n )\n else:\n messages.error(\n request, _(\"Could not accept %s.\") % model_ngettext(entry, 1)\n )\n elif action == \"reject\":\n if services.reject_entries(request.user.pk, entry_qs) > 0:\n messages.success(\n request, _(\"Successfully rejected %s.\") % model_ngettext(entry, 1)\n )\n else:\n messages.error(\n request, _(\"Could not reject %s.\") % model_ngettext(entry, 1)\n )\n elif action == \"resend\":\n try:\n emails.send_registration_email_confirmation(entry.registration)\n except Registration.DoesNotExist:\n pass\n elif action == \"revert\":\n services.revert_entry(request.user.pk, entry)\n\n if entry_qs.filter(renewal=None).exists():\n content_type = ContentType.objects.get_for_model(Registration)\n else:\n content_type = ContentType.objects.get_for_model(Renewal)\n\n return redirect(\n f\"admin:{content_type.app_label}_{content_type.model}_change\", kwargs[\"pk\"],\n )\n\n\nclass ConfirmEmailView(View, TemplateResponseMixin):\n \"\"\"View that renders an HTML template and confirms the email address of the provided registration.\"\"\"\n\n template_name = \"registrations/confirm_email.html\"\n\n def get(self, request, *args, **kwargs):\n queryset = Registration.objects.filter(pk=kwargs[\"pk\"])\n\n processed = 0\n try:\n processed = services.confirm_entry(queryset)\n except ValidationError:\n pass\n\n if processed == 0:\n return redirect(\"registrations:register-member\")\n\n registration = queryset.get()\n\n if (\n registration.membership_type == Membership.BENEFACTOR\n and not registration.no_references\n ):\n emails.send_references_information_message(registration)\n\n emails.send_new_registration_board_message(registration)\n\n return self.render_to_response({})\n\n\nclass BaseRegistrationFormView(FormView):\n \"\"\"View that renders a membership registration form.\"\"\"\n\n form_class = forms.MemberRegistrationForm\n template_name = \"registrations/register_member.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"google_api_key\"] = settings.GOOGLE_PLACES_API_KEY\n context[\"year_fees\"] = floatformat(\n 
settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_YEAR], 2\n )\n context[\"study_fees\"] = floatformat(\n settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_STUDY], 2\n )\n return context\n\n def get(self, request, *args, **kwargs):\n if request.user.is_authenticated:\n return redirect(\"registrations:renew\")\n return super().get(request, args, kwargs)\n\n def form_valid(self, form):\n form.save()\n emails.send_registration_email_confirmation(form.instance)\n return redirect(\"registrations:register-success\")\n\n\nclass MemberRegistrationFormView(BaseRegistrationFormView):\n \"\"\"View that renders the `member` membership registration form.\"\"\"\n\n form_class = forms.MemberRegistrationForm\n template_name = \"registrations/register_member.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"tpay_enabled\"] = (\n settings.THALIA_PAY_ENABLED_PAYMENT_METHOD\n and settings.THALIA_PAY_FOR_NEW_MEMBERS\n )\n return context\n\n def post(self, request, *args, **kwargs):\n request.POST = request.POST.dict()\n request.POST[\"language\"] = request.LANGUAGE_CODE\n request.POST[\"membership_type\"] = Membership.MEMBER\n return super().post(request, *args, **kwargs)\n\n\nclass BenefactorRegistrationFormView(BaseRegistrationFormView):\n \"\"\"View that renders the `benefactor` membership registration form.\"\"\"\n\n form_class = forms.BenefactorRegistrationForm\n template_name = \"registrations/register_benefactor.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"tpay_enabled\"] = (\n settings.THALIA_PAY_ENABLED_PAYMENT_METHOD\n and settings.THALIA_PAY_FOR_NEW_MEMBERS\n )\n return context\n\n def post(self, request, *args, **kwargs):\n request.POST = request.POST.dict()\n request.POST[\"language\"] = request.LANGUAGE_CODE\n request.POST[\"membership_type\"] = Membership.BENEFACTOR\n request.POST[\"length\"] = Entry.MEMBERSHIP_YEAR\n request.POST[\"remarks\"] = (\n \"Registered as iCIS employee\" if \"icis_employee\" in request.POST else \"\"\n )\n request.POST[\"no_references\"] = \"icis_employee\" in request.POST\n return super().post(request, *args, **kwargs)\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass RenewalFormView(FormView):\n \"\"\"View that renders the membership renewal form.\"\"\"\n\n form_class = forms.RenewalForm\n template_name = \"registrations/renewal.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"year_fees\"] = floatformat(\n settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_YEAR], 2\n )\n context[\"study_fees\"] = floatformat(\n settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_STUDY], 2\n )\n context[\"latest_membership\"] = self.request.member.latest_membership\n context[\"latest_renewal\"] = Renewal.objects.filter(\n Q(member=self.request.member)\n & (\n Q(status=Registration.STATUS_ACCEPTED)\n | Q(status=Registration.STATUS_REVIEW)\n )\n ).last()\n context[\"was_member\"] = Membership.objects.filter(\n user=self.request.member, type=Membership.MEMBER\n ).exists()\n context[\"benefactor_type\"] = Membership.BENEFACTOR\n return context\n\n def get_form(self, form_class=None):\n form = super().get_form(form_class)\n member = self.request.member\n if member is not None and member.latest_membership is not None:\n latest_membership = member.latest_membership\n # If latest membership has not ended or does not ends\n # within 1 month: do not show 'year' length\n hide_year_choice = not (\n latest_membership is not None\n 
and latest_membership.until is not None\n and (latest_membership.until - timezone.now().date()).days <= 31\n )\n\n if hide_year_choice:\n form.fields[\"length\"].choices = [\n c\n for c in form.fields[\"length\"].choices\n if c[0] != Entry.MEMBERSHIP_YEAR\n ]\n\n return form\n\n def post(self, request, *args, **kwargs):\n request.POST = request.POST.dict()\n if request.member.latest_membership.type == Membership.BENEFACTOR:\n request.POST[\"membership_type\"] = Membership.BENEFACTOR\n request.POST[\"length\"] = Entry.MEMBERSHIP_YEAR\n request.POST[\"member\"] = request.member.pk\n request.POST[\"remarks\"] = \"\"\n request.POST[\"no_references\"] = True\n\n if request.POST[\"membership_type\"] == Membership.BENEFACTOR:\n request.POST[\"no_references\"] = False\n if Membership.objects.filter(\n user=request.member, type=Membership.MEMBER\n ).exists():\n request.POST[\"remarks\"] = \"Was a Thalia member in the past.\"\n request.POST[\"no_references\"] = True\n if \"icis_employee\" in request.POST:\n request.POST[\"remarks\"] = \"Registered as iCIS employee.\"\n request.POST[\"no_references\"] = True\n\n return super().post(request, *args, **kwargs)\n\n def form_valid(self, form):\n renewal = form.save()\n if not renewal.no_references:\n emails.send_references_information_message(renewal)\n emails.send_new_renewal_board_message(renewal)\n return redirect(\"registrations:renew-success\")\n\n\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(membership_required, name=\"dispatch\")\nclass ReferenceCreateView(CreateView):\n \"\"\"View that renders a reference creation form.\"\"\"\n\n model = Reference\n form_class = forms.ReferenceForm\n template_name = \"registrations/reference.html\"\n entry = None\n success = False\n\n def get_success_url(self):\n return reverse(\"registrations:reference-success\", args=(self.entry.pk,))\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n context[\"success\"] = self.success\n try:\n context[\"name\"] = self.entry.registration.get_full_name()\n except Registration.DoesNotExist:\n context[\"name\"] = self.entry.renewal.member.get_full_name()\n\n return context\n\n def dispatch(self, request, *args, **kwargs):\n self.entry = get_object_or_404(Entry, pk=kwargs.get(\"pk\"))\n\n if (\n self.entry.no_references\n or self.entry.membership_type != Membership.BENEFACTOR\n ):\n raise Http404\n\n return super().dispatch(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n request.POST = request.POST.dict()\n request.POST[\"member\"] = request.member.pk\n request.POST[\"entry\"] = kwargs[\"pk\"]\n return super().post(request, *args, **kwargs)\n", "path": "website/registrations/views.py"}], "after_files": [{"content": "\"\"\"Views provided by the registrations package.\"\"\"\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.admin.utils import model_ngettext\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import ValidationError\nfrom django.db.models import Q\nfrom django.http import Http404\nfrom django.shortcuts import redirect, get_object_or_404\nfrom django.template.defaultfilters import floatformat\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import 
gettext_lazy as _\nfrom django.views import View\nfrom django.views.generic import FormView, CreateView\nfrom django.views.generic.base import TemplateResponseMixin, TemplateView\n\nfrom members.decorators import membership_required\nfrom members.models import Membership\nfrom . import emails, forms, services\nfrom .models import Entry, Registration, Renewal, Reference\n\n\nclass BecomeAMemberView(TemplateView):\n \"\"\"View that render a HTML template with context data.\"\"\"\n\n template_name = \"registrations/become_a_member.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"year_fees\"] = floatformat(\n settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_YEAR], 2\n )\n context[\"study_fees\"] = floatformat(\n settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_STUDY], 2\n )\n return context\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"registrations.review_entries\"), name=\"dispatch\",\n)\nclass EntryAdminView(View):\n \"\"\"View that handles the processing of entries.\"\"\"\n\n def post(self, request, *args, **kwargs):\n action = request.POST.get(\"action\")\n entry_qs = Entry.objects.filter(pk=kwargs[\"pk\"])\n try:\n entry = entry_qs.get()\n except Entry.DoesNotExist:\n return redirect(\"admin:index\")\n\n if action == \"accept\":\n if not services.check_unique_user(entry):\n messages.error(\n request,\n _(\"Could not accept %s. Username is not unique.\")\n % model_ngettext(entry, 1),\n )\n elif services.accept_entries(request.user.pk, entry_qs) > 0:\n messages.success(\n request, _(\"Successfully accepted %s.\") % model_ngettext(entry, 1)\n )\n else:\n messages.error(\n request, _(\"Could not accept %s.\") % model_ngettext(entry, 1)\n )\n elif action == \"reject\":\n if services.reject_entries(request.user.pk, entry_qs) > 0:\n messages.success(\n request, _(\"Successfully rejected %s.\") % model_ngettext(entry, 1)\n )\n else:\n messages.error(\n request, _(\"Could not reject %s.\") % model_ngettext(entry, 1)\n )\n elif action == \"resend\":\n try:\n emails.send_registration_email_confirmation(entry.registration)\n except Registration.DoesNotExist:\n pass\n elif action == \"revert\":\n services.revert_entry(request.user.pk, entry)\n\n if entry_qs.filter(renewal=None).exists():\n content_type = ContentType.objects.get_for_model(Registration)\n else:\n content_type = ContentType.objects.get_for_model(Renewal)\n\n return redirect(\n f\"admin:{content_type.app_label}_{content_type.model}_change\", kwargs[\"pk\"],\n )\n\n\nclass ConfirmEmailView(View, TemplateResponseMixin):\n \"\"\"View that renders an HTML template and confirms the email address of the provided registration.\"\"\"\n\n template_name = \"registrations/confirm_email.html\"\n\n def get(self, request, *args, **kwargs):\n queryset = Registration.objects.filter(pk=kwargs[\"pk\"])\n\n processed = 0\n try:\n processed = services.confirm_entry(queryset)\n except ValidationError:\n pass\n\n if processed == 0:\n return redirect(\"registrations:register-member\")\n\n registration = queryset.get()\n\n if (\n registration.membership_type == Membership.BENEFACTOR\n and not registration.no_references\n ):\n emails.send_references_information_message(registration)\n\n emails.send_new_registration_board_message(registration)\n\n return self.render_to_response({})\n\n\nclass BaseRegistrationFormView(FormView):\n \"\"\"View that renders a membership registration form.\"\"\"\n\n form_class = forms.MemberRegistrationForm\n template_name = 
\"registrations/register_member.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"google_api_key\"] = settings.GOOGLE_PLACES_API_KEY\n context[\"year_fees\"] = floatformat(\n settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_YEAR], 2\n )\n context[\"study_fees\"] = floatformat(\n settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_STUDY], 2\n )\n return context\n\n def get(self, request, *args, **kwargs):\n if request.user.is_authenticated:\n return redirect(\"registrations:renew\")\n return super().get(request, args, kwargs)\n\n def form_valid(self, form):\n form.save()\n emails.send_registration_email_confirmation(form.instance)\n return redirect(\"registrations:register-success\")\n\n\nclass MemberRegistrationFormView(BaseRegistrationFormView):\n \"\"\"View that renders the `member` membership registration form.\"\"\"\n\n form_class = forms.MemberRegistrationForm\n template_name = \"registrations/register_member.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"tpay_enabled\"] = (\n settings.THALIA_PAY_ENABLED_PAYMENT_METHOD\n and settings.THALIA_PAY_FOR_NEW_MEMBERS\n )\n return context\n\n def post(self, request, *args, **kwargs):\n request.POST = request.POST.dict()\n request.POST[\"language\"] = request.LANGUAGE_CODE\n request.POST[\"membership_type\"] = Membership.MEMBER\n return super().post(request, *args, **kwargs)\n\n\nclass BenefactorRegistrationFormView(BaseRegistrationFormView):\n \"\"\"View that renders the `benefactor` membership registration form.\"\"\"\n\n form_class = forms.BenefactorRegistrationForm\n template_name = \"registrations/register_benefactor.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"tpay_enabled\"] = (\n settings.THALIA_PAY_ENABLED_PAYMENT_METHOD\n and settings.THALIA_PAY_FOR_NEW_MEMBERS\n )\n return context\n\n def post(self, request, *args, **kwargs):\n request.POST = request.POST.dict()\n request.POST[\"language\"] = request.LANGUAGE_CODE\n request.POST[\"membership_type\"] = Membership.BENEFACTOR\n request.POST[\"length\"] = Entry.MEMBERSHIP_YEAR\n request.POST[\"remarks\"] = (\n \"Registered as iCIS employee\" if \"icis_employee\" in request.POST else \"\"\n )\n request.POST[\"no_references\"] = \"icis_employee\" in request.POST\n return super().post(request, *args, **kwargs)\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass RenewalFormView(FormView):\n \"\"\"View that renders the membership renewal form.\"\"\"\n\n form_class = forms.RenewalForm\n template_name = \"registrations/renewal.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"year_fees\"] = floatformat(\n settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_YEAR], 2\n )\n context[\"study_fees\"] = floatformat(\n settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_STUDY], 2\n )\n context[\"latest_membership\"] = self.request.member.latest_membership\n context[\"latest_renewal\"] = Renewal.objects.filter(\n Q(member=self.request.member)\n & (\n Q(status=Registration.STATUS_ACCEPTED)\n | Q(status=Registration.STATUS_REVIEW)\n )\n ).last()\n context[\"was_member\"] = Membership.objects.filter(\n user=self.request.member, type=Membership.MEMBER\n ).exists()\n context[\"benefactor_type\"] = Membership.BENEFACTOR\n return context\n\n def get_form(self, form_class=None):\n form = super().get_form(form_class)\n member = self.request.member\n if member is not None and 
member.latest_membership is not None:\n latest_membership = member.latest_membership\n # If latest membership has not ended or does not ends\n # within 1 month: do not show 'year' length and disable benefactor option\n hide_year_choice = not (\n latest_membership is not None\n and latest_membership.until is not None\n and (latest_membership.until - timezone.now().date()).days <= 31\n )\n\n if hide_year_choice:\n form.fields[\"length\"].choices = [\n c\n for c in form.fields[\"length\"].choices\n if c[0] != Entry.MEMBERSHIP_YEAR\n ]\n form.fields[\"membership_type\"].choices = [\n c\n for c in form.fields[\"membership_type\"].choices\n if c[0] != Membership.BENEFACTOR\n ]\n\n return form\n\n def post(self, request, *args, **kwargs):\n request.POST = request.POST.dict()\n if request.member.latest_membership.type == Membership.BENEFACTOR:\n request.POST[\"membership_type\"] = Membership.BENEFACTOR\n request.POST[\"length\"] = Entry.MEMBERSHIP_YEAR\n request.POST[\"member\"] = request.member.pk\n request.POST[\"remarks\"] = \"\"\n request.POST[\"no_references\"] = True\n\n if request.POST[\"membership_type\"] == Membership.BENEFACTOR:\n request.POST[\"no_references\"] = False\n if Membership.objects.filter(\n user=request.member, type=Membership.MEMBER\n ).exists():\n request.POST[\"remarks\"] = \"Was a Thalia member in the past.\"\n request.POST[\"no_references\"] = True\n if \"icis_employee\" in request.POST:\n request.POST[\"remarks\"] = \"Registered as iCIS employee.\"\n request.POST[\"no_references\"] = True\n\n return super().post(request, *args, **kwargs)\n\n def form_valid(self, form):\n renewal = form.save()\n if not renewal.no_references:\n emails.send_references_information_message(renewal)\n emails.send_new_renewal_board_message(renewal)\n return redirect(\"registrations:renew-success\")\n\n\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(membership_required, name=\"dispatch\")\nclass ReferenceCreateView(CreateView):\n \"\"\"View that renders a reference creation form.\"\"\"\n\n model = Reference\n form_class = forms.ReferenceForm\n template_name = \"registrations/reference.html\"\n entry = None\n success = False\n\n def get_success_url(self):\n return reverse(\"registrations:reference-success\", args=(self.entry.pk,))\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n context[\"success\"] = self.success\n try:\n context[\"name\"] = self.entry.registration.get_full_name()\n except Registration.DoesNotExist:\n context[\"name\"] = self.entry.renewal.member.get_full_name()\n\n return context\n\n def dispatch(self, request, *args, **kwargs):\n self.entry = get_object_or_404(Entry, pk=kwargs.get(\"pk\"))\n\n if (\n self.entry.no_references\n or self.entry.membership_type != Membership.BENEFACTOR\n ):\n raise Http404\n\n return super().dispatch(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n request.POST = request.POST.dict()\n request.POST[\"member\"] = request.member.pk\n request.POST[\"entry\"] = kwargs[\"pk\"]\n return super().post(request, *args, **kwargs)\n", "path": "website/registrations/views.py"}]}
3,879
241
gh_patches_debug_24970
rasdani/github-patches
git_diff
WeblateOrg__weblate-11333
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Glossary check not working on spaceless languages ### Describe the issue When having `check-glossary` enabled, the check always fails on spaceless languages, if the source string has a word in it that is in the glossary. ### I already tried - [X] I've read and searched [the documentation](https://docs.weblate.org/). - [X] I've searched for similar filed issues in this repository. ### Steps to reproduce the behavior 1. Create glossary entry that actually exists in a source string 2. Translate the source string with the glossary entry to Japanese (or Korean or similar) 3. Enable `check-glossary` 4. Notice a false positive on the check ### Expected behavior _No response_ ### Screenshots ![Bildschirmfoto vom 2024-03-30 17-12-24](https://github.com/WeblateOrg/weblate/assets/11144627/d906ae16-7c70-42f7-943a-ac9f03b0517d) ![Bildschirmfoto vom 2024-03-30 17-12-20](https://github.com/WeblateOrg/weblate/assets/11144627/65995849-2de4-4df8-a05c-230964d257cc) ### Exception traceback _No response_ ### How do you run Weblate? Docker container ### Weblate versions * Weblate: 5.4.3 * Django: 4.2.11 * siphashc: 2.4.1 * translate-toolkit: 3.12.2 * lxml: 5.1.0 * pillow: 10.2.0 * nh3: 0.2.17 * python-dateutil: 2.9.0.post0 * social-auth-core: 4.5.3 * social-auth-app-django: 5.4.0 * django-crispy-forms: 2.1 * oauthlib: 3.2.2 * django-compressor: 4.4 * djangorestframework: 3.14.0 * django-filter: 23.5 * django-appconf: 1.0.6 * user-agents: 2.2.0 * filelock: 3.13.3 * rapidfuzz: 3.7.0 * openpyxl: 3.1.2 * celery: 5.3.6 * django-celery-beat: 2.5.0 * kombu: 5.3.5 * translation-finder: 2.16 * weblate-language-data: 2024.3 * html2text: 2020.1.16 * pycairo: 1.26.0 * PyGObject: 3.48.1 * diff-match-patch: 20230430 * requests: 2.31.0 * django-redis: 5.4.0 * hiredis: 2.3.2 * sentry-sdk: 1.43.0 * Cython: 3.0.9 * misaka: 2.1.1 * GitPython: 3.1.42 * borgbackup: 1.2.7 * pyparsing: 3.1.2 * ahocorasick_rs: 0.22.0 * python-redis-lock: 4.0.0 * charset-normalizer: 3.3.2 * Python: 3.12.2 * Git: 2.39.2 * psycopg: 3.1.18 * psycopg-binary: 3.1.18 * phply: 1.2.6 * ruamel.yaml: 0.18.6 * tesserocr: 2.6.2 * boto3: 1.34.71 * zeep: 4.2.1 * aeidon: 1.13 * iniparse: 0.5 * mysqlclient: 2.2.4 * Mercurial: 6.7.1 * git-svn: 2.39.2 * git-review: 2.4.0 * PostgreSQL server: 15.2 * Database backends: django.db.backends.postgresql * PostgreSQL implementation: psycopg3 (binary) * Cache backends: default:RedisCache, avatar:FileBasedCache * Email setup: django.core.mail.backends.smtp.EmailBackend: mail.your-server.de * OS encoding: filesystem=utf-8, default=utf-8 * Celery: redis://cache:6379/1, redis://cache:6379/1, regular * Platform: Linux 5.10.0-28-amd64 (x86_64) ### Weblate deploy checks ```shell System check identified some issues: INFOS: ?: (weblate.I028) Backups are not configured, it is highly recommended for production use HINT: https://docs.weblate.org/en/weblate-5.4.3/admin/backup.html System check identified 1 issue (1 silenced). ``` ### Additional context _No response_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `weblate/checks/glossary.py` Content: ``` 1 # Copyright © Michal Čihař <[email protected]> 2 # 3 # SPDX-License-Identifier: GPL-3.0-or-later 4 5 import re 6 7 from django.utils.html import escape, format_html, format_html_join 8 from django.utils.translation import gettext, gettext_lazy 9 10 from weblate.checks.base import TargetCheck 11 12 13 class GlossaryCheck(TargetCheck): 14 default_disabled = True 15 check_id = "check_glossary" 16 name = gettext_lazy("Does not follow glossary") 17 description = gettext_lazy( 18 "The translation does not follow terms defined in a glossary." 19 ) 20 21 def check_single(self, source, target, unit): 22 from weblate.glossary.models import get_glossary_terms 23 24 forbidden = set() 25 mismatched = set() 26 matched = set() 27 for term in get_glossary_terms(unit): 28 term_source = term.source 29 flags = term.all_flags 30 expected = term_source if "read-only" in flags else term.target 31 if "forbidden" in flags: 32 if re.search(rf"\b{re.escape(expected)}\b", target, re.IGNORECASE): 33 forbidden.add(term_source) 34 else: 35 if term_source in matched: 36 continue 37 if re.search(rf"\b{re.escape(expected)}\b", target, re.IGNORECASE): 38 mismatched.discard(term_source) 39 matched.add(term_source) 40 else: 41 mismatched.add(term_source) 42 43 return forbidden | mismatched 44 45 def get_description(self, check_obj): 46 unit = check_obj.unit 47 sources = unit.get_source_plurals() 48 targets = unit.get_target_plurals() 49 source = sources[0] 50 results = set() 51 # Check singular 52 result = self.check_single(source, targets[0], unit) 53 if result: 54 results.update(result) 55 # Do we have more to check? 56 if len(sources) > 1: 57 source = sources[1] 58 # Check plurals against plural from source 59 for target in targets[1:]: 60 result = self.check_single(source, target, unit) 61 if result: 62 results.update(result) 63 64 if not results: 65 return super().get_description(check_obj) 66 67 return format_html( 68 escape( 69 gettext("Following terms are not translated according to glossary: {}") 70 ), 71 format_html_join(", ", "{}", ((term,) for term in sorted(results))), 72 ) 73 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/weblate/checks/glossary.py b/weblate/checks/glossary.py --- a/weblate/checks/glossary.py +++ b/weblate/checks/glossary.py @@ -24,17 +24,22 @@ forbidden = set() mismatched = set() matched = set() + boundary = r"\b" if unit.translation.language.uses_whitespace() else "" for term in get_glossary_terms(unit): term_source = term.source flags = term.all_flags expected = term_source if "read-only" in flags else term.target if "forbidden" in flags: - if re.search(rf"\b{re.escape(expected)}\b", target, re.IGNORECASE): + if re.search( + rf"{boundary}{re.escape(expected)}{boundary}", target, re.IGNORECASE + ): forbidden.add(term_source) else: if term_source in matched: continue - if re.search(rf"\b{re.escape(expected)}\b", target, re.IGNORECASE): + if re.search( + rf"{boundary}{re.escape(expected)}{boundary}", target, re.IGNORECASE + ): mismatched.discard(term_source) matched.add(term_source) else:
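To see why the original check misfires on spaceless languages and why the patch drops the `\b` anchors when `language.uses_whitespace()` is false: in Python's `re`, `\b` only exists at a word/non-word transition, and CJK characters count as word characters, so a glossary term embedded in running Japanese text has no boundary on either side and is reported as missing. A minimal, self-contained demonstration (the strings are invented examples, not Weblate data):

```python
import re

term = "用語"              # hypothetical glossary translation
target = "この用語集を見る"  # Japanese target containing the term, no surrounding spaces

# With word boundaries the term is "not found" -> the check flags a false positive.
print(bool(re.search(rf"\b{re.escape(term)}\b", target, re.IGNORECASE)))  # False

# Without boundaries, as the patch uses for languages that do not use whitespace.
print(bool(re.search(re.escape(term), target, re.IGNORECASE)))            # True
```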
{"golden_diff": "diff --git a/weblate/checks/glossary.py b/weblate/checks/glossary.py\n--- a/weblate/checks/glossary.py\n+++ b/weblate/checks/glossary.py\n@@ -24,17 +24,22 @@\n forbidden = set()\n mismatched = set()\n matched = set()\n+ boundary = r\"\\b\" if unit.translation.language.uses_whitespace() else \"\"\n for term in get_glossary_terms(unit):\n term_source = term.source\n flags = term.all_flags\n expected = term_source if \"read-only\" in flags else term.target\n if \"forbidden\" in flags:\n- if re.search(rf\"\\b{re.escape(expected)}\\b\", target, re.IGNORECASE):\n+ if re.search(\n+ rf\"{boundary}{re.escape(expected)}{boundary}\", target, re.IGNORECASE\n+ ):\n forbidden.add(term_source)\n else:\n if term_source in matched:\n continue\n- if re.search(rf\"\\b{re.escape(expected)}\\b\", target, re.IGNORECASE):\n+ if re.search(\n+ rf\"{boundary}{re.escape(expected)}{boundary}\", target, re.IGNORECASE\n+ ):\n mismatched.discard(term_source)\n matched.add(term_source)\n else:\n", "issue": "Glossary check not working on spaceless languages\n### Describe the issue\n\nWhen having `check-glossary` enabled, the check always fails on spaceless languages, if the source string has a word in it that is in the glossary.\n\n### I already tried\n\n- [X] I've read and searched [the documentation](https://docs.weblate.org/).\n- [X] I've searched for similar filed issues in this repository.\n\n### Steps to reproduce the behavior\n\n1. Create glossary entry that actually exists in a source string\r\n2. Translate the source string with the glossary entry to Japanese (or Korean or similar)\r\n3. Enable `check-glossary`\r\n4. Notice a false positive on the check\n\n### Expected behavior\n\n_No response_\n\n### Screenshots\n\n![Bildschirmfoto vom 2024-03-30 17-12-24](https://github.com/WeblateOrg/weblate/assets/11144627/d906ae16-7c70-42f7-943a-ac9f03b0517d)\r\n![Bildschirmfoto vom 2024-03-30 17-12-20](https://github.com/WeblateOrg/weblate/assets/11144627/65995849-2de4-4df8-a05c-230964d257cc)\r\n\n\n### Exception traceback\n\n_No response_\n\n### How do you run Weblate?\n\nDocker container\n\n### Weblate versions\n\n * Weblate: 5.4.3\r\n * Django: 4.2.11\r\n * siphashc: 2.4.1\r\n * translate-toolkit: 3.12.2\r\n * lxml: 5.1.0\r\n * pillow: 10.2.0\r\n * nh3: 0.2.17\r\n * python-dateutil: 2.9.0.post0\r\n * social-auth-core: 4.5.3\r\n * social-auth-app-django: 5.4.0\r\n * django-crispy-forms: 2.1\r\n * oauthlib: 3.2.2\r\n * django-compressor: 4.4\r\n * djangorestframework: 3.14.0\r\n * django-filter: 23.5\r\n * django-appconf: 1.0.6\r\n * user-agents: 2.2.0\r\n * filelock: 3.13.3\r\n * rapidfuzz: 3.7.0\r\n * openpyxl: 3.1.2\r\n * celery: 5.3.6\r\n * django-celery-beat: 2.5.0\r\n * kombu: 5.3.5\r\n * translation-finder: 2.16\r\n * weblate-language-data: 2024.3\r\n * html2text: 2020.1.16\r\n * pycairo: 1.26.0\r\n * PyGObject: 3.48.1\r\n * diff-match-patch: 20230430\r\n * requests: 2.31.0\r\n * django-redis: 5.4.0\r\n * hiredis: 2.3.2\r\n * sentry-sdk: 1.43.0\r\n * Cython: 3.0.9\r\n * misaka: 2.1.1\r\n * GitPython: 3.1.42\r\n * borgbackup: 1.2.7\r\n * pyparsing: 3.1.2\r\n * ahocorasick_rs: 0.22.0\r\n * python-redis-lock: 4.0.0\r\n * charset-normalizer: 3.3.2\r\n * Python: 3.12.2\r\n * Git: 2.39.2\r\n * psycopg: 3.1.18\r\n * psycopg-binary: 3.1.18\r\n * phply: 1.2.6\r\n * ruamel.yaml: 0.18.6\r\n * tesserocr: 2.6.2\r\n * boto3: 1.34.71\r\n * zeep: 4.2.1\r\n * aeidon: 1.13\r\n * iniparse: 0.5\r\n * mysqlclient: 2.2.4\r\n * Mercurial: 6.7.1\r\n * git-svn: 2.39.2\r\n * git-review: 2.4.0\r\n * PostgreSQL server: 
15.2\r\n * Database backends: django.db.backends.postgresql\r\n * PostgreSQL implementation: psycopg3 (binary)\r\n * Cache backends: default:RedisCache, avatar:FileBasedCache\r\n * Email setup: django.core.mail.backends.smtp.EmailBackend: mail.your-server.de\r\n * OS encoding: filesystem=utf-8, default=utf-8\r\n * Celery: redis://cache:6379/1, redis://cache:6379/1, regular\r\n * Platform: Linux 5.10.0-28-amd64 (x86_64)\n\n### Weblate deploy checks\n\n```shell\nSystem check identified some issues:\r\n\r\nINFOS:\r\n?: (weblate.I028) Backups are not configured, it is highly recommended for production use\r\n\tHINT: https://docs.weblate.org/en/weblate-5.4.3/admin/backup.html\r\n\r\nSystem check identified 1 issue (1 silenced).\n```\n\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "# Copyright \u00a9 Michal \u010ciha\u0159 <[email protected]>\n#\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nimport re\n\nfrom django.utils.html import escape, format_html, format_html_join\nfrom django.utils.translation import gettext, gettext_lazy\n\nfrom weblate.checks.base import TargetCheck\n\n\nclass GlossaryCheck(TargetCheck):\n default_disabled = True\n check_id = \"check_glossary\"\n name = gettext_lazy(\"Does not follow glossary\")\n description = gettext_lazy(\n \"The translation does not follow terms defined in a glossary.\"\n )\n\n def check_single(self, source, target, unit):\n from weblate.glossary.models import get_glossary_terms\n\n forbidden = set()\n mismatched = set()\n matched = set()\n for term in get_glossary_terms(unit):\n term_source = term.source\n flags = term.all_flags\n expected = term_source if \"read-only\" in flags else term.target\n if \"forbidden\" in flags:\n if re.search(rf\"\\b{re.escape(expected)}\\b\", target, re.IGNORECASE):\n forbidden.add(term_source)\n else:\n if term_source in matched:\n continue\n if re.search(rf\"\\b{re.escape(expected)}\\b\", target, re.IGNORECASE):\n mismatched.discard(term_source)\n matched.add(term_source)\n else:\n mismatched.add(term_source)\n\n return forbidden | mismatched\n\n def get_description(self, check_obj):\n unit = check_obj.unit\n sources = unit.get_source_plurals()\n targets = unit.get_target_plurals()\n source = sources[0]\n results = set()\n # Check singular\n result = self.check_single(source, targets[0], unit)\n if result:\n results.update(result)\n # Do we have more to check?\n if len(sources) > 1:\n source = sources[1]\n # Check plurals against plural from source\n for target in targets[1:]:\n result = self.check_single(source, target, unit)\n if result:\n results.update(result)\n\n if not results:\n return super().get_description(check_obj)\n\n return format_html(\n escape(\n gettext(\"Following terms are not translated according to glossary: {}\")\n ),\n format_html_join(\", \", \"{}\", ((term,) for term in sorted(results))),\n )\n", "path": "weblate/checks/glossary.py"}], "after_files": [{"content": "# Copyright \u00a9 Michal \u010ciha\u0159 <[email protected]>\n#\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nimport re\n\nfrom django.utils.html import escape, format_html, format_html_join\nfrom django.utils.translation import gettext, gettext_lazy\n\nfrom weblate.checks.base import TargetCheck\n\n\nclass GlossaryCheck(TargetCheck):\n default_disabled = True\n check_id = \"check_glossary\"\n name = gettext_lazy(\"Does not follow glossary\")\n description = gettext_lazy(\n \"The translation does not follow terms defined in a glossary.\"\n )\n\n def check_single(self, source, target, unit):\n from 
weblate.glossary.models import get_glossary_terms\n\n forbidden = set()\n mismatched = set()\n matched = set()\n boundary = r\"\\b\" if unit.translation.language.uses_whitespace() else \"\"\n for term in get_glossary_terms(unit):\n term_source = term.source\n flags = term.all_flags\n expected = term_source if \"read-only\" in flags else term.target\n if \"forbidden\" in flags:\n if re.search(\n rf\"{boundary}{re.escape(expected)}{boundary}\", target, re.IGNORECASE\n ):\n forbidden.add(term_source)\n else:\n if term_source in matched:\n continue\n if re.search(\n rf\"{boundary}{re.escape(expected)}{boundary}\", target, re.IGNORECASE\n ):\n mismatched.discard(term_source)\n matched.add(term_source)\n else:\n mismatched.add(term_source)\n\n return forbidden | mismatched\n\n def get_description(self, check_obj):\n unit = check_obj.unit\n sources = unit.get_source_plurals()\n targets = unit.get_target_plurals()\n source = sources[0]\n results = set()\n # Check singular\n result = self.check_single(source, targets[0], unit)\n if result:\n results.update(result)\n # Do we have more to check?\n if len(sources) > 1:\n source = sources[1]\n # Check plurals against plural from source\n for target in targets[1:]:\n result = self.check_single(source, target, unit)\n if result:\n results.update(result)\n\n if not results:\n return super().get_description(check_obj)\n\n return format_html(\n escape(\n gettext(\"Following terms are not translated according to glossary: {}\")\n ),\n format_html_join(\", \", \"{}\", ((term,) for term in sorted(results))),\n )\n", "path": "weblate/checks/glossary.py"}]}
2181
284
gh_patches_debug_14632
rasdani/github-patches
git_diff
PyGithub__PyGithub-1053
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- dismiss a PR review? Am I reading the docs correctly and understanding that there is no support for [dismissing a PR review](https://developer.github.com/v3/pulls/reviews/#dismiss-a-pull-request-review)? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `github/PullRequestReview.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 ############################ Copyrights and license ############################ 4 # # 5 # Copyright 2017 Aaron Levine <[email protected]> # 6 # Copyright 2017 Mike Miller <[email protected]> # 7 # Copyright 2018 Darragh Bailey <[email protected]> # 8 # Copyright 2018 Wan Liuyang <[email protected]> # 9 # Copyright 2018 sfdye <[email protected]> # 10 # # 11 # This file is part of PyGithub. # 12 # http://pygithub.readthedocs.io/ # 13 # # 14 # PyGithub is free software: you can redistribute it and/or modify it under # 15 # the terms of the GNU Lesser General Public License as published by the Free # 16 # Software Foundation, either version 3 of the License, or (at your option) # 17 # any later version. # 18 # # 19 # PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY # 20 # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # 21 # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more # 22 # details. # 23 # # 24 # You should have received a copy of the GNU Lesser General Public License # 25 # along with PyGithub. If not, see <http://www.gnu.org/licenses/>. # 26 # # 27 ################################################################################ 28 29 import github.GithubObject 30 31 import github.NamedUser 32 33 34 class PullRequestReview(github.GithubObject.CompletableGithubObject): 35 """ 36 This class represents PullRequestReviews. 
The reference can be found here https://developer.github.com/v3/pulls/reviews/ 37 """ 38 39 def __repr__(self): 40 return self.get__repr__({"id": self._id.value, "user": self._user.value}) 41 42 @property 43 def id(self): 44 """ 45 :type: integer 46 """ 47 self._completeIfNotSet(self._id) 48 return self._id.value 49 50 @property 51 def user(self): 52 """ 53 :type: :class:`github.NamedUser.NamedUser` 54 """ 55 self._completeIfNotSet(self._user) 56 return self._user.value 57 58 @property 59 def body(self): 60 """ 61 :type: string 62 """ 63 self._completeIfNotSet(self._body) 64 return self._body.value 65 66 @property 67 def commit_id(self): 68 """ 69 :type: string 70 """ 71 self._completeIfNotSet(self._commit_id) 72 return self._commit_id.value 73 74 @property 75 def state(self): 76 """ 77 :type: string 78 """ 79 self._completeIfNotSet(self._state) 80 return self._state.value 81 82 @property 83 def url(self): 84 """ 85 :type: string 86 """ 87 self._completeIfNotSet(self._url) 88 return self._url.value 89 90 @property 91 def html_url(self): 92 """ 93 :type: string 94 """ 95 self._completeIfNotSet(self._html_url) 96 return self._html_url.value 97 98 @property 99 def pull_request_url(self): 100 """ 101 :type: string 102 """ 103 self._completeIfNotSet(self._pull_request_url) 104 return self._pull_request_url.value 105 106 @property 107 def submitted_at(self): 108 """ 109 :type: datetime.datetime 110 """ 111 self._completeIfNotSet(self._submitted_at) 112 return self._submitted_at.value 113 114 def _initAttributes(self): 115 self._id = github.GithubObject.NotSet 116 self._user = github.GithubObject.NotSet 117 self._body = github.GithubObject.NotSet 118 self._commit_id = github.GithubObject.NotSet 119 self._state = github.GithubObject.NotSet 120 self._url = github.GithubObject.NotSet 121 self._html_url = github.GithubObject.NotSet 122 self._pull_request_url = github.GithubObject.NotSet 123 self._submitted_at = github.GithubObject.NotSet 124 125 def _useAttributes(self, attributes): 126 if "id" in attributes: # pragma no branch 127 self._id = self._makeIntAttribute(attributes["id"]) 128 if "user" in attributes: # pragma no branch 129 self._user = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["user"]) 130 if "body" in attributes: # pragma no branch 131 self._body = self._makeStringAttribute(attributes["body"]) 132 if "commit_id" in attributes: # pragma no branch 133 self._commit_id = self._makeStringAttribute(attributes["commit_id"]) 134 if "state" in attributes: # pragma no branch 135 self._state = self._makeStringAttribute(attributes["state"]) 136 if "url" in attributes: # pragma no branch 137 self._url = self._makeStringAttribute(attributes["url"]) 138 if "html_url" in attributes: # pragma no branch 139 self._html_url = self._makeStringAttribute(attributes["html_url"]) 140 if "pull_request_url" in attributes: # pragma no branch 141 self._pull_request_url = self._makeStringAttribute(attributes["pull_request_url"]) 142 if "submitted_at" in attributes: # pragma no branch 143 self._submitted_at = self._makeDatetimeAttribute(attributes["submitted_at"]) 144 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/github/PullRequestReview.py b/github/PullRequestReview.py --- a/github/PullRequestReview.py +++ b/github/PullRequestReview.py @@ -111,6 +111,19 @@ self._completeIfNotSet(self._submitted_at) return self._submitted_at.value + def dismiss(self, message): + """ + :calls: `PUT /repos/:owner/:repo/pulls/:number/reviews/:review_id/dismissals <https://developer.github.com/v3/pulls/reviews/>`_ + :rtype: None + """ + assert isinstance(message, (str, unicode)), message + post_parameters = {'message': message} + headers, data = self._requester.requestJsonAndCheck( + "PUT", + self.pull_request_url + "/reviews/%s/dismissals" % self.id, + input=post_parameters + ) + def _initAttributes(self): self._id = github.GithubObject.NotSet self._user = github.GithubObject.NotSet
{"golden_diff": "diff --git a/github/PullRequestReview.py b/github/PullRequestReview.py\n--- a/github/PullRequestReview.py\n+++ b/github/PullRequestReview.py\n@@ -111,6 +111,19 @@\n self._completeIfNotSet(self._submitted_at)\n return self._submitted_at.value\n \n+ def dismiss(self, message):\n+ \"\"\"\n+ :calls: `PUT /repos/:owner/:repo/pulls/:number/reviews/:review_id/dismissals <https://developer.github.com/v3/pulls/reviews/>`_\n+ :rtype: None\n+ \"\"\"\n+ assert isinstance(message, (str, unicode)), message\n+ post_parameters = {'message': message}\n+ headers, data = self._requester.requestJsonAndCheck(\n+ \"PUT\",\n+ self.pull_request_url + \"/reviews/%s/dismissals\" % self.id,\n+ input=post_parameters\n+ )\n+\n def _initAttributes(self):\n self._id = github.GithubObject.NotSet\n self._user = github.GithubObject.NotSet\n", "issue": "dismiss a PR review?\nAm I reading the docs correctly and understanding that there is no support for [dismissing a PR review](https://developer.github.com/v3/pulls/reviews/#dismiss-a-pull-request-review)?\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n############################ Copyrights and license ############################\n# #\n# Copyright 2017 Aaron Levine <[email protected]> #\n# Copyright 2017 Mike Miller <[email protected]> #\n# Copyright 2018 Darragh Bailey <[email protected]> #\n# Copyright 2018 Wan Liuyang <[email protected]> #\n# Copyright 2018 sfdye <[email protected]> #\n# #\n# This file is part of PyGithub. #\n# http://pygithub.readthedocs.io/ #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #\n# #\n################################################################################\n\nimport github.GithubObject\n\nimport github.NamedUser\n\n\nclass PullRequestReview(github.GithubObject.CompletableGithubObject):\n \"\"\"\n This class represents PullRequestReviews. 
The reference can be found here https://developer.github.com/v3/pulls/reviews/\n \"\"\"\n\n def __repr__(self):\n return self.get__repr__({\"id\": self._id.value, \"user\": self._user.value})\n\n @property\n def id(self):\n \"\"\"\n :type: integer\n \"\"\"\n self._completeIfNotSet(self._id)\n return self._id.value\n\n @property\n def user(self):\n \"\"\"\n :type: :class:`github.NamedUser.NamedUser`\n \"\"\"\n self._completeIfNotSet(self._user)\n return self._user.value\n\n @property\n def body(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._body)\n return self._body.value\n\n @property\n def commit_id(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._commit_id)\n return self._commit_id.value\n\n @property\n def state(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._state)\n return self._state.value\n\n @property\n def url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._url)\n return self._url.value\n\n @property\n def html_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._html_url)\n return self._html_url.value\n\n @property\n def pull_request_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._pull_request_url)\n return self._pull_request_url.value\n\n @property\n def submitted_at(self):\n \"\"\"\n :type: datetime.datetime\n \"\"\"\n self._completeIfNotSet(self._submitted_at)\n return self._submitted_at.value\n\n def _initAttributes(self):\n self._id = github.GithubObject.NotSet\n self._user = github.GithubObject.NotSet\n self._body = github.GithubObject.NotSet\n self._commit_id = github.GithubObject.NotSet\n self._state = github.GithubObject.NotSet\n self._url = github.GithubObject.NotSet\n self._html_url = github.GithubObject.NotSet\n self._pull_request_url = github.GithubObject.NotSet\n self._submitted_at = github.GithubObject.NotSet\n\n def _useAttributes(self, attributes):\n if \"id\" in attributes: # pragma no branch\n self._id = self._makeIntAttribute(attributes[\"id\"])\n if \"user\" in attributes: # pragma no branch\n self._user = self._makeClassAttribute(github.NamedUser.NamedUser, attributes[\"user\"])\n if \"body\" in attributes: # pragma no branch\n self._body = self._makeStringAttribute(attributes[\"body\"])\n if \"commit_id\" in attributes: # pragma no branch\n self._commit_id = self._makeStringAttribute(attributes[\"commit_id\"])\n if \"state\" in attributes: # pragma no branch\n self._state = self._makeStringAttribute(attributes[\"state\"])\n if \"url\" in attributes: # pragma no branch\n self._url = self._makeStringAttribute(attributes[\"url\"])\n if \"html_url\" in attributes: # pragma no branch\n self._html_url = self._makeStringAttribute(attributes[\"html_url\"])\n if \"pull_request_url\" in attributes: # pragma no branch\n self._pull_request_url = self._makeStringAttribute(attributes[\"pull_request_url\"])\n if \"submitted_at\" in attributes: # pragma no branch\n self._submitted_at = self._makeDatetimeAttribute(attributes[\"submitted_at\"])\n", "path": "github/PullRequestReview.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n############################ Copyrights and license ############################\n# #\n# Copyright 2017 Aaron Levine <[email protected]> #\n# Copyright 2017 Mike Miller <[email protected]> #\n# Copyright 2018 Darragh Bailey <[email protected]> #\n# Copyright 2018 Wan Liuyang <[email protected]> #\n# Copyright 2018 sfdye <[email protected]> #\n# #\n# This file is part of PyGithub. 
#\n# http://pygithub.readthedocs.io/ #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #\n# #\n################################################################################\n\nimport github.GithubObject\n\nimport github.NamedUser\n\n\nclass PullRequestReview(github.GithubObject.CompletableGithubObject):\n \"\"\"\n This class represents PullRequestReviews. The reference can be found here https://developer.github.com/v3/pulls/reviews/\n \"\"\"\n\n def __repr__(self):\n return self.get__repr__({\"id\": self._id.value, \"user\": self._user.value})\n\n @property\n def id(self):\n \"\"\"\n :type: integer\n \"\"\"\n self._completeIfNotSet(self._id)\n return self._id.value\n\n @property\n def user(self):\n \"\"\"\n :type: :class:`github.NamedUser.NamedUser`\n \"\"\"\n self._completeIfNotSet(self._user)\n return self._user.value\n\n @property\n def body(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._body)\n return self._body.value\n\n @property\n def commit_id(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._commit_id)\n return self._commit_id.value\n\n @property\n def state(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._state)\n return self._state.value\n\n @property\n def url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._url)\n return self._url.value\n\n @property\n def html_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._html_url)\n return self._html_url.value\n\n @property\n def pull_request_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._pull_request_url)\n return self._pull_request_url.value\n\n @property\n def submitted_at(self):\n \"\"\"\n :type: datetime.datetime\n \"\"\"\n self._completeIfNotSet(self._submitted_at)\n return self._submitted_at.value\n\n def dismiss(self, message):\n \"\"\"\n :calls: `PUT /repos/:owner/:repo/pulls/:number/reviews/:review_id/dismissals <https://developer.github.com/v3/pulls/reviews/>`_\n :rtype: None\n \"\"\"\n assert isinstance(message, (str, unicode)), message\n post_parameters = {'message': message}\n headers, data = self._requester.requestJsonAndCheck(\n \"PUT\",\n self.pull_request_url + \"/reviews/%s/dismissals\" % self.id,\n input=post_parameters\n )\n\n def _initAttributes(self):\n self._id = github.GithubObject.NotSet\n self._user = github.GithubObject.NotSet\n self._body = github.GithubObject.NotSet\n self._commit_id = github.GithubObject.NotSet\n self._state = github.GithubObject.NotSet\n self._url = github.GithubObject.NotSet\n self._html_url = github.GithubObject.NotSet\n self._pull_request_url = github.GithubObject.NotSet\n self._submitted_at = github.GithubObject.NotSet\n\n def _useAttributes(self, attributes):\n if \"id\" in attributes: # pragma no branch\n self._id = self._makeIntAttribute(attributes[\"id\"])\n if \"user\" in attributes: # pragma no branch\n 
self._user = self._makeClassAttribute(github.NamedUser.NamedUser, attributes[\"user\"])\n if \"body\" in attributes: # pragma no branch\n self._body = self._makeStringAttribute(attributes[\"body\"])\n if \"commit_id\" in attributes: # pragma no branch\n self._commit_id = self._makeStringAttribute(attributes[\"commit_id\"])\n if \"state\" in attributes: # pragma no branch\n self._state = self._makeStringAttribute(attributes[\"state\"])\n if \"url\" in attributes: # pragma no branch\n self._url = self._makeStringAttribute(attributes[\"url\"])\n if \"html_url\" in attributes: # pragma no branch\n self._html_url = self._makeStringAttribute(attributes[\"html_url\"])\n if \"pull_request_url\" in attributes: # pragma no branch\n self._pull_request_url = self._makeStringAttribute(attributes[\"pull_request_url\"])\n if \"submitted_at\" in attributes: # pragma no branch\n self._submitted_at = self._makeDatetimeAttribute(attributes[\"submitted_at\"])\n", "path": "github/PullRequestReview.py"}]}
1801
235
gh_patches_debug_19909
rasdani/github-patches
git_diff
pyg-team__pytorch_geometric-5441
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- The the feature dim of data.x is zero in Proteins dataset with the pyg version after 2.0.5 ### 🐛 Describe the bug The main reason is in line 136 of tu_dataset.py it is strange that the value of num_edge_attributes is larger than the feature dimension of self.data.x in proteins, which leads to the resulting dimension of self.data.x is num_nodes*0 ### Environment * PyG version: * PyTorch version: * OS: * Python version: * CUDA/cuDNN version: * How you installed PyTorch and PyG (`conda`, `pip`, source): * Any other relevant information (*e.g.*, version of `torch-scatter`): --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `torch_geometric/io/tu.py` Content: ``` 1 import glob 2 import os 3 import os.path as osp 4 5 import numpy as np 6 import torch 7 import torch.nn.functional as F 8 from torch_sparse import coalesce 9 10 from torch_geometric.data import Data 11 from torch_geometric.io import read_txt_array 12 from torch_geometric.utils import remove_self_loops 13 14 names = [ 15 'A', 'graph_indicator', 'node_labels', 'node_attributes' 16 'edge_labels', 'edge_attributes', 'graph_labels', 'graph_attributes' 17 ] 18 19 20 def read_tu_data(folder, prefix): 21 files = glob.glob(osp.join(folder, f'{prefix}_*.txt')) 22 names = [f.split(os.sep)[-1][len(prefix) + 1:-4] for f in files] 23 24 edge_index = read_file(folder, prefix, 'A', torch.long).t() - 1 25 batch = read_file(folder, prefix, 'graph_indicator', torch.long) - 1 26 27 node_attributes = torch.empty((batch.size(0), 0)) 28 if 'node_attributes' in names: 29 node_attributes = read_file(folder, prefix, 'node_attributes') 30 31 node_labels = torch.empty((batch.size(0), 0)) 32 if 'node_labels' in names: 33 node_labels = read_file(folder, prefix, 'node_labels', torch.long) 34 if node_labels.dim() == 1: 35 node_labels = node_labels.unsqueeze(-1) 36 node_labels = node_labels - node_labels.min(dim=0)[0] 37 node_labels = node_labels.unbind(dim=-1) 38 node_labels = [F.one_hot(x, num_classes=-1) for x in node_labels] 39 node_labels = torch.cat(node_labels, dim=-1).to(torch.float) 40 41 edge_attributes = torch.empty((edge_index.size(1), 0)) 42 if 'edge_attributes' in names: 43 edge_attributes = read_file(folder, prefix, 'edge_attributes') 44 45 edge_labels = torch.empty((edge_index.size(1), 0)) 46 if 'edge_labels' in names: 47 edge_labels = read_file(folder, prefix, 'edge_labels', torch.long) 48 if edge_labels.dim() == 1: 49 edge_labels = edge_labels.unsqueeze(-1) 50 edge_labels = edge_labels - edge_labels.min(dim=0)[0] 51 edge_labels = edge_labels.unbind(dim=-1) 52 edge_labels = [F.one_hot(e, num_classes=-1) for e in edge_labels] 53 edge_labels = torch.cat(edge_labels, dim=-1).to(torch.float) 54 55 x = cat([node_attributes, node_labels]) 56 edge_attr = cat([edge_attributes, edge_labels]) 57 58 y = None 59 if 'graph_attributes' in names: # Regression problem. 60 y = read_file(folder, prefix, 'graph_attributes') 61 elif 'graph_labels' in names: # Classification problem. 
62 y = read_file(folder, prefix, 'graph_labels', torch.long) 63 _, y = y.unique(sorted=True, return_inverse=True) 64 65 num_nodes = edge_index.max().item() + 1 if x is None else x.size(0) 66 edge_index, edge_attr = remove_self_loops(edge_index, edge_attr) 67 edge_index, edge_attr = coalesce(edge_index, edge_attr, num_nodes, 68 num_nodes) 69 70 data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr, y=y) 71 data, slices = split(data, batch) 72 73 sizes = { 74 'num_node_attributes': node_attributes.size(-1), 75 'num_node_labels': node_labels.size(-1), 76 'num_edge_attributes': edge_attributes.size(-1), 77 'num_edge_labels': edge_labels.size(-1), 78 } 79 80 return data, slices, sizes 81 82 83 def read_file(folder, prefix, name, dtype=None): 84 path = osp.join(folder, f'{prefix}_{name}.txt') 85 return read_txt_array(path, sep=',', dtype=dtype) 86 87 88 def cat(seq): 89 seq = [item for item in seq if item is not None] 90 seq = [item for item in seq if item.numel() > 0] 91 seq = [item.unsqueeze(-1) if item.dim() == 1 else item for item in seq] 92 return torch.cat(seq, dim=-1) if len(seq) > 0 else None 93 94 95 def split(data, batch): 96 node_slice = torch.cumsum(torch.from_numpy(np.bincount(batch)), 0) 97 node_slice = torch.cat([torch.tensor([0]), node_slice]) 98 99 row, _ = data.edge_index 100 edge_slice = torch.cumsum(torch.from_numpy(np.bincount(batch[row])), 0) 101 edge_slice = torch.cat([torch.tensor([0]), edge_slice]) 102 103 # Edge indices should start at zero for every graph. 104 data.edge_index -= node_slice[batch[row]].unsqueeze(0) 105 106 slices = {'edge_index': edge_slice} 107 if data.x is not None: 108 slices['x'] = node_slice 109 else: 110 # Imitate `collate` functionality: 111 data._num_nodes = torch.bincount(batch).tolist() 112 data.num_nodes = batch.numel() 113 if data.edge_attr is not None: 114 slices['edge_attr'] = edge_slice 115 if data.y is not None: 116 if data.y.size(0) == batch.size(0): 117 slices['y'] = node_slice 118 else: 119 slices['y'] = torch.arange(0, batch[-1] + 2, dtype=torch.long) 120 121 return data, slices 122 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/torch_geometric/io/tu.py b/torch_geometric/io/tu.py --- a/torch_geometric/io/tu.py +++ b/torch_geometric/io/tu.py @@ -27,6 +27,8 @@ node_attributes = torch.empty((batch.size(0), 0)) if 'node_attributes' in names: node_attributes = read_file(folder, prefix, 'node_attributes') + if node_attributes.dim() == 1: + node_attributes = node_attributes.unsqueeze(-1) node_labels = torch.empty((batch.size(0), 0)) if 'node_labels' in names: @@ -41,6 +43,8 @@ edge_attributes = torch.empty((edge_index.size(1), 0)) if 'edge_attributes' in names: edge_attributes = read_file(folder, prefix, 'edge_attributes') + if edge_attributes.dim() == 1: + edge_attributes = edge_attributes.unsqueeze(-1) edge_labels = torch.empty((edge_index.size(1), 0)) if 'edge_labels' in names:
{"golden_diff": "diff --git a/torch_geometric/io/tu.py b/torch_geometric/io/tu.py\n--- a/torch_geometric/io/tu.py\n+++ b/torch_geometric/io/tu.py\n@@ -27,6 +27,8 @@\n node_attributes = torch.empty((batch.size(0), 0))\n if 'node_attributes' in names:\n node_attributes = read_file(folder, prefix, 'node_attributes')\n+ if node_attributes.dim() == 1:\n+ node_attributes = node_attributes.unsqueeze(-1)\n \n node_labels = torch.empty((batch.size(0), 0))\n if 'node_labels' in names:\n@@ -41,6 +43,8 @@\n edge_attributes = torch.empty((edge_index.size(1), 0))\n if 'edge_attributes' in names:\n edge_attributes = read_file(folder, prefix, 'edge_attributes')\n+ if edge_attributes.dim() == 1:\n+ edge_attributes = edge_attributes.unsqueeze(-1)\n \n edge_labels = torch.empty((edge_index.size(1), 0))\n if 'edge_labels' in names:\n", "issue": "The the feature dim of data.x is zero in Proteins dataset with the pyg version after 2.0.5\n### \ud83d\udc1b Describe the bug\n\nThe main reason is in line 136 of tu_dataset.py\r\n\r\nit is strange that the value of num_edge_attributes is larger than the feature dimension of self.data.x in proteins, which leads to the resulting dimension of self.data.x is num_nodes*0\r\n\n\n### Environment\n\n* PyG version:\r\n* PyTorch version:\r\n* OS:\r\n* Python version:\r\n* CUDA/cuDNN version:\r\n* How you installed PyTorch and PyG (`conda`, `pip`, source):\r\n* Any other relevant information (*e.g.*, version of `torch-scatter`):\r\n\n", "before_files": [{"content": "import glob\nimport os\nimport os.path as osp\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch_sparse import coalesce\n\nfrom torch_geometric.data import Data\nfrom torch_geometric.io import read_txt_array\nfrom torch_geometric.utils import remove_self_loops\n\nnames = [\n 'A', 'graph_indicator', 'node_labels', 'node_attributes'\n 'edge_labels', 'edge_attributes', 'graph_labels', 'graph_attributes'\n]\n\n\ndef read_tu_data(folder, prefix):\n files = glob.glob(osp.join(folder, f'{prefix}_*.txt'))\n names = [f.split(os.sep)[-1][len(prefix) + 1:-4] for f in files]\n\n edge_index = read_file(folder, prefix, 'A', torch.long).t() - 1\n batch = read_file(folder, prefix, 'graph_indicator', torch.long) - 1\n\n node_attributes = torch.empty((batch.size(0), 0))\n if 'node_attributes' in names:\n node_attributes = read_file(folder, prefix, 'node_attributes')\n\n node_labels = torch.empty((batch.size(0), 0))\n if 'node_labels' in names:\n node_labels = read_file(folder, prefix, 'node_labels', torch.long)\n if node_labels.dim() == 1:\n node_labels = node_labels.unsqueeze(-1)\n node_labels = node_labels - node_labels.min(dim=0)[0]\n node_labels = node_labels.unbind(dim=-1)\n node_labels = [F.one_hot(x, num_classes=-1) for x in node_labels]\n node_labels = torch.cat(node_labels, dim=-1).to(torch.float)\n\n edge_attributes = torch.empty((edge_index.size(1), 0))\n if 'edge_attributes' in names:\n edge_attributes = read_file(folder, prefix, 'edge_attributes')\n\n edge_labels = torch.empty((edge_index.size(1), 0))\n if 'edge_labels' in names:\n edge_labels = read_file(folder, prefix, 'edge_labels', torch.long)\n if edge_labels.dim() == 1:\n edge_labels = edge_labels.unsqueeze(-1)\n edge_labels = edge_labels - edge_labels.min(dim=0)[0]\n edge_labels = edge_labels.unbind(dim=-1)\n edge_labels = [F.one_hot(e, num_classes=-1) for e in edge_labels]\n edge_labels = torch.cat(edge_labels, dim=-1).to(torch.float)\n\n x = cat([node_attributes, node_labels])\n edge_attr = cat([edge_attributes, edge_labels])\n\n y 
= None\n if 'graph_attributes' in names: # Regression problem.\n y = read_file(folder, prefix, 'graph_attributes')\n elif 'graph_labels' in names: # Classification problem.\n y = read_file(folder, prefix, 'graph_labels', torch.long)\n _, y = y.unique(sorted=True, return_inverse=True)\n\n num_nodes = edge_index.max().item() + 1 if x is None else x.size(0)\n edge_index, edge_attr = remove_self_loops(edge_index, edge_attr)\n edge_index, edge_attr = coalesce(edge_index, edge_attr, num_nodes,\n num_nodes)\n\n data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr, y=y)\n data, slices = split(data, batch)\n\n sizes = {\n 'num_node_attributes': node_attributes.size(-1),\n 'num_node_labels': node_labels.size(-1),\n 'num_edge_attributes': edge_attributes.size(-1),\n 'num_edge_labels': edge_labels.size(-1),\n }\n\n return data, slices, sizes\n\n\ndef read_file(folder, prefix, name, dtype=None):\n path = osp.join(folder, f'{prefix}_{name}.txt')\n return read_txt_array(path, sep=',', dtype=dtype)\n\n\ndef cat(seq):\n seq = [item for item in seq if item is not None]\n seq = [item for item in seq if item.numel() > 0]\n seq = [item.unsqueeze(-1) if item.dim() == 1 else item for item in seq]\n return torch.cat(seq, dim=-1) if len(seq) > 0 else None\n\n\ndef split(data, batch):\n node_slice = torch.cumsum(torch.from_numpy(np.bincount(batch)), 0)\n node_slice = torch.cat([torch.tensor([0]), node_slice])\n\n row, _ = data.edge_index\n edge_slice = torch.cumsum(torch.from_numpy(np.bincount(batch[row])), 0)\n edge_slice = torch.cat([torch.tensor([0]), edge_slice])\n\n # Edge indices should start at zero for every graph.\n data.edge_index -= node_slice[batch[row]].unsqueeze(0)\n\n slices = {'edge_index': edge_slice}\n if data.x is not None:\n slices['x'] = node_slice\n else:\n # Imitate `collate` functionality:\n data._num_nodes = torch.bincount(batch).tolist()\n data.num_nodes = batch.numel()\n if data.edge_attr is not None:\n slices['edge_attr'] = edge_slice\n if data.y is not None:\n if data.y.size(0) == batch.size(0):\n slices['y'] = node_slice\n else:\n slices['y'] = torch.arange(0, batch[-1] + 2, dtype=torch.long)\n\n return data, slices\n", "path": "torch_geometric/io/tu.py"}], "after_files": [{"content": "import glob\nimport os\nimport os.path as osp\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch_sparse import coalesce\n\nfrom torch_geometric.data import Data\nfrom torch_geometric.io import read_txt_array\nfrom torch_geometric.utils import remove_self_loops\n\nnames = [\n 'A', 'graph_indicator', 'node_labels', 'node_attributes'\n 'edge_labels', 'edge_attributes', 'graph_labels', 'graph_attributes'\n]\n\n\ndef read_tu_data(folder, prefix):\n files = glob.glob(osp.join(folder, f'{prefix}_*.txt'))\n names = [f.split(os.sep)[-1][len(prefix) + 1:-4] for f in files]\n\n edge_index = read_file(folder, prefix, 'A', torch.long).t() - 1\n batch = read_file(folder, prefix, 'graph_indicator', torch.long) - 1\n\n node_attributes = torch.empty((batch.size(0), 0))\n if 'node_attributes' in names:\n node_attributes = read_file(folder, prefix, 'node_attributes')\n if node_attributes.dim() == 1:\n node_attributes = node_attributes.unsqueeze(-1)\n\n node_labels = torch.empty((batch.size(0), 0))\n if 'node_labels' in names:\n node_labels = read_file(folder, prefix, 'node_labels', torch.long)\n if node_labels.dim() == 1:\n node_labels = node_labels.unsqueeze(-1)\n node_labels = node_labels - node_labels.min(dim=0)[0]\n node_labels = node_labels.unbind(dim=-1)\n node_labels = 
[F.one_hot(x, num_classes=-1) for x in node_labels]\n node_labels = torch.cat(node_labels, dim=-1).to(torch.float)\n\n edge_attributes = torch.empty((edge_index.size(1), 0))\n if 'edge_attributes' in names:\n edge_attributes = read_file(folder, prefix, 'edge_attributes')\n if edge_attributes.dim() == 1:\n edge_attributes = edge_attributes.unsqueeze(-1)\n\n edge_labels = torch.empty((edge_index.size(1), 0))\n if 'edge_labels' in names:\n edge_labels = read_file(folder, prefix, 'edge_labels', torch.long)\n if edge_labels.dim() == 1:\n edge_labels = edge_labels.unsqueeze(-1)\n edge_labels = edge_labels - edge_labels.min(dim=0)[0]\n edge_labels = edge_labels.unbind(dim=-1)\n edge_labels = [F.one_hot(e, num_classes=-1) for e in edge_labels]\n edge_labels = torch.cat(edge_labels, dim=-1).to(torch.float)\n\n x = cat([node_attributes, node_labels])\n edge_attr = cat([edge_attributes, edge_labels])\n\n y = None\n if 'graph_attributes' in names: # Regression problem.\n y = read_file(folder, prefix, 'graph_attributes')\n elif 'graph_labels' in names: # Classification problem.\n y = read_file(folder, prefix, 'graph_labels', torch.long)\n _, y = y.unique(sorted=True, return_inverse=True)\n\n num_nodes = edge_index.max().item() + 1 if x is None else x.size(0)\n edge_index, edge_attr = remove_self_loops(edge_index, edge_attr)\n edge_index, edge_attr = coalesce(edge_index, edge_attr, num_nodes,\n num_nodes)\n\n data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr, y=y)\n data, slices = split(data, batch)\n\n sizes = {\n 'num_node_attributes': node_attributes.size(-1),\n 'num_node_labels': node_labels.size(-1),\n 'num_edge_attributes': edge_attributes.size(-1),\n 'num_edge_labels': edge_labels.size(-1),\n }\n\n return data, slices, sizes\n\n\ndef read_file(folder, prefix, name, dtype=None):\n path = osp.join(folder, f'{prefix}_{name}.txt')\n return read_txt_array(path, sep=',', dtype=dtype)\n\n\ndef cat(seq):\n seq = [item for item in seq if item is not None]\n seq = [item for item in seq if item.numel() > 0]\n seq = [item.unsqueeze(-1) if item.dim() == 1 else item for item in seq]\n return torch.cat(seq, dim=-1) if len(seq) > 0 else None\n\n\ndef split(data, batch):\n node_slice = torch.cumsum(torch.from_numpy(np.bincount(batch)), 0)\n node_slice = torch.cat([torch.tensor([0]), node_slice])\n\n row, _ = data.edge_index\n edge_slice = torch.cumsum(torch.from_numpy(np.bincount(batch[row])), 0)\n edge_slice = torch.cat([torch.tensor([0]), edge_slice])\n\n # Edge indices should start at zero for every graph.\n data.edge_index -= node_slice[batch[row]].unsqueeze(0)\n\n slices = {'edge_index': edge_slice}\n if data.x is not None:\n slices['x'] = node_slice\n else:\n # Imitate `collate` functionality:\n data._num_nodes = torch.bincount(batch).tolist()\n data.num_nodes = batch.numel()\n if data.edge_attr is not None:\n slices['edge_attr'] = edge_slice\n if data.y is not None:\n if data.y.size(0) == batch.size(0):\n slices['y'] = node_slice\n else:\n slices['y'] = torch.arange(0, batch[-1] + 2, dtype=torch.long)\n\n return data, slices\n", "path": "torch_geometric/io/tu.py"}]}
1849
240
gh_patches_debug_25486
rasdani/github-patches
git_diff
google__budoux-102
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add `lang` parameter to JavaScript CLI Consider to add the `--lang` option to the JavaScript CLI tool similar to #55 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `budoux/main.py` Content: ``` 1 #!/usr/bin/env python 2 # Copyright 2021 Google LLC 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # https://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 """BudouX Script to provide CLI for user.""" 16 import argparse 17 import glob 18 import json 19 import os 20 import shutil 21 import sys 22 import textwrap 23 import typing 24 25 import pkg_resources 26 27 import budoux 28 29 ArgList = typing.Optional[typing.List[str]] 30 31 32 class BudouxHelpFormatter(argparse.ArgumentDefaultsHelpFormatter, 33 argparse.RawDescriptionHelpFormatter): 34 pass 35 36 37 def check_file(path: str) -> str: 38 """Check if a given filepath exists or not. 39 40 Args: 41 path (str): Model path 42 43 Raises: 44 FileNotFoundError: Raise if given path does not exist. 45 46 Returns: 47 str: A model path. 48 """ 49 if os.path.isfile(path): 50 return path 51 else: 52 raise argparse.ArgumentTypeError(f"'{path}' is not found.") 53 54 55 def get_model_langs() -> typing.Dict[str, str]: 56 """Get a dictionary of model languages and its paths. 57 58 Returns: 59 typing.Dict[str, str]: A dictionary of model languages and its paths. 60 """ 61 models = glob.glob( 62 pkg_resources.resource_filename(__name__, "models") + "/*-*.json") 63 langs = {} 64 for model in models: 65 model_name = model.split(os.sep)[-1][:-5] 66 if model_name.startswith('zh-'): 67 langs[model_name] = model 68 else: 69 langs[model_name[:2]] = model 70 return langs 71 72 73 def check_lang(lang: str) -> str: 74 """Check if given language exists or not. 75 76 Args: 77 lang (str): language code (e.g.: 'ja') 78 79 Raises: 80 argparse.ArgumentTypeError: Raise if no model for given language exists. 81 82 Returns: 83 str: A model path. 84 """ 85 langs = get_model_langs() 86 if lang in langs: 87 return langs[lang] 88 else: 89 raise argparse.ArgumentTypeError( 90 f"'{lang}' does not exist in builtin models. (supported languages: {list(langs.keys())})" 91 ) 92 93 94 def parse_args(test: ArgList = None) -> argparse.Namespace: 95 """Parse commandline arguments. 96 97 Args: 98 test (typing.Optional[typing.List[str]], optional): Commandline args for testing. Defaults to None. 99 100 Returns: 101 argparse.Namespace: Parsed data of args. 
102 """ 103 parser = argparse.ArgumentParser( 104 prog="budoux", 105 formatter_class=(lambda prog: BudouxHelpFormatter( 106 prog, 107 **{ 108 "width": shutil.get_terminal_size(fallback=(120, 50)).columns, 109 "max_help_position": 30, 110 }, 111 )), 112 description=textwrap.dedent("""\ 113 BudouX is the successor to Budou, 114 the machine learning powered line break organizer tool."""), 115 epilog="\n- ".join( 116 ["supported languages of `-l`, `--lang`:", 117 *get_model_langs().keys()])) 118 119 parser.add_argument("text", metavar="TXT", nargs="?", type=str, help="text") 120 parser.add_argument( 121 "-H", 122 "--html", 123 action="store_true", 124 help="HTML mode", 125 ) 126 model_select_group = parser.add_mutually_exclusive_group() 127 model_select_group.add_argument( 128 "-m", 129 "--model", 130 metavar="JSON", 131 type=check_file, 132 default=pkg_resources.resource_filename(__name__, "models/ja-knbc.json"), 133 help="custom model file path", 134 ) 135 model_select_group.add_argument( 136 "-l", 137 "--lang", 138 metavar="LANG", 139 type=check_lang, 140 help="language of custom model", 141 ) 142 parser.add_argument( 143 "-d", 144 "--delim", 145 metavar="STR", 146 type=str, 147 default="---", 148 help="output delimiter in TEXT mode", 149 ) 150 parser.add_argument( 151 "-V", 152 "--version", 153 action="version", 154 version="%(prog)s {}".format(budoux.__version__), 155 ) 156 if test is not None: 157 return parser.parse_args(test) 158 else: 159 return parser.parse_args() 160 161 162 def _main(test: ArgList = None) -> str: 163 args = parse_args(test=test) 164 model_path = args.lang or args.model 165 with open(model_path, 'r', encoding='utf-8') as f: 166 model = json.load(f) 167 168 parser = budoux.Parser(model) 169 if args.html: 170 if args.text is None: 171 inputs_html = sys.stdin.read() 172 else: 173 inputs_html = args.text 174 res = parser.translate_html_string(inputs_html) 175 else: 176 if args.text is None: 177 inputs = [v.rstrip() for v in sys.stdin.readlines()] 178 else: 179 inputs = [v.rstrip() for v in args.text.splitlines()] 180 outputs = [parser.parse(sentence) for sentence in inputs] 181 conbined_output = ["\n".join(output) for output in outputs] 182 ors = "\n" + args.delim + "\n" 183 res = ors.join(conbined_output) 184 185 return res 186 187 188 def main(test: ArgList = None) -> None: 189 try: 190 print(_main(test)) 191 except KeyboardInterrupt: 192 exit(0) 193 194 195 if __name__ == "__main__": 196 main() 197 ``` Path: `budoux/parser.py` Content: ``` 1 # Copyright 2021 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # https://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 """BudouX parser to provide semantic chunks.""" 15 16 import json 17 import os 18 import typing 19 from html.parser import HTMLParser 20 21 from .feature_extractor import get_feature 22 from .utils import INVALID, SEP 23 24 MODEL_DIR = os.path.join(os.path.dirname(__file__), 'models') 25 PARENT_CSS_STYLE = 'word-break: keep-all; overflow-wrap: break-word;' 26 with open( 27 os.path.join(os.path.dirname(__file__), 'skip_nodes.json'), 28 encoding='utf-8') as f: 29 SKIP_NODES: typing.Set[str] = set(json.load(f)) 30 31 HTMLAttr = typing.List[typing.Tuple[str, typing.Union[str, None]]] 32 33 34 class TextContentExtractor(HTMLParser): 35 """An HTML parser to extract text content. 36 37 Attributes: 38 output (str): Accumulated text content. 39 """ 40 output = '' 41 42 def handle_data(self, data: str) -> None: 43 self.output += data 44 45 46 class HTMLChunkResolver(HTMLParser): 47 """An HTML parser to resolve the given HTML string and semantic chunks. 48 49 Attributes: 50 output (str): The HTML string to output. 51 chunks (str): Chunks concatenated with the separator string. 52 """ 53 output = '' 54 55 def __init__(self, chunks: typing.List[str]): 56 """Initializes the parser. 57 58 Args: 59 chunks (List[str]): The chunks to resolve. 60 """ 61 HTMLParser.__init__(self) 62 self.chunks_joined = SEP.join(chunks) 63 self.to_skip = False 64 65 def handle_starttag(self, tag: str, attrs: HTMLAttr) -> None: 66 attr_pairs = [] 67 for attr in attrs: 68 if attr[1] is None: 69 attr_pairs.append(attr[0]) 70 else: 71 attr_pairs.append('%s="%s"' % (attr[0], attr[1])) 72 encoded_attrs = ' '.join(attr_pairs) 73 self.output += '<%s %s>' % (tag, encoded_attrs) 74 self.to_skip = tag.upper() in SKIP_NODES 75 76 def handle_endtag(self, tag: str) -> None: 77 self.output += '</%s>' % (tag) 78 self.to_skip = False 79 80 def handle_data(self, data: str) -> None: 81 if self.to_skip: 82 self.output += data 83 if self.chunks_joined[0] == SEP: 84 self.chunks_joined = self.chunks_joined[1 + len(data):] 85 else: 86 self.chunks_joined = self.chunks_joined[len(data):] 87 return None 88 for char in data: 89 if char == self.chunks_joined[0]: 90 self.chunks_joined = self.chunks_joined[1:] 91 self.output += char 92 else: 93 self.chunks_joined = self.chunks_joined[2:] 94 self.output += '<wbr>' + char 95 96 97 class Parser: 98 """BudouX's Parser. 99 100 The main parser object with a variety of class methods to provide semantic 101 chunks and markups from the given input string. 102 103 Attributes: 104 model: A dict mapping a feature (str) and its score (int). 105 """ 106 107 def __init__(self, model: typing.Dict[str, int]): 108 """Initializes the parser. 109 110 Args: 111 model (Dict[str, int]): A dict mapping a feature and its score. 112 """ 113 self.model = model 114 115 def parse(self, sentence: str) -> typing.List[str]: 116 """Parses the input sentence and returns a list of semantic chunks. 117 118 Args: 119 sentence (str): An input sentence. 120 121 Returns: 122 A list of semantic chunks (List[str]). 
123 """ 124 if sentence == '': 125 return [] 126 chunks = [sentence[0]] 127 base_score = -sum(self.model.values()) 128 for i in range(1, len(sentence)): 129 feature = get_feature( 130 sentence[i - 3] if i > 2 else INVALID, 131 sentence[i - 2] if i > 1 else INVALID, sentence[i - 1], sentence[i], 132 sentence[i + 1] if i + 1 < len(sentence) else INVALID, 133 sentence[i + 2] if i + 2 < len(sentence) else INVALID) 134 score = base_score + 2 * sum( 135 self.model[f] for f in feature if f in self.model) 136 if score > 0: 137 chunks.append(sentence[i]) 138 else: 139 chunks[-1] += sentence[i] 140 return chunks 141 142 def translate_html_string(self, html: str) -> str: 143 """Translates the given HTML string with markups for semantic line breaks. 144 145 Args: 146 html (str): An input html string. 147 148 Returns: 149 The translated HTML string (str). 150 """ 151 # TODO: Align with the JavaScript API regarding the parent element addition. 152 text_content_extractor = TextContentExtractor() 153 text_content_extractor.feed(html) 154 text_content = text_content_extractor.output 155 chunks = self.parse(text_content) 156 resolver = HTMLChunkResolver(chunks) 157 resolver.feed(html) 158 return '<span style="%s">%s</span>' % (PARENT_CSS_STYLE, resolver.output) 159 160 161 def load_default_japanese_parser() -> Parser: 162 """Loads a parser equipped with the default Japanese model. 163 164 Returns: 165 A parser (:obj:`budoux.Parser`). 166 """ 167 with open(os.path.join(MODEL_DIR, 'ja-knbc.json'), encoding='utf-8') as f: 168 model = json.load(f) 169 return Parser(model) 170 171 172 def load_default_simplified_chinese_parser() -> Parser: 173 """Loads a parser equipped with the default Simplified Chinese model. 174 175 Returns: 176 A parser (:obj:`budoux.Parser`). 177 """ 178 with open(os.path.join(MODEL_DIR, 'zh-hans.json'), encoding='utf-8') as f: 179 model = json.load(f) 180 return Parser(model) 181 182 183 def load_default_traditional_chinese_parser() -> Parser: 184 """Loads a parser equipped with the default Traditional Chinese model. 185 186 Returns: 187 A parser (:obj:`budoux.Parser`). 188 """ 189 with open(os.path.join(MODEL_DIR, 'zh-hant.json'), encoding='utf-8') as f: 190 model = json.load(f) 191 return Parser(model) 192 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/budoux/main.py b/budoux/main.py --- a/budoux/main.py +++ b/budoux/main.py @@ -59,14 +59,11 @@ typing.Dict[str, str]: A dictionary of model languages and its paths. """ models = glob.glob( - pkg_resources.resource_filename(__name__, "models") + "/*-*.json") + pkg_resources.resource_filename(__name__, "models") + "/*.json") langs = {} for model in models: - model_name = model.split(os.sep)[-1][:-5] - if model_name.startswith('zh-'): - langs[model_name] = model - else: - langs[model_name[:2]] = model + lang = model.split(os.sep)[-1][:-5] + langs[lang] = model return langs @@ -129,7 +126,7 @@ "--model", metavar="JSON", type=check_file, - default=pkg_resources.resource_filename(__name__, "models/ja-knbc.json"), + default=pkg_resources.resource_filename(__name__, "models/ja.json"), help="custom model file path", ) model_select_group.add_argument( diff --git a/budoux/parser.py b/budoux/parser.py --- a/budoux/parser.py +++ b/budoux/parser.py @@ -164,7 +164,7 @@ Returns: A parser (:obj:`budoux.Parser`). """ - with open(os.path.join(MODEL_DIR, 'ja-knbc.json'), encoding='utf-8') as f: + with open(os.path.join(MODEL_DIR, 'ja.json'), encoding='utf-8') as f: model = json.load(f) return Parser(model)
{"golden_diff": "diff --git a/budoux/main.py b/budoux/main.py\n--- a/budoux/main.py\n+++ b/budoux/main.py\n@@ -59,14 +59,11 @@\n typing.Dict[str, str]: A dictionary of model languages and its paths.\n \"\"\"\n models = glob.glob(\n- pkg_resources.resource_filename(__name__, \"models\") + \"/*-*.json\")\n+ pkg_resources.resource_filename(__name__, \"models\") + \"/*.json\")\n langs = {}\n for model in models:\n- model_name = model.split(os.sep)[-1][:-5]\n- if model_name.startswith('zh-'):\n- langs[model_name] = model\n- else:\n- langs[model_name[:2]] = model\n+ lang = model.split(os.sep)[-1][:-5]\n+ langs[lang] = model\n return langs\n \n \n@@ -129,7 +126,7 @@\n \"--model\",\n metavar=\"JSON\",\n type=check_file,\n- default=pkg_resources.resource_filename(__name__, \"models/ja-knbc.json\"),\n+ default=pkg_resources.resource_filename(__name__, \"models/ja.json\"),\n help=\"custom model file path\",\n )\n model_select_group.add_argument(\ndiff --git a/budoux/parser.py b/budoux/parser.py\n--- a/budoux/parser.py\n+++ b/budoux/parser.py\n@@ -164,7 +164,7 @@\n Returns:\n A parser (:obj:`budoux.Parser`).\n \"\"\"\n- with open(os.path.join(MODEL_DIR, 'ja-knbc.json'), encoding='utf-8') as f:\n+ with open(os.path.join(MODEL_DIR, 'ja.json'), encoding='utf-8') as f:\n model = json.load(f)\n return Parser(model)\n", "issue": "Add `lang` parameter to JavaScript CLI\nConsider to add the `--lang` option to the JavaScript CLI tool similar to #55 \n", "before_files": [{"content": "#!/usr/bin/env python\n# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"BudouX Script to provide CLI for user.\"\"\"\nimport argparse\nimport glob\nimport json\nimport os\nimport shutil\nimport sys\nimport textwrap\nimport typing\n\nimport pkg_resources\n\nimport budoux\n\nArgList = typing.Optional[typing.List[str]]\n\n\nclass BudouxHelpFormatter(argparse.ArgumentDefaultsHelpFormatter,\n argparse.RawDescriptionHelpFormatter):\n pass\n\n\ndef check_file(path: str) -> str:\n \"\"\"Check if a given filepath exists or not.\n\n Args:\n path (str): Model path\n\n Raises:\n FileNotFoundError: Raise if given path does not exist.\n\n Returns:\n str: A model path.\n \"\"\"\n if os.path.isfile(path):\n return path\n else:\n raise argparse.ArgumentTypeError(f\"'{path}' is not found.\")\n\n\ndef get_model_langs() -> typing.Dict[str, str]:\n \"\"\"Get a dictionary of model languages and its paths.\n\n Returns:\n typing.Dict[str, str]: A dictionary of model languages and its paths.\n \"\"\"\n models = glob.glob(\n pkg_resources.resource_filename(__name__, \"models\") + \"/*-*.json\")\n langs = {}\n for model in models:\n model_name = model.split(os.sep)[-1][:-5]\n if model_name.startswith('zh-'):\n langs[model_name] = model\n else:\n langs[model_name[:2]] = model\n return langs\n\n\ndef check_lang(lang: str) -> str:\n \"\"\"Check if given language exists or not.\n\n Args:\n lang (str): language code (e.g.: 'ja')\n\n Raises:\n argparse.ArgumentTypeError: Raise if no model for given language exists.\n\n Returns:\n 
str: A model path.\n \"\"\"\n langs = get_model_langs()\n if lang in langs:\n return langs[lang]\n else:\n raise argparse.ArgumentTypeError(\n f\"'{lang}' does not exist in builtin models. (supported languages: {list(langs.keys())})\"\n )\n\n\ndef parse_args(test: ArgList = None) -> argparse.Namespace:\n \"\"\"Parse commandline arguments.\n\n Args:\n test (typing.Optional[typing.List[str]], optional): Commandline args for testing. Defaults to None.\n\n Returns:\n argparse.Namespace: Parsed data of args.\n \"\"\"\n parser = argparse.ArgumentParser(\n prog=\"budoux\",\n formatter_class=(lambda prog: BudouxHelpFormatter(\n prog,\n **{\n \"width\": shutil.get_terminal_size(fallback=(120, 50)).columns,\n \"max_help_position\": 30,\n },\n )),\n description=textwrap.dedent(\"\"\"\\\n BudouX is the successor to Budou,\n the machine learning powered line break organizer tool.\"\"\"),\n epilog=\"\\n- \".join(\n [\"supported languages of `-l`, `--lang`:\",\n *get_model_langs().keys()]))\n\n parser.add_argument(\"text\", metavar=\"TXT\", nargs=\"?\", type=str, help=\"text\")\n parser.add_argument(\n \"-H\",\n \"--html\",\n action=\"store_true\",\n help=\"HTML mode\",\n )\n model_select_group = parser.add_mutually_exclusive_group()\n model_select_group.add_argument(\n \"-m\",\n \"--model\",\n metavar=\"JSON\",\n type=check_file,\n default=pkg_resources.resource_filename(__name__, \"models/ja-knbc.json\"),\n help=\"custom model file path\",\n )\n model_select_group.add_argument(\n \"-l\",\n \"--lang\",\n metavar=\"LANG\",\n type=check_lang,\n help=\"language of custom model\",\n )\n parser.add_argument(\n \"-d\",\n \"--delim\",\n metavar=\"STR\",\n type=str,\n default=\"---\",\n help=\"output delimiter in TEXT mode\",\n )\n parser.add_argument(\n \"-V\",\n \"--version\",\n action=\"version\",\n version=\"%(prog)s {}\".format(budoux.__version__),\n )\n if test is not None:\n return parser.parse_args(test)\n else:\n return parser.parse_args()\n\n\ndef _main(test: ArgList = None) -> str:\n args = parse_args(test=test)\n model_path = args.lang or args.model\n with open(model_path, 'r', encoding='utf-8') as f:\n model = json.load(f)\n\n parser = budoux.Parser(model)\n if args.html:\n if args.text is None:\n inputs_html = sys.stdin.read()\n else:\n inputs_html = args.text\n res = parser.translate_html_string(inputs_html)\n else:\n if args.text is None:\n inputs = [v.rstrip() for v in sys.stdin.readlines()]\n else:\n inputs = [v.rstrip() for v in args.text.splitlines()]\n outputs = [parser.parse(sentence) for sentence in inputs]\n conbined_output = [\"\\n\".join(output) for output in outputs]\n ors = \"\\n\" + args.delim + \"\\n\"\n res = ors.join(conbined_output)\n\n return res\n\n\ndef main(test: ArgList = None) -> None:\n try:\n print(_main(test))\n except KeyboardInterrupt:\n exit(0)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "budoux/main.py"}, {"content": "# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"BudouX parser to provide semantic 
chunks.\"\"\"\n\nimport json\nimport os\nimport typing\nfrom html.parser import HTMLParser\n\nfrom .feature_extractor import get_feature\nfrom .utils import INVALID, SEP\n\nMODEL_DIR = os.path.join(os.path.dirname(__file__), 'models')\nPARENT_CSS_STYLE = 'word-break: keep-all; overflow-wrap: break-word;'\nwith open(\n os.path.join(os.path.dirname(__file__), 'skip_nodes.json'),\n encoding='utf-8') as f:\n SKIP_NODES: typing.Set[str] = set(json.load(f))\n\nHTMLAttr = typing.List[typing.Tuple[str, typing.Union[str, None]]]\n\n\nclass TextContentExtractor(HTMLParser):\n \"\"\"An HTML parser to extract text content.\n\n Attributes:\n output (str): Accumulated text content.\n \"\"\"\n output = ''\n\n def handle_data(self, data: str) -> None:\n self.output += data\n\n\nclass HTMLChunkResolver(HTMLParser):\n \"\"\"An HTML parser to resolve the given HTML string and semantic chunks.\n\n Attributes:\n output (str): The HTML string to output.\n chunks (str): Chunks concatenated with the separator string.\n \"\"\"\n output = ''\n\n def __init__(self, chunks: typing.List[str]):\n \"\"\"Initializes the parser.\n\n Args:\n chunks (List[str]): The chunks to resolve.\n \"\"\"\n HTMLParser.__init__(self)\n self.chunks_joined = SEP.join(chunks)\n self.to_skip = False\n\n def handle_starttag(self, tag: str, attrs: HTMLAttr) -> None:\n attr_pairs = []\n for attr in attrs:\n if attr[1] is None:\n attr_pairs.append(attr[0])\n else:\n attr_pairs.append('%s=\"%s\"' % (attr[0], attr[1]))\n encoded_attrs = ' '.join(attr_pairs)\n self.output += '<%s %s>' % (tag, encoded_attrs)\n self.to_skip = tag.upper() in SKIP_NODES\n\n def handle_endtag(self, tag: str) -> None:\n self.output += '</%s>' % (tag)\n self.to_skip = False\n\n def handle_data(self, data: str) -> None:\n if self.to_skip:\n self.output += data\n if self.chunks_joined[0] == SEP:\n self.chunks_joined = self.chunks_joined[1 + len(data):]\n else:\n self.chunks_joined = self.chunks_joined[len(data):]\n return None\n for char in data:\n if char == self.chunks_joined[0]:\n self.chunks_joined = self.chunks_joined[1:]\n self.output += char\n else:\n self.chunks_joined = self.chunks_joined[2:]\n self.output += '<wbr>' + char\n\n\nclass Parser:\n \"\"\"BudouX's Parser.\n\n The main parser object with a variety of class methods to provide semantic\n chunks and markups from the given input string.\n\n Attributes:\n model: A dict mapping a feature (str) and its score (int).\n \"\"\"\n\n def __init__(self, model: typing.Dict[str, int]):\n \"\"\"Initializes the parser.\n\n Args:\n model (Dict[str, int]): A dict mapping a feature and its score.\n \"\"\"\n self.model = model\n\n def parse(self, sentence: str) -> typing.List[str]:\n \"\"\"Parses the input sentence and returns a list of semantic chunks.\n\n Args:\n sentence (str): An input sentence.\n\n Returns:\n A list of semantic chunks (List[str]).\n \"\"\"\n if sentence == '':\n return []\n chunks = [sentence[0]]\n base_score = -sum(self.model.values())\n for i in range(1, len(sentence)):\n feature = get_feature(\n sentence[i - 3] if i > 2 else INVALID,\n sentence[i - 2] if i > 1 else INVALID, sentence[i - 1], sentence[i],\n sentence[i + 1] if i + 1 < len(sentence) else INVALID,\n sentence[i + 2] if i + 2 < len(sentence) else INVALID)\n score = base_score + 2 * sum(\n self.model[f] for f in feature if f in self.model)\n if score > 0:\n chunks.append(sentence[i])\n else:\n chunks[-1] += sentence[i]\n return chunks\n\n def translate_html_string(self, html: str) -> str:\n \"\"\"Translates the given HTML string with markups 
for semantic line breaks.\n\n Args:\n html (str): An input html string.\n\n Returns:\n The translated HTML string (str).\n \"\"\"\n # TODO: Align with the JavaScript API regarding the parent element addition.\n text_content_extractor = TextContentExtractor()\n text_content_extractor.feed(html)\n text_content = text_content_extractor.output\n chunks = self.parse(text_content)\n resolver = HTMLChunkResolver(chunks)\n resolver.feed(html)\n return '<span style=\"%s\">%s</span>' % (PARENT_CSS_STYLE, resolver.output)\n\n\ndef load_default_japanese_parser() -> Parser:\n \"\"\"Loads a parser equipped with the default Japanese model.\n\n Returns:\n A parser (:obj:`budoux.Parser`).\n \"\"\"\n with open(os.path.join(MODEL_DIR, 'ja-knbc.json'), encoding='utf-8') as f:\n model = json.load(f)\n return Parser(model)\n\n\ndef load_default_simplified_chinese_parser() -> Parser:\n \"\"\"Loads a parser equipped with the default Simplified Chinese model.\n\n Returns:\n A parser (:obj:`budoux.Parser`).\n \"\"\"\n with open(os.path.join(MODEL_DIR, 'zh-hans.json'), encoding='utf-8') as f:\n model = json.load(f)\n return Parser(model)\n\n\ndef load_default_traditional_chinese_parser() -> Parser:\n \"\"\"Loads a parser equipped with the default Traditional Chinese model.\n\n Returns:\n A parser (:obj:`budoux.Parser`).\n \"\"\"\n with open(os.path.join(MODEL_DIR, 'zh-hant.json'), encoding='utf-8') as f:\n model = json.load(f)\n return Parser(model)\n", "path": "budoux/parser.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"BudouX Script to provide CLI for user.\"\"\"\nimport argparse\nimport glob\nimport json\nimport os\nimport shutil\nimport sys\nimport textwrap\nimport typing\n\nimport pkg_resources\n\nimport budoux\n\nArgList = typing.Optional[typing.List[str]]\n\n\nclass BudouxHelpFormatter(argparse.ArgumentDefaultsHelpFormatter,\n argparse.RawDescriptionHelpFormatter):\n pass\n\n\ndef check_file(path: str) -> str:\n \"\"\"Check if a given filepath exists or not.\n\n Args:\n path (str): Model path\n\n Raises:\n FileNotFoundError: Raise if given path does not exist.\n\n Returns:\n str: A model path.\n \"\"\"\n if os.path.isfile(path):\n return path\n else:\n raise argparse.ArgumentTypeError(f\"'{path}' is not found.\")\n\n\ndef get_model_langs() -> typing.Dict[str, str]:\n \"\"\"Get a dictionary of model languages and its paths.\n\n Returns:\n typing.Dict[str, str]: A dictionary of model languages and its paths.\n \"\"\"\n models = glob.glob(\n pkg_resources.resource_filename(__name__, \"models\") + \"/*.json\")\n langs = {}\n for model in models:\n lang = model.split(os.sep)[-1][:-5]\n langs[lang] = model\n return langs\n\n\ndef check_lang(lang: str) -> str:\n \"\"\"Check if given language exists or not.\n\n Args:\n lang (str): language code (e.g.: 'ja')\n\n Raises:\n argparse.ArgumentTypeError: Raise if no model for given language exists.\n\n Returns:\n str: A model path.\n \"\"\"\n langs = get_model_langs()\n if lang 
in langs:\n return langs[lang]\n else:\n raise argparse.ArgumentTypeError(\n f\"'{lang}' does not exist in builtin models. (supported languages: {list(langs.keys())})\"\n )\n\n\ndef parse_args(test: ArgList = None) -> argparse.Namespace:\n \"\"\"Parse commandline arguments.\n\n Args:\n test (typing.Optional[typing.List[str]], optional): Commandline args for testing. Defaults to None.\n\n Returns:\n argparse.Namespace: Parsed data of args.\n \"\"\"\n parser = argparse.ArgumentParser(\n prog=\"budoux\",\n formatter_class=(lambda prog: BudouxHelpFormatter(\n prog,\n **{\n \"width\": shutil.get_terminal_size(fallback=(120, 50)).columns,\n \"max_help_position\": 30,\n },\n )),\n description=textwrap.dedent(\"\"\"\\\n BudouX is the successor to Budou,\n the machine learning powered line break organizer tool.\"\"\"),\n epilog=\"\\n- \".join(\n [\"supported languages of `-l`, `--lang`:\",\n *get_model_langs().keys()]))\n\n parser.add_argument(\"text\", metavar=\"TXT\", nargs=\"?\", type=str, help=\"text\")\n parser.add_argument(\n \"-H\",\n \"--html\",\n action=\"store_true\",\n help=\"HTML mode\",\n )\n model_select_group = parser.add_mutually_exclusive_group()\n model_select_group.add_argument(\n \"-m\",\n \"--model\",\n metavar=\"JSON\",\n type=check_file,\n default=pkg_resources.resource_filename(__name__, \"models/ja.json\"),\n help=\"custom model file path\",\n )\n model_select_group.add_argument(\n \"-l\",\n \"--lang\",\n metavar=\"LANG\",\n type=check_lang,\n help=\"language of custom model\",\n )\n parser.add_argument(\n \"-d\",\n \"--delim\",\n metavar=\"STR\",\n type=str,\n default=\"---\",\n help=\"output delimiter in TEXT mode\",\n )\n parser.add_argument(\n \"-V\",\n \"--version\",\n action=\"version\",\n version=\"%(prog)s {}\".format(budoux.__version__),\n )\n if test is not None:\n return parser.parse_args(test)\n else:\n return parser.parse_args()\n\n\ndef _main(test: ArgList = None) -> str:\n args = parse_args(test=test)\n model_path = args.lang or args.model\n with open(model_path, 'r', encoding='utf-8') as f:\n model = json.load(f)\n\n parser = budoux.Parser(model)\n if args.html:\n if args.text is None:\n inputs_html = sys.stdin.read()\n else:\n inputs_html = args.text\n res = parser.translate_html_string(inputs_html)\n else:\n if args.text is None:\n inputs = [v.rstrip() for v in sys.stdin.readlines()]\n else:\n inputs = [v.rstrip() for v in args.text.splitlines()]\n outputs = [parser.parse(sentence) for sentence in inputs]\n conbined_output = [\"\\n\".join(output) for output in outputs]\n ors = \"\\n\" + args.delim + \"\\n\"\n res = ors.join(conbined_output)\n\n return res\n\n\ndef main(test: ArgList = None) -> None:\n try:\n print(_main(test))\n except KeyboardInterrupt:\n exit(0)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "budoux/main.py"}, {"content": "# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"BudouX parser to provide semantic chunks.\"\"\"\n\nimport json\nimport os\nimport typing\nfrom html.parser import 
HTMLParser\n\nfrom .feature_extractor import get_feature\nfrom .utils import INVALID, SEP\n\nMODEL_DIR = os.path.join(os.path.dirname(__file__), 'models')\nPARENT_CSS_STYLE = 'word-break: keep-all; overflow-wrap: break-word;'\nwith open(\n os.path.join(os.path.dirname(__file__), 'skip_nodes.json'),\n encoding='utf-8') as f:\n SKIP_NODES: typing.Set[str] = set(json.load(f))\n\nHTMLAttr = typing.List[typing.Tuple[str, typing.Union[str, None]]]\n\n\nclass TextContentExtractor(HTMLParser):\n \"\"\"An HTML parser to extract text content.\n\n Attributes:\n output (str): Accumulated text content.\n \"\"\"\n output = ''\n\n def handle_data(self, data: str) -> None:\n self.output += data\n\n\nclass HTMLChunkResolver(HTMLParser):\n \"\"\"An HTML parser to resolve the given HTML string and semantic chunks.\n\n Attributes:\n output (str): The HTML string to output.\n chunks (str): Chunks concatenated with the separator string.\n \"\"\"\n output = ''\n\n def __init__(self, chunks: typing.List[str]):\n \"\"\"Initializes the parser.\n\n Args:\n chunks (List[str]): The chunks to resolve.\n \"\"\"\n HTMLParser.__init__(self)\n self.chunks_joined = SEP.join(chunks)\n self.to_skip = False\n\n def handle_starttag(self, tag: str, attrs: HTMLAttr) -> None:\n attr_pairs = []\n for attr in attrs:\n if attr[1] is None:\n attr_pairs.append(attr[0])\n else:\n attr_pairs.append('%s=\"%s\"' % (attr[0], attr[1]))\n encoded_attrs = ' '.join(attr_pairs)\n self.output += '<%s %s>' % (tag, encoded_attrs)\n self.to_skip = tag.upper() in SKIP_NODES\n\n def handle_endtag(self, tag: str) -> None:\n self.output += '</%s>' % (tag)\n self.to_skip = False\n\n def handle_data(self, data: str) -> None:\n if self.to_skip:\n self.output += data\n if self.chunks_joined[0] == SEP:\n self.chunks_joined = self.chunks_joined[1 + len(data):]\n else:\n self.chunks_joined = self.chunks_joined[len(data):]\n return None\n for char in data:\n if char == self.chunks_joined[0]:\n self.chunks_joined = self.chunks_joined[1:]\n self.output += char\n else:\n self.chunks_joined = self.chunks_joined[2:]\n self.output += '<wbr>' + char\n\n\nclass Parser:\n \"\"\"BudouX's Parser.\n\n The main parser object with a variety of class methods to provide semantic\n chunks and markups from the given input string.\n\n Attributes:\n model: A dict mapping a feature (str) and its score (int).\n \"\"\"\n\n def __init__(self, model: typing.Dict[str, int]):\n \"\"\"Initializes the parser.\n\n Args:\n model (Dict[str, int]): A dict mapping a feature and its score.\n \"\"\"\n self.model = model\n\n def parse(self, sentence: str) -> typing.List[str]:\n \"\"\"Parses the input sentence and returns a list of semantic chunks.\n\n Args:\n sentence (str): An input sentence.\n\n Returns:\n A list of semantic chunks (List[str]).\n \"\"\"\n if sentence == '':\n return []\n chunks = [sentence[0]]\n base_score = -sum(self.model.values())\n for i in range(1, len(sentence)):\n feature = get_feature(\n sentence[i - 3] if i > 2 else INVALID,\n sentence[i - 2] if i > 1 else INVALID, sentence[i - 1], sentence[i],\n sentence[i + 1] if i + 1 < len(sentence) else INVALID,\n sentence[i + 2] if i + 2 < len(sentence) else INVALID)\n score = base_score + 2 * sum(\n self.model[f] for f in feature if f in self.model)\n if score > 0:\n chunks.append(sentence[i])\n else:\n chunks[-1] += sentence[i]\n return chunks\n\n def translate_html_string(self, html: str) -> str:\n \"\"\"Translates the given HTML string with markups for semantic line breaks.\n\n Args:\n html (str): An input html string.\n\n 
Returns:\n The translated HTML string (str).\n \"\"\"\n # TODO: Align with the JavaScript API regarding the parent element addition.\n text_content_extractor = TextContentExtractor()\n text_content_extractor.feed(html)\n text_content = text_content_extractor.output\n chunks = self.parse(text_content)\n resolver = HTMLChunkResolver(chunks)\n resolver.feed(html)\n return '<span style=\"%s\">%s</span>' % (PARENT_CSS_STYLE, resolver.output)\n\n\ndef load_default_japanese_parser() -> Parser:\n \"\"\"Loads a parser equipped with the default Japanese model.\n\n Returns:\n A parser (:obj:`budoux.Parser`).\n \"\"\"\n with open(os.path.join(MODEL_DIR, 'ja.json'), encoding='utf-8') as f:\n model = json.load(f)\n return Parser(model)\n\n\ndef load_default_simplified_chinese_parser() -> Parser:\n \"\"\"Loads a parser equipped with the default Simplified Chinese model.\n\n Returns:\n A parser (:obj:`budoux.Parser`).\n \"\"\"\n with open(os.path.join(MODEL_DIR, 'zh-hans.json'), encoding='utf-8') as f:\n model = json.load(f)\n return Parser(model)\n\n\ndef load_default_traditional_chinese_parser() -> Parser:\n \"\"\"Loads a parser equipped with the default Traditional Chinese model.\n\n Returns:\n A parser (:obj:`budoux.Parser`).\n \"\"\"\n with open(os.path.join(MODEL_DIR, 'zh-hant.json'), encoding='utf-8') as f:\n model = json.load(f)\n return Parser(model)\n", "path": "budoux/parser.py"}]}
4006
403
gh_patches_debug_9496
rasdani/github-patches
git_diff
rotki__rotki-2260
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Staked Cream price appears incorrectly In version 1.13.0 of Rotki the staked Cream price and logo appear incorrectly. The CRM's price and logo show up instead of CREAM's. The previous version of Rotki was showing the price correctly but the logo was still incorrect. I think cryptocompare is used as price oracle for CREAM. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `rotkehlchen/icons.py` Content: ``` 1 import itertools 2 import logging 3 from pathlib import Path 4 from typing import Optional, Set 5 6 import gevent 7 import requests 8 from typing_extensions import Literal 9 10 from rotkehlchen.assets.asset import Asset 11 from rotkehlchen.assets.resolver import AssetResolver, asset_type_mapping 12 from rotkehlchen.errors import RemoteError 13 from rotkehlchen.externalapis.coingecko import Coingecko, DELISTED_ASSETS 14 from rotkehlchen.typing import AssetType 15 from rotkehlchen.utils.hashing import file_md5 16 from rotkehlchen.constants.timing import DEFAULT_TIMEOUT_TUPLE 17 18 log = logging.getLogger(__name__) 19 20 21 class IconManager(): 22 """ 23 Manages the icons for all the assets of the application 24 25 The get_icon() and the periodic task of query_uncached_icons_batch() may at 26 a point query the same icon but that's fine and not worth of locking mechanism as 27 it should be rather rare and worst case scenario once in a blue moon we waste 28 an API call. In the end the right file would be written on disk. 29 """ 30 31 def __init__(self, data_dir: Path, coingecko: Coingecko) -> None: 32 self.icons_dir = data_dir / 'icons' 33 self.coingecko = coingecko 34 self.icons_dir.mkdir(parents=True, exist_ok=True) 35 self.failed_assets: Set[Asset] = set() 36 37 def iconfile_path(self, asset: Asset, size: Literal['thumb', 'small', 'large']) -> Path: 38 return self.icons_dir / f'{asset.identifier}_{size}.png' 39 40 def iconfile_md5( 41 self, 42 asset: Asset, 43 size: Literal['thumb', 'small', 'large'], 44 ) -> Optional[str]: 45 path = self.iconfile_path(asset, size) 46 if not path.is_file(): 47 return None 48 49 return file_md5(path) 50 51 def _query_coingecko_for_icon(self, asset: Asset) -> bool: 52 """Queries coingecko for icons of an asset 53 54 If query was okay it returns True, else False 55 """ 56 # Do not bother querying if asset is delisted. Nothing is returned. 57 # we only keep delisted asset coingecko mappings since historical prices 58 # can still be queried. 
59 if asset.identifier in DELISTED_ASSETS: 60 self.failed_assets.add(asset) 61 return False 62 63 try: 64 data = self.coingecko.asset_data(asset) 65 except RemoteError as e: 66 log.warning( 67 f'Problem querying coingecko for asset data of {asset.identifier}: {str(e)}', 68 ) 69 # If a query fails (99% of fails will be 404s) don't repeat them 70 self.failed_assets.add(asset) 71 return False 72 73 for size in ('thumb', 'small', 'large'): 74 url = getattr(data.images, size) 75 try: 76 response = requests.get(url, timeout=DEFAULT_TIMEOUT_TUPLE) 77 except requests.exceptions.RequestException: 78 # Any problem getting the image skip it: https://github.com/rotki/rotki/issues/1370 79 continue 80 81 with open(self.iconfile_path(asset, size), 'wb') as f: # type: ignore 82 f.write(response.content) 83 84 return True 85 86 def get_icon( 87 self, 88 asset: Asset, given_size: Literal['thumb', 'small', 'large'], 89 ) -> Optional[bytes]: 90 """Returns the byte data of the requested icon 91 92 If the icon can't be found it returns None. 93 94 If the icon is found cached locally it's returned directly. 95 96 If not, all icons of the asset are queried from coingecko and cached 97 locally before the requested data are returned. 98 """ 99 if not asset.has_coingecko(): 100 return None 101 102 needed_path = self.iconfile_path(asset, given_size) 103 if needed_path.is_file(): 104 with open(needed_path, 'rb') as f: 105 image_data = f.read() 106 return image_data 107 108 # else query coingecko for the icons and cache all of them 109 if self._query_coingecko_for_icon(asset) is False: 110 return None 111 112 if not needed_path.is_file(): 113 return None 114 115 with open(needed_path, 'rb') as f: 116 image_data = f.read() 117 return image_data 118 119 def query_uncached_icons_batch(self, batch_size: int) -> bool: 120 """Queries a batch of uncached icons for assets 121 122 Returns true if there is more icons left to cache after this batch. 123 """ 124 coingecko_integrated_assets = [] 125 126 for identifier, asset_data in AssetResolver().assets.items(): 127 asset_type = asset_type_mapping[asset_data['type']] 128 if asset_type != AssetType.FIAT and asset_data['coingecko'] != '': 129 coingecko_integrated_assets.append(identifier) 130 131 cached_assets = [ 132 str(x.name)[:-10] for x in self.icons_dir.glob('*_thumb.png') if x.is_file() 133 ] 134 uncached_assets = ( 135 set(coingecko_integrated_assets) - set(cached_assets) - self.failed_assets 136 ) 137 log.info( 138 f'Periodic task to query coingecko for {batch_size} uncached asset icons. ' 139 f'Uncached assets: {len(uncached_assets)}. Cached assets: {len(cached_assets)}', 140 ) 141 for asset_name in itertools.islice(uncached_assets, batch_size): 142 self._query_coingecko_for_icon(Asset(asset_name)) 143 144 return len(uncached_assets) > batch_size 145 146 def periodically_query_icons_until_all_cached( 147 self, 148 batch_size: int, 149 sleep_time_secs: float, 150 ) -> None: 151 """Periodically query all uncached icons until we have icons cached for all 152 of the known assets that have coingecko integration""" 153 if batch_size == 0: 154 return 155 156 while True: 157 carry_on = self.query_uncached_icons_batch(batch_size=batch_size) 158 if not carry_on: 159 break 160 gevent.sleep(sleep_time_secs) 161 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/rotkehlchen/icons.py b/rotkehlchen/icons.py --- a/rotkehlchen/icons.py +++ b/rotkehlchen/icons.py @@ -26,7 +26,7 @@ a point query the same icon but that's fine and not worth of locking mechanism as it should be rather rare and worst case scenario once in a blue moon we waste an API call. In the end the right file would be written on disk. -""" + """ def __init__(self, data_dir: Path, coingecko: Coingecko) -> None: self.icons_dir = data_dir / 'icons'
{"golden_diff": "diff --git a/rotkehlchen/icons.py b/rotkehlchen/icons.py\n--- a/rotkehlchen/icons.py\n+++ b/rotkehlchen/icons.py\n@@ -26,7 +26,7 @@\n a point query the same icon but that's fine and not worth of locking mechanism as\n it should be rather rare and worst case scenario once in a blue moon we waste\n an API call. In the end the right file would be written on disk.\n-\"\"\"\n+ \"\"\"\n \n def __init__(self, data_dir: Path, coingecko: Coingecko) -> None:\n self.icons_dir = data_dir / 'icons'\n", "issue": "Staked Cream price appears incorrectly\nIn version 1.13.0 of Rotki the staked Cream price and logo appear incorrectly. The CRM's price and logo show up instead of CREAM's.\r\n\r\nThe previous version of Rotki was showing the price correctly but the logo was still incorrect.\r\n\r\nI think cryptocompare is used as price oracle for CREAM.\n", "before_files": [{"content": "import itertools\nimport logging\nfrom pathlib import Path\nfrom typing import Optional, Set\n\nimport gevent\nimport requests\nfrom typing_extensions import Literal\n\nfrom rotkehlchen.assets.asset import Asset\nfrom rotkehlchen.assets.resolver import AssetResolver, asset_type_mapping\nfrom rotkehlchen.errors import RemoteError\nfrom rotkehlchen.externalapis.coingecko import Coingecko, DELISTED_ASSETS\nfrom rotkehlchen.typing import AssetType\nfrom rotkehlchen.utils.hashing import file_md5\nfrom rotkehlchen.constants.timing import DEFAULT_TIMEOUT_TUPLE\n\nlog = logging.getLogger(__name__)\n\n\nclass IconManager():\n \"\"\"\n Manages the icons for all the assets of the application\n\n The get_icon() and the periodic task of query_uncached_icons_batch() may at\n a point query the same icon but that's fine and not worth of locking mechanism as\n it should be rather rare and worst case scenario once in a blue moon we waste\n an API call. In the end the right file would be written on disk.\n\"\"\"\n\n def __init__(self, data_dir: Path, coingecko: Coingecko) -> None:\n self.icons_dir = data_dir / 'icons'\n self.coingecko = coingecko\n self.icons_dir.mkdir(parents=True, exist_ok=True)\n self.failed_assets: Set[Asset] = set()\n\n def iconfile_path(self, asset: Asset, size: Literal['thumb', 'small', 'large']) -> Path:\n return self.icons_dir / f'{asset.identifier}_{size}.png'\n\n def iconfile_md5(\n self,\n asset: Asset,\n size: Literal['thumb', 'small', 'large'],\n ) -> Optional[str]:\n path = self.iconfile_path(asset, size)\n if not path.is_file():\n return None\n\n return file_md5(path)\n\n def _query_coingecko_for_icon(self, asset: Asset) -> bool:\n \"\"\"Queries coingecko for icons of an asset\n\n If query was okay it returns True, else False\n \"\"\"\n # Do not bother querying if asset is delisted. 
Nothing is returned.\n # we only keep delisted asset coingecko mappings since historical prices\n # can still be queried.\n if asset.identifier in DELISTED_ASSETS:\n self.failed_assets.add(asset)\n return False\n\n try:\n data = self.coingecko.asset_data(asset)\n except RemoteError as e:\n log.warning(\n f'Problem querying coingecko for asset data of {asset.identifier}: {str(e)}',\n )\n # If a query fails (99% of fails will be 404s) don't repeat them\n self.failed_assets.add(asset)\n return False\n\n for size in ('thumb', 'small', 'large'):\n url = getattr(data.images, size)\n try:\n response = requests.get(url, timeout=DEFAULT_TIMEOUT_TUPLE)\n except requests.exceptions.RequestException:\n # Any problem getting the image skip it: https://github.com/rotki/rotki/issues/1370\n continue\n\n with open(self.iconfile_path(asset, size), 'wb') as f: # type: ignore\n f.write(response.content)\n\n return True\n\n def get_icon(\n self,\n asset: Asset, given_size: Literal['thumb', 'small', 'large'],\n ) -> Optional[bytes]:\n \"\"\"Returns the byte data of the requested icon\n\n If the icon can't be found it returns None.\n\n If the icon is found cached locally it's returned directly.\n\n If not, all icons of the asset are queried from coingecko and cached\n locally before the requested data are returned.\n \"\"\"\n if not asset.has_coingecko():\n return None\n\n needed_path = self.iconfile_path(asset, given_size)\n if needed_path.is_file():\n with open(needed_path, 'rb') as f:\n image_data = f.read()\n return image_data\n\n # else query coingecko for the icons and cache all of them\n if self._query_coingecko_for_icon(asset) is False:\n return None\n\n if not needed_path.is_file():\n return None\n\n with open(needed_path, 'rb') as f:\n image_data = f.read()\n return image_data\n\n def query_uncached_icons_batch(self, batch_size: int) -> bool:\n \"\"\"Queries a batch of uncached icons for assets\n\n Returns true if there is more icons left to cache after this batch.\n \"\"\"\n coingecko_integrated_assets = []\n\n for identifier, asset_data in AssetResolver().assets.items():\n asset_type = asset_type_mapping[asset_data['type']]\n if asset_type != AssetType.FIAT and asset_data['coingecko'] != '':\n coingecko_integrated_assets.append(identifier)\n\n cached_assets = [\n str(x.name)[:-10] for x in self.icons_dir.glob('*_thumb.png') if x.is_file()\n ]\n uncached_assets = (\n set(coingecko_integrated_assets) - set(cached_assets) - self.failed_assets\n )\n log.info(\n f'Periodic task to query coingecko for {batch_size} uncached asset icons. '\n f'Uncached assets: {len(uncached_assets)}. 
Cached assets: {len(cached_assets)}',\n )\n for asset_name in itertools.islice(uncached_assets, batch_size):\n self._query_coingecko_for_icon(Asset(asset_name))\n\n return len(uncached_assets) > batch_size\n\n def periodically_query_icons_until_all_cached(\n self,\n batch_size: int,\n sleep_time_secs: float,\n ) -> None:\n \"\"\"Periodically query all uncached icons until we have icons cached for all\n of the known assets that have coingecko integration\"\"\"\n if batch_size == 0:\n return\n\n while True:\n carry_on = self.query_uncached_icons_batch(batch_size=batch_size)\n if not carry_on:\n break\n gevent.sleep(sleep_time_secs)\n", "path": "rotkehlchen/icons.py"}], "after_files": [{"content": "import itertools\nimport logging\nfrom pathlib import Path\nfrom typing import Optional, Set\n\nimport gevent\nimport requests\nfrom typing_extensions import Literal\n\nfrom rotkehlchen.assets.asset import Asset\nfrom rotkehlchen.assets.resolver import AssetResolver, asset_type_mapping\nfrom rotkehlchen.errors import RemoteError\nfrom rotkehlchen.externalapis.coingecko import Coingecko, DELISTED_ASSETS\nfrom rotkehlchen.typing import AssetType\nfrom rotkehlchen.utils.hashing import file_md5\nfrom rotkehlchen.constants.timing import DEFAULT_TIMEOUT_TUPLE\n\nlog = logging.getLogger(__name__)\n\n\nclass IconManager():\n \"\"\"\n Manages the icons for all the assets of the application\n\n The get_icon() and the periodic task of query_uncached_icons_batch() may at\n a point query the same icon but that's fine and not worth of locking mechanism as\n it should be rather rare and worst case scenario once in a blue moon we waste\n an API call. In the end the right file would be written on disk.\n \"\"\"\n\n def __init__(self, data_dir: Path, coingecko: Coingecko) -> None:\n self.icons_dir = data_dir / 'icons'\n self.coingecko = coingecko\n self.icons_dir.mkdir(parents=True, exist_ok=True)\n self.failed_assets: Set[Asset] = set()\n\n def iconfile_path(self, asset: Asset, size: Literal['thumb', 'small', 'large']) -> Path:\n return self.icons_dir / f'{asset.identifier}_{size}.png'\n\n def iconfile_md5(\n self,\n asset: Asset,\n size: Literal['thumb', 'small', 'large'],\n ) -> Optional[str]:\n path = self.iconfile_path(asset, size)\n if not path.is_file():\n return None\n\n return file_md5(path)\n\n def _query_coingecko_for_icon(self, asset: Asset) -> bool:\n \"\"\"Queries coingecko for icons of an asset\n\n If query was okay it returns True, else False\n \"\"\"\n # Do not bother querying if asset is delisted. 
Nothing is returned.\n # we only keep delisted asset coingecko mappings since historical prices\n # can still be queried.\n if asset.identifier in DELISTED_ASSETS:\n self.failed_assets.add(asset)\n return False\n\n try:\n data = self.coingecko.asset_data(asset)\n except RemoteError as e:\n log.warning(\n f'Problem querying coingecko for asset data of {asset.identifier}: {str(e)}',\n )\n # If a query fails (99% of fails will be 404s) don't repeat them\n self.failed_assets.add(asset)\n return False\n\n for size in ('thumb', 'small', 'large'):\n url = getattr(data.images, size)\n try:\n response = requests.get(url, timeout=DEFAULT_TIMEOUT_TUPLE)\n except requests.exceptions.RequestException:\n # Any problem getting the image skip it: https://github.com/rotki/rotki/issues/1370\n continue\n\n with open(self.iconfile_path(asset, size), 'wb') as f: # type: ignore\n f.write(response.content)\n\n return True\n\n def get_icon(\n self,\n asset: Asset, given_size: Literal['thumb', 'small', 'large'],\n ) -> Optional[bytes]:\n \"\"\"Returns the byte data of the requested icon\n\n If the icon can't be found it returns None.\n\n If the icon is found cached locally it's returned directly.\n\n If not, all icons of the asset are queried from coingecko and cached\n locally before the requested data are returned.\n \"\"\"\n if not asset.has_coingecko():\n return None\n\n needed_path = self.iconfile_path(asset, given_size)\n if needed_path.is_file():\n with open(needed_path, 'rb') as f:\n image_data = f.read()\n return image_data\n\n # else query coingecko for the icons and cache all of them\n if self._query_coingecko_for_icon(asset) is False:\n return None\n\n if not needed_path.is_file():\n return None\n\n with open(needed_path, 'rb') as f:\n image_data = f.read()\n return image_data\n\n def query_uncached_icons_batch(self, batch_size: int) -> bool:\n \"\"\"Queries a batch of uncached icons for assets\n\n Returns true if there is more icons left to cache after this batch.\n \"\"\"\n coingecko_integrated_assets = []\n\n for identifier, asset_data in AssetResolver().assets.items():\n asset_type = asset_type_mapping[asset_data['type']]\n if asset_type != AssetType.FIAT and asset_data['coingecko'] != '':\n coingecko_integrated_assets.append(identifier)\n\n cached_assets = [\n str(x.name)[:-10] for x in self.icons_dir.glob('*_thumb.png') if x.is_file()\n ]\n uncached_assets = (\n set(coingecko_integrated_assets) - set(cached_assets) - self.failed_assets\n )\n log.info(\n f'Periodic task to query coingecko for {batch_size} uncached asset icons. '\n f'Uncached assets: {len(uncached_assets)}. Cached assets: {len(cached_assets)}',\n )\n for asset_name in itertools.islice(uncached_assets, batch_size):\n self._query_coingecko_for_icon(Asset(asset_name))\n\n return len(uncached_assets) > batch_size\n\n def periodically_query_icons_until_all_cached(\n self,\n batch_size: int,\n sleep_time_secs: float,\n ) -> None:\n \"\"\"Periodically query all uncached icons until we have icons cached for all\n of the known assets that have coingecko integration\"\"\"\n if batch_size == 0:\n return\n\n while True:\n carry_on = self.query_uncached_icons_batch(batch_size=batch_size)\n if not carry_on:\n break\n gevent.sleep(sleep_time_secs)\n", "path": "rotkehlchen/icons.py"}]}
2038
146
gh_patches_debug_4092
rasdani/github-patches
git_diff
zulip__zulip-27536
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Document ability to drag-and-drop anywhere to upload a file We should document the functionality introduced in #14579 / https://github.com/zulip/zulip/pull/26041. I would probably replace the "Via Markdown" tab with "Via drag-and-drop", and modify the instructions to explain that you can drag anywhere, whether or not the compose box is open. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `zerver/lib/markdown/tabbed_sections.py` Content: ``` 1 import re 2 from typing import Any, Dict, List, Mapping, Optional 3 4 import markdown 5 from markdown.extensions import Extension 6 from markdown.preprocessors import Preprocessor 7 from typing_extensions import override 8 9 from zerver.lib.markdown.priorities import PREPROCESSOR_PRIORITES 10 11 START_TABBED_SECTION_REGEX = re.compile(r"^\{start_tabs\}$") 12 END_TABBED_SECTION_REGEX = re.compile(r"^\{end_tabs\}$") 13 TAB_CONTENT_REGEX = re.compile(r"^\{tab\|([^}]+)\}$") 14 15 TABBED_SECTION_TEMPLATE = """ 16 <div class="tabbed-section {tab_class}" markdown="1"> 17 {nav_bar} 18 <div class="blocks"> 19 {blocks} 20 </div> 21 </div> 22 """.strip() 23 24 NAV_BAR_TEMPLATE = """ 25 <ul class="nav"> 26 {tabs} 27 </ul> 28 """.strip() 29 30 NAV_LIST_ITEM_TEMPLATE = """ 31 <li data-tab-key="{data_tab_key}" tabindex="0">{label}</li> 32 """.strip() 33 34 DIV_TAB_CONTENT_TEMPLATE = """ 35 <div data-tab-key="{data_tab_key}" markdown="1"> 36 {content} 37 </div> 38 """.strip() 39 40 # If adding new entries here, also check if you need to update 41 # tabbed-instructions.js 42 TAB_SECTION_LABELS = { 43 "desktop-web": "Desktop/Web", 44 "ios": "iOS", 45 "android": "Android", 46 "mac": "macOS", 47 "windows": "Windows", 48 "linux": "Linux", 49 "python": "Python", 50 "js": "JavaScript", 51 "curl": "curl", 52 "zulip-send": "zulip-send", 53 "web": "Web", 54 "desktop": "Desktop", 55 "mobile": "Mobile", 56 "mm-default": "Default installation", 57 "mm-cloud": "Cloud instance", 58 "mm-docker": "Docker", 59 "mm-gitlab-omnibus": "GitLab Omnibus", 60 "mm-self-hosting-cloud-export": "Self hosting (cloud export)", 61 "require-invitations": "Require invitations", 62 "allow-anyone-to-join": "Allow anyone to join", 63 "restrict-by-email-domain": "Restrict by email domain", 64 "zoom": "Zoom", 65 "jitsi-meet": "Jitsi Meet", 66 "bigbluebutton": "BigBlueButton", 67 "disable": "Disabled", 68 "chrome": "Chrome", 69 "firefox": "Firefox", 70 "desktop-app": "Desktop app", 71 "system-proxy-settings": "System proxy settings", 72 "custom-proxy-settings": "Custom proxy settings", 73 "stream": "From a stream view", 74 "not-stream": "From other views", 75 "via-recent-conversations": "Via recent conversations", 76 "via-inbox-view": "Via inbox view", 77 "via-left-sidebar": "Via left sidebar", 78 "instructions-for-all-platforms": "Instructions for all platforms", 79 "public-streams": "Public streams", 80 "private-streams": "Private streams", 81 "web-public-streams": "Web-public streams", 82 "via-user-card": "Via user card", 83 "via-user-profile": "Via user profile", 84 "via-organization-settings": "Via organization settings", 85 "via-personal-settings": "Via personal settings", 86 "via-stream-settings": "Via stream settings", 87 "default-subdomain": "Default subdomain", 88 "custom-subdomain": "Custom subdomain", 89 "zulip-cloud": "Zulip Cloud", 90 "self-hosting": "Self hosting", 91 "okta": "Okta", 
92 "onelogin": "OneLogin", 93 "azuread": "AzureAD", 94 "keycloak": "Keycloak", 95 "auth0": "Auth0", 96 "logged-in": "If you are logged in", 97 "logged-out": "If you are logged out", 98 "user": "User", 99 "bot": "Bot", 100 "on-sign-up": "On sign-up", 101 "via-paste": "Via paste", 102 "via-markdown": "Via Markdown", 103 "via-compose-box-buttons": "Via compose box buttons", 104 "stream-compose": "Compose to a stream", 105 "dm-compose": "Compose a DM", 106 "v6": "Zulip Server 6.0+", 107 "v4": "Zulip Server 4.0+", 108 } 109 110 111 class TabbedSectionsGenerator(Extension): 112 @override 113 def extendMarkdown(self, md: markdown.Markdown) -> None: 114 md.preprocessors.register( 115 TabbedSectionsPreprocessor(md, self.getConfigs()), 116 "tabbed_sections", 117 PREPROCESSOR_PRIORITES["tabbed_sections"], 118 ) 119 120 121 class TabbedSectionsPreprocessor(Preprocessor): 122 def __init__(self, md: markdown.Markdown, config: Mapping[str, Any]) -> None: 123 super().__init__(md) 124 125 @override 126 def run(self, lines: List[str]) -> List[str]: 127 tab_section = self.parse_tabs(lines) 128 while tab_section: 129 if "tabs" in tab_section: 130 tab_class = "has-tabs" 131 else: 132 tab_class = "no-tabs" 133 tab_section["tabs"] = [ 134 { 135 "tab_key": "instructions-for-all-platforms", 136 "start": tab_section["start_tabs_index"], 137 } 138 ] 139 nav_bar = self.generate_nav_bar(tab_section) 140 content_blocks = self.generate_content_blocks(tab_section, lines) 141 rendered_tabs = TABBED_SECTION_TEMPLATE.format( 142 tab_class=tab_class, nav_bar=nav_bar, blocks=content_blocks 143 ) 144 145 start = tab_section["start_tabs_index"] 146 end = tab_section["end_tabs_index"] + 1 147 lines = [*lines[:start], rendered_tabs, *lines[end:]] 148 tab_section = self.parse_tabs(lines) 149 return lines 150 151 def generate_content_blocks(self, tab_section: Dict[str, Any], lines: List[str]) -> str: 152 tab_content_blocks = [] 153 for index, tab in enumerate(tab_section["tabs"]): 154 start_index = tab["start"] + 1 155 try: 156 # If there are more tabs, we can use the starting index 157 # of the next tab as the ending index of the previous one 158 end_index = tab_section["tabs"][index + 1]["start"] 159 except IndexError: 160 # Otherwise, just use the end of the entire section 161 end_index = tab_section["end_tabs_index"] 162 163 content = "\n".join(lines[start_index:end_index]).strip() 164 tab_content_block = DIV_TAB_CONTENT_TEMPLATE.format( 165 data_tab_key=tab["tab_key"], 166 # Wrapping the content in two newlines is necessary here. 167 # If we don't do this, the inner Markdown does not get 168 # rendered properly. 
169 content=f"\n{content}\n", 170 ) 171 tab_content_blocks.append(tab_content_block) 172 return "\n".join(tab_content_blocks) 173 174 def generate_nav_bar(self, tab_section: Dict[str, Any]) -> str: 175 li_elements = [] 176 for tab in tab_section["tabs"]: 177 tab_key = tab.get("tab_key") 178 tab_label = TAB_SECTION_LABELS.get(tab_key) 179 if tab_label is None: 180 raise ValueError( 181 f"Tab '{tab_key}' is not present in TAB_SECTION_LABELS in zerver/lib/markdown/tabbed_sections.py" 182 ) 183 184 li = NAV_LIST_ITEM_TEMPLATE.format(data_tab_key=tab_key, label=tab_label) 185 li_elements.append(li) 186 187 return NAV_BAR_TEMPLATE.format(tabs="\n".join(li_elements)) 188 189 def parse_tabs(self, lines: List[str]) -> Optional[Dict[str, Any]]: 190 block: Dict[str, Any] = {} 191 for index, line in enumerate(lines): 192 start_match = START_TABBED_SECTION_REGEX.search(line) 193 if start_match: 194 block["start_tabs_index"] = index 195 196 tab_content_match = TAB_CONTENT_REGEX.search(line) 197 if tab_content_match: 198 block.setdefault("tabs", []) 199 tab = {"start": index, "tab_key": tab_content_match.group(1)} 200 block["tabs"].append(tab) 201 202 end_match = END_TABBED_SECTION_REGEX.search(line) 203 if end_match: 204 block["end_tabs_index"] = index 205 break 206 return block 207 208 209 def makeExtension(*args: Any, **kwargs: str) -> TabbedSectionsGenerator: 210 return TabbedSectionsGenerator(**kwargs) 211 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/zerver/lib/markdown/tabbed_sections.py b/zerver/lib/markdown/tabbed_sections.py --- a/zerver/lib/markdown/tabbed_sections.py +++ b/zerver/lib/markdown/tabbed_sections.py @@ -99,6 +99,7 @@ "bot": "Bot", "on-sign-up": "On sign-up", "via-paste": "Via paste", + "via-drag-and-drop": "Via drag-and-drop", "via-markdown": "Via Markdown", "via-compose-box-buttons": "Via compose box buttons", "stream-compose": "Compose to a stream",
{"golden_diff": "diff --git a/zerver/lib/markdown/tabbed_sections.py b/zerver/lib/markdown/tabbed_sections.py\n--- a/zerver/lib/markdown/tabbed_sections.py\n+++ b/zerver/lib/markdown/tabbed_sections.py\n@@ -99,6 +99,7 @@\n \"bot\": \"Bot\",\n \"on-sign-up\": \"On sign-up\",\n \"via-paste\": \"Via paste\",\n+ \"via-drag-and-drop\": \"Via drag-and-drop\",\n \"via-markdown\": \"Via Markdown\",\n \"via-compose-box-buttons\": \"Via compose box buttons\",\n \"stream-compose\": \"Compose to a stream\",\n", "issue": "Document ability to drag-and-drop anywhere to upload a file\nWe should document the functionality introduced in #14579 / https://github.com/zulip/zulip/pull/26041.\r\n\r\nI would probably replace the \"Via Markdown\" tab with \"Via drag-and-drop\", and modify the instructions to explain that you can drag anywhere, whether or not the compose box is open.\n", "before_files": [{"content": "import re\nfrom typing import Any, Dict, List, Mapping, Optional\n\nimport markdown\nfrom markdown.extensions import Extension\nfrom markdown.preprocessors import Preprocessor\nfrom typing_extensions import override\n\nfrom zerver.lib.markdown.priorities import PREPROCESSOR_PRIORITES\n\nSTART_TABBED_SECTION_REGEX = re.compile(r\"^\\{start_tabs\\}$\")\nEND_TABBED_SECTION_REGEX = re.compile(r\"^\\{end_tabs\\}$\")\nTAB_CONTENT_REGEX = re.compile(r\"^\\{tab\\|([^}]+)\\}$\")\n\nTABBED_SECTION_TEMPLATE = \"\"\"\n<div class=\"tabbed-section {tab_class}\" markdown=\"1\">\n{nav_bar}\n<div class=\"blocks\">\n{blocks}\n</div>\n</div>\n\"\"\".strip()\n\nNAV_BAR_TEMPLATE = \"\"\"\n<ul class=\"nav\">\n{tabs}\n</ul>\n\"\"\".strip()\n\nNAV_LIST_ITEM_TEMPLATE = \"\"\"\n<li data-tab-key=\"{data_tab_key}\" tabindex=\"0\">{label}</li>\n\"\"\".strip()\n\nDIV_TAB_CONTENT_TEMPLATE = \"\"\"\n<div data-tab-key=\"{data_tab_key}\" markdown=\"1\">\n{content}\n</div>\n\"\"\".strip()\n\n# If adding new entries here, also check if you need to update\n# tabbed-instructions.js\nTAB_SECTION_LABELS = {\n \"desktop-web\": \"Desktop/Web\",\n \"ios\": \"iOS\",\n \"android\": \"Android\",\n \"mac\": \"macOS\",\n \"windows\": \"Windows\",\n \"linux\": \"Linux\",\n \"python\": \"Python\",\n \"js\": \"JavaScript\",\n \"curl\": \"curl\",\n \"zulip-send\": \"zulip-send\",\n \"web\": \"Web\",\n \"desktop\": \"Desktop\",\n \"mobile\": \"Mobile\",\n \"mm-default\": \"Default installation\",\n \"mm-cloud\": \"Cloud instance\",\n \"mm-docker\": \"Docker\",\n \"mm-gitlab-omnibus\": \"GitLab Omnibus\",\n \"mm-self-hosting-cloud-export\": \"Self hosting (cloud export)\",\n \"require-invitations\": \"Require invitations\",\n \"allow-anyone-to-join\": \"Allow anyone to join\",\n \"restrict-by-email-domain\": \"Restrict by email domain\",\n \"zoom\": \"Zoom\",\n \"jitsi-meet\": \"Jitsi Meet\",\n \"bigbluebutton\": \"BigBlueButton\",\n \"disable\": \"Disabled\",\n \"chrome\": \"Chrome\",\n \"firefox\": \"Firefox\",\n \"desktop-app\": \"Desktop app\",\n \"system-proxy-settings\": \"System proxy settings\",\n \"custom-proxy-settings\": \"Custom proxy settings\",\n \"stream\": \"From a stream view\",\n \"not-stream\": \"From other views\",\n \"via-recent-conversations\": \"Via recent conversations\",\n \"via-inbox-view\": \"Via inbox view\",\n \"via-left-sidebar\": \"Via left sidebar\",\n \"instructions-for-all-platforms\": \"Instructions for all platforms\",\n \"public-streams\": \"Public streams\",\n \"private-streams\": \"Private streams\",\n \"web-public-streams\": \"Web-public streams\",\n \"via-user-card\": \"Via user card\",\n \"via-user-profile\": 
\"Via user profile\",\n \"via-organization-settings\": \"Via organization settings\",\n \"via-personal-settings\": \"Via personal settings\",\n \"via-stream-settings\": \"Via stream settings\",\n \"default-subdomain\": \"Default subdomain\",\n \"custom-subdomain\": \"Custom subdomain\",\n \"zulip-cloud\": \"Zulip Cloud\",\n \"self-hosting\": \"Self hosting\",\n \"okta\": \"Okta\",\n \"onelogin\": \"OneLogin\",\n \"azuread\": \"AzureAD\",\n \"keycloak\": \"Keycloak\",\n \"auth0\": \"Auth0\",\n \"logged-in\": \"If you are logged in\",\n \"logged-out\": \"If you are logged out\",\n \"user\": \"User\",\n \"bot\": \"Bot\",\n \"on-sign-up\": \"On sign-up\",\n \"via-paste\": \"Via paste\",\n \"via-markdown\": \"Via Markdown\",\n \"via-compose-box-buttons\": \"Via compose box buttons\",\n \"stream-compose\": \"Compose to a stream\",\n \"dm-compose\": \"Compose a DM\",\n \"v6\": \"Zulip Server 6.0+\",\n \"v4\": \"Zulip Server 4.0+\",\n}\n\n\nclass TabbedSectionsGenerator(Extension):\n @override\n def extendMarkdown(self, md: markdown.Markdown) -> None:\n md.preprocessors.register(\n TabbedSectionsPreprocessor(md, self.getConfigs()),\n \"tabbed_sections\",\n PREPROCESSOR_PRIORITES[\"tabbed_sections\"],\n )\n\n\nclass TabbedSectionsPreprocessor(Preprocessor):\n def __init__(self, md: markdown.Markdown, config: Mapping[str, Any]) -> None:\n super().__init__(md)\n\n @override\n def run(self, lines: List[str]) -> List[str]:\n tab_section = self.parse_tabs(lines)\n while tab_section:\n if \"tabs\" in tab_section:\n tab_class = \"has-tabs\"\n else:\n tab_class = \"no-tabs\"\n tab_section[\"tabs\"] = [\n {\n \"tab_key\": \"instructions-for-all-platforms\",\n \"start\": tab_section[\"start_tabs_index\"],\n }\n ]\n nav_bar = self.generate_nav_bar(tab_section)\n content_blocks = self.generate_content_blocks(tab_section, lines)\n rendered_tabs = TABBED_SECTION_TEMPLATE.format(\n tab_class=tab_class, nav_bar=nav_bar, blocks=content_blocks\n )\n\n start = tab_section[\"start_tabs_index\"]\n end = tab_section[\"end_tabs_index\"] + 1\n lines = [*lines[:start], rendered_tabs, *lines[end:]]\n tab_section = self.parse_tabs(lines)\n return lines\n\n def generate_content_blocks(self, tab_section: Dict[str, Any], lines: List[str]) -> str:\n tab_content_blocks = []\n for index, tab in enumerate(tab_section[\"tabs\"]):\n start_index = tab[\"start\"] + 1\n try:\n # If there are more tabs, we can use the starting index\n # of the next tab as the ending index of the previous one\n end_index = tab_section[\"tabs\"][index + 1][\"start\"]\n except IndexError:\n # Otherwise, just use the end of the entire section\n end_index = tab_section[\"end_tabs_index\"]\n\n content = \"\\n\".join(lines[start_index:end_index]).strip()\n tab_content_block = DIV_TAB_CONTENT_TEMPLATE.format(\n data_tab_key=tab[\"tab_key\"],\n # Wrapping the content in two newlines is necessary here.\n # If we don't do this, the inner Markdown does not get\n # rendered properly.\n content=f\"\\n{content}\\n\",\n )\n tab_content_blocks.append(tab_content_block)\n return \"\\n\".join(tab_content_blocks)\n\n def generate_nav_bar(self, tab_section: Dict[str, Any]) -> str:\n li_elements = []\n for tab in tab_section[\"tabs\"]:\n tab_key = tab.get(\"tab_key\")\n tab_label = TAB_SECTION_LABELS.get(tab_key)\n if tab_label is None:\n raise ValueError(\n f\"Tab '{tab_key}' is not present in TAB_SECTION_LABELS in zerver/lib/markdown/tabbed_sections.py\"\n )\n\n li = NAV_LIST_ITEM_TEMPLATE.format(data_tab_key=tab_key, label=tab_label)\n li_elements.append(li)\n\n return 
NAV_BAR_TEMPLATE.format(tabs=\"\\n\".join(li_elements))\n\n def parse_tabs(self, lines: List[str]) -> Optional[Dict[str, Any]]:\n block: Dict[str, Any] = {}\n for index, line in enumerate(lines):\n start_match = START_TABBED_SECTION_REGEX.search(line)\n if start_match:\n block[\"start_tabs_index\"] = index\n\n tab_content_match = TAB_CONTENT_REGEX.search(line)\n if tab_content_match:\n block.setdefault(\"tabs\", [])\n tab = {\"start\": index, \"tab_key\": tab_content_match.group(1)}\n block[\"tabs\"].append(tab)\n\n end_match = END_TABBED_SECTION_REGEX.search(line)\n if end_match:\n block[\"end_tabs_index\"] = index\n break\n return block\n\n\ndef makeExtension(*args: Any, **kwargs: str) -> TabbedSectionsGenerator:\n return TabbedSectionsGenerator(**kwargs)\n", "path": "zerver/lib/markdown/tabbed_sections.py"}], "after_files": [{"content": "import re\nfrom typing import Any, Dict, List, Mapping, Optional\n\nimport markdown\nfrom markdown.extensions import Extension\nfrom markdown.preprocessors import Preprocessor\nfrom typing_extensions import override\n\nfrom zerver.lib.markdown.priorities import PREPROCESSOR_PRIORITES\n\nSTART_TABBED_SECTION_REGEX = re.compile(r\"^\\{start_tabs\\}$\")\nEND_TABBED_SECTION_REGEX = re.compile(r\"^\\{end_tabs\\}$\")\nTAB_CONTENT_REGEX = re.compile(r\"^\\{tab\\|([^}]+)\\}$\")\n\nTABBED_SECTION_TEMPLATE = \"\"\"\n<div class=\"tabbed-section {tab_class}\" markdown=\"1\">\n{nav_bar}\n<div class=\"blocks\">\n{blocks}\n</div>\n</div>\n\"\"\".strip()\n\nNAV_BAR_TEMPLATE = \"\"\"\n<ul class=\"nav\">\n{tabs}\n</ul>\n\"\"\".strip()\n\nNAV_LIST_ITEM_TEMPLATE = \"\"\"\n<li data-tab-key=\"{data_tab_key}\" tabindex=\"0\">{label}</li>\n\"\"\".strip()\n\nDIV_TAB_CONTENT_TEMPLATE = \"\"\"\n<div data-tab-key=\"{data_tab_key}\" markdown=\"1\">\n{content}\n</div>\n\"\"\".strip()\n\n# If adding new entries here, also check if you need to update\n# tabbed-instructions.js\nTAB_SECTION_LABELS = {\n \"desktop-web\": \"Desktop/Web\",\n \"ios\": \"iOS\",\n \"android\": \"Android\",\n \"mac\": \"macOS\",\n \"windows\": \"Windows\",\n \"linux\": \"Linux\",\n \"python\": \"Python\",\n \"js\": \"JavaScript\",\n \"curl\": \"curl\",\n \"zulip-send\": \"zulip-send\",\n \"web\": \"Web\",\n \"desktop\": \"Desktop\",\n \"mobile\": \"Mobile\",\n \"mm-default\": \"Default installation\",\n \"mm-cloud\": \"Cloud instance\",\n \"mm-docker\": \"Docker\",\n \"mm-gitlab-omnibus\": \"GitLab Omnibus\",\n \"mm-self-hosting-cloud-export\": \"Self hosting (cloud export)\",\n \"require-invitations\": \"Require invitations\",\n \"allow-anyone-to-join\": \"Allow anyone to join\",\n \"restrict-by-email-domain\": \"Restrict by email domain\",\n \"zoom\": \"Zoom\",\n \"jitsi-meet\": \"Jitsi Meet\",\n \"bigbluebutton\": \"BigBlueButton\",\n \"disable\": \"Disabled\",\n \"chrome\": \"Chrome\",\n \"firefox\": \"Firefox\",\n \"desktop-app\": \"Desktop app\",\n \"system-proxy-settings\": \"System proxy settings\",\n \"custom-proxy-settings\": \"Custom proxy settings\",\n \"stream\": \"From a stream view\",\n \"not-stream\": \"From other views\",\n \"via-recent-conversations\": \"Via recent conversations\",\n \"via-inbox-view\": \"Via inbox view\",\n \"via-left-sidebar\": \"Via left sidebar\",\n \"instructions-for-all-platforms\": \"Instructions for all platforms\",\n \"public-streams\": \"Public streams\",\n \"private-streams\": \"Private streams\",\n \"web-public-streams\": \"Web-public streams\",\n \"via-user-card\": \"Via user card\",\n \"via-user-profile\": \"Via user profile\",\n \"via-organization-settings\": 
\"Via organization settings\",\n \"via-personal-settings\": \"Via personal settings\",\n \"via-stream-settings\": \"Via stream settings\",\n \"default-subdomain\": \"Default subdomain\",\n \"custom-subdomain\": \"Custom subdomain\",\n \"zulip-cloud\": \"Zulip Cloud\",\n \"self-hosting\": \"Self hosting\",\n \"okta\": \"Okta\",\n \"onelogin\": \"OneLogin\",\n \"azuread\": \"AzureAD\",\n \"keycloak\": \"Keycloak\",\n \"auth0\": \"Auth0\",\n \"logged-in\": \"If you are logged in\",\n \"logged-out\": \"If you are logged out\",\n \"user\": \"User\",\n \"bot\": \"Bot\",\n \"on-sign-up\": \"On sign-up\",\n \"via-paste\": \"Via paste\",\n \"via-drag-and-drop\": \"Via drag-and-drop\",\n \"via-markdown\": \"Via Markdown\",\n \"via-compose-box-buttons\": \"Via compose box buttons\",\n \"stream-compose\": \"Compose to a stream\",\n \"dm-compose\": \"Compose a DM\",\n \"v6\": \"Zulip Server 6.0+\",\n \"v4\": \"Zulip Server 4.0+\",\n}\n\n\nclass TabbedSectionsGenerator(Extension):\n @override\n def extendMarkdown(self, md: markdown.Markdown) -> None:\n md.preprocessors.register(\n TabbedSectionsPreprocessor(md, self.getConfigs()),\n \"tabbed_sections\",\n PREPROCESSOR_PRIORITES[\"tabbed_sections\"],\n )\n\n\nclass TabbedSectionsPreprocessor(Preprocessor):\n def __init__(self, md: markdown.Markdown, config: Mapping[str, Any]) -> None:\n super().__init__(md)\n\n @override\n def run(self, lines: List[str]) -> List[str]:\n tab_section = self.parse_tabs(lines)\n while tab_section:\n if \"tabs\" in tab_section:\n tab_class = \"has-tabs\"\n else:\n tab_class = \"no-tabs\"\n tab_section[\"tabs\"] = [\n {\n \"tab_key\": \"instructions-for-all-platforms\",\n \"start\": tab_section[\"start_tabs_index\"],\n }\n ]\n nav_bar = self.generate_nav_bar(tab_section)\n content_blocks = self.generate_content_blocks(tab_section, lines)\n rendered_tabs = TABBED_SECTION_TEMPLATE.format(\n tab_class=tab_class, nav_bar=nav_bar, blocks=content_blocks\n )\n\n start = tab_section[\"start_tabs_index\"]\n end = tab_section[\"end_tabs_index\"] + 1\n lines = [*lines[:start], rendered_tabs, *lines[end:]]\n tab_section = self.parse_tabs(lines)\n return lines\n\n def generate_content_blocks(self, tab_section: Dict[str, Any], lines: List[str]) -> str:\n tab_content_blocks = []\n for index, tab in enumerate(tab_section[\"tabs\"]):\n start_index = tab[\"start\"] + 1\n try:\n # If there are more tabs, we can use the starting index\n # of the next tab as the ending index of the previous one\n end_index = tab_section[\"tabs\"][index + 1][\"start\"]\n except IndexError:\n # Otherwise, just use the end of the entire section\n end_index = tab_section[\"end_tabs_index\"]\n\n content = \"\\n\".join(lines[start_index:end_index]).strip()\n tab_content_block = DIV_TAB_CONTENT_TEMPLATE.format(\n data_tab_key=tab[\"tab_key\"],\n # Wrapping the content in two newlines is necessary here.\n # If we don't do this, the inner Markdown does not get\n # rendered properly.\n content=f\"\\n{content}\\n\",\n )\n tab_content_blocks.append(tab_content_block)\n return \"\\n\".join(tab_content_blocks)\n\n def generate_nav_bar(self, tab_section: Dict[str, Any]) -> str:\n li_elements = []\n for tab in tab_section[\"tabs\"]:\n tab_key = tab.get(\"tab_key\")\n tab_label = TAB_SECTION_LABELS.get(tab_key)\n if tab_label is None:\n raise ValueError(\n f\"Tab '{tab_key}' is not present in TAB_SECTION_LABELS in zerver/lib/markdown/tabbed_sections.py\"\n )\n\n li = NAV_LIST_ITEM_TEMPLATE.format(data_tab_key=tab_key, label=tab_label)\n li_elements.append(li)\n\n return 
NAV_BAR_TEMPLATE.format(tabs=\"\\n\".join(li_elements))\n\n def parse_tabs(self, lines: List[str]) -> Optional[Dict[str, Any]]:\n block: Dict[str, Any] = {}\n for index, line in enumerate(lines):\n start_match = START_TABBED_SECTION_REGEX.search(line)\n if start_match:\n block[\"start_tabs_index\"] = index\n\n tab_content_match = TAB_CONTENT_REGEX.search(line)\n if tab_content_match:\n block.setdefault(\"tabs\", [])\n tab = {\"start\": index, \"tab_key\": tab_content_match.group(1)}\n block[\"tabs\"].append(tab)\n\n end_match = END_TABBED_SECTION_REGEX.search(line)\n if end_match:\n block[\"end_tabs_index\"] = index\n break\n return block\n\n\ndef makeExtension(*args: Any, **kwargs: str) -> TabbedSectionsGenerator:\n return TabbedSectionsGenerator(**kwargs)\n", "path": "zerver/lib/markdown/tabbed_sections.py"}]}
num_tokens: 2,677
num_tokens_diff: 137
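The tabbed-sections row that ends here reduces to a single visible change: the `after_files` version of `TAB_SECTION_LABELS` gains a `"via-drag-and-drop": "Via drag-and-drop"` entry that the `before_files` version lacks. The entry matters because of the lookup guard in `generate_nav_bar` shown in the row's source; the sketch below isolates that guard, with a deliberately trimmed label table and an invented helper name, purely to illustrate why an unregistered tab key fails.

```python
# Minimal illustration of the guard; TAB_SECTION_LABELS is trimmed and
# label_for() is an invented stand-in for generate_nav_bar's lookup.
TAB_SECTION_LABELS = {"via-paste": "Via paste", "via-markdown": "Via Markdown"}


def label_for(tab_key: str) -> str:
    label = TAB_SECTION_LABELS.get(tab_key)
    if label is None:
        raise ValueError(
            f"Tab '{tab_key}' is not present in TAB_SECTION_LABELS in "
            "zerver/lib/markdown/tabbed_sections.py"
        )
    return label


label_for("via-drag-and-drop")  # raises until the new label is registered
```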

problem_id: gh_patches_debug_30301
source: rasdani/github-patches
task_type: git_diff
in_source_id: napari__napari-4445
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- npe2 plugins need to be added to `napari --info` ## 🐛 Bug currently, `napari --info` doesn't include npe2 plugins --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `napari/utils/info.py` Content: ``` 1 import os 2 import platform 3 import subprocess 4 import sys 5 6 import napari 7 8 OS_RELEASE_PATH = "/etc/os-release" 9 10 11 def _linux_sys_name(): 12 """ 13 Try to discover linux system name base on /etc/os-release file or lsb_release command output 14 https://www.freedesktop.org/software/systemd/man/os-release.html 15 """ 16 if os.path.exists(OS_RELEASE_PATH): 17 with open(OS_RELEASE_PATH) as f_p: 18 data = {} 19 for line in f_p: 20 field, value = line.split("=") 21 data[field.strip()] = value.strip().strip('"') 22 if "PRETTY_NAME" in data: 23 return data["PRETTY_NAME"] 24 if "NAME" in data: 25 if "VERSION" in data: 26 return f'{data["NAME"]} {data["VERSION"]}' 27 if "VERSION_ID" in data: 28 return f'{data["NAME"]} {data["VERSION_ID"]}' 29 return f'{data["NAME"]} (no version)' 30 31 try: 32 res = subprocess.run( 33 ["lsb_release", "-d", "-r"], check=True, capture_output=True 34 ) 35 text = res.stdout.decode() 36 data = {} 37 for line in text.split("\n"): 38 key, val = line.split(":") 39 data[key.strip()] = val.strip() 40 version_str = data["Description"] 41 if not version_str.endswith(data["Release"]): 42 version_str += " " + data["Release"] 43 return version_str 44 except subprocess.CalledProcessError: 45 pass 46 return "" 47 48 49 def _sys_name(): 50 """ 51 Discover MacOS or Linux Human readable information. For Linux provide information about distribution. 52 """ 53 try: 54 if sys.platform == "linux": 55 return _linux_sys_name() 56 if sys.platform == "darwin": 57 try: 58 res = subprocess.run( 59 ["sw_vers", "-productVersion"], 60 check=True, 61 capture_output=True, 62 ) 63 return f"MacOS {res.stdout.decode().strip()}" 64 except subprocess.CalledProcessError: 65 pass 66 except Exception: 67 pass 68 return "" 69 70 71 def sys_info(as_html=False): 72 """Gathers relevant module versions for troubleshooting purposes. 
73 74 Parameters 75 ---------- 76 as_html : bool 77 if True, info will be returned as HTML, suitable for a QTextEdit widget 78 """ 79 from napari.plugins import plugin_manager 80 81 sys_version = sys.version.replace('\n', ' ') 82 text = ( 83 f"<b>napari</b>: {napari.__version__}<br>" 84 f"<b>Platform</b>: {platform.platform()}<br>" 85 ) 86 87 __sys_name = _sys_name() 88 if __sys_name: 89 text += f"<b>System</b>: {__sys_name}<br>" 90 91 text += f"<b>Python</b>: {sys_version}<br>" 92 93 try: 94 from qtpy import API_NAME, PYQT_VERSION, PYSIDE_VERSION, QtCore 95 96 if API_NAME == 'PySide2': 97 API_VERSION = PYSIDE_VERSION 98 elif API_NAME == 'PyQt5': 99 API_VERSION = PYQT_VERSION 100 else: 101 API_VERSION = '' 102 103 text += ( 104 f"<b>Qt</b>: {QtCore.__version__}<br>" 105 f"<b>{API_NAME}</b>: {API_VERSION}<br>" 106 ) 107 108 except Exception as e: 109 text += f"<b>Qt</b>: Import failed ({e})<br>" 110 111 modules = ( 112 ('numpy', 'NumPy'), 113 ('scipy', 'SciPy'), 114 ('dask', 'Dask'), 115 ('vispy', 'VisPy'), 116 ) 117 118 loaded = {} 119 for module, name in modules: 120 try: 121 loaded[module] = __import__(module) 122 text += f"<b>{name}</b>: {loaded[module].__version__}<br>" 123 except Exception as e: 124 text += f"<b>{name}</b>: Import failed ({e})<br>" 125 126 text += "<br><b>OpenGL:</b><br>" 127 128 if loaded.get('vispy', False): 129 sys_info_text = ( 130 "<br>".join( 131 [ 132 loaded['vispy'].sys_info().split("\n")[index] 133 for index in [-4, -3] 134 ] 135 ) 136 .replace("'", "") 137 .replace("<br>", "<br> - ") 138 ) 139 text += f' - {sys_info_text}<br>' 140 else: 141 text += " - failed to load vispy" 142 143 text += "<br><b>Screens:</b><br>" 144 145 try: 146 from qtpy.QtGui import QGuiApplication 147 148 screen_list = QGuiApplication.screens() 149 for i, screen in enumerate(screen_list, start=1): 150 text += f" - screen {i}: resolution {screen.geometry().width()}x{screen.geometry().height()}, scale {screen.devicePixelRatio()}<br>" 151 except Exception as e: 152 text += f" - failed to load screen information {e}" 153 154 plugin_manager.discover() 155 plugin_strings = [] 156 for meta in plugin_manager.list_plugin_metadata(): 157 plugin_name = meta.get('plugin_name') 158 if plugin_name == 'builtins': 159 continue 160 version = meta.get('version') 161 version_string = f": {version}" if version else "" 162 plugin_strings.append(f" - {plugin_name}{version_string}") 163 text += '<br><b>Plugins</b>:' 164 text += ( 165 ("<br>" + "<br>".join(sorted(plugin_strings))) 166 if plugin_strings 167 else ' None' 168 ) 169 170 if not as_html: 171 text = ( 172 text.replace("<br>", "\n").replace("<b>", "").replace("</b>", "") 173 ) 174 return text 175 176 177 citation_text = ( 178 'napari contributors (2019). napari: a ' 179 'multi-dimensional image viewer for python. ' 180 'doi:10.5281/zenodo.3555620' 181 ) 182 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/napari/utils/info.py b/napari/utils/info.py --- a/napari/utils/info.py +++ b/napari/utils/info.py @@ -76,6 +76,8 @@ as_html : bool if True, info will be returned as HTML, suitable for a QTextEdit widget """ + from npe2 import PluginManager as Npe2PluginManager + from napari.plugins import plugin_manager sys_version = sys.version.replace('\n', ' ') @@ -152,17 +154,27 @@ text += f" - failed to load screen information {e}" plugin_manager.discover() - plugin_strings = [] + plugin_strings = {} for meta in plugin_manager.list_plugin_metadata(): plugin_name = meta.get('plugin_name') if plugin_name == 'builtins': continue version = meta.get('version') version_string = f": {version}" if version else "" - plugin_strings.append(f" - {plugin_name}{version_string}") + plugin_strings[plugin_name] = f" - {plugin_name}{version_string}" + + npe2_plugin_manager = Npe2PluginManager.instance() + for manifest in npe2_plugin_manager.iter_manifests(): + plugin_name = manifest.name + if plugin_name in ("napari", "builtins"): + continue + version = manifest.package_version + version_string = f": {version}" if version else "" + plugin_strings[plugin_name] = f" - {plugin_name}{version_string}" + text += '<br><b>Plugins</b>:' text += ( - ("<br>" + "<br>".join(sorted(plugin_strings))) + ("<br>" + "<br>".join(sorted(plugin_strings.values()))) if plugin_strings else ' None' )
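The diff above is the complete fix for this row; for readers who want the npe2 half in isolation, the following sketch restates it as a standalone function. The `PluginManager.instance()`, `iter_manifests()`, `manifest.name` and `manifest.package_version` calls are used exactly as in the diff; the wrapper function and its name are invented for the example and are not napari code.

```python
# Standalone restatement of the npe2 portion of the patch (illustrative only).
from npe2 import PluginManager as Npe2PluginManager


def npe2_plugin_lines() -> list:
    plugin_strings = {}
    for manifest in Npe2PluginManager.instance().iter_manifests():
        name = manifest.name
        if name in ("napari", "builtins"):
            continue  # skip napari itself and the builtins pseudo-plugin
        version = manifest.package_version
        suffix = f": {version}" if version else ""
        plugin_strings[name] = f" - {name}{suffix}"
    return sorted(plugin_strings.values())
```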
{"golden_diff": "diff --git a/napari/utils/info.py b/napari/utils/info.py\n--- a/napari/utils/info.py\n+++ b/napari/utils/info.py\n@@ -76,6 +76,8 @@\n as_html : bool\n if True, info will be returned as HTML, suitable for a QTextEdit widget\n \"\"\"\n+ from npe2 import PluginManager as Npe2PluginManager\n+\n from napari.plugins import plugin_manager\n \n sys_version = sys.version.replace('\\n', ' ')\n@@ -152,17 +154,27 @@\n text += f\" - failed to load screen information {e}\"\n \n plugin_manager.discover()\n- plugin_strings = []\n+ plugin_strings = {}\n for meta in plugin_manager.list_plugin_metadata():\n plugin_name = meta.get('plugin_name')\n if plugin_name == 'builtins':\n continue\n version = meta.get('version')\n version_string = f\": {version}\" if version else \"\"\n- plugin_strings.append(f\" - {plugin_name}{version_string}\")\n+ plugin_strings[plugin_name] = f\" - {plugin_name}{version_string}\"\n+\n+ npe2_plugin_manager = Npe2PluginManager.instance()\n+ for manifest in npe2_plugin_manager.iter_manifests():\n+ plugin_name = manifest.name\n+ if plugin_name in (\"napari\", \"builtins\"):\n+ continue\n+ version = manifest.package_version\n+ version_string = f\": {version}\" if version else \"\"\n+ plugin_strings[plugin_name] = f\" - {plugin_name}{version_string}\"\n+\n text += '<br><b>Plugins</b>:'\n text += (\n- (\"<br>\" + \"<br>\".join(sorted(plugin_strings)))\n+ (\"<br>\" + \"<br>\".join(sorted(plugin_strings.values())))\n if plugin_strings\n else ' None'\n )\n", "issue": "npe2 plugins need to be added to `napari --info` \n## \ud83d\udc1b Bug\r\ncurrently, `napari --info` doesn't include npe2 plugins\n", "before_files": [{"content": "import os\nimport platform\nimport subprocess\nimport sys\n\nimport napari\n\nOS_RELEASE_PATH = \"/etc/os-release\"\n\n\ndef _linux_sys_name():\n \"\"\"\n Try to discover linux system name base on /etc/os-release file or lsb_release command output\n https://www.freedesktop.org/software/systemd/man/os-release.html\n \"\"\"\n if os.path.exists(OS_RELEASE_PATH):\n with open(OS_RELEASE_PATH) as f_p:\n data = {}\n for line in f_p:\n field, value = line.split(\"=\")\n data[field.strip()] = value.strip().strip('\"')\n if \"PRETTY_NAME\" in data:\n return data[\"PRETTY_NAME\"]\n if \"NAME\" in data:\n if \"VERSION\" in data:\n return f'{data[\"NAME\"]} {data[\"VERSION\"]}'\n if \"VERSION_ID\" in data:\n return f'{data[\"NAME\"]} {data[\"VERSION_ID\"]}'\n return f'{data[\"NAME\"]} (no version)'\n\n try:\n res = subprocess.run(\n [\"lsb_release\", \"-d\", \"-r\"], check=True, capture_output=True\n )\n text = res.stdout.decode()\n data = {}\n for line in text.split(\"\\n\"):\n key, val = line.split(\":\")\n data[key.strip()] = val.strip()\n version_str = data[\"Description\"]\n if not version_str.endswith(data[\"Release\"]):\n version_str += \" \" + data[\"Release\"]\n return version_str\n except subprocess.CalledProcessError:\n pass\n return \"\"\n\n\ndef _sys_name():\n \"\"\"\n Discover MacOS or Linux Human readable information. 
For Linux provide information about distribution.\n \"\"\"\n try:\n if sys.platform == \"linux\":\n return _linux_sys_name()\n if sys.platform == \"darwin\":\n try:\n res = subprocess.run(\n [\"sw_vers\", \"-productVersion\"],\n check=True,\n capture_output=True,\n )\n return f\"MacOS {res.stdout.decode().strip()}\"\n except subprocess.CalledProcessError:\n pass\n except Exception:\n pass\n return \"\"\n\n\ndef sys_info(as_html=False):\n \"\"\"Gathers relevant module versions for troubleshooting purposes.\n\n Parameters\n ----------\n as_html : bool\n if True, info will be returned as HTML, suitable for a QTextEdit widget\n \"\"\"\n from napari.plugins import plugin_manager\n\n sys_version = sys.version.replace('\\n', ' ')\n text = (\n f\"<b>napari</b>: {napari.__version__}<br>\"\n f\"<b>Platform</b>: {platform.platform()}<br>\"\n )\n\n __sys_name = _sys_name()\n if __sys_name:\n text += f\"<b>System</b>: {__sys_name}<br>\"\n\n text += f\"<b>Python</b>: {sys_version}<br>\"\n\n try:\n from qtpy import API_NAME, PYQT_VERSION, PYSIDE_VERSION, QtCore\n\n if API_NAME == 'PySide2':\n API_VERSION = PYSIDE_VERSION\n elif API_NAME == 'PyQt5':\n API_VERSION = PYQT_VERSION\n else:\n API_VERSION = ''\n\n text += (\n f\"<b>Qt</b>: {QtCore.__version__}<br>\"\n f\"<b>{API_NAME}</b>: {API_VERSION}<br>\"\n )\n\n except Exception as e:\n text += f\"<b>Qt</b>: Import failed ({e})<br>\"\n\n modules = (\n ('numpy', 'NumPy'),\n ('scipy', 'SciPy'),\n ('dask', 'Dask'),\n ('vispy', 'VisPy'),\n )\n\n loaded = {}\n for module, name in modules:\n try:\n loaded[module] = __import__(module)\n text += f\"<b>{name}</b>: {loaded[module].__version__}<br>\"\n except Exception as e:\n text += f\"<b>{name}</b>: Import failed ({e})<br>\"\n\n text += \"<br><b>OpenGL:</b><br>\"\n\n if loaded.get('vispy', False):\n sys_info_text = (\n \"<br>\".join(\n [\n loaded['vispy'].sys_info().split(\"\\n\")[index]\n for index in [-4, -3]\n ]\n )\n .replace(\"'\", \"\")\n .replace(\"<br>\", \"<br> - \")\n )\n text += f' - {sys_info_text}<br>'\n else:\n text += \" - failed to load vispy\"\n\n text += \"<br><b>Screens:</b><br>\"\n\n try:\n from qtpy.QtGui import QGuiApplication\n\n screen_list = QGuiApplication.screens()\n for i, screen in enumerate(screen_list, start=1):\n text += f\" - screen {i}: resolution {screen.geometry().width()}x{screen.geometry().height()}, scale {screen.devicePixelRatio()}<br>\"\n except Exception as e:\n text += f\" - failed to load screen information {e}\"\n\n plugin_manager.discover()\n plugin_strings = []\n for meta in plugin_manager.list_plugin_metadata():\n plugin_name = meta.get('plugin_name')\n if plugin_name == 'builtins':\n continue\n version = meta.get('version')\n version_string = f\": {version}\" if version else \"\"\n plugin_strings.append(f\" - {plugin_name}{version_string}\")\n text += '<br><b>Plugins</b>:'\n text += (\n (\"<br>\" + \"<br>\".join(sorted(plugin_strings)))\n if plugin_strings\n else ' None'\n )\n\n if not as_html:\n text = (\n text.replace(\"<br>\", \"\\n\").replace(\"<b>\", \"\").replace(\"</b>\", \"\")\n )\n return text\n\n\ncitation_text = (\n 'napari contributors (2019). napari: a '\n 'multi-dimensional image viewer for python. 
'\n 'doi:10.5281/zenodo.3555620'\n)\n", "path": "napari/utils/info.py"}], "after_files": [{"content": "import os\nimport platform\nimport subprocess\nimport sys\n\nimport napari\n\nOS_RELEASE_PATH = \"/etc/os-release\"\n\n\ndef _linux_sys_name():\n \"\"\"\n Try to discover linux system name base on /etc/os-release file or lsb_release command output\n https://www.freedesktop.org/software/systemd/man/os-release.html\n \"\"\"\n if os.path.exists(OS_RELEASE_PATH):\n with open(OS_RELEASE_PATH) as f_p:\n data = {}\n for line in f_p:\n field, value = line.split(\"=\")\n data[field.strip()] = value.strip().strip('\"')\n if \"PRETTY_NAME\" in data:\n return data[\"PRETTY_NAME\"]\n if \"NAME\" in data:\n if \"VERSION\" in data:\n return f'{data[\"NAME\"]} {data[\"VERSION\"]}'\n if \"VERSION_ID\" in data:\n return f'{data[\"NAME\"]} {data[\"VERSION_ID\"]}'\n return f'{data[\"NAME\"]} (no version)'\n\n try:\n res = subprocess.run(\n [\"lsb_release\", \"-d\", \"-r\"], check=True, capture_output=True\n )\n text = res.stdout.decode()\n data = {}\n for line in text.split(\"\\n\"):\n key, val = line.split(\":\")\n data[key.strip()] = val.strip()\n version_str = data[\"Description\"]\n if not version_str.endswith(data[\"Release\"]):\n version_str += \" \" + data[\"Release\"]\n return version_str\n except subprocess.CalledProcessError:\n pass\n return \"\"\n\n\ndef _sys_name():\n \"\"\"\n Discover MacOS or Linux Human readable information. For Linux provide information about distribution.\n \"\"\"\n try:\n if sys.platform == \"linux\":\n return _linux_sys_name()\n if sys.platform == \"darwin\":\n try:\n res = subprocess.run(\n [\"sw_vers\", \"-productVersion\"],\n check=True,\n capture_output=True,\n )\n return f\"MacOS {res.stdout.decode().strip()}\"\n except subprocess.CalledProcessError:\n pass\n except Exception:\n pass\n return \"\"\n\n\ndef sys_info(as_html=False):\n \"\"\"Gathers relevant module versions for troubleshooting purposes.\n\n Parameters\n ----------\n as_html : bool\n if True, info will be returned as HTML, suitable for a QTextEdit widget\n \"\"\"\n from npe2 import PluginManager as Npe2PluginManager\n\n from napari.plugins import plugin_manager\n\n sys_version = sys.version.replace('\\n', ' ')\n text = (\n f\"<b>napari</b>: {napari.__version__}<br>\"\n f\"<b>Platform</b>: {platform.platform()}<br>\"\n )\n\n __sys_name = _sys_name()\n if __sys_name:\n text += f\"<b>System</b>: {__sys_name}<br>\"\n\n text += f\"<b>Python</b>: {sys_version}<br>\"\n\n try:\n from qtpy import API_NAME, PYQT_VERSION, PYSIDE_VERSION, QtCore\n\n if API_NAME == 'PySide2':\n API_VERSION = PYSIDE_VERSION\n elif API_NAME == 'PyQt5':\n API_VERSION = PYQT_VERSION\n else:\n API_VERSION = ''\n\n text += (\n f\"<b>Qt</b>: {QtCore.__version__}<br>\"\n f\"<b>{API_NAME}</b>: {API_VERSION}<br>\"\n )\n\n except Exception as e:\n text += f\"<b>Qt</b>: Import failed ({e})<br>\"\n\n modules = (\n ('numpy', 'NumPy'),\n ('scipy', 'SciPy'),\n ('dask', 'Dask'),\n ('vispy', 'VisPy'),\n )\n\n loaded = {}\n for module, name in modules:\n try:\n loaded[module] = __import__(module)\n text += f\"<b>{name}</b>: {loaded[module].__version__}<br>\"\n except Exception as e:\n text += f\"<b>{name}</b>: Import failed ({e})<br>\"\n\n text += \"<br><b>OpenGL:</b><br>\"\n\n if loaded.get('vispy', False):\n sys_info_text = (\n \"<br>\".join(\n [\n loaded['vispy'].sys_info().split(\"\\n\")[index]\n for index in [-4, -3]\n ]\n )\n .replace(\"'\", \"\")\n .replace(\"<br>\", \"<br> - \")\n )\n text += f' - {sys_info_text}<br>'\n else:\n text += \" - 
failed to load vispy\"\n\n text += \"<br><b>Screens:</b><br>\"\n\n try:\n from qtpy.QtGui import QGuiApplication\n\n screen_list = QGuiApplication.screens()\n for i, screen in enumerate(screen_list, start=1):\n text += f\" - screen {i}: resolution {screen.geometry().width()}x{screen.geometry().height()}, scale {screen.devicePixelRatio()}<br>\"\n except Exception as e:\n text += f\" - failed to load screen information {e}\"\n\n plugin_manager.discover()\n plugin_strings = {}\n for meta in plugin_manager.list_plugin_metadata():\n plugin_name = meta.get('plugin_name')\n if plugin_name == 'builtins':\n continue\n version = meta.get('version')\n version_string = f\": {version}\" if version else \"\"\n plugin_strings[plugin_name] = f\" - {plugin_name}{version_string}\"\n\n npe2_plugin_manager = Npe2PluginManager.instance()\n for manifest in npe2_plugin_manager.iter_manifests():\n plugin_name = manifest.name\n if plugin_name in (\"napari\", \"builtins\"):\n continue\n version = manifest.package_version\n version_string = f\": {version}\" if version else \"\"\n plugin_strings[plugin_name] = f\" - {plugin_name}{version_string}\"\n\n text += '<br><b>Plugins</b>:'\n text += (\n (\"<br>\" + \"<br>\".join(sorted(plugin_strings.values())))\n if plugin_strings\n else ' None'\n )\n\n if not as_html:\n text = (\n text.replace(\"<br>\", \"\\n\").replace(\"<b>\", \"\").replace(\"</b>\", \"\")\n )\n return text\n\n\ncitation_text = (\n 'napari contributors (2019). napari: a '\n 'multi-dimensional image viewer for python. '\n 'doi:10.5281/zenodo.3555620'\n)\n", "path": "napari/utils/info.py"}]}
num_tokens: 2,044
num_tokens_diff: 409

problem_id: gh_patches_debug_29116
source: rasdani/github-patches
task_type: git_diff
in_source_id: numba__numba-6290
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- cmath linking issues in CUDA From the mailing list: https://groups.google.com/a/continuum.io/forum/#!topic/numba-users/oBcaFcsD-x4 This: ```python from numba import cuda import cmath import numpy as np @cuda.jit(device = True) def quarticRoots(a, b, c, d, e): del_0 = c**2 - 3.*b*d + 12.*a*e del_1 = 2.*c**3 - 9.*b*c*d + 27*e*b**2 + 27*a*d**2 - 72*a*c*e p = (8.*a*c - 3.*b**2)/(8.*a**2) q = (b**3 - 4.*a*b*c + 8.*d*a**2)/(8.*a**3) Q = ((del_1 + cmath.sqrt(del_1**2 - 4.*del_0**3))/2.)**(1./3.)#**(1./3.) is the problem. S = 0.5*cmath.sqrt(-2./3.*p + 1/(3.*a)*(Q + del_0/Q)) x1 = -b/(4.*a) - S + 0.5*cmath.sqrt(-4.*S**2 - 2.*p + q/S) x2 = -b/(4.*a) - S - 0.5*cmath.sqrt(-4.*S**2 - 2.*p + q/S) x3 = -b/(4.*a) + S + 0.5*cmath.sqrt(-4.*S**2 - 2.*p - q/S) x4 = -b/(4.*a) + S - 0.5*cmath.sqrt(-4.*S**2 - 2.*p - q/S) return (x1, x2, x3, x4) @cuda.jit def launcher(d_array): output = quarticRoots(2.,4.,2.,1.,1.) d_array[0] = output[0] d_array[1] = output[1] d_array[2] = output[2] d_array[3] = output[3] def main(): d_array = cuda.device_array(4, dtype = np.complex64) launcher[1,1](d_array) output = d_array.copy_to_host() print output main() ``` yields: ```python Traceback (most recent call last): File "<path>numba/numba/cuda/cudadrv/driver.py", line 1432, in complete driver.cuLinkComplete(self.handle, byref(cubin), byref(size)) File "<path>numba/numba/cuda/cudadrv/driver.py", line 288, in safe_cuda_api_call self._check_error(fname, retcode) File "<path>numba/numba/cuda/cudadrv/driver.py", line 323, in _check_error raise CudaAPIError(retcode, msg) numba.cuda.cudadrv.driver.CudaAPIError: [999] Call to cuLinkComplete results in CUDA_ERROR_UNKNOWN During handling of the above exception, another exception occurred: Traceback (most recent call last): File "issue_13.py", line 39, in <module> main() File "issue_13.py", line 35, in main launcher[1,1](d_array) File "<path>numba/numba/cuda/compiler.py", line 701, in __call__ kernel = self.specialize(*args) File "<path>numba/numba/cuda/compiler.py", line 712, in specialize kernel = self.compile(argtypes) File "<path>numba/numba/cuda/compiler.py", line 730, in compile kernel.bind() File "<path>numba/numba/cuda/compiler.py", line 489, in bind self._func.get() File "<path>numba/numba/cuda/compiler.py", line 377, in get cubin, _size = linker.complete() File "<path>numba/numba/cuda/cudadrv/driver.py", line 1434, in complete raise LinkerError("%s\n%s" % (e, self.error_log)) numba.cuda.cudadrv.driver.LinkerError: [999] Call to cuLinkComplete results in CUDA_ERROR_UNKNOWN error : Undefined reference to 'numba_cpow' in '<cudapy-ptx>' ``` the `cmath.sqrt()` creates a complex domain result with is then raised to the power `1/3`, this ends up as a call to `numba_cpow` which is fine on the CPU but doesn't have an impl in CUDA, as a result there's a linking error. Two issues: 1. Can the additional cmath function impls for the CPU be done for CUDA. 2. If not can this problem be caught earlier in the complication chain and a better message produced such that is doesn't appear as a link error. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `numba/cuda/libdevice.py` Content: ``` 1 import math 2 from llvmlite.llvmpy.core import Type 3 from numba.core import types, cgutils 4 from numba.core.imputils import Registry 5 6 registry = Registry() 7 lower = registry.lower 8 9 float_set = types.float32, types.float64 10 11 12 def bool_implement(nvname, ty): 13 def core(context, builder, sig, args): 14 assert sig.return_type == types.boolean, nvname 15 fty = context.get_value_type(ty) 16 lmod = builder.module 17 fnty = Type.function(Type.int(), [fty]) 18 fn = lmod.get_or_insert_function(fnty, name=nvname) 19 result = builder.call(fn, args) 20 return context.cast(builder, result, types.int32, types.boolean) 21 22 return core 23 24 25 def unary_implement(nvname, ty): 26 def core(context, builder, sig, args): 27 fty = context.get_value_type(ty) 28 lmod = builder.module 29 fnty = Type.function(fty, [fty]) 30 fn = lmod.get_or_insert_function(fnty, name=nvname) 31 return builder.call(fn, args) 32 33 return core 34 35 36 def binary_implement(nvname, ty): 37 def core(context, builder, sig, args): 38 fty = context.get_value_type(ty) 39 lmod = builder.module 40 fnty = Type.function(fty, [fty, fty]) 41 fn = lmod.get_or_insert_function(fnty, name=nvname) 42 return builder.call(fn, args) 43 44 return core 45 46 47 def powi_implement(nvname): 48 def core(context, builder, sig, args): 49 [base, pow] = args 50 [basety, powty] = sig.args 51 lmod = builder.module 52 fty = context.get_value_type(basety) 53 ity = context.get_value_type(types.int32) 54 fnty = Type.function(fty, [fty, ity]) 55 fn = lmod.get_or_insert_function(fnty, name=nvname) 56 return builder.call(fn, [base, pow]) 57 58 return core 59 60 61 lower(math.pow, types.float32, types.int32)(powi_implement('__nv_powif')) 62 lower(math.pow, types.float64, types.int32)(powi_implement('__nv_powi')) 63 64 65 def frexp_implement(nvname): 66 def core(context, builder, sig, args): 67 fracty, expty = sig.return_type 68 float_type = context.get_value_type(fracty) 69 int_type = context.get_value_type(expty) 70 fnty = Type.function(float_type, [float_type, Type.pointer(int_type)]) 71 72 fn = builder.module.get_or_insert_function(fnty, name=nvname) 73 expptr = cgutils.alloca_once(builder, int_type, name='exp') 74 75 ret = builder.call(fn, (args[0], expptr)) 76 return cgutils.pack_struct(builder, (ret, builder.load(expptr))) 77 78 return core 79 80 81 lower(math.frexp, types.float32)(frexp_implement('__nv_frexpf')) 82 lower(math.frexp, types.float64)(frexp_implement('__nv_frexp')) 83 84 lower(math.ldexp, types.float32, types.int32)(powi_implement('__nv_ldexpf')) 85 lower(math.ldexp, types.float64, types.int32)(powi_implement('__nv_ldexp')) 86 87 88 booleans = [] 89 booleans += [('__nv_isnand', '__nv_isnanf', math.isnan)] 90 booleans += [('__nv_isinfd', '__nv_isinff', math.isinf)] 91 booleans += [('__nv_isfinited', '__nv_finitef', math.isfinite)] 92 93 unarys = [] 94 unarys += [('__nv_ceil', '__nv_ceilf', math.ceil)] 95 unarys += [('__nv_floor', '__nv_floorf', math.floor)] 96 unarys += [('__nv_fabs', '__nv_fabsf', math.fabs)] 97 unarys += [('__nv_exp', '__nv_expf', math.exp)] 98 unarys += [('__nv_expm1', '__nv_expm1f', math.expm1)] 99 unarys += [('__nv_erf', '__nv_erff', math.erf)] 100 unarys += [('__nv_erfc', '__nv_erfcf', math.erfc)] 101 unarys += [('__nv_tgamma', '__nv_tgammaf', math.gamma)] 102 unarys += [('__nv_lgamma', '__nv_lgammaf', math.lgamma)] 103 unarys += [('__nv_sqrt', '__nv_sqrtf', math.sqrt)] 104 unarys += [('__nv_log', '__nv_logf', math.log)] 105 unarys += 
[('__nv_log10', '__nv_log10f', math.log10)] 106 unarys += [('__nv_log1p', '__nv_log1pf', math.log1p)] 107 unarys += [('__nv_acosh', '__nv_acoshf', math.acosh)] 108 unarys += [('__nv_acos', '__nv_acosf', math.acos)] 109 unarys += [('__nv_cos', '__nv_cosf', math.cos)] 110 unarys += [('__nv_cosh', '__nv_coshf', math.cosh)] 111 unarys += [('__nv_asinh', '__nv_asinhf', math.asinh)] 112 unarys += [('__nv_asin', '__nv_asinf', math.asin)] 113 unarys += [('__nv_sin', '__nv_sinf', math.sin)] 114 unarys += [('__nv_sinh', '__nv_sinhf', math.sinh)] 115 unarys += [('__nv_atan', '__nv_atanf', math.atan)] 116 unarys += [('__nv_atanh', '__nv_atanhf', math.atanh)] 117 unarys += [('__nv_tan', '__nv_tanf', math.tan)] 118 unarys += [('__nv_tanh', '__nv_tanhf', math.tanh)] 119 120 binarys = [] 121 binarys += [('__nv_copysign', '__nv_copysignf', math.copysign)] 122 binarys += [('__nv_atan2', '__nv_atan2f', math.atan2)] 123 binarys += [('__nv_pow', '__nv_powf', math.pow)] 124 binarys += [('__nv_fmod', '__nv_fmodf', math.fmod)] 125 binarys += [('__nv_hypot', '__nv_hypotf', math.hypot)] 126 127 128 for name64, name32, key in booleans: 129 impl64 = bool_implement(name64, types.float64) 130 lower(key, types.float64)(impl64) 131 impl32 = bool_implement(name32, types.float32) 132 lower(key, types.float32)(impl32) 133 134 135 for name64, name32, key in unarys: 136 impl64 = unary_implement(name64, types.float64) 137 lower(key, types.float64)(impl64) 138 impl32 = unary_implement(name32, types.float32) 139 lower(key, types.float32)(impl32) 140 141 for name64, name32, key in binarys: 142 impl64 = binary_implement(name64, types.float64) 143 lower(key, types.float64, types.float64)(impl64) 144 impl32 = binary_implement(name32, types.float32) 145 lower(key, types.float32, types.float32)(impl32) 146 147 148 def modf_implement(nvname, ty): 149 def core(context, builder, sig, args): 150 arg, = args 151 argty, = sig.args 152 fty = context.get_value_type(argty) 153 lmod = builder.module 154 ptr = cgutils.alloca_once(builder, fty) 155 fnty = Type.function(fty, [fty, fty.as_pointer()]) 156 fn = lmod.get_or_insert_function(fnty, name=nvname) 157 out = builder.call(fn, [arg, ptr]) 158 ret = context.make_tuple(builder, types.UniTuple(argty, 2), 159 [out, builder.load(ptr)]) 160 return ret 161 return core 162 163 164 for (ty, intrin) in ((types.float64, '__nv_modf',), 165 (types.float32, '__nv_modff',)): 166 lower(math.modf, ty)(modf_implement(intrin, ty)) 167 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/numba/cuda/libdevice.py b/numba/cuda/libdevice.py --- a/numba/cuda/libdevice.py +++ b/numba/cuda/libdevice.py @@ -1,4 +1,5 @@ import math +import operator from llvmlite.llvmpy.core import Type from numba.core import types, cgutils from numba.core.imputils import Registry @@ -164,3 +165,41 @@ for (ty, intrin) in ((types.float64, '__nv_modf',), (types.float32, '__nv_modff',)): lower(math.modf, ty)(modf_implement(intrin, ty)) + + +# Complex power implementations - translations of _Py_c_pow from CPython +# https://github.com/python/cpython/blob/a755410e054e1e2390de5830befc08fe80706c66/Objects/complexobject.c#L123-L151 +# +# The complex64 variant casts all constants and some variables to ensure that +# as much computation is done in single precision as possible. A small number +# of operations are still done in 64-bit, but these come from libdevice code. + +def cpow_implement(fty, cty): + def core(context, builder, sig, args): + def cpow_internal(a, b): + + if b.real == fty(0.0) and b.imag == fty(0.0): + return cty(1.0) + cty(0.0j) + elif a.real == fty(0.0) and b.real == fty(0.0): + return cty(0.0) + cty(0.0j) + + vabs = math.hypot(a.real, a.imag) + len = math.pow(vabs, b.real) + at = math.atan2(a.imag, a.real) + phase = at * b.real + if b.imag != fty(0.0): + len /= math.exp(at * b.imag) + phase += b.imag * math.log(vabs) + + return len * (cty(math.cos(phase)) + + cty(math.sin(phase) * cty(1.0j))) + + return context.compile_internal(builder, cpow_internal, sig, args) + + lower(operator.pow, cty, cty)(core) + lower(operator.ipow, cty, cty)(core) + lower(pow, cty, cty)(core) + + +cpow_implement(types.float32, types.complex64) +cpow_implement(types.float64, types.complex128)
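The core of the patch above is a translation of CPython's `_Py_c_pow` that `context.compile_internal` then compiles for the device. As a readability aid, here is the same algorithm written as ordinary host-side Python; the branch structure mirrors the diff, including its `a.real == 0.0 and b.real == 0.0` early-out, while the function name and the use of the built-in `complex` type are illustrative rather than taken from the repository.

```python
# Host-side sketch of the complex-power algorithm registered for CUDA above.
import math


def cpow(a: complex, b: complex) -> complex:
    if b.real == 0.0 and b.imag == 0.0:
        return 1.0 + 0.0j
    elif a.real == 0.0 and b.real == 0.0:
        return 0.0 + 0.0j

    vabs = math.hypot(a.real, a.imag)
    length = math.pow(vabs, b.real)
    at = math.atan2(a.imag, a.real)
    phase = at * b.real
    if b.imag != 0.0:
        length /= math.exp(at * b.imag)
        phase += b.imag * math.log(vabs)

    return complex(length * math.cos(phase), length * math.sin(phase))
```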
{"golden_diff": "diff --git a/numba/cuda/libdevice.py b/numba/cuda/libdevice.py\n--- a/numba/cuda/libdevice.py\n+++ b/numba/cuda/libdevice.py\n@@ -1,4 +1,5 @@\n import math\n+import operator\n from llvmlite.llvmpy.core import Type\n from numba.core import types, cgutils\n from numba.core.imputils import Registry\n@@ -164,3 +165,41 @@\n for (ty, intrin) in ((types.float64, '__nv_modf',),\n (types.float32, '__nv_modff',)):\n lower(math.modf, ty)(modf_implement(intrin, ty))\n+\n+\n+# Complex power implementations - translations of _Py_c_pow from CPython\n+# https://github.com/python/cpython/blob/a755410e054e1e2390de5830befc08fe80706c66/Objects/complexobject.c#L123-L151\n+#\n+# The complex64 variant casts all constants and some variables to ensure that\n+# as much computation is done in single precision as possible. A small number\n+# of operations are still done in 64-bit, but these come from libdevice code.\n+\n+def cpow_implement(fty, cty):\n+ def core(context, builder, sig, args):\n+ def cpow_internal(a, b):\n+\n+ if b.real == fty(0.0) and b.imag == fty(0.0):\n+ return cty(1.0) + cty(0.0j)\n+ elif a.real == fty(0.0) and b.real == fty(0.0):\n+ return cty(0.0) + cty(0.0j)\n+\n+ vabs = math.hypot(a.real, a.imag)\n+ len = math.pow(vabs, b.real)\n+ at = math.atan2(a.imag, a.real)\n+ phase = at * b.real\n+ if b.imag != fty(0.0):\n+ len /= math.exp(at * b.imag)\n+ phase += b.imag * math.log(vabs)\n+\n+ return len * (cty(math.cos(phase)) +\n+ cty(math.sin(phase) * cty(1.0j)))\n+\n+ return context.compile_internal(builder, cpow_internal, sig, args)\n+\n+ lower(operator.pow, cty, cty)(core)\n+ lower(operator.ipow, cty, cty)(core)\n+ lower(pow, cty, cty)(core)\n+\n+\n+cpow_implement(types.float32, types.complex64)\n+cpow_implement(types.float64, types.complex128)\n", "issue": "cmath linking issues in CUDA\nFrom the mailing list:\r\nhttps://groups.google.com/a/continuum.io/forum/#!topic/numba-users/oBcaFcsD-x4\r\nThis:\r\n```python\r\nfrom numba import cuda\r\nimport cmath\r\nimport numpy as np\r\n\r\[email protected](device = True)\r\ndef quarticRoots(a, b, c, d, e):\r\n\r\n\tdel_0 = c**2 - 3.*b*d + 12.*a*e\r\n\tdel_1 = 2.*c**3 - 9.*b*c*d + 27*e*b**2 + 27*a*d**2 - 72*a*c*e\r\n\r\n\tp = (8.*a*c - 3.*b**2)/(8.*a**2)\r\n\tq = (b**3 - 4.*a*b*c + 8.*d*a**2)/(8.*a**3)\r\n\r\n\tQ = ((del_1 + cmath.sqrt(del_1**2 - 4.*del_0**3))/2.)**(1./3.)#**(1./3.) 
is the problem.\r\n\tS = 0.5*cmath.sqrt(-2./3.*p + 1/(3.*a)*(Q + del_0/Q))\r\n\r\n\tx1 = -b/(4.*a) - S + 0.5*cmath.sqrt(-4.*S**2 - 2.*p + q/S)\r\n\tx2 = -b/(4.*a) - S - 0.5*cmath.sqrt(-4.*S**2 - 2.*p + q/S)\r\n\tx3 = -b/(4.*a) + S + 0.5*cmath.sqrt(-4.*S**2 - 2.*p - q/S)\r\n\tx4 = -b/(4.*a) + S - 0.5*cmath.sqrt(-4.*S**2 - 2.*p - q/S)\r\n\r\n\treturn (x1, x2, x3, x4)\r\n\r\[email protected]\r\ndef launcher(d_array):\r\n\toutput = quarticRoots(2.,4.,2.,1.,1.)\r\n\r\n\td_array[0] = output[0]\r\n\td_array[1] = output[1]\r\n\td_array[2] = output[2]\r\n\td_array[3] = output[3]\r\n\r\ndef main():\r\n\td_array = cuda.device_array(4, dtype = np.complex64)\r\n\tlauncher[1,1](d_array)\r\n\toutput = d_array.copy_to_host()\r\n\tprint output\r\n\r\nmain()\r\n```\r\nyields:\r\n```python\r\nTraceback (most recent call last):\r\n File \"<path>numba/numba/cuda/cudadrv/driver.py\", line 1432, in complete\r\n driver.cuLinkComplete(self.handle, byref(cubin), byref(size))\r\n File \"<path>numba/numba/cuda/cudadrv/driver.py\", line 288, in safe_cuda_api_call\r\n self._check_error(fname, retcode)\r\n File \"<path>numba/numba/cuda/cudadrv/driver.py\", line 323, in _check_error\r\n raise CudaAPIError(retcode, msg)\r\nnumba.cuda.cudadrv.driver.CudaAPIError: [999] Call to cuLinkComplete results in CUDA_ERROR_UNKNOWN\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"issue_13.py\", line 39, in <module>\r\n main()\r\n File \"issue_13.py\", line 35, in main\r\n launcher[1,1](d_array)\r\n File \"<path>numba/numba/cuda/compiler.py\", line 701, in __call__\r\n kernel = self.specialize(*args)\r\n File \"<path>numba/numba/cuda/compiler.py\", line 712, in specialize\r\n kernel = self.compile(argtypes)\r\n File \"<path>numba/numba/cuda/compiler.py\", line 730, in compile\r\n kernel.bind()\r\n File \"<path>numba/numba/cuda/compiler.py\", line 489, in bind\r\n self._func.get()\r\n File \"<path>numba/numba/cuda/compiler.py\", line 377, in get\r\n cubin, _size = linker.complete()\r\n File \"<path>numba/numba/cuda/cudadrv/driver.py\", line 1434, in complete\r\n raise LinkerError(\"%s\\n%s\" % (e, self.error_log))\r\nnumba.cuda.cudadrv.driver.LinkerError: [999] Call to cuLinkComplete results in CUDA_ERROR_UNKNOWN\r\nerror : Undefined reference to 'numba_cpow' in '<cudapy-ptx>'\r\n```\r\nthe `cmath.sqrt()` creates a complex domain result with is then raised to the power `1/3`, this ends up as a call to `numba_cpow` which is fine on the CPU but doesn't have an impl in CUDA, as a result there's a linking error.\r\n\r\nTwo issues:\r\n1. Can the additional cmath function impls for the CPU be done for CUDA.\r\n2. 
If not can this problem be caught earlier in the complication chain and a better message produced such that is doesn't appear as a link error.\r\n\n", "before_files": [{"content": "import math\nfrom llvmlite.llvmpy.core import Type\nfrom numba.core import types, cgutils\nfrom numba.core.imputils import Registry\n\nregistry = Registry()\nlower = registry.lower\n\nfloat_set = types.float32, types.float64\n\n\ndef bool_implement(nvname, ty):\n def core(context, builder, sig, args):\n assert sig.return_type == types.boolean, nvname\n fty = context.get_value_type(ty)\n lmod = builder.module\n fnty = Type.function(Type.int(), [fty])\n fn = lmod.get_or_insert_function(fnty, name=nvname)\n result = builder.call(fn, args)\n return context.cast(builder, result, types.int32, types.boolean)\n\n return core\n\n\ndef unary_implement(nvname, ty):\n def core(context, builder, sig, args):\n fty = context.get_value_type(ty)\n lmod = builder.module\n fnty = Type.function(fty, [fty])\n fn = lmod.get_or_insert_function(fnty, name=nvname)\n return builder.call(fn, args)\n\n return core\n\n\ndef binary_implement(nvname, ty):\n def core(context, builder, sig, args):\n fty = context.get_value_type(ty)\n lmod = builder.module\n fnty = Type.function(fty, [fty, fty])\n fn = lmod.get_or_insert_function(fnty, name=nvname)\n return builder.call(fn, args)\n\n return core\n\n\ndef powi_implement(nvname):\n def core(context, builder, sig, args):\n [base, pow] = args\n [basety, powty] = sig.args\n lmod = builder.module\n fty = context.get_value_type(basety)\n ity = context.get_value_type(types.int32)\n fnty = Type.function(fty, [fty, ity])\n fn = lmod.get_or_insert_function(fnty, name=nvname)\n return builder.call(fn, [base, pow])\n\n return core\n\n\nlower(math.pow, types.float32, types.int32)(powi_implement('__nv_powif'))\nlower(math.pow, types.float64, types.int32)(powi_implement('__nv_powi'))\n\n\ndef frexp_implement(nvname):\n def core(context, builder, sig, args):\n fracty, expty = sig.return_type\n float_type = context.get_value_type(fracty)\n int_type = context.get_value_type(expty)\n fnty = Type.function(float_type, [float_type, Type.pointer(int_type)])\n\n fn = builder.module.get_or_insert_function(fnty, name=nvname)\n expptr = cgutils.alloca_once(builder, int_type, name='exp')\n\n ret = builder.call(fn, (args[0], expptr))\n return cgutils.pack_struct(builder, (ret, builder.load(expptr)))\n\n return core\n\n\nlower(math.frexp, types.float32)(frexp_implement('__nv_frexpf'))\nlower(math.frexp, types.float64)(frexp_implement('__nv_frexp'))\n\nlower(math.ldexp, types.float32, types.int32)(powi_implement('__nv_ldexpf'))\nlower(math.ldexp, types.float64, types.int32)(powi_implement('__nv_ldexp'))\n\n\nbooleans = []\nbooleans += [('__nv_isnand', '__nv_isnanf', math.isnan)]\nbooleans += [('__nv_isinfd', '__nv_isinff', math.isinf)]\nbooleans += [('__nv_isfinited', '__nv_finitef', math.isfinite)]\n\nunarys = []\nunarys += [('__nv_ceil', '__nv_ceilf', math.ceil)]\nunarys += [('__nv_floor', '__nv_floorf', math.floor)]\nunarys += [('__nv_fabs', '__nv_fabsf', math.fabs)]\nunarys += [('__nv_exp', '__nv_expf', math.exp)]\nunarys += [('__nv_expm1', '__nv_expm1f', math.expm1)]\nunarys += [('__nv_erf', '__nv_erff', math.erf)]\nunarys += [('__nv_erfc', '__nv_erfcf', math.erfc)]\nunarys += [('__nv_tgamma', '__nv_tgammaf', math.gamma)]\nunarys += [('__nv_lgamma', '__nv_lgammaf', math.lgamma)]\nunarys += [('__nv_sqrt', '__nv_sqrtf', math.sqrt)]\nunarys += [('__nv_log', '__nv_logf', math.log)]\nunarys += [('__nv_log10', '__nv_log10f', 
math.log10)]\nunarys += [('__nv_log1p', '__nv_log1pf', math.log1p)]\nunarys += [('__nv_acosh', '__nv_acoshf', math.acosh)]\nunarys += [('__nv_acos', '__nv_acosf', math.acos)]\nunarys += [('__nv_cos', '__nv_cosf', math.cos)]\nunarys += [('__nv_cosh', '__nv_coshf', math.cosh)]\nunarys += [('__nv_asinh', '__nv_asinhf', math.asinh)]\nunarys += [('__nv_asin', '__nv_asinf', math.asin)]\nunarys += [('__nv_sin', '__nv_sinf', math.sin)]\nunarys += [('__nv_sinh', '__nv_sinhf', math.sinh)]\nunarys += [('__nv_atan', '__nv_atanf', math.atan)]\nunarys += [('__nv_atanh', '__nv_atanhf', math.atanh)]\nunarys += [('__nv_tan', '__nv_tanf', math.tan)]\nunarys += [('__nv_tanh', '__nv_tanhf', math.tanh)]\n\nbinarys = []\nbinarys += [('__nv_copysign', '__nv_copysignf', math.copysign)]\nbinarys += [('__nv_atan2', '__nv_atan2f', math.atan2)]\nbinarys += [('__nv_pow', '__nv_powf', math.pow)]\nbinarys += [('__nv_fmod', '__nv_fmodf', math.fmod)]\nbinarys += [('__nv_hypot', '__nv_hypotf', math.hypot)]\n\n\nfor name64, name32, key in booleans:\n impl64 = bool_implement(name64, types.float64)\n lower(key, types.float64)(impl64)\n impl32 = bool_implement(name32, types.float32)\n lower(key, types.float32)(impl32)\n\n\nfor name64, name32, key in unarys:\n impl64 = unary_implement(name64, types.float64)\n lower(key, types.float64)(impl64)\n impl32 = unary_implement(name32, types.float32)\n lower(key, types.float32)(impl32)\n\nfor name64, name32, key in binarys:\n impl64 = binary_implement(name64, types.float64)\n lower(key, types.float64, types.float64)(impl64)\n impl32 = binary_implement(name32, types.float32)\n lower(key, types.float32, types.float32)(impl32)\n\n\ndef modf_implement(nvname, ty):\n def core(context, builder, sig, args):\n arg, = args\n argty, = sig.args\n fty = context.get_value_type(argty)\n lmod = builder.module\n ptr = cgutils.alloca_once(builder, fty)\n fnty = Type.function(fty, [fty, fty.as_pointer()])\n fn = lmod.get_or_insert_function(fnty, name=nvname)\n out = builder.call(fn, [arg, ptr])\n ret = context.make_tuple(builder, types.UniTuple(argty, 2),\n [out, builder.load(ptr)])\n return ret\n return core\n\n\nfor (ty, intrin) in ((types.float64, '__nv_modf',),\n (types.float32, '__nv_modff',)):\n lower(math.modf, ty)(modf_implement(intrin, ty))\n", "path": "numba/cuda/libdevice.py"}], "after_files": [{"content": "import math\nimport operator\nfrom llvmlite.llvmpy.core import Type\nfrom numba.core import types, cgutils\nfrom numba.core.imputils import Registry\n\nregistry = Registry()\nlower = registry.lower\n\nfloat_set = types.float32, types.float64\n\n\ndef bool_implement(nvname, ty):\n def core(context, builder, sig, args):\n assert sig.return_type == types.boolean, nvname\n fty = context.get_value_type(ty)\n lmod = builder.module\n fnty = Type.function(Type.int(), [fty])\n fn = lmod.get_or_insert_function(fnty, name=nvname)\n result = builder.call(fn, args)\n return context.cast(builder, result, types.int32, types.boolean)\n\n return core\n\n\ndef unary_implement(nvname, ty):\n def core(context, builder, sig, args):\n fty = context.get_value_type(ty)\n lmod = builder.module\n fnty = Type.function(fty, [fty])\n fn = lmod.get_or_insert_function(fnty, name=nvname)\n return builder.call(fn, args)\n\n return core\n\n\ndef binary_implement(nvname, ty):\n def core(context, builder, sig, args):\n fty = context.get_value_type(ty)\n lmod = builder.module\n fnty = Type.function(fty, [fty, fty])\n fn = lmod.get_or_insert_function(fnty, name=nvname)\n return builder.call(fn, args)\n\n return core\n\n\ndef 
powi_implement(nvname):\n def core(context, builder, sig, args):\n [base, pow] = args\n [basety, powty] = sig.args\n lmod = builder.module\n fty = context.get_value_type(basety)\n ity = context.get_value_type(types.int32)\n fnty = Type.function(fty, [fty, ity])\n fn = lmod.get_or_insert_function(fnty, name=nvname)\n return builder.call(fn, [base, pow])\n\n return core\n\n\nlower(math.pow, types.float32, types.int32)(powi_implement('__nv_powif'))\nlower(math.pow, types.float64, types.int32)(powi_implement('__nv_powi'))\n\n\ndef frexp_implement(nvname):\n def core(context, builder, sig, args):\n fracty, expty = sig.return_type\n float_type = context.get_value_type(fracty)\n int_type = context.get_value_type(expty)\n fnty = Type.function(float_type, [float_type, Type.pointer(int_type)])\n\n fn = builder.module.get_or_insert_function(fnty, name=nvname)\n expptr = cgutils.alloca_once(builder, int_type, name='exp')\n\n ret = builder.call(fn, (args[0], expptr))\n return cgutils.pack_struct(builder, (ret, builder.load(expptr)))\n\n return core\n\n\nlower(math.frexp, types.float32)(frexp_implement('__nv_frexpf'))\nlower(math.frexp, types.float64)(frexp_implement('__nv_frexp'))\n\nlower(math.ldexp, types.float32, types.int32)(powi_implement('__nv_ldexpf'))\nlower(math.ldexp, types.float64, types.int32)(powi_implement('__nv_ldexp'))\n\n\nbooleans = []\nbooleans += [('__nv_isnand', '__nv_isnanf', math.isnan)]\nbooleans += [('__nv_isinfd', '__nv_isinff', math.isinf)]\nbooleans += [('__nv_isfinited', '__nv_finitef', math.isfinite)]\n\nunarys = []\nunarys += [('__nv_ceil', '__nv_ceilf', math.ceil)]\nunarys += [('__nv_floor', '__nv_floorf', math.floor)]\nunarys += [('__nv_fabs', '__nv_fabsf', math.fabs)]\nunarys += [('__nv_exp', '__nv_expf', math.exp)]\nunarys += [('__nv_expm1', '__nv_expm1f', math.expm1)]\nunarys += [('__nv_erf', '__nv_erff', math.erf)]\nunarys += [('__nv_erfc', '__nv_erfcf', math.erfc)]\nunarys += [('__nv_tgamma', '__nv_tgammaf', math.gamma)]\nunarys += [('__nv_lgamma', '__nv_lgammaf', math.lgamma)]\nunarys += [('__nv_sqrt', '__nv_sqrtf', math.sqrt)]\nunarys += [('__nv_log', '__nv_logf', math.log)]\nunarys += [('__nv_log10', '__nv_log10f', math.log10)]\nunarys += [('__nv_log1p', '__nv_log1pf', math.log1p)]\nunarys += [('__nv_acosh', '__nv_acoshf', math.acosh)]\nunarys += [('__nv_acos', '__nv_acosf', math.acos)]\nunarys += [('__nv_cos', '__nv_cosf', math.cos)]\nunarys += [('__nv_cosh', '__nv_coshf', math.cosh)]\nunarys += [('__nv_asinh', '__nv_asinhf', math.asinh)]\nunarys += [('__nv_asin', '__nv_asinf', math.asin)]\nunarys += [('__nv_sin', '__nv_sinf', math.sin)]\nunarys += [('__nv_sinh', '__nv_sinhf', math.sinh)]\nunarys += [('__nv_atan', '__nv_atanf', math.atan)]\nunarys += [('__nv_atanh', '__nv_atanhf', math.atanh)]\nunarys += [('__nv_tan', '__nv_tanf', math.tan)]\nunarys += [('__nv_tanh', '__nv_tanhf', math.tanh)]\n\nbinarys = []\nbinarys += [('__nv_copysign', '__nv_copysignf', math.copysign)]\nbinarys += [('__nv_atan2', '__nv_atan2f', math.atan2)]\nbinarys += [('__nv_pow', '__nv_powf', math.pow)]\nbinarys += [('__nv_fmod', '__nv_fmodf', math.fmod)]\nbinarys += [('__nv_hypot', '__nv_hypotf', math.hypot)]\n\n\nfor name64, name32, key in booleans:\n impl64 = bool_implement(name64, types.float64)\n lower(key, types.float64)(impl64)\n impl32 = bool_implement(name32, types.float32)\n lower(key, types.float32)(impl32)\n\n\nfor name64, name32, key in unarys:\n impl64 = unary_implement(name64, types.float64)\n lower(key, types.float64)(impl64)\n impl32 = unary_implement(name32, 
types.float32)\n lower(key, types.float32)(impl32)\n\nfor name64, name32, key in binarys:\n impl64 = binary_implement(name64, types.float64)\n lower(key, types.float64, types.float64)(impl64)\n impl32 = binary_implement(name32, types.float32)\n lower(key, types.float32, types.float32)(impl32)\n\n\ndef modf_implement(nvname, ty):\n def core(context, builder, sig, args):\n arg, = args\n argty, = sig.args\n fty = context.get_value_type(argty)\n lmod = builder.module\n ptr = cgutils.alloca_once(builder, fty)\n fnty = Type.function(fty, [fty, fty.as_pointer()])\n fn = lmod.get_or_insert_function(fnty, name=nvname)\n out = builder.call(fn, [arg, ptr])\n ret = context.make_tuple(builder, types.UniTuple(argty, 2),\n [out, builder.load(ptr)])\n return ret\n return core\n\n\nfor (ty, intrin) in ((types.float64, '__nv_modf',),\n (types.float32, '__nv_modff',)):\n lower(math.modf, ty)(modf_implement(intrin, ty))\n\n\n# Complex power implementations - translations of _Py_c_pow from CPython\n# https://github.com/python/cpython/blob/a755410e054e1e2390de5830befc08fe80706c66/Objects/complexobject.c#L123-L151\n#\n# The complex64 variant casts all constants and some variables to ensure that\n# as much computation is done in single precision as possible. A small number\n# of operations are still done in 64-bit, but these come from libdevice code.\n\ndef cpow_implement(fty, cty):\n def core(context, builder, sig, args):\n def cpow_internal(a, b):\n\n if b.real == fty(0.0) and b.imag == fty(0.0):\n return cty(1.0) + cty(0.0j)\n elif a.real == fty(0.0) and b.real == fty(0.0):\n return cty(0.0) + cty(0.0j)\n\n vabs = math.hypot(a.real, a.imag)\n len = math.pow(vabs, b.real)\n at = math.atan2(a.imag, a.real)\n phase = at * b.real\n if b.imag != fty(0.0):\n len /= math.exp(at * b.imag)\n phase += b.imag * math.log(vabs)\n\n return len * (cty(math.cos(phase)) +\n cty(math.sin(phase) * cty(1.0j)))\n\n return context.compile_internal(builder, cpow_internal, sig, args)\n\n lower(operator.pow, cty, cty)(core)\n lower(operator.ipow, cty, cty)(core)\n lower(pow, cty, cty)(core)\n\n\ncpow_implement(types.float32, types.complex64)\ncpow_implement(types.float64, types.complex128)\n", "path": "numba/cuda/libdevice.py"}]}
num_tokens: 3,622
num_tokens_diff: 626

problem_id: gh_patches_debug_14393
source: rasdani/github-patches
task_type: git_diff
in_source_id: falconry__falcon-993
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Default OPTIONS responder does not set Content-Length to "0" Per RFC 7231: > A server MUST generate a Content-Length field with a value of "0" if no payload body is to be sent in the response. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `falcon/responders.py` Content: ``` 1 # Copyright 2013 by Rackspace Hosting, Inc. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Default responder implementations.""" 16 17 from falcon.errors import HTTPBadRequest 18 from falcon.errors import HTTPMethodNotAllowed 19 from falcon.errors import HTTPNotFound 20 from falcon.status_codes import HTTP_204 21 22 23 def path_not_found(req, resp, **kwargs): 24 """Raise 404 HTTPNotFound error""" 25 raise HTTPNotFound() 26 27 28 def bad_request(req, resp, **kwargs): 29 """Raise 400 HTTPBadRequest error""" 30 raise HTTPBadRequest('Bad request', 'Invalid HTTP method') 31 32 33 def create_method_not_allowed(allowed_methods): 34 """Creates a responder for "405 Method Not Allowed" 35 36 Args: 37 allowed_methods: A list of HTTP methods (uppercase) that should be 38 returned in the Allow header. 39 40 """ 41 def method_not_allowed(req, resp, **kwargs): 42 """Raise 405 HTTPMethodNotAllowed error""" 43 raise HTTPMethodNotAllowed(allowed_methods) 44 45 return method_not_allowed 46 47 48 def create_default_options(allowed_methods): 49 """Creates a default responder for the OPTIONS method 50 51 Args: 52 allowed_methods: A list of HTTP methods (uppercase) that should be 53 returned in the Allow header. 54 55 """ 56 allowed = ', '.join(allowed_methods) 57 58 def on_options(req, resp, **kwargs): 59 resp.status = HTTP_204 60 resp.set_header('Allow', allowed) 61 resp.set_header('Content-Length', '0') 62 63 return on_options 64 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/falcon/responders.py b/falcon/responders.py --- a/falcon/responders.py +++ b/falcon/responders.py @@ -17,7 +17,7 @@ from falcon.errors import HTTPBadRequest from falcon.errors import HTTPMethodNotAllowed from falcon.errors import HTTPNotFound -from falcon.status_codes import HTTP_204 +from falcon.status_codes import HTTP_200 def path_not_found(req, resp, **kwargs): @@ -56,7 +56,7 @@ allowed = ', '.join(allowed_methods) def on_options(req, resp, **kwargs): - resp.status = HTTP_204 + resp.status = HTTP_200 resp.set_header('Allow', allowed) resp.set_header('Content-Length', '0')
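One step of reasoning the diff above leaves implicit: RFC 7230 section 3.3.2 forbids sending a Content-Length header field on a 204 response, so keeping the explicit `Content-Length: 0` required by RFC 7231 means the default OPTIONS responder has to answer 200 OK instead of 204. A minimal way to exercise the patched behaviour might look like the sketch below; the resource, route and app wiring are invented for the example, and the test-client calls are assumed from falcon's `testing` helpers rather than taken from the issue.

```python
# Hypothetical end-to-end check of the patched default OPTIONS responder.
import falcon
from falcon import testing


class Things:
    def on_get(self, req, resp):
        resp.body = 'ok'  # no on_options defined, so the default responder is used


app = falcon.API()
app.add_route('/things', Things())

client = testing.TestClient(app)
result = client.simulate_options('/things')
assert result.status == falcon.HTTP_200
assert result.headers['Content-Length'] == '0'
```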
{"golden_diff": "diff --git a/falcon/responders.py b/falcon/responders.py\n--- a/falcon/responders.py\n+++ b/falcon/responders.py\n@@ -17,7 +17,7 @@\n from falcon.errors import HTTPBadRequest\n from falcon.errors import HTTPMethodNotAllowed\n from falcon.errors import HTTPNotFound\n-from falcon.status_codes import HTTP_204\n+from falcon.status_codes import HTTP_200\n \n \n def path_not_found(req, resp, **kwargs):\n@@ -56,7 +56,7 @@\n allowed = ', '.join(allowed_methods)\n \n def on_options(req, resp, **kwargs):\n- resp.status = HTTP_204\n+ resp.status = HTTP_200\n resp.set_header('Allow', allowed)\n resp.set_header('Content-Length', '0')\n", "issue": "Default OPTIONS responder does not set Content-Length to \"0\"\nPer RFC 7231:\n\n> A server MUST generate a Content-Length field with a value of \"0\" if no payload body is to be sent in the response.\n\n", "before_files": [{"content": "# Copyright 2013 by Rackspace Hosting, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Default responder implementations.\"\"\"\n\nfrom falcon.errors import HTTPBadRequest\nfrom falcon.errors import HTTPMethodNotAllowed\nfrom falcon.errors import HTTPNotFound\nfrom falcon.status_codes import HTTP_204\n\n\ndef path_not_found(req, resp, **kwargs):\n \"\"\"Raise 404 HTTPNotFound error\"\"\"\n raise HTTPNotFound()\n\n\ndef bad_request(req, resp, **kwargs):\n \"\"\"Raise 400 HTTPBadRequest error\"\"\"\n raise HTTPBadRequest('Bad request', 'Invalid HTTP method')\n\n\ndef create_method_not_allowed(allowed_methods):\n \"\"\"Creates a responder for \"405 Method Not Allowed\"\n\n Args:\n allowed_methods: A list of HTTP methods (uppercase) that should be\n returned in the Allow header.\n\n \"\"\"\n def method_not_allowed(req, resp, **kwargs):\n \"\"\"Raise 405 HTTPMethodNotAllowed error\"\"\"\n raise HTTPMethodNotAllowed(allowed_methods)\n\n return method_not_allowed\n\n\ndef create_default_options(allowed_methods):\n \"\"\"Creates a default responder for the OPTIONS method\n\n Args:\n allowed_methods: A list of HTTP methods (uppercase) that should be\n returned in the Allow header.\n\n \"\"\"\n allowed = ', '.join(allowed_methods)\n\n def on_options(req, resp, **kwargs):\n resp.status = HTTP_204\n resp.set_header('Allow', allowed)\n resp.set_header('Content-Length', '0')\n\n return on_options\n", "path": "falcon/responders.py"}], "after_files": [{"content": "# Copyright 2013 by Rackspace Hosting, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Default responder implementations.\"\"\"\n\nfrom 
falcon.errors import HTTPBadRequest\nfrom falcon.errors import HTTPMethodNotAllowed\nfrom falcon.errors import HTTPNotFound\nfrom falcon.status_codes import HTTP_200\n\n\ndef path_not_found(req, resp, **kwargs):\n \"\"\"Raise 404 HTTPNotFound error\"\"\"\n raise HTTPNotFound()\n\n\ndef bad_request(req, resp, **kwargs):\n \"\"\"Raise 400 HTTPBadRequest error\"\"\"\n raise HTTPBadRequest('Bad request', 'Invalid HTTP method')\n\n\ndef create_method_not_allowed(allowed_methods):\n \"\"\"Creates a responder for \"405 Method Not Allowed\"\n\n Args:\n allowed_methods: A list of HTTP methods (uppercase) that should be\n returned in the Allow header.\n\n \"\"\"\n def method_not_allowed(req, resp, **kwargs):\n \"\"\"Raise 405 HTTPMethodNotAllowed error\"\"\"\n raise HTTPMethodNotAllowed(allowed_methods)\n\n return method_not_allowed\n\n\ndef create_default_options(allowed_methods):\n \"\"\"Creates a default responder for the OPTIONS method\n\n Args:\n allowed_methods: A list of HTTP methods (uppercase) that should be\n returned in the Allow header.\n\n \"\"\"\n allowed = ', '.join(allowed_methods)\n\n def on_options(req, resp, **kwargs):\n resp.status = HTTP_200\n resp.set_header('Allow', allowed)\n resp.set_header('Content-Length', '0')\n\n return on_options\n", "path": "falcon/responders.py"}]}
num_tokens: 874
num_tokens_diff: 182
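The falcon patch above swaps the default OPTIONS responder's status from `HTTP_204` to `HTTP_200`. A 204 response must not carry a `Content-Length` header (RFC 7230, section 3.3.2), while a 200 response with no body is exactly the case where RFC 7231 requires `Content-Length: 0`, which the responder already sets. A minimal sketch of how the patched behavior can be exercised (the resource class, route, and use of `falcon.testing` are assumptions made for this illustration, not part of the record):

```python
# Illustrative only: a tiny app that exercises the default OPTIONS responder.
import falcon
from falcon import testing


class Widget:
    def on_get(self, req, resp):
        resp.media = {"ok": True}


app = falcon.App()  # falcon.API() on the older releases this patch targets
app.add_route("/widget", Widget())

result = testing.TestClient(app).simulate_options("/widget")

# With HTTP_200, an empty body plus an explicit Content-Length: 0 is
# consistent with RFC 7231; under HTTP_204 that same header was invalid.
assert result.status_code == 200
assert "GET" in result.headers["Allow"]
assert result.headers["Content-Length"] == "0"
```

The `Allow` value depends on which responders the resource defines, which is why the check above only tests membership rather than an exact string.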
problem_id: gh_patches_debug_23436
source: rasdani/github-patches
task_type: git_diff
in_source_id: great-expectations__great_expectations-6264
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Use cleaner solution for non-truncating division in python 2 Prefer `from __future__ import division` to `1.*x/y` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `great_expectations/rule_based_profiler/rule/rule.py` Content: ``` 1 import copy 2 import json 3 from typing import Any, Callable, Dict, List, Optional, Union 4 5 from great_expectations.core.batch import Batch, BatchRequestBase 6 from great_expectations.core.util import ( 7 convert_to_json_serializable, 8 determine_progress_bar_method_by_environment, 9 ) 10 from great_expectations.rule_based_profiler.config.base import ( 11 domainBuilderConfigSchema, 12 expectationConfigurationBuilderConfigSchema, 13 parameterBuilderConfigSchema, 14 ) 15 from great_expectations.rule_based_profiler.domain import Domain 16 from great_expectations.rule_based_profiler.domain_builder import DomainBuilder 17 from great_expectations.rule_based_profiler.expectation_configuration_builder import ( 18 ExpectationConfigurationBuilder, 19 ) 20 from great_expectations.rule_based_profiler.helpers.configuration_reconciliation import ( 21 DEFAULT_RECONCILATION_DIRECTIVES, 22 ReconciliationDirectives, 23 reconcile_rule_variables, 24 ) 25 from great_expectations.rule_based_profiler.helpers.util import ( 26 convert_variables_to_dict, 27 ) 28 from great_expectations.rule_based_profiler.parameter_builder import ParameterBuilder 29 from great_expectations.rule_based_profiler.parameter_container import ( 30 ParameterContainer, 31 build_parameter_container_for_variables, 32 ) 33 from great_expectations.rule_based_profiler.rule_state import RuleState 34 from great_expectations.types import SerializableDictDot 35 from great_expectations.util import ( 36 deep_filter_properties_iterable, 37 measure_execution_time, 38 ) 39 40 41 class Rule(SerializableDictDot): 42 def __init__( 43 self, 44 name: str, 45 variables: Optional[Union[ParameterContainer, Dict[str, Any]]] = None, 46 domain_builder: Optional[DomainBuilder] = None, 47 parameter_builders: Optional[List[ParameterBuilder]] = None, 48 expectation_configuration_builders: Optional[ 49 List[ExpectationConfigurationBuilder] 50 ] = None, 51 ) -> None: 52 """ 53 Sets Rule name, variables, domain builder, parameters builders, configuration builders, and other instance data. 
54 55 Args: 56 name: A string representing the name of the ProfilerRule 57 variables: Any variables to be substituted within the rules 58 domain_builder: A Domain Builder object used to build rule data domain 59 parameter_builders: A Parameter Builder list used to configure necessary rule evaluation parameters 60 expectation_configuration_builders: A list of Expectation Configuration Builders 61 """ 62 self._name = name 63 64 if variables is None: 65 variables = {} 66 67 # Convert variables argument to ParameterContainer 68 _variables: ParameterContainer 69 if isinstance(variables, ParameterContainer): 70 _variables = variables 71 else: 72 _variables: ParameterContainer = build_parameter_container_for_variables( 73 variables_configs=variables 74 ) 75 76 self.variables = _variables 77 78 self._domain_builder = domain_builder 79 self._parameter_builders = parameter_builders 80 self._expectation_configuration_builders = expectation_configuration_builders 81 82 self._execution_time = None 83 84 @measure_execution_time( 85 execution_time_holder_object_reference_name="rule_state", 86 execution_time_property_name="rule_execution_time", 87 pretty_print=False, 88 ) 89 def run( 90 self, 91 variables: Optional[ParameterContainer] = None, 92 batch_list: Optional[List[Batch]] = None, 93 batch_request: Optional[Union[BatchRequestBase, dict]] = None, 94 recompute_existing_parameter_values: bool = False, 95 reconciliation_directives: ReconciliationDirectives = DEFAULT_RECONCILATION_DIRECTIVES, 96 rule_state: Optional[RuleState] = None, 97 ) -> RuleState: 98 """ 99 Builds a list of Expectation Configurations, returning a single Expectation Configuration entry for every 100 ConfigurationBuilder available based on the instantiation. 101 102 Args: 103 variables: Attribute name/value pairs, commonly-used in Builder objects 104 batch_list: Explicit list of Batch objects to supply data at runtime 105 batch_request: Explicit batch_request used to supply data at runtime 106 recompute_existing_parameter_values: If "True", recompute value if "fully_qualified_parameter_name" exists 107 reconciliation_directives: directives for how each rule component should be overwritten 108 rule_state: holds "Rule" execution state and responds to "execution_time_property_name" ("execution_time") 109 110 Returns: 111 RuleState representing effect of executing Rule 112 """ 113 variables = build_parameter_container_for_variables( 114 variables_configs=reconcile_rule_variables( 115 variables=self.variables, 116 variables_config=convert_variables_to_dict(variables=variables), 117 reconciliation_strategy=reconciliation_directives.variables, 118 ) 119 ) 120 121 if rule_state is None: 122 rule_state = RuleState() 123 124 domains: List[Domain] = self._get_rule_domains( 125 variables=variables, 126 batch_list=batch_list, 127 batch_request=batch_request, 128 rule_state=rule_state, 129 ) 130 131 rule_state.rule = self 132 rule_state.variables = variables 133 rule_state.domains = domains 134 135 rule_state.reset_parameter_containers() 136 137 pbar_method: Callable = determine_progress_bar_method_by_environment() 138 139 domain: Domain 140 for domain in pbar_method( 141 domains, 142 desc="Profiling Dataset:", 143 position=1, 144 leave=False, 145 bar_format="{desc:25}{percentage:3.0f}%|{bar}{r_bar}", 146 ): 147 rule_state.initialize_parameter_container_for_domain(domain=domain) 148 149 parameter_builders: List[ParameterBuilder] = self.parameter_builders or [] 150 parameter_builder: ParameterBuilder 151 for parameter_builder in parameter_builders: 
152 parameter_builder.build_parameters( 153 domain=domain, 154 variables=variables, 155 parameters=rule_state.parameters, 156 parameter_computation_impl=None, 157 batch_list=batch_list, 158 batch_request=batch_request, 159 recompute_existing_parameter_values=recompute_existing_parameter_values, 160 ) 161 162 expectation_configuration_builders: List[ 163 ExpectationConfigurationBuilder 164 ] = (self.expectation_configuration_builders or []) 165 166 expectation_configuration_builder: ExpectationConfigurationBuilder 167 168 for expectation_configuration_builder in expectation_configuration_builders: 169 expectation_configuration_builder.resolve_validation_dependencies( 170 domain=domain, 171 variables=variables, 172 parameters=rule_state.parameters, 173 batch_list=batch_list, 174 batch_request=batch_request, 175 recompute_existing_parameter_values=recompute_existing_parameter_values, 176 ) 177 178 return rule_state 179 180 @property 181 def name(self) -> str: 182 return self._name 183 184 @name.setter 185 def name(self, value: str) -> None: 186 self._name = value 187 188 @property 189 def variables(self) -> Optional[ParameterContainer]: 190 # Returning a copy of the "self._variables" state variable in order to prevent write-before-read hazard. 191 return copy.deepcopy(self._variables) 192 193 @variables.setter 194 def variables(self, value: Optional[ParameterContainer]) -> None: 195 self._variables = value 196 197 @property 198 def domain_builder(self) -> Optional[DomainBuilder]: 199 return self._domain_builder 200 201 @property 202 def parameter_builders(self) -> Optional[List[ParameterBuilder]]: 203 return self._parameter_builders 204 205 @property 206 def expectation_configuration_builders( 207 self, 208 ) -> Optional[List[ExpectationConfigurationBuilder]]: 209 return self._expectation_configuration_builders 210 211 @property 212 def execution_time(self) -> Optional[float]: # Execution time (in seconds). 213 """ 214 Property that holds "execution_time" of this "Rule" (in seconds). 215 """ 216 return self._execution_time 217 218 def to_dict(self) -> dict: 219 parameter_builder_configs: Optional[List[dict]] = None 220 parameter_builders: Optional[ 221 Dict[str, ParameterBuilder] 222 ] = self._get_parameter_builders_as_dict() 223 parameter_builder: ParameterBuilder 224 if parameter_builders is not None: 225 # Roundtrip through schema validation to add/or restore any missing fields. 226 parameter_builder_configs = [ 227 parameterBuilderConfigSchema.load(parameter_builder.to_dict()).to_dict() 228 for parameter_builder in parameter_builders.values() 229 ] 230 231 expectation_configuration_builder_configs: Optional[List[dict]] = None 232 expectation_configuration_builders: Optional[ 233 Dict[str, ExpectationConfigurationBuilder] 234 ] = self._get_expectation_configuration_builders_as_dict() 235 expectation_configuration_builder: ExpectationConfigurationBuilder 236 if expectation_configuration_builders is not None: 237 # Roundtrip through schema validation to add/or restore any missing fields. 238 expectation_configuration_builder_configs = [ 239 expectationConfigurationBuilderConfigSchema.load( 240 expectation_configuration_builder.to_dict() 241 ).to_dict() 242 for expectation_configuration_builder in expectation_configuration_builders.values() 243 ] 244 245 return { 246 # Roundtrip through schema validation to add/or restore any missing fields. 
247 "domain_builder": domainBuilderConfigSchema.load( 248 self.domain_builder.to_dict() 249 ).to_dict(), 250 "parameter_builders": parameter_builder_configs, 251 "expectation_configuration_builders": expectation_configuration_builder_configs, 252 } 253 254 def to_json_dict(self) -> dict: 255 """ 256 # TODO: <Alex>2/4/2022</Alex> 257 This implementation of "SerializableDictDot.to_json_dict() occurs frequently and should ideally serve as the 258 reference implementation in the "SerializableDictDot" class itself. However, the circular import dependencies, 259 due to the location of the "great_expectations/types/__init__.py" and "great_expectations/core/util.py" modules 260 make this refactoring infeasible at the present time. 261 """ 262 dict_obj: dict = self.to_dict() 263 variables_dict: Optional[Dict[str, Any]] = convert_variables_to_dict( 264 variables=self.variables 265 ) 266 dict_obj["variables"] = variables_dict 267 serializeable_dict: dict = convert_to_json_serializable(data=dict_obj) 268 return serializeable_dict 269 270 def __repr__(self) -> str: 271 """ 272 # TODO: <Alex>2/4/2022</Alex> 273 This implementation of a custom "__repr__()" occurs frequently and should ideally serve as the reference 274 implementation in the "SerializableDictDot" class. However, the circular import dependencies, due to the 275 location of the "great_expectations/types/__init__.py" and "great_expectations/core/util.py" modules make this 276 refactoring infeasible at the present time. 277 """ 278 json_dict: dict = self.to_json_dict() 279 deep_filter_properties_iterable( 280 properties=json_dict, 281 inplace=True, 282 ) 283 return json.dumps(json_dict, indent=2) 284 285 def __str__(self) -> str: 286 """ 287 # TODO: <Alex>2/4/2022</Alex> 288 This implementation of a custom "__str__()" occurs frequently and should ideally serve as the reference 289 implementation in the "SerializableDictDot" class. However, the circular import dependencies, due to the 290 location of the "great_expectations/types/__init__.py" and "great_expectations/core/util.py" modules make this 291 refactoring infeasible at the present time. 
292 """ 293 return self.__repr__() 294 295 def _get_parameter_builders_as_dict(self) -> Dict[str, ParameterBuilder]: 296 parameter_builders: List[ParameterBuilder] = self.parameter_builders 297 if parameter_builders is None: 298 parameter_builders = [] 299 300 parameter_builder: ParameterBuilder 301 return { 302 parameter_builder.name: parameter_builder 303 for parameter_builder in parameter_builders 304 } 305 306 def _get_expectation_configuration_builders_as_dict( 307 self, 308 ) -> Dict[str, ExpectationConfigurationBuilder]: 309 expectation_configuration_builders: List[ 310 ExpectationConfigurationBuilder 311 ] = self.expectation_configuration_builders 312 if expectation_configuration_builders is None: 313 expectation_configuration_builders = [] 314 315 expectation_configuration_builder: ExpectationConfigurationBuilder 316 return { 317 expectation_configuration_builder.expectation_type: expectation_configuration_builder 318 for expectation_configuration_builder in expectation_configuration_builders 319 } 320 321 # noinspection PyUnusedLocal 322 @measure_execution_time( 323 execution_time_holder_object_reference_name="rule_state", 324 execution_time_property_name="rule_domain_builder_execution_time", 325 pretty_print=False, 326 ) 327 def _get_rule_domains( 328 self, 329 variables: Optional[ParameterContainer] = None, 330 batch_list: Optional[List[Batch]] = None, 331 batch_request: Optional[Union[BatchRequestBase, dict]] = None, 332 rule_state: Optional[RuleState] = None, 333 ) -> List[Domain]: 334 domains: List[Domain] = ( 335 [] 336 if self.domain_builder is None 337 else self.domain_builder.get_domains( 338 rule_name=self.name, 339 variables=variables, 340 batch_list=batch_list, 341 batch_request=batch_request, 342 ) 343 ) 344 return domains 345 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/great_expectations/rule_based_profiler/rule/rule.py b/great_expectations/rule_based_profiler/rule/rule.py --- a/great_expectations/rule_based_profiler/rule/rule.py +++ b/great_expectations/rule_based_profiler/rule/rule.py @@ -79,8 +79,6 @@ self._parameter_builders = parameter_builders self._expectation_configuration_builders = expectation_configuration_builders - self._execution_time = None - @measure_execution_time( execution_time_holder_object_reference_name="rule_state", execution_time_property_name="rule_execution_time", @@ -208,13 +206,6 @@ ) -> Optional[List[ExpectationConfigurationBuilder]]: return self._expectation_configuration_builders - @property - def execution_time(self) -> Optional[float]: # Execution time (in seconds). - """ - Property that holds "execution_time" of this "Rule" (in seconds). - """ - return self._execution_time - def to_dict(self) -> dict: parameter_builder_configs: Optional[List[dict]] = None parameter_builders: Optional[
{"golden_diff": "diff --git a/great_expectations/rule_based_profiler/rule/rule.py b/great_expectations/rule_based_profiler/rule/rule.py\n--- a/great_expectations/rule_based_profiler/rule/rule.py\n+++ b/great_expectations/rule_based_profiler/rule/rule.py\n@@ -79,8 +79,6 @@\n self._parameter_builders = parameter_builders\n self._expectation_configuration_builders = expectation_configuration_builders\n \n- self._execution_time = None\n-\n @measure_execution_time(\n execution_time_holder_object_reference_name=\"rule_state\",\n execution_time_property_name=\"rule_execution_time\",\n@@ -208,13 +206,6 @@\n ) -> Optional[List[ExpectationConfigurationBuilder]]:\n return self._expectation_configuration_builders\n \n- @property\n- def execution_time(self) -> Optional[float]: # Execution time (in seconds).\n- \"\"\"\n- Property that holds \"execution_time\" of this \"Rule\" (in seconds).\n- \"\"\"\n- return self._execution_time\n-\n def to_dict(self) -> dict:\n parameter_builder_configs: Optional[List[dict]] = None\n parameter_builders: Optional[\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "import copy\nimport json\nfrom typing import Any, Callable, Dict, List, Optional, Union\n\nfrom great_expectations.core.batch import Batch, BatchRequestBase\nfrom great_expectations.core.util import (\n convert_to_json_serializable,\n determine_progress_bar_method_by_environment,\n)\nfrom great_expectations.rule_based_profiler.config.base import (\n domainBuilderConfigSchema,\n expectationConfigurationBuilderConfigSchema,\n parameterBuilderConfigSchema,\n)\nfrom great_expectations.rule_based_profiler.domain import Domain\nfrom great_expectations.rule_based_profiler.domain_builder import DomainBuilder\nfrom great_expectations.rule_based_profiler.expectation_configuration_builder import (\n ExpectationConfigurationBuilder,\n)\nfrom great_expectations.rule_based_profiler.helpers.configuration_reconciliation import (\n DEFAULT_RECONCILATION_DIRECTIVES,\n ReconciliationDirectives,\n reconcile_rule_variables,\n)\nfrom great_expectations.rule_based_profiler.helpers.util import (\n convert_variables_to_dict,\n)\nfrom great_expectations.rule_based_profiler.parameter_builder import ParameterBuilder\nfrom great_expectations.rule_based_profiler.parameter_container import (\n ParameterContainer,\n build_parameter_container_for_variables,\n)\nfrom great_expectations.rule_based_profiler.rule_state import RuleState\nfrom great_expectations.types import SerializableDictDot\nfrom great_expectations.util import (\n deep_filter_properties_iterable,\n measure_execution_time,\n)\n\n\nclass Rule(SerializableDictDot):\n def __init__(\n self,\n name: str,\n variables: Optional[Union[ParameterContainer, Dict[str, Any]]] = None,\n domain_builder: Optional[DomainBuilder] = None,\n parameter_builders: Optional[List[ParameterBuilder]] = None,\n expectation_configuration_builders: Optional[\n List[ExpectationConfigurationBuilder]\n ] = None,\n ) -> None:\n \"\"\"\n Sets Rule name, variables, domain builder, parameters builders, configuration builders, and other instance data.\n\n Args:\n name: A string representing the name of the ProfilerRule\n variables: Any variables to be substituted within the rules\n domain_builder: A Domain Builder object used to build rule data domain\n parameter_builders: A Parameter Builder list used to configure necessary rule evaluation parameters\n expectation_configuration_builders: A list of 
Expectation Configuration Builders\n \"\"\"\n self._name = name\n\n if variables is None:\n variables = {}\n\n # Convert variables argument to ParameterContainer\n _variables: ParameterContainer\n if isinstance(variables, ParameterContainer):\n _variables = variables\n else:\n _variables: ParameterContainer = build_parameter_container_for_variables(\n variables_configs=variables\n )\n\n self.variables = _variables\n\n self._domain_builder = domain_builder\n self._parameter_builders = parameter_builders\n self._expectation_configuration_builders = expectation_configuration_builders\n\n self._execution_time = None\n\n @measure_execution_time(\n execution_time_holder_object_reference_name=\"rule_state\",\n execution_time_property_name=\"rule_execution_time\",\n pretty_print=False,\n )\n def run(\n self,\n variables: Optional[ParameterContainer] = None,\n batch_list: Optional[List[Batch]] = None,\n batch_request: Optional[Union[BatchRequestBase, dict]] = None,\n recompute_existing_parameter_values: bool = False,\n reconciliation_directives: ReconciliationDirectives = DEFAULT_RECONCILATION_DIRECTIVES,\n rule_state: Optional[RuleState] = None,\n ) -> RuleState:\n \"\"\"\n Builds a list of Expectation Configurations, returning a single Expectation Configuration entry for every\n ConfigurationBuilder available based on the instantiation.\n\n Args:\n variables: Attribute name/value pairs, commonly-used in Builder objects\n batch_list: Explicit list of Batch objects to supply data at runtime\n batch_request: Explicit batch_request used to supply data at runtime\n recompute_existing_parameter_values: If \"True\", recompute value if \"fully_qualified_parameter_name\" exists\n reconciliation_directives: directives for how each rule component should be overwritten\n rule_state: holds \"Rule\" execution state and responds to \"execution_time_property_name\" (\"execution_time\")\n\n Returns:\n RuleState representing effect of executing Rule\n \"\"\"\n variables = build_parameter_container_for_variables(\n variables_configs=reconcile_rule_variables(\n variables=self.variables,\n variables_config=convert_variables_to_dict(variables=variables),\n reconciliation_strategy=reconciliation_directives.variables,\n )\n )\n\n if rule_state is None:\n rule_state = RuleState()\n\n domains: List[Domain] = self._get_rule_domains(\n variables=variables,\n batch_list=batch_list,\n batch_request=batch_request,\n rule_state=rule_state,\n )\n\n rule_state.rule = self\n rule_state.variables = variables\n rule_state.domains = domains\n\n rule_state.reset_parameter_containers()\n\n pbar_method: Callable = determine_progress_bar_method_by_environment()\n\n domain: Domain\n for domain in pbar_method(\n domains,\n desc=\"Profiling Dataset:\",\n position=1,\n leave=False,\n bar_format=\"{desc:25}{percentage:3.0f}%|{bar}{r_bar}\",\n ):\n rule_state.initialize_parameter_container_for_domain(domain=domain)\n\n parameter_builders: List[ParameterBuilder] = self.parameter_builders or []\n parameter_builder: ParameterBuilder\n for parameter_builder in parameter_builders:\n parameter_builder.build_parameters(\n domain=domain,\n variables=variables,\n parameters=rule_state.parameters,\n parameter_computation_impl=None,\n batch_list=batch_list,\n batch_request=batch_request,\n recompute_existing_parameter_values=recompute_existing_parameter_values,\n )\n\n expectation_configuration_builders: List[\n ExpectationConfigurationBuilder\n ] = (self.expectation_configuration_builders or [])\n\n expectation_configuration_builder: 
ExpectationConfigurationBuilder\n\n for expectation_configuration_builder in expectation_configuration_builders:\n expectation_configuration_builder.resolve_validation_dependencies(\n domain=domain,\n variables=variables,\n parameters=rule_state.parameters,\n batch_list=batch_list,\n batch_request=batch_request,\n recompute_existing_parameter_values=recompute_existing_parameter_values,\n )\n\n return rule_state\n\n @property\n def name(self) -> str:\n return self._name\n\n @name.setter\n def name(self, value: str) -> None:\n self._name = value\n\n @property\n def variables(self) -> Optional[ParameterContainer]:\n # Returning a copy of the \"self._variables\" state variable in order to prevent write-before-read hazard.\n return copy.deepcopy(self._variables)\n\n @variables.setter\n def variables(self, value: Optional[ParameterContainer]) -> None:\n self._variables = value\n\n @property\n def domain_builder(self) -> Optional[DomainBuilder]:\n return self._domain_builder\n\n @property\n def parameter_builders(self) -> Optional[List[ParameterBuilder]]:\n return self._parameter_builders\n\n @property\n def expectation_configuration_builders(\n self,\n ) -> Optional[List[ExpectationConfigurationBuilder]]:\n return self._expectation_configuration_builders\n\n @property\n def execution_time(self) -> Optional[float]: # Execution time (in seconds).\n \"\"\"\n Property that holds \"execution_time\" of this \"Rule\" (in seconds).\n \"\"\"\n return self._execution_time\n\n def to_dict(self) -> dict:\n parameter_builder_configs: Optional[List[dict]] = None\n parameter_builders: Optional[\n Dict[str, ParameterBuilder]\n ] = self._get_parameter_builders_as_dict()\n parameter_builder: ParameterBuilder\n if parameter_builders is not None:\n # Roundtrip through schema validation to add/or restore any missing fields.\n parameter_builder_configs = [\n parameterBuilderConfigSchema.load(parameter_builder.to_dict()).to_dict()\n for parameter_builder in parameter_builders.values()\n ]\n\n expectation_configuration_builder_configs: Optional[List[dict]] = None\n expectation_configuration_builders: Optional[\n Dict[str, ExpectationConfigurationBuilder]\n ] = self._get_expectation_configuration_builders_as_dict()\n expectation_configuration_builder: ExpectationConfigurationBuilder\n if expectation_configuration_builders is not None:\n # Roundtrip through schema validation to add/or restore any missing fields.\n expectation_configuration_builder_configs = [\n expectationConfigurationBuilderConfigSchema.load(\n expectation_configuration_builder.to_dict()\n ).to_dict()\n for expectation_configuration_builder in expectation_configuration_builders.values()\n ]\n\n return {\n # Roundtrip through schema validation to add/or restore any missing fields.\n \"domain_builder\": domainBuilderConfigSchema.load(\n self.domain_builder.to_dict()\n ).to_dict(),\n \"parameter_builders\": parameter_builder_configs,\n \"expectation_configuration_builders\": expectation_configuration_builder_configs,\n }\n\n def to_json_dict(self) -> dict:\n \"\"\"\n # TODO: <Alex>2/4/2022</Alex>\n This implementation of \"SerializableDictDot.to_json_dict() occurs frequently and should ideally serve as the\n reference implementation in the \"SerializableDictDot\" class itself. 
However, the circular import dependencies,\n due to the location of the \"great_expectations/types/__init__.py\" and \"great_expectations/core/util.py\" modules\n make this refactoring infeasible at the present time.\n \"\"\"\n dict_obj: dict = self.to_dict()\n variables_dict: Optional[Dict[str, Any]] = convert_variables_to_dict(\n variables=self.variables\n )\n dict_obj[\"variables\"] = variables_dict\n serializeable_dict: dict = convert_to_json_serializable(data=dict_obj)\n return serializeable_dict\n\n def __repr__(self) -> str:\n \"\"\"\n # TODO: <Alex>2/4/2022</Alex>\n This implementation of a custom \"__repr__()\" occurs frequently and should ideally serve as the reference\n implementation in the \"SerializableDictDot\" class. However, the circular import dependencies, due to the\n location of the \"great_expectations/types/__init__.py\" and \"great_expectations/core/util.py\" modules make this\n refactoring infeasible at the present time.\n \"\"\"\n json_dict: dict = self.to_json_dict()\n deep_filter_properties_iterable(\n properties=json_dict,\n inplace=True,\n )\n return json.dumps(json_dict, indent=2)\n\n def __str__(self) -> str:\n \"\"\"\n # TODO: <Alex>2/4/2022</Alex>\n This implementation of a custom \"__str__()\" occurs frequently and should ideally serve as the reference\n implementation in the \"SerializableDictDot\" class. However, the circular import dependencies, due to the\n location of the \"great_expectations/types/__init__.py\" and \"great_expectations/core/util.py\" modules make this\n refactoring infeasible at the present time.\n \"\"\"\n return self.__repr__()\n\n def _get_parameter_builders_as_dict(self) -> Dict[str, ParameterBuilder]:\n parameter_builders: List[ParameterBuilder] = self.parameter_builders\n if parameter_builders is None:\n parameter_builders = []\n\n parameter_builder: ParameterBuilder\n return {\n parameter_builder.name: parameter_builder\n for parameter_builder in parameter_builders\n }\n\n def _get_expectation_configuration_builders_as_dict(\n self,\n ) -> Dict[str, ExpectationConfigurationBuilder]:\n expectation_configuration_builders: List[\n ExpectationConfigurationBuilder\n ] = self.expectation_configuration_builders\n if expectation_configuration_builders is None:\n expectation_configuration_builders = []\n\n expectation_configuration_builder: ExpectationConfigurationBuilder\n return {\n expectation_configuration_builder.expectation_type: expectation_configuration_builder\n for expectation_configuration_builder in expectation_configuration_builders\n }\n\n # noinspection PyUnusedLocal\n @measure_execution_time(\n execution_time_holder_object_reference_name=\"rule_state\",\n execution_time_property_name=\"rule_domain_builder_execution_time\",\n pretty_print=False,\n )\n def _get_rule_domains(\n self,\n variables: Optional[ParameterContainer] = None,\n batch_list: Optional[List[Batch]] = None,\n batch_request: Optional[Union[BatchRequestBase, dict]] = None,\n rule_state: Optional[RuleState] = None,\n ) -> List[Domain]:\n domains: List[Domain] = (\n []\n if self.domain_builder is None\n else self.domain_builder.get_domains(\n rule_name=self.name,\n variables=variables,\n batch_list=batch_list,\n batch_request=batch_request,\n )\n )\n return domains\n", "path": "great_expectations/rule_based_profiler/rule/rule.py"}], "after_files": [{"content": "import copy\nimport json\nfrom typing import Any, Callable, Dict, List, Optional, Union\n\nfrom great_expectations.core.batch import Batch, BatchRequestBase\nfrom great_expectations.core.util import 
(\n convert_to_json_serializable,\n determine_progress_bar_method_by_environment,\n)\nfrom great_expectations.rule_based_profiler.config.base import (\n domainBuilderConfigSchema,\n expectationConfigurationBuilderConfigSchema,\n parameterBuilderConfigSchema,\n)\nfrom great_expectations.rule_based_profiler.domain import Domain\nfrom great_expectations.rule_based_profiler.domain_builder import DomainBuilder\nfrom great_expectations.rule_based_profiler.expectation_configuration_builder import (\n ExpectationConfigurationBuilder,\n)\nfrom great_expectations.rule_based_profiler.helpers.configuration_reconciliation import (\n DEFAULT_RECONCILATION_DIRECTIVES,\n ReconciliationDirectives,\n reconcile_rule_variables,\n)\nfrom great_expectations.rule_based_profiler.helpers.util import (\n convert_variables_to_dict,\n)\nfrom great_expectations.rule_based_profiler.parameter_builder import ParameterBuilder\nfrom great_expectations.rule_based_profiler.parameter_container import (\n ParameterContainer,\n build_parameter_container_for_variables,\n)\nfrom great_expectations.rule_based_profiler.rule_state import RuleState\nfrom great_expectations.types import SerializableDictDot\nfrom great_expectations.util import (\n deep_filter_properties_iterable,\n measure_execution_time,\n)\n\n\nclass Rule(SerializableDictDot):\n def __init__(\n self,\n name: str,\n variables: Optional[Union[ParameterContainer, Dict[str, Any]]] = None,\n domain_builder: Optional[DomainBuilder] = None,\n parameter_builders: Optional[List[ParameterBuilder]] = None,\n expectation_configuration_builders: Optional[\n List[ExpectationConfigurationBuilder]\n ] = None,\n ) -> None:\n \"\"\"\n Sets Rule name, variables, domain builder, parameters builders, configuration builders, and other instance data.\n\n Args:\n name: A string representing the name of the ProfilerRule\n variables: Any variables to be substituted within the rules\n domain_builder: A Domain Builder object used to build rule data domain\n parameter_builders: A Parameter Builder list used to configure necessary rule evaluation parameters\n expectation_configuration_builders: A list of Expectation Configuration Builders\n \"\"\"\n self._name = name\n\n if variables is None:\n variables = {}\n\n # Convert variables argument to ParameterContainer\n _variables: ParameterContainer\n if isinstance(variables, ParameterContainer):\n _variables = variables\n else:\n _variables: ParameterContainer = build_parameter_container_for_variables(\n variables_configs=variables\n )\n\n self.variables = _variables\n\n self._domain_builder = domain_builder\n self._parameter_builders = parameter_builders\n self._expectation_configuration_builders = expectation_configuration_builders\n\n @measure_execution_time(\n execution_time_holder_object_reference_name=\"rule_state\",\n execution_time_property_name=\"rule_execution_time\",\n pretty_print=False,\n )\n def run(\n self,\n variables: Optional[ParameterContainer] = None,\n batch_list: Optional[List[Batch]] = None,\n batch_request: Optional[Union[BatchRequestBase, dict]] = None,\n recompute_existing_parameter_values: bool = False,\n reconciliation_directives: ReconciliationDirectives = DEFAULT_RECONCILATION_DIRECTIVES,\n rule_state: Optional[RuleState] = None,\n ) -> RuleState:\n \"\"\"\n Builds a list of Expectation Configurations, returning a single Expectation Configuration entry for every\n ConfigurationBuilder available based on the instantiation.\n\n Args:\n variables: Attribute name/value pairs, commonly-used in Builder objects\n batch_list: 
Explicit list of Batch objects to supply data at runtime\n batch_request: Explicit batch_request used to supply data at runtime\n recompute_existing_parameter_values: If \"True\", recompute value if \"fully_qualified_parameter_name\" exists\n reconciliation_directives: directives for how each rule component should be overwritten\n rule_state: holds \"Rule\" execution state and responds to \"execution_time_property_name\" (\"execution_time\")\n\n Returns:\n RuleState representing effect of executing Rule\n \"\"\"\n variables = build_parameter_container_for_variables(\n variables_configs=reconcile_rule_variables(\n variables=self.variables,\n variables_config=convert_variables_to_dict(variables=variables),\n reconciliation_strategy=reconciliation_directives.variables,\n )\n )\n\n if rule_state is None:\n rule_state = RuleState()\n\n domains: List[Domain] = self._get_rule_domains(\n variables=variables,\n batch_list=batch_list,\n batch_request=batch_request,\n rule_state=rule_state,\n )\n\n rule_state.rule = self\n rule_state.variables = variables\n rule_state.domains = domains\n\n rule_state.reset_parameter_containers()\n\n pbar_method: Callable = determine_progress_bar_method_by_environment()\n\n domain: Domain\n for domain in pbar_method(\n domains,\n desc=\"Profiling Dataset:\",\n position=1,\n leave=False,\n bar_format=\"{desc:25}{percentage:3.0f}%|{bar}{r_bar}\",\n ):\n rule_state.initialize_parameter_container_for_domain(domain=domain)\n\n parameter_builders: List[ParameterBuilder] = self.parameter_builders or []\n parameter_builder: ParameterBuilder\n for parameter_builder in parameter_builders:\n parameter_builder.build_parameters(\n domain=domain,\n variables=variables,\n parameters=rule_state.parameters,\n parameter_computation_impl=None,\n batch_list=batch_list,\n batch_request=batch_request,\n recompute_existing_parameter_values=recompute_existing_parameter_values,\n )\n\n expectation_configuration_builders: List[\n ExpectationConfigurationBuilder\n ] = (self.expectation_configuration_builders or [])\n\n expectation_configuration_builder: ExpectationConfigurationBuilder\n\n for expectation_configuration_builder in expectation_configuration_builders:\n expectation_configuration_builder.resolve_validation_dependencies(\n domain=domain,\n variables=variables,\n parameters=rule_state.parameters,\n batch_list=batch_list,\n batch_request=batch_request,\n recompute_existing_parameter_values=recompute_existing_parameter_values,\n )\n\n return rule_state\n\n @property\n def name(self) -> str:\n return self._name\n\n @name.setter\n def name(self, value: str) -> None:\n self._name = value\n\n @property\n def variables(self) -> Optional[ParameterContainer]:\n # Returning a copy of the \"self._variables\" state variable in order to prevent write-before-read hazard.\n return copy.deepcopy(self._variables)\n\n @variables.setter\n def variables(self, value: Optional[ParameterContainer]) -> None:\n self._variables = value\n\n @property\n def domain_builder(self) -> Optional[DomainBuilder]:\n return self._domain_builder\n\n @property\n def parameter_builders(self) -> Optional[List[ParameterBuilder]]:\n return self._parameter_builders\n\n @property\n def expectation_configuration_builders(\n self,\n ) -> Optional[List[ExpectationConfigurationBuilder]]:\n return self._expectation_configuration_builders\n\n def to_dict(self) -> dict:\n parameter_builder_configs: Optional[List[dict]] = None\n parameter_builders: Optional[\n Dict[str, ParameterBuilder]\n ] = self._get_parameter_builders_as_dict()\n 
parameter_builder: ParameterBuilder\n if parameter_builders is not None:\n # Roundtrip through schema validation to add/or restore any missing fields.\n parameter_builder_configs = [\n parameterBuilderConfigSchema.load(parameter_builder.to_dict()).to_dict()\n for parameter_builder in parameter_builders.values()\n ]\n\n expectation_configuration_builder_configs: Optional[List[dict]] = None\n expectation_configuration_builders: Optional[\n Dict[str, ExpectationConfigurationBuilder]\n ] = self._get_expectation_configuration_builders_as_dict()\n expectation_configuration_builder: ExpectationConfigurationBuilder\n if expectation_configuration_builders is not None:\n # Roundtrip through schema validation to add/or restore any missing fields.\n expectation_configuration_builder_configs = [\n expectationConfigurationBuilderConfigSchema.load(\n expectation_configuration_builder.to_dict()\n ).to_dict()\n for expectation_configuration_builder in expectation_configuration_builders.values()\n ]\n\n return {\n # Roundtrip through schema validation to add/or restore any missing fields.\n \"domain_builder\": domainBuilderConfigSchema.load(\n self.domain_builder.to_dict()\n ).to_dict(),\n \"parameter_builders\": parameter_builder_configs,\n \"expectation_configuration_builders\": expectation_configuration_builder_configs,\n }\n\n def to_json_dict(self) -> dict:\n \"\"\"\n # TODO: <Alex>2/4/2022</Alex>\n This implementation of \"SerializableDictDot.to_json_dict() occurs frequently and should ideally serve as the\n reference implementation in the \"SerializableDictDot\" class itself. However, the circular import dependencies,\n due to the location of the \"great_expectations/types/__init__.py\" and \"great_expectations/core/util.py\" modules\n make this refactoring infeasible at the present time.\n \"\"\"\n dict_obj: dict = self.to_dict()\n variables_dict: Optional[Dict[str, Any]] = convert_variables_to_dict(\n variables=self.variables\n )\n dict_obj[\"variables\"] = variables_dict\n serializeable_dict: dict = convert_to_json_serializable(data=dict_obj)\n return serializeable_dict\n\n def __repr__(self) -> str:\n \"\"\"\n # TODO: <Alex>2/4/2022</Alex>\n This implementation of a custom \"__repr__()\" occurs frequently and should ideally serve as the reference\n implementation in the \"SerializableDictDot\" class. However, the circular import dependencies, due to the\n location of the \"great_expectations/types/__init__.py\" and \"great_expectations/core/util.py\" modules make this\n refactoring infeasible at the present time.\n \"\"\"\n json_dict: dict = self.to_json_dict()\n deep_filter_properties_iterable(\n properties=json_dict,\n inplace=True,\n )\n return json.dumps(json_dict, indent=2)\n\n def __str__(self) -> str:\n \"\"\"\n # TODO: <Alex>2/4/2022</Alex>\n This implementation of a custom \"__str__()\" occurs frequently and should ideally serve as the reference\n implementation in the \"SerializableDictDot\" class. 
However, the circular import dependencies, due to the\n location of the \"great_expectations/types/__init__.py\" and \"great_expectations/core/util.py\" modules make this\n refactoring infeasible at the present time.\n \"\"\"\n return self.__repr__()\n\n def _get_parameter_builders_as_dict(self) -> Dict[str, ParameterBuilder]:\n parameter_builders: List[ParameterBuilder] = self.parameter_builders\n if parameter_builders is None:\n parameter_builders = []\n\n parameter_builder: ParameterBuilder\n return {\n parameter_builder.name: parameter_builder\n for parameter_builder in parameter_builders\n }\n\n def _get_expectation_configuration_builders_as_dict(\n self,\n ) -> Dict[str, ExpectationConfigurationBuilder]:\n expectation_configuration_builders: List[\n ExpectationConfigurationBuilder\n ] = self.expectation_configuration_builders\n if expectation_configuration_builders is None:\n expectation_configuration_builders = []\n\n expectation_configuration_builder: ExpectationConfigurationBuilder\n return {\n expectation_configuration_builder.expectation_type: expectation_configuration_builder\n for expectation_configuration_builder in expectation_configuration_builders\n }\n\n # noinspection PyUnusedLocal\n @measure_execution_time(\n execution_time_holder_object_reference_name=\"rule_state\",\n execution_time_property_name=\"rule_domain_builder_execution_time\",\n pretty_print=False,\n )\n def _get_rule_domains(\n self,\n variables: Optional[ParameterContainer] = None,\n batch_list: Optional[List[Batch]] = None,\n batch_request: Optional[Union[BatchRequestBase, dict]] = None,\n rule_state: Optional[RuleState] = None,\n ) -> List[Domain]:\n domains: List[Domain] = (\n []\n if self.domain_builder is None\n else self.domain_builder.get_domains(\n rule_name=self.name,\n variables=variables,\n batch_list=batch_list,\n batch_request=batch_request,\n )\n )\n return domains\n", "path": "great_expectations/rule_based_profiler/rule/rule.py"}]}
num_tokens: 3896
num_tokens_diff: 264
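In the record above, the golden diff removes `self._execution_time = None` from `Rule.__init__` along with the `execution_time` property. Nothing in the shown class ever wrote that attribute; the `measure_execution_time` decorator is configured with `execution_time_holder_object_reference_name="rule_state"` and `execution_time_property_name="rule_execution_time"`, so it is set up to record elapsed time on the per-run `RuleState` rather than on the `Rule` instance. A simplified sketch of that holder-object pattern (the decorator body below is illustrative, not Great Expectations' actual implementation):

```python
# Illustrative sketch of the holder-object timing pattern described above.
import functools
import time


def measure_execution_time(
    execution_time_holder_object_reference_name: str,
    execution_time_property_name: str,
    pretty_print: bool = False,
):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            start = time.perf_counter()
            try:
                return func(*args, **kwargs)
            finally:
                # Find the holder object among the keyword arguments and
                # attach the elapsed seconds to it, e.g.
                # rule_state.rule_execution_time = 0.42
                holder = kwargs.get(execution_time_holder_object_reference_name)
                if holder is not None:
                    setattr(
                        holder,
                        execution_time_property_name,
                        time.perf_counter() - start,
                    )
        return wrapper

    return decorator
```

Keeping the measurement on the per-run state object leaves the `Rule` itself stateless across runs, which is presumably why the instance-level attribute could be dropped without a replacement.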
problem_id: gh_patches_debug_7878
source: rasdani/github-patches
task_type: git_diff
in_source_id: jupyterhub__zero-to-jupyterhub-k8s-8
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add Jupyter structure for docs - [x] Add readthedocs.yml - [x] move environment.yml inside source directory - [ ] add project jupyter copyright --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `doc/source/conf.py` Content: ``` 1 #!/usr/bin/env python3 2 # -*- coding: utf-8 -*- 3 # 4 # Learning with JupyterHub documentation build configuration file, created by 5 # sphinx-quickstart on Fri Mar 17 16:07:58 2017. 6 # 7 # This file is execfile()d with the current directory set to its 8 # containing dir. 9 # 10 # Note that not all possible configuration values are present in this 11 # autogenerated file. 12 # 13 # All configuration values have a default; values that are commented out 14 # serve to show the default. 15 16 # If extensions (or modules to document with autodoc) are in another directory, 17 # add these directories to sys.path here. If the directory is relative to the 18 # documentation root, use os.path.abspath to make it absolute, like shown here. 19 # 20 # import os 21 # import sys 22 # sys.path.insert(0, os.path.abspath('.')) 23 import recommonmark 24 25 # -- General configuration ------------------------------------------------ 26 27 # If your documentation needs a minimal Sphinx version, state it here. 28 # 29 # needs_sphinx = '1.0' 30 31 # Add any Sphinx extension module names here, as strings. They can be 32 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 33 # ones. 34 extensions = ['sphinx.ext.mathjax'] 35 36 # Add any paths that contain templates here, relative to this directory. 37 templates_path = ['_templates'] 38 39 source_parsers = { 40 '.md': 'recommonmark.parser.CommonMarkParser', 41 } 42 43 # The suffix(es) of source filenames. 44 # You can specify multiple suffix as a list of string: 45 # 46 # source_suffix = ['.rst', '.md'] 47 source_suffix = ['.rst', '.md'] 48 49 # The master toctree document. 50 master_doc = 'index' 51 52 # General information about the project. 53 project = 'Zero to Jupyterhub' 54 copyright = '2017, Chris Holdgraf' 55 author = 'Chris Holdgraf' 56 57 # The version info for the project you're documenting, acts as replacement for 58 # |version| and |release|, also used in various other places throughout the 59 # built documents. 60 # 61 # The short X.Y version. 62 version = '0.1' 63 # The full version, including alpha/beta/rc tags. 64 release = '0.1' 65 66 # The language for content autogenerated by Sphinx. Refer to documentation 67 # for a list of supported languages. 68 # 69 # This is also used if you do content translation via gettext catalogs. 70 # Usually you set "language" from the command line for these cases. 71 language = None 72 73 # List of patterns, relative to source directory, that match files and 74 # directories to ignore when looking for source files. 75 # This patterns also effect to html_static_path and html_extra_path 76 exclude_patterns = [] 77 78 # The name of the Pygments (syntax highlighting) style to use. 79 pygments_style = 'sphinx' 80 81 # If true, `todo` and `todoList` produce output, else they produce nothing. 82 todo_include_todos = False 83 84 85 # -- Options for HTML output ---------------------------------------------- 86 87 # The theme to use for HTML and HTML Help pages. See the documentation for 88 # a list of builtin themes. 
89 # 90 html_theme = 'alabaster' 91 html_favicon = '_static/images/logo/favicon.ico' 92 html_logo = '_static/images/logo/logo.png' 93 94 # Theme options are theme-specific and customize the look and feel of a theme 95 # further. For a list of options available for each theme, see the 96 # documentation. 97 # 98 # html_theme_options = {} 99 100 # Add any paths that contain custom static files (such as style sheets) here, 101 # relative to this directory. They are copied after the builtin static files, 102 # so a file named "default.css" will overwrite the builtin "default.css". 103 html_static_path = ['_static'] 104 105 106 # -- Options for HTMLHelp output ------------------------------------------ 107 108 # Output file base name for HTML help builder. 109 htmlhelp_basename = 'ZeroToJupyterhubDoc' 110 111 112 # -- Options for LaTeX output --------------------------------------------- 113 114 latex_elements = { 115 # The paper size ('letterpaper' or 'a4paper'). 116 # 117 # 'papersize': 'letterpaper', 118 119 # The font size ('10pt', '11pt' or '12pt'). 120 # 121 # 'pointsize': '10pt', 122 123 # Additional stuff for the LaTeX preamble. 124 # 125 # 'preamble': '', 126 127 # Latex figure (float) alignment 128 # 129 # 'figure_align': 'htbp', 130 } 131 132 # Grouping the document tree into LaTeX files. List of tuples 133 # (source start file, target name, title, 134 # author, documentclass [howto, manual, or own class]). 135 latex_documents = [ 136 (master_doc, 'ZeroToJupyterhubDoc.tex', 'Zero to JupyterHub', 137 'Chris Holdgraf', 'manual'), 138 ] 139 140 141 # -- Options for manual page output --------------------------------------- 142 143 # One entry per manual page. List of tuples 144 # (source start file, name, description, authors, manual section). 145 man_pages = [ 146 (master_doc, 'zerotojupyterhub', 'Zero to JupyterHub', 147 [author], 1) 148 ] 149 150 151 # -- Options for Texinfo output ------------------------------------------- 152 153 # Grouping the document tree into Texinfo files. List of tuples 154 # (source start file, target name, title, author, 155 # dir menu entry, description, category) 156 texinfo_documents = [ 157 (master_doc, 'ZeroToJupyterhubDoc', 'Zero to JupyterHub', 158 author, 'ZeroToJupyterhubDoc', 'One line description of project.', 159 'Miscellaneous'), 160 ] 161 162 163 164 # -- Options for Epub output ---------------------------------------------- 165 166 # Bibliographic Dublin Core info. 167 epub_title = project 168 epub_author = author 169 epub_publisher = author 170 epub_copyright = copyright 171 172 # The unique identifier of the text. This can be a ISBN number 173 # or the project homepage. 174 # 175 # epub_identifier = '' 176 177 # A unique identification for the text. 178 # 179 # epub_uid = '' 180 181 # A list of files that should not be packed into the epub file. 182 epub_exclude_files = ['search.html'] 183 184 185 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/doc/source/conf.py b/doc/source/conf.py --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -50,9 +50,9 @@ master_doc = 'index' # General information about the project. -project = 'Zero to Jupyterhub' -copyright = '2017, Chris Holdgraf' -author = 'Chris Holdgraf' +project = u'Zero to JupyterHub with Kubernetes' +copyright = u'2016, Project Jupyter team' +author = u'Project Jupyter team' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the
{"golden_diff": "diff --git a/doc/source/conf.py b/doc/source/conf.py\n--- a/doc/source/conf.py\n+++ b/doc/source/conf.py\n@@ -50,9 +50,9 @@\n master_doc = 'index'\n \n # General information about the project.\n-project = 'Zero to Jupyterhub'\n-copyright = '2017, Chris Holdgraf'\n-author = 'Chris Holdgraf'\n+project = u'Zero to JupyterHub with Kubernetes'\n+copyright = u'2016, Project Jupyter team'\n+author = u'Project Jupyter team'\n \n # The version info for the project you're documenting, acts as replacement for\n # |version| and |release|, also used in various other places throughout the\n", "issue": "Add Jupyter structure for docs\n- [x] Add readthedocs.yml\r\n- [x] move environment.yml inside source directory\r\n- [ ] add project jupyter copyright\n", "before_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Learning with JupyterHub documentation build configuration file, created by\n# sphinx-quickstart on Fri Mar 17 16:07:58 2017.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\nimport recommonmark\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = ['sphinx.ext.mathjax']\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\nsource_parsers = {\n '.md': 'recommonmark.parser.CommonMarkParser',\n}\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = ['.rst', '.md']\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Zero to Jupyterhub'\ncopyright = '2017, Chris Holdgraf'\nauthor = 'Chris Holdgraf'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '0.1'\n# The full version, including alpha/beta/rc tags.\nrelease = '0.1'\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = []\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'alabaster'\nhtml_favicon = '_static/images/logo/favicon.ico'\nhtml_logo = '_static/images/logo/logo.png'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'ZeroToJupyterhubDoc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'ZeroToJupyterhubDoc.tex', 'Zero to JupyterHub',\n 'Chris Holdgraf', 'manual'),\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'zerotojupyterhub', 'Zero to JupyterHub',\n [author], 1)\n]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'ZeroToJupyterhubDoc', 'Zero to JupyterHub',\n author, 'ZeroToJupyterhubDoc', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n\n\n# -- Options for Epub output ----------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = project\nepub_author = author\nepub_publisher = author\nepub_copyright = copyright\n\n# The unique identifier of the text. 
This can be a ISBN number\n# or the project homepage.\n#\n# epub_identifier = ''\n\n# A unique identification for the text.\n#\n# epub_uid = ''\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = ['search.html']\n\n\n", "path": "doc/source/conf.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Learning with JupyterHub documentation build configuration file, created by\n# sphinx-quickstart on Fri Mar 17 16:07:58 2017.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\nimport recommonmark\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = ['sphinx.ext.mathjax']\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\nsource_parsers = {\n '.md': 'recommonmark.parser.CommonMarkParser',\n}\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = ['.rst', '.md']\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'Zero to JupyterHub with Kubernetes'\ncopyright = u'2016, Project Jupyter team'\nauthor = u'Project Jupyter team'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '0.1'\n# The full version, including alpha/beta/rc tags.\nrelease = '0.1'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = []\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'alabaster'\nhtml_favicon = '_static/images/logo/favicon.ico'\nhtml_logo = '_static/images/logo/logo.png'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. 
For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'ZeroToJupyterhubDoc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'ZeroToJupyterhubDoc.tex', 'Zero to JupyterHub',\n 'Chris Holdgraf', 'manual'),\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'zerotojupyterhub', 'Zero to JupyterHub',\n [author], 1)\n]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'ZeroToJupyterhubDoc', 'Zero to JupyterHub',\n author, 'ZeroToJupyterhubDoc', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n\n\n# -- Options for Epub output ----------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = project\nepub_author = author\nepub_publisher = author\nepub_copyright = copyright\n\n# The unique identifier of the text. This can be a ISBN number\n# or the project homepage.\n#\n# epub_identifier = ''\n\n# A unique identification for the text.\n#\n# epub_uid = ''\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = ['search.html']\n\n\n", "path": "doc/source/conf.py"}]}
num_tokens: 2,056
num_tokens_diff: 156
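The before/after file contents in the record above are hard to read as escaped strings; the substantive change they encode is the recommonmark wiring in the Sphinx conf.py. A minimal extract, copied from the record's after_files rather than written fresh:

```python
# Markdown support added to the Sphinx build: recommonmark is imported and
# its CommonMark parser is registered for .md sources next to .rst files.
import recommonmark

source_parsers = {
    '.md': 'recommonmark.parser.CommonMarkParser',
}

source_suffix = ['.rst', '.md']
```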
problem_id: gh_patches_debug_27996
source: rasdani/github-patches
task_type: git_diff
in_source_id: goauthentik__authentik-6031
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add UnwillingToPerformError to ldap/password.py:95 **Is your feature request related to a problem? Please describe.** Authentik tries to modify the password the AD way, which uses a `modify` operation with the `unicodePwd` field, and then if it gets an `AttributeError` it tries the extended operation. However, [LLDAP](https://github.com/lldap/lldap) doesn't return an `AttributeError` but an `UnwillingToPerformError` since LLDAP doesn't support the modify operation at all, so it completely fails. **Describe the solution you'd like** Add an `UnwillingToPerformError` to the `except` on [`ldap/password.py:95`](https://github.com/goauthentik/authentik/blob/main/authentik/sources/ldap/password.py#L95) **Describe alternatives you've considered** There's no alternative. **Additional context** • [LLDAP log](https://cdn.discordapp.com/attachments/1108339414494613514/1108870676980449300/dockerlogs.txt) <details> <summary>Authentik Event Log</summary> <img width="1604" alt="image" src="https://github.com/goauthentik/authentik/assets/2737082/047b64fa-468f-4a91-b5e7-4eecd27768b6"> </details> This is a follow-up on https://github.com/goauthentik/authentik/issues/5652. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `authentik/sources/ldap/password.py` Content: ``` 1 """Help validate and update passwords in LDAP""" 2 from enum import IntFlag 3 from re import split 4 from typing import Optional 5 6 from ldap3 import BASE 7 from ldap3.core.exceptions import LDAPAttributeError 8 from structlog.stdlib import get_logger 9 10 from authentik.core.models import User 11 from authentik.sources.ldap.auth import LDAP_DISTINGUISHED_NAME 12 from authentik.sources.ldap.models import LDAPSource 13 14 LOGGER = get_logger() 15 16 NON_ALPHA = r"~!@#$%^&*_-+=`|\(){}[]:;\"'<>,.?/" 17 RE_DISPLAYNAME_SEPARATORS = r",\.–—_\s#\t" 18 19 20 class PwdProperties(IntFlag): 21 """Possible values for the pwdProperties attribute""" 22 23 DOMAIN_PASSWORD_COMPLEX = 1 24 DOMAIN_PASSWORD_NO_ANON_CHANGE = 2 25 DOMAIN_PASSWORD_NO_CLEAR_CHANGE = 4 26 DOMAIN_LOCKOUT_ADMINS = 8 27 DOMAIN_PASSWORD_STORE_CLEARTEXT = 16 28 DOMAIN_REFUSE_PASSWORD_CHANGE = 32 29 30 31 class PasswordCategories(IntFlag): 32 """Password categories as defined by Microsoft, a category can only be counted 33 once, hence intflag.""" 34 35 NONE = 0 36 ALPHA_LOWER = 1 37 ALPHA_UPPER = 2 38 ALPHA_OTHER = 4 39 NUMERIC = 8 40 SYMBOL = 16 41 42 43 class LDAPPasswordChanger: 44 """Help validate and update passwords in LDAP""" 45 46 _source: LDAPSource 47 48 def __init__(self, source: LDAPSource) -> None: 49 self._source = source 50 self._connection = source.connection() 51 52 def get_domain_root_dn(self) -> str: 53 """Attempt to get root DN via MS specific fields or generic LDAP fields""" 54 info = self._connection.server.info 55 if "rootDomainNamingContext" in info.other: 56 return info.other["rootDomainNamingContext"][0] 57 naming_contexts = info.naming_contexts 58 naming_contexts.sort(key=len) 59 return naming_contexts[0] 60 61 def check_ad_password_complexity_enabled(self) -> bool: 62 """Check if DOMAIN_PASSWORD_COMPLEX is enabled""" 63 root_dn = self.get_domain_root_dn() 64 try: 65 root_attrs = self._connection.extend.standard.paged_search( 66 search_base=root_dn, 67 search_filter="(objectClass=*)", 68 search_scope=BASE, 69 attributes=["pwdProperties"], 70 ) 71 
root_attrs = list(root_attrs)[0] 72 except (LDAPAttributeError, KeyError, IndexError): 73 return False 74 raw_pwd_properties = root_attrs.get("attributes", {}).get("pwdProperties", None) 75 if not raw_pwd_properties: 76 return False 77 78 try: 79 pwd_properties = PwdProperties(raw_pwd_properties) 80 except ValueError: 81 return False 82 if PwdProperties.DOMAIN_PASSWORD_COMPLEX in pwd_properties: 83 return True 84 85 return False 86 87 def change_password(self, user: User, password: str): 88 """Change user's password""" 89 user_dn = user.attributes.get(LDAP_DISTINGUISHED_NAME, None) 90 if not user_dn: 91 LOGGER.info(f"User has no {LDAP_DISTINGUISHED_NAME} set.") 92 return 93 try: 94 self._connection.extend.microsoft.modify_password(user_dn, password) 95 except LDAPAttributeError: 96 self._connection.extend.standard.modify_password(user_dn, new_password=password) 97 98 def _ad_check_password_existing(self, password: str, user_dn: str) -> bool: 99 """Check if a password contains sAMAccount or displayName""" 100 users = list( 101 self._connection.extend.standard.paged_search( 102 search_base=user_dn, 103 search_filter=self._source.user_object_filter, 104 search_scope=BASE, 105 attributes=["displayName", "sAMAccountName"], 106 ) 107 ) 108 if len(users) != 1: 109 raise AssertionError() 110 user_attributes = users[0]["attributes"] 111 # If sAMAccountName is longer than 3 chars, check if its contained in password 112 if len(user_attributes["sAMAccountName"]) >= 3: 113 if password.lower() in user_attributes["sAMAccountName"].lower(): 114 return False 115 # No display name set, can't check any further 116 if len(user_attributes["displayName"]) < 1: 117 return True 118 for display_name in user_attributes["displayName"]: 119 display_name_tokens = split(RE_DISPLAYNAME_SEPARATORS, display_name) 120 for token in display_name_tokens: 121 # Ignore tokens under 3 chars 122 if len(token) < 3: 123 continue 124 if token.lower() in password.lower(): 125 return False 126 return True 127 128 def ad_password_complexity(self, password: str, user: Optional[User] = None) -> bool: 129 """Check if password matches Active directory password policies 130 131 https://docs.microsoft.com/en-us/windows/security/threat-protection/ 132 security-policy-settings/password-must-meet-complexity-requirements 133 """ 134 if user: 135 # Check if password contains sAMAccountName or displayNames 136 if LDAP_DISTINGUISHED_NAME in user.attributes: 137 existing_user_check = self._ad_check_password_existing( 138 password, user.attributes.get(LDAP_DISTINGUISHED_NAME) 139 ) 140 if not existing_user_check: 141 LOGGER.debug("Password failed name check", user=user) 142 return existing_user_check 143 144 # Step 2, match at least 3 of 5 categories 145 matched_categories = PasswordCategories.NONE 146 required = 3 147 for letter in password: 148 # Only match one category per letter, 149 if letter.islower(): 150 matched_categories |= PasswordCategories.ALPHA_LOWER 151 elif letter.isupper(): 152 matched_categories |= PasswordCategories.ALPHA_UPPER 153 elif not letter.isascii() and letter.isalpha(): 154 # Not exactly matching microsoft's policy, but count it as "Other unicode" char 155 # when its alpha and not ascii 156 matched_categories |= PasswordCategories.ALPHA_OTHER 157 elif letter.isnumeric(): 158 matched_categories |= PasswordCategories.NUMERIC 159 elif letter in NON_ALPHA: 160 matched_categories |= PasswordCategories.SYMBOL 161 if bin(matched_categories).count("1") < required: 162 LOGGER.debug( 163 "Password didn't match enough categories", 
164 has=matched_categories, 165 must=required, 166 ) 167 return False 168 LOGGER.debug("Password matched categories", has=matched_categories, must=required) 169 return True 170 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/authentik/sources/ldap/password.py b/authentik/sources/ldap/password.py --- a/authentik/sources/ldap/password.py +++ b/authentik/sources/ldap/password.py @@ -4,7 +4,7 @@ from typing import Optional from ldap3 import BASE -from ldap3.core.exceptions import LDAPAttributeError +from ldap3.core.exceptions import LDAPAttributeError, LDAPUnwillingToPerformResult from structlog.stdlib import get_logger from authentik.core.models import User @@ -69,7 +69,7 @@ attributes=["pwdProperties"], ) root_attrs = list(root_attrs)[0] - except (LDAPAttributeError, KeyError, IndexError): + except (LDAPAttributeError, LDAPUnwillingToPerformResult, KeyError, IndexError): return False raw_pwd_properties = root_attrs.get("attributes", {}).get("pwdProperties", None) if not raw_pwd_properties: @@ -92,7 +92,7 @@ return try: self._connection.extend.microsoft.modify_password(user_dn, password) - except LDAPAttributeError: + except (LDAPAttributeError, LDAPUnwillingToPerformResult): self._connection.extend.standard.modify_password(user_dn, new_password=password) def _ad_check_password_existing(self, password: str, user_dn: str) -> bool:
{"golden_diff": "diff --git a/authentik/sources/ldap/password.py b/authentik/sources/ldap/password.py\n--- a/authentik/sources/ldap/password.py\n+++ b/authentik/sources/ldap/password.py\n@@ -4,7 +4,7 @@\n from typing import Optional\n \n from ldap3 import BASE\n-from ldap3.core.exceptions import LDAPAttributeError\n+from ldap3.core.exceptions import LDAPAttributeError, LDAPUnwillingToPerformResult\n from structlog.stdlib import get_logger\n \n from authentik.core.models import User\n@@ -69,7 +69,7 @@\n attributes=[\"pwdProperties\"],\n )\n root_attrs = list(root_attrs)[0]\n- except (LDAPAttributeError, KeyError, IndexError):\n+ except (LDAPAttributeError, LDAPUnwillingToPerformResult, KeyError, IndexError):\n return False\n raw_pwd_properties = root_attrs.get(\"attributes\", {}).get(\"pwdProperties\", None)\n if not raw_pwd_properties:\n@@ -92,7 +92,7 @@\n return\n try:\n self._connection.extend.microsoft.modify_password(user_dn, password)\n- except LDAPAttributeError:\n+ except (LDAPAttributeError, LDAPUnwillingToPerformResult):\n self._connection.extend.standard.modify_password(user_dn, new_password=password)\n \n def _ad_check_password_existing(self, password: str, user_dn: str) -> bool:\n", "issue": "Add UnwillingToPerformError to ldap/password.py:95\n**Is your feature request related to a problem? Please describe.**\r\nAuthentik tries to modify the password the AD way, which uses a `modify` operation with the `unicodePwd` field, and then if it gets an `AttributeError` it tries the extended operation. However, [LLDAP](https://github.com/lldap/lldap) doesn't return an `AttributeError` but an `UnwillingToPerformError` since LLDAP doesn't support the modify operation at all, so it completely fails.\r\n\r\n**Describe the solution you'd like**\r\nAdd an `UnwillingToPerformError` to the `except` on [`ldap/password.py:95`](https://github.com/goauthentik/authentik/blob/main/authentik/sources/ldap/password.py#L95)\r\n\r\n**Describe alternatives you've considered**\r\nThere's no alternative.\r\n\r\n**Additional context**\r\n\u2022 [LLDAP log](https://cdn.discordapp.com/attachments/1108339414494613514/1108870676980449300/dockerlogs.txt)\r\n<details>\r\n<summary>Authentik Event Log</summary>\r\n<img width=\"1604\" alt=\"image\" src=\"https://github.com/goauthentik/authentik/assets/2737082/047b64fa-468f-4a91-b5e7-4eecd27768b6\">\r\n</details>\r\n\r\n\r\nThis is a follow-up on https://github.com/goauthentik/authentik/issues/5652.\r\n\n", "before_files": [{"content": "\"\"\"Help validate and update passwords in LDAP\"\"\"\nfrom enum import IntFlag\nfrom re import split\nfrom typing import Optional\n\nfrom ldap3 import BASE\nfrom ldap3.core.exceptions import LDAPAttributeError\nfrom structlog.stdlib import get_logger\n\nfrom authentik.core.models import User\nfrom authentik.sources.ldap.auth import LDAP_DISTINGUISHED_NAME\nfrom authentik.sources.ldap.models import LDAPSource\n\nLOGGER = get_logger()\n\nNON_ALPHA = r\"~!@#$%^&*_-+=`|\\(){}[]:;\\\"'<>,.?/\"\nRE_DISPLAYNAME_SEPARATORS = r\",\\.\u2013\u2014_\\s#\\t\"\n\n\nclass PwdProperties(IntFlag):\n \"\"\"Possible values for the pwdProperties attribute\"\"\"\n\n DOMAIN_PASSWORD_COMPLEX = 1\n DOMAIN_PASSWORD_NO_ANON_CHANGE = 2\n DOMAIN_PASSWORD_NO_CLEAR_CHANGE = 4\n DOMAIN_LOCKOUT_ADMINS = 8\n DOMAIN_PASSWORD_STORE_CLEARTEXT = 16\n DOMAIN_REFUSE_PASSWORD_CHANGE = 32\n\n\nclass PasswordCategories(IntFlag):\n \"\"\"Password categories as defined by Microsoft, a category can only be counted\n once, hence intflag.\"\"\"\n\n NONE = 0\n ALPHA_LOWER = 1\n 
ALPHA_UPPER = 2\n ALPHA_OTHER = 4\n NUMERIC = 8\n SYMBOL = 16\n\n\nclass LDAPPasswordChanger:\n \"\"\"Help validate and update passwords in LDAP\"\"\"\n\n _source: LDAPSource\n\n def __init__(self, source: LDAPSource) -> None:\n self._source = source\n self._connection = source.connection()\n\n def get_domain_root_dn(self) -> str:\n \"\"\"Attempt to get root DN via MS specific fields or generic LDAP fields\"\"\"\n info = self._connection.server.info\n if \"rootDomainNamingContext\" in info.other:\n return info.other[\"rootDomainNamingContext\"][0]\n naming_contexts = info.naming_contexts\n naming_contexts.sort(key=len)\n return naming_contexts[0]\n\n def check_ad_password_complexity_enabled(self) -> bool:\n \"\"\"Check if DOMAIN_PASSWORD_COMPLEX is enabled\"\"\"\n root_dn = self.get_domain_root_dn()\n try:\n root_attrs = self._connection.extend.standard.paged_search(\n search_base=root_dn,\n search_filter=\"(objectClass=*)\",\n search_scope=BASE,\n attributes=[\"pwdProperties\"],\n )\n root_attrs = list(root_attrs)[0]\n except (LDAPAttributeError, KeyError, IndexError):\n return False\n raw_pwd_properties = root_attrs.get(\"attributes\", {}).get(\"pwdProperties\", None)\n if not raw_pwd_properties:\n return False\n\n try:\n pwd_properties = PwdProperties(raw_pwd_properties)\n except ValueError:\n return False\n if PwdProperties.DOMAIN_PASSWORD_COMPLEX in pwd_properties:\n return True\n\n return False\n\n def change_password(self, user: User, password: str):\n \"\"\"Change user's password\"\"\"\n user_dn = user.attributes.get(LDAP_DISTINGUISHED_NAME, None)\n if not user_dn:\n LOGGER.info(f\"User has no {LDAP_DISTINGUISHED_NAME} set.\")\n return\n try:\n self._connection.extend.microsoft.modify_password(user_dn, password)\n except LDAPAttributeError:\n self._connection.extend.standard.modify_password(user_dn, new_password=password)\n\n def _ad_check_password_existing(self, password: str, user_dn: str) -> bool:\n \"\"\"Check if a password contains sAMAccount or displayName\"\"\"\n users = list(\n self._connection.extend.standard.paged_search(\n search_base=user_dn,\n search_filter=self._source.user_object_filter,\n search_scope=BASE,\n attributes=[\"displayName\", \"sAMAccountName\"],\n )\n )\n if len(users) != 1:\n raise AssertionError()\n user_attributes = users[0][\"attributes\"]\n # If sAMAccountName is longer than 3 chars, check if its contained in password\n if len(user_attributes[\"sAMAccountName\"]) >= 3:\n if password.lower() in user_attributes[\"sAMAccountName\"].lower():\n return False\n # No display name set, can't check any further\n if len(user_attributes[\"displayName\"]) < 1:\n return True\n for display_name in user_attributes[\"displayName\"]:\n display_name_tokens = split(RE_DISPLAYNAME_SEPARATORS, display_name)\n for token in display_name_tokens:\n # Ignore tokens under 3 chars\n if len(token) < 3:\n continue\n if token.lower() in password.lower():\n return False\n return True\n\n def ad_password_complexity(self, password: str, user: Optional[User] = None) -> bool:\n \"\"\"Check if password matches Active directory password policies\n\n https://docs.microsoft.com/en-us/windows/security/threat-protection/\n security-policy-settings/password-must-meet-complexity-requirements\n \"\"\"\n if user:\n # Check if password contains sAMAccountName or displayNames\n if LDAP_DISTINGUISHED_NAME in user.attributes:\n existing_user_check = self._ad_check_password_existing(\n password, user.attributes.get(LDAP_DISTINGUISHED_NAME)\n )\n if not existing_user_check:\n LOGGER.debug(\"Password 
failed name check\", user=user)\n return existing_user_check\n\n # Step 2, match at least 3 of 5 categories\n matched_categories = PasswordCategories.NONE\n required = 3\n for letter in password:\n # Only match one category per letter,\n if letter.islower():\n matched_categories |= PasswordCategories.ALPHA_LOWER\n elif letter.isupper():\n matched_categories |= PasswordCategories.ALPHA_UPPER\n elif not letter.isascii() and letter.isalpha():\n # Not exactly matching microsoft's policy, but count it as \"Other unicode\" char\n # when its alpha and not ascii\n matched_categories |= PasswordCategories.ALPHA_OTHER\n elif letter.isnumeric():\n matched_categories |= PasswordCategories.NUMERIC\n elif letter in NON_ALPHA:\n matched_categories |= PasswordCategories.SYMBOL\n if bin(matched_categories).count(\"1\") < required:\n LOGGER.debug(\n \"Password didn't match enough categories\",\n has=matched_categories,\n must=required,\n )\n return False\n LOGGER.debug(\"Password matched categories\", has=matched_categories, must=required)\n return True\n", "path": "authentik/sources/ldap/password.py"}], "after_files": [{"content": "\"\"\"Help validate and update passwords in LDAP\"\"\"\nfrom enum import IntFlag\nfrom re import split\nfrom typing import Optional\n\nfrom ldap3 import BASE\nfrom ldap3.core.exceptions import LDAPAttributeError, LDAPUnwillingToPerformResult\nfrom structlog.stdlib import get_logger\n\nfrom authentik.core.models import User\nfrom authentik.sources.ldap.auth import LDAP_DISTINGUISHED_NAME\nfrom authentik.sources.ldap.models import LDAPSource\n\nLOGGER = get_logger()\n\nNON_ALPHA = r\"~!@#$%^&*_-+=`|\\(){}[]:;\\\"'<>,.?/\"\nRE_DISPLAYNAME_SEPARATORS = r\",\\.\u2013\u2014_\\s#\\t\"\n\n\nclass PwdProperties(IntFlag):\n \"\"\"Possible values for the pwdProperties attribute\"\"\"\n\n DOMAIN_PASSWORD_COMPLEX = 1\n DOMAIN_PASSWORD_NO_ANON_CHANGE = 2\n DOMAIN_PASSWORD_NO_CLEAR_CHANGE = 4\n DOMAIN_LOCKOUT_ADMINS = 8\n DOMAIN_PASSWORD_STORE_CLEARTEXT = 16\n DOMAIN_REFUSE_PASSWORD_CHANGE = 32\n\n\nclass PasswordCategories(IntFlag):\n \"\"\"Password categories as defined by Microsoft, a category can only be counted\n once, hence intflag.\"\"\"\n\n NONE = 0\n ALPHA_LOWER = 1\n ALPHA_UPPER = 2\n ALPHA_OTHER = 4\n NUMERIC = 8\n SYMBOL = 16\n\n\nclass LDAPPasswordChanger:\n \"\"\"Help validate and update passwords in LDAP\"\"\"\n\n _source: LDAPSource\n\n def __init__(self, source: LDAPSource) -> None:\n self._source = source\n self._connection = source.connection()\n\n def get_domain_root_dn(self) -> str:\n \"\"\"Attempt to get root DN via MS specific fields or generic LDAP fields\"\"\"\n info = self._connection.server.info\n if \"rootDomainNamingContext\" in info.other:\n return info.other[\"rootDomainNamingContext\"][0]\n naming_contexts = info.naming_contexts\n naming_contexts.sort(key=len)\n return naming_contexts[0]\n\n def check_ad_password_complexity_enabled(self) -> bool:\n \"\"\"Check if DOMAIN_PASSWORD_COMPLEX is enabled\"\"\"\n root_dn = self.get_domain_root_dn()\n try:\n root_attrs = self._connection.extend.standard.paged_search(\n search_base=root_dn,\n search_filter=\"(objectClass=*)\",\n search_scope=BASE,\n attributes=[\"pwdProperties\"],\n )\n root_attrs = list(root_attrs)[0]\n except (LDAPAttributeError, LDAPUnwillingToPerformResult, KeyError, IndexError):\n return False\n raw_pwd_properties = root_attrs.get(\"attributes\", {}).get(\"pwdProperties\", None)\n if not raw_pwd_properties:\n return False\n\n try:\n pwd_properties = PwdProperties(raw_pwd_properties)\n except 
ValueError:\n return False\n if PwdProperties.DOMAIN_PASSWORD_COMPLEX in pwd_properties:\n return True\n\n return False\n\n def change_password(self, user: User, password: str):\n \"\"\"Change user's password\"\"\"\n user_dn = user.attributes.get(LDAP_DISTINGUISHED_NAME, None)\n if not user_dn:\n LOGGER.info(f\"User has no {LDAP_DISTINGUISHED_NAME} set.\")\n return\n try:\n self._connection.extend.microsoft.modify_password(user_dn, password)\n except (LDAPAttributeError, LDAPUnwillingToPerformResult):\n self._connection.extend.standard.modify_password(user_dn, new_password=password)\n\n def _ad_check_password_existing(self, password: str, user_dn: str) -> bool:\n \"\"\"Check if a password contains sAMAccount or displayName\"\"\"\n users = list(\n self._connection.extend.standard.paged_search(\n search_base=user_dn,\n search_filter=self._source.user_object_filter,\n search_scope=BASE,\n attributes=[\"displayName\", \"sAMAccountName\"],\n )\n )\n if len(users) != 1:\n raise AssertionError()\n user_attributes = users[0][\"attributes\"]\n # If sAMAccountName is longer than 3 chars, check if its contained in password\n if len(user_attributes[\"sAMAccountName\"]) >= 3:\n if password.lower() in user_attributes[\"sAMAccountName\"].lower():\n return False\n # No display name set, can't check any further\n if len(user_attributes[\"displayName\"]) < 1:\n return True\n for display_name in user_attributes[\"displayName\"]:\n display_name_tokens = split(RE_DISPLAYNAME_SEPARATORS, display_name)\n for token in display_name_tokens:\n # Ignore tokens under 3 chars\n if len(token) < 3:\n continue\n if token.lower() in password.lower():\n return False\n return True\n\n def ad_password_complexity(self, password: str, user: Optional[User] = None) -> bool:\n \"\"\"Check if password matches Active directory password policies\n\n https://docs.microsoft.com/en-us/windows/security/threat-protection/\n security-policy-settings/password-must-meet-complexity-requirements\n \"\"\"\n if user:\n # Check if password contains sAMAccountName or displayNames\n if LDAP_DISTINGUISHED_NAME in user.attributes:\n existing_user_check = self._ad_check_password_existing(\n password, user.attributes.get(LDAP_DISTINGUISHED_NAME)\n )\n if not existing_user_check:\n LOGGER.debug(\"Password failed name check\", user=user)\n return existing_user_check\n\n # Step 2, match at least 3 of 5 categories\n matched_categories = PasswordCategories.NONE\n required = 3\n for letter in password:\n # Only match one category per letter,\n if letter.islower():\n matched_categories |= PasswordCategories.ALPHA_LOWER\n elif letter.isupper():\n matched_categories |= PasswordCategories.ALPHA_UPPER\n elif not letter.isascii() and letter.isalpha():\n # Not exactly matching microsoft's policy, but count it as \"Other unicode\" char\n # when its alpha and not ascii\n matched_categories |= PasswordCategories.ALPHA_OTHER\n elif letter.isnumeric():\n matched_categories |= PasswordCategories.NUMERIC\n elif letter in NON_ALPHA:\n matched_categories |= PasswordCategories.SYMBOL\n if bin(matched_categories).count(\"1\") < required:\n LOGGER.debug(\n \"Password didn't match enough categories\",\n has=matched_categories,\n must=required,\n )\n return False\n LOGGER.debug(\"Password matched categories\", has=matched_categories, must=required)\n return True\n", "path": "authentik/sources/ldap/password.py"}]}
num_tokens: 2,399
num_tokens_diff: 295
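The golden diff for this record reduces to widening one except clause so that LLDAP's "unwilling to perform" result takes the same fallback path as the attribute error. A minimal sketch of that handling (the exception names and ldap3 calls come from the record itself; the standalone function and its connection/user_dn/password arguments are simplifications for illustration):

```python
# Sketch of the widened exception handling described in the issue above.
from ldap3.core.exceptions import LDAPAttributeError, LDAPUnwillingToPerformResult


def change_password(connection, user_dn, password):
    try:
        # AD-style password change via the unicodePwd attribute.
        connection.extend.microsoft.modify_password(user_dn, password)
    except (LDAPAttributeError, LDAPUnwillingToPerformResult):
        # Servers such as LLDAP reject the modify outright, so fall back to
        # the standard password modify extended operation.
        connection.extend.standard.modify_password(user_dn, new_password=password)
```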
problem_id: gh_patches_debug_30967
source: rasdani/github-patches
task_type: git_diff
in_source_id: RedHatInsights__insights-core-1643
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Exceptions on the command line with invalid arguments in the archive position When trying to run `python -m insights foo` or `python -m insights existent_but_invalid.py`, an exception is raised all the way to the top, and if the file exists, a partial execution against the current machine happens. We should catch the exception, never partially execute, and provide a simple message for a better CLI experience. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `insights/__init__.py` Content: ``` 1 """ 2 Insights Core is a data collection and analysis framework that is built for 3 extensibility and rapid development. It includes a set of reusable components 4 for gathering data in myriad ways and providing a reliable object model for it. 5 6 .. code-block: python 7 8 >>> from insights import run 9 >>> from insights.parsers import installed_rpms as rpm 10 >>> lower = rpm.Rpm("bash-4.4.11-1.fc26") 11 >>> upper = rpm.Rpm("bash-4.4.22-1.fc26") 12 >>> results = run(rpm.Installed) 13 >>> rpms = results[rpm.Installed] 14 >>> rpms.newest("bash") 15 "0:bash-4.4.12-7.fc26" 16 >>> lower <= rpms.newest("bash") < upper 17 True 18 """ 19 from __future__ import print_function 20 import logging 21 import pkgutil 22 import os 23 import sys 24 import yaml 25 26 from .core import Scannable, LogFileOutput, Parser, IniConfigFile # noqa: F401 27 from .core import FileListing, LegacyItemAccess, SysconfigOptions # noqa: F401 28 from .core import YAMLParser, JSONParser, XMLParser, CommandParser # noqa: F401 29 from .core import AttributeDict # noqa: F401 30 from .core import Syslog # noqa: F401 31 from .core.archives import COMPRESSION_TYPES, extract # noqa: F401 32 from .core import dr # noqa: F401 33 from .core.context import ClusterArchiveContext, HostContext, HostArchiveContext, SerializedArchiveContext # noqa: F401 34 from .core.dr import SkipComponent # noqa: F401 35 from .core.hydration import create_context 36 from .core.plugins import combiner, fact, metadata, parser, rule # noqa: F401 37 from .core.plugins import datasource, condition, incident # noqa: F401 38 from .core.plugins import make_response, make_metadata, make_fingerprint # noqa: F401 39 from .core.plugins import make_pass, make_fail # noqa: F401 40 from .core.filters import add_filter, apply_filters, get_filters # noqa: F401 41 from .core.serde import Hydration 42 from .formats import get_formatter 43 from .parsers import get_active_lines # noqa: F401 44 from .util import defaults # noqa: F401 45 46 log = logging.getLogger(__name__) 47 48 49 package_info = dict((k, None) for k in ["RELEASE", "COMMIT", "VERSION", "NAME"]) 50 51 52 for name in package_info: 53 package_info[name] = pkgutil.get_data(__name__, name).strip().decode("utf-8") 54 55 56 def get_nvr(): 57 return "{0}-{1}-{2}".format(package_info["NAME"], 58 package_info["VERSION"], 59 package_info["RELEASE"]) 60 61 62 RULES_STATUS = {} 63 """ 64 Mapping of dictionaries containing nvr and commitid for each rule repo included 65 in this instance 66 67 {"rule_repo_1": {"version": nvr(), "commit": sha1}} 68 """ 69 70 71 def add_status(name, nvr, commit): 72 """ 73 Rule repositories should call this method in their package __init__ to 74 register their version information. 
75 """ 76 RULES_STATUS[name] = {"version": nvr, "commit": commit} 77 78 79 def process_dir(broker, root, graph, context, inventory=None): 80 ctx = create_context(root, context) 81 log.debug("Processing %s with %s" % (root, ctx)) 82 83 if isinstance(ctx, ClusterArchiveContext): 84 from .core.cluster import process_cluster 85 archives = [f for f in ctx.all_files if f.endswith(COMPRESSION_TYPES)] 86 return process_cluster(archives, broker=broker, inventory=inventory) 87 88 broker[ctx.__class__] = ctx 89 if isinstance(ctx, SerializedArchiveContext): 90 h = Hydration(ctx.root) 91 broker = h.hydrate(broker=broker) 92 broker = dr.run(graph, broker=broker) 93 return broker 94 95 96 def _run(broker, graph=None, root=None, context=None, inventory=None): 97 """ 98 run is a general interface that is meant for stand alone scripts to use 99 when executing insights components. 100 101 Args: 102 root (str): None will causes a host collection in which command and 103 file specs are run. A directory or archive path will cause 104 collection from the directory or archive, and only file type specs 105 or those that depend on `insights.core.context.HostArchiveContext` 106 will execute. 107 component (function or class): The component to execute. Will only execute 108 the component and its dependency graph. If None, all components with 109 met dependencies will execute. 110 111 Returns: 112 broker: object containing the result of the evaluation. 113 """ 114 115 if not root: 116 context = context or HostContext 117 broker[context] = context() 118 return dr.run(graph, broker=broker) 119 120 if os.path.isdir(root): 121 return process_dir(broker, root, graph, context, inventory=inventory) 122 else: 123 with extract(root) as ex: 124 return process_dir(broker, ex.tmp_dir, graph, context, inventory=inventory) 125 126 127 def apply_configs(configs): 128 """ 129 Configures components. They can be enabled or disabled, have timeouts set 130 if applicable, and have metadata customized. Valid keys are name, enabled, 131 metadata, and timeout. 132 133 Args: 134 configs (list): a list of dictionaries with the following keys: 135 name, enabled, metadata, and timeout. All keys are optional except 136 name. 137 138 name is the prefix or exact name of any loaded component. Any 139 component starting with name will have the associated configuration 140 applied. 141 142 enabled is whether the matching components will execute even if 143 their dependencies are met. Defaults to True. 144 145 timeout sets the class level timeout attribute of any component so 146 long as the attribute already exists. 147 148 metadata is any dictionary that you want to attach to the 149 component. The dictionary can be retrieved by the component at 150 runtime. 151 """ 152 delegate_keys = sorted(dr.DELEGATES, key=dr.get_name) 153 for comp_cfg in configs: 154 name = comp_cfg["name"] 155 for c in delegate_keys: 156 delegate = dr.DELEGATES[c] 157 cname = dr.get_name(c) 158 if cname.startswith(name): 159 dr.ENABLED[c] = comp_cfg.get("enabled", True) 160 delegate.metadata.update(comp_cfg.get("metadata", {})) 161 delegate.tags = set(comp_cfg.get("tags", delegate.tags)) 162 for k, v in delegate.metadata.items(): 163 if hasattr(c, k): 164 setattr(c, k, v) 165 if hasattr(c, "timeout"): 166 c.timeout = comp_cfg.get("timeout", c.timeout) 167 if cname == name: 168 break 169 170 171 def _load_context(path): 172 if path is None: 173 return 174 175 if "." 
not in path: 176 path = ".".join(["insights.core.context", path]) 177 return dr.get_component(path) 178 179 180 def run(component=None, root=None, print_summary=False, 181 context=None, inventory=None, print_component=None): 182 183 from .core import dr 184 dr.load_components("insights.specs.default") 185 dr.load_components("insights.specs.insights_archive") 186 dr.load_components("insights.specs.sos_archive") 187 dr.load_components("insights.specs.jdr_archive") 188 189 args = None 190 formatter = None 191 if print_summary: 192 import argparse 193 import logging 194 p = argparse.ArgumentParser(add_help=False) 195 p.add_argument("archive", nargs="?", help="Archive or directory to analyze.") 196 p.add_argument("-p", "--plugins", default="", help="Comma-separated list without spaces of package(s) or module(s) containing plugins.") 197 p.add_argument("-c", "--config", help="Configure components.") 198 p.add_argument("-i", "--inventory", help="Ansible inventory file for cluster analysis.") 199 p.add_argument("-v", "--verbose", help="Verbose output.", action="store_true") 200 p.add_argument("-f", "--format", help="Output format.", default="insights.formats.text") 201 p.add_argument("-D", "--debug", help="Verbose debug output.", action="store_true") 202 p.add_argument("--context", help="Execution Context. Defaults to HostContext if an archive isn't passed.") 203 204 class Args(object): 205 pass 206 207 args = Args() 208 p.parse_known_args(namespace=args) 209 p = argparse.ArgumentParser(parents=[p]) 210 args.format = "insights.formats._json" if args.format == "json" else args.format 211 args.format = "insights.formats._yaml" if args.format == "yaml" else args.format 212 fmt = args.format if "." in args.format else "insights.formats." + args.format 213 Formatter = dr.get_component(fmt) 214 if not Formatter: 215 dr.load_components(fmt, continue_on_error=False) 216 Formatter = get_formatter(fmt) 217 Formatter.configure(p) 218 p.parse_args(namespace=args) 219 formatter = Formatter(args) 220 221 logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO if args.verbose else logging.ERROR) 222 context = _load_context(args.context) or context 223 inventory = args.inventory 224 225 root = args.archive or root 226 if root: 227 root = os.path.realpath(root) 228 229 plugins = [] 230 if args.plugins: 231 for path in args.plugins.split(","): 232 path = path.strip() 233 if path.endswith(".py"): 234 path, _ = os.path.splitext(path) 235 path = path.rstrip("/").replace("/", ".") 236 plugins.append(path) 237 238 for p in plugins: 239 dr.load_components(p, continue_on_error=False) 240 241 if args.config: 242 with open(args.config) as f: 243 apply_configs(yaml.safe_load(f)) 244 245 if component is None: 246 component = [] 247 plugins = tuple(plugins) 248 for c in dr.DELEGATES: 249 if c.__module__.startswith(plugins): 250 component.append(c) 251 252 if component: 253 if not isinstance(component, (list, set)): 254 component = [component] 255 graph = {} 256 for c in component: 257 graph.update(dr.get_dependency_graph(c)) 258 else: 259 graph = dr.COMPONENTS[dr.GROUPS.single] 260 261 broker = dr.Broker() 262 263 if formatter: 264 formatter.preprocess(broker) 265 broker = _run(broker, graph, root, context=context, inventory=inventory) 266 formatter.postprocess(broker) 267 elif print_component: 268 broker = _run(broker, graph, root, context=context, inventory=inventory) 269 broker.print_component(print_component) 270 else: 271 broker = _run(broker, graph, root, context=context, inventory=inventory) 272 273 
return broker 274 275 276 def main(): 277 if "" not in sys.path: 278 sys.path.insert(0, "") 279 run(print_summary=True) 280 281 282 if __name__ == "__main__": 283 main() 284 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/insights/__init__.py b/insights/__init__.py --- a/insights/__init__.py +++ b/insights/__init__.py @@ -28,7 +28,7 @@ from .core import YAMLParser, JSONParser, XMLParser, CommandParser # noqa: F401 from .core import AttributeDict # noqa: F401 from .core import Syslog # noqa: F401 -from .core.archives import COMPRESSION_TYPES, extract # noqa: F401 +from .core.archives import COMPRESSION_TYPES, extract, InvalidArchive, InvalidContentType # noqa: F401 from .core import dr # noqa: F401 from .core.context import ClusterArchiveContext, HostContext, HostArchiveContext, SerializedArchiveContext # noqa: F401 from .core.dr import SkipComponent # noqa: F401 @@ -260,17 +260,25 @@ broker = dr.Broker() - if formatter: - formatter.preprocess(broker) - broker = _run(broker, graph, root, context=context, inventory=inventory) - formatter.postprocess(broker) - elif print_component: - broker = _run(broker, graph, root, context=context, inventory=inventory) - broker.print_component(print_component) - else: - broker = _run(broker, graph, root, context=context, inventory=inventory) - - return broker + try: + if formatter: + formatter.preprocess(broker) + broker = _run(broker, graph, root, context=context, inventory=inventory) + formatter.postprocess(broker) + elif print_component: + broker = _run(broker, graph, root, context=context, inventory=inventory) + broker.print_component(print_component) + else: + broker = _run(broker, graph, root, context=context, inventory=inventory) + + return broker + except (InvalidContentType, InvalidArchive): + if args and args.archive: + path = args.archive + msg = "Invalid directory or archive. Did you mean to pass -p {p}?" + log.error(msg.format(p=path)) + else: + raise def main():
{"golden_diff": "diff --git a/insights/__init__.py b/insights/__init__.py\n--- a/insights/__init__.py\n+++ b/insights/__init__.py\n@@ -28,7 +28,7 @@\n from .core import YAMLParser, JSONParser, XMLParser, CommandParser # noqa: F401\n from .core import AttributeDict # noqa: F401\n from .core import Syslog # noqa: F401\n-from .core.archives import COMPRESSION_TYPES, extract # noqa: F401\n+from .core.archives import COMPRESSION_TYPES, extract, InvalidArchive, InvalidContentType # noqa: F401\n from .core import dr # noqa: F401\n from .core.context import ClusterArchiveContext, HostContext, HostArchiveContext, SerializedArchiveContext # noqa: F401\n from .core.dr import SkipComponent # noqa: F401\n@@ -260,17 +260,25 @@\n \n broker = dr.Broker()\n \n- if formatter:\n- formatter.preprocess(broker)\n- broker = _run(broker, graph, root, context=context, inventory=inventory)\n- formatter.postprocess(broker)\n- elif print_component:\n- broker = _run(broker, graph, root, context=context, inventory=inventory)\n- broker.print_component(print_component)\n- else:\n- broker = _run(broker, graph, root, context=context, inventory=inventory)\n-\n- return broker\n+ try:\n+ if formatter:\n+ formatter.preprocess(broker)\n+ broker = _run(broker, graph, root, context=context, inventory=inventory)\n+ formatter.postprocess(broker)\n+ elif print_component:\n+ broker = _run(broker, graph, root, context=context, inventory=inventory)\n+ broker.print_component(print_component)\n+ else:\n+ broker = _run(broker, graph, root, context=context, inventory=inventory)\n+\n+ return broker\n+ except (InvalidContentType, InvalidArchive):\n+ if args and args.archive:\n+ path = args.archive\n+ msg = \"Invalid directory or archive. Did you mean to pass -p {p}?\"\n+ log.error(msg.format(p=path))\n+ else:\n+ raise\n \n \n def main():\n", "issue": "Exceptions on the command line with invalid arguments in the archive position\nWhen trying to run `python -m insights foo` or `python -m insights existent_but_invalid.py`, an exception is raised all the way to the top, and if the file exists, a partial execution against the current machine happens. We should catch the exception, never partially execute, and provide a simple message for a better CLI experience.\n", "before_files": [{"content": "\"\"\"\nInsights Core is a data collection and analysis framework that is built for\nextensibility and rapid development. It includes a set of reusable components\nfor gathering data in myriad ways and providing a reliable object model for it.\n\n.. 
code-block: python\n\n >>> from insights import run\n >>> from insights.parsers import installed_rpms as rpm\n >>> lower = rpm.Rpm(\"bash-4.4.11-1.fc26\")\n >>> upper = rpm.Rpm(\"bash-4.4.22-1.fc26\")\n >>> results = run(rpm.Installed)\n >>> rpms = results[rpm.Installed]\n >>> rpms.newest(\"bash\")\n \"0:bash-4.4.12-7.fc26\"\n >>> lower <= rpms.newest(\"bash\") < upper\n True\n\"\"\"\nfrom __future__ import print_function\nimport logging\nimport pkgutil\nimport os\nimport sys\nimport yaml\n\nfrom .core import Scannable, LogFileOutput, Parser, IniConfigFile # noqa: F401\nfrom .core import FileListing, LegacyItemAccess, SysconfigOptions # noqa: F401\nfrom .core import YAMLParser, JSONParser, XMLParser, CommandParser # noqa: F401\nfrom .core import AttributeDict # noqa: F401\nfrom .core import Syslog # noqa: F401\nfrom .core.archives import COMPRESSION_TYPES, extract # noqa: F401\nfrom .core import dr # noqa: F401\nfrom .core.context import ClusterArchiveContext, HostContext, HostArchiveContext, SerializedArchiveContext # noqa: F401\nfrom .core.dr import SkipComponent # noqa: F401\nfrom .core.hydration import create_context\nfrom .core.plugins import combiner, fact, metadata, parser, rule # noqa: F401\nfrom .core.plugins import datasource, condition, incident # noqa: F401\nfrom .core.plugins import make_response, make_metadata, make_fingerprint # noqa: F401\nfrom .core.plugins import make_pass, make_fail # noqa: F401\nfrom .core.filters import add_filter, apply_filters, get_filters # noqa: F401\nfrom .core.serde import Hydration\nfrom .formats import get_formatter\nfrom .parsers import get_active_lines # noqa: F401\nfrom .util import defaults # noqa: F401\n\nlog = logging.getLogger(__name__)\n\n\npackage_info = dict((k, None) for k in [\"RELEASE\", \"COMMIT\", \"VERSION\", \"NAME\"])\n\n\nfor name in package_info:\n package_info[name] = pkgutil.get_data(__name__, name).strip().decode(\"utf-8\")\n\n\ndef get_nvr():\n return \"{0}-{1}-{2}\".format(package_info[\"NAME\"],\n package_info[\"VERSION\"],\n package_info[\"RELEASE\"])\n\n\nRULES_STATUS = {}\n\"\"\"\nMapping of dictionaries containing nvr and commitid for each rule repo included\nin this instance\n\n{\"rule_repo_1\": {\"version\": nvr(), \"commit\": sha1}}\n\"\"\"\n\n\ndef add_status(name, nvr, commit):\n \"\"\"\n Rule repositories should call this method in their package __init__ to\n register their version information.\n \"\"\"\n RULES_STATUS[name] = {\"version\": nvr, \"commit\": commit}\n\n\ndef process_dir(broker, root, graph, context, inventory=None):\n ctx = create_context(root, context)\n log.debug(\"Processing %s with %s\" % (root, ctx))\n\n if isinstance(ctx, ClusterArchiveContext):\n from .core.cluster import process_cluster\n archives = [f for f in ctx.all_files if f.endswith(COMPRESSION_TYPES)]\n return process_cluster(archives, broker=broker, inventory=inventory)\n\n broker[ctx.__class__] = ctx\n if isinstance(ctx, SerializedArchiveContext):\n h = Hydration(ctx.root)\n broker = h.hydrate(broker=broker)\n broker = dr.run(graph, broker=broker)\n return broker\n\n\ndef _run(broker, graph=None, root=None, context=None, inventory=None):\n \"\"\"\n run is a general interface that is meant for stand alone scripts to use\n when executing insights components.\n\n Args:\n root (str): None will causes a host collection in which command and\n file specs are run. 
A directory or archive path will cause\n collection from the directory or archive, and only file type specs\n or those that depend on `insights.core.context.HostArchiveContext`\n will execute.\n component (function or class): The component to execute. Will only execute\n the component and its dependency graph. If None, all components with\n met dependencies will execute.\n\n Returns:\n broker: object containing the result of the evaluation.\n \"\"\"\n\n if not root:\n context = context or HostContext\n broker[context] = context()\n return dr.run(graph, broker=broker)\n\n if os.path.isdir(root):\n return process_dir(broker, root, graph, context, inventory=inventory)\n else:\n with extract(root) as ex:\n return process_dir(broker, ex.tmp_dir, graph, context, inventory=inventory)\n\n\ndef apply_configs(configs):\n \"\"\"\n Configures components. They can be enabled or disabled, have timeouts set\n if applicable, and have metadata customized. Valid keys are name, enabled,\n metadata, and timeout.\n\n Args:\n configs (list): a list of dictionaries with the following keys:\n name, enabled, metadata, and timeout. All keys are optional except\n name.\n\n name is the prefix or exact name of any loaded component. Any\n component starting with name will have the associated configuration\n applied.\n\n enabled is whether the matching components will execute even if\n their dependencies are met. Defaults to True.\n\n timeout sets the class level timeout attribute of any component so\n long as the attribute already exists.\n\n metadata is any dictionary that you want to attach to the\n component. The dictionary can be retrieved by the component at\n runtime.\n \"\"\"\n delegate_keys = sorted(dr.DELEGATES, key=dr.get_name)\n for comp_cfg in configs:\n name = comp_cfg[\"name\"]\n for c in delegate_keys:\n delegate = dr.DELEGATES[c]\n cname = dr.get_name(c)\n if cname.startswith(name):\n dr.ENABLED[c] = comp_cfg.get(\"enabled\", True)\n delegate.metadata.update(comp_cfg.get(\"metadata\", {}))\n delegate.tags = set(comp_cfg.get(\"tags\", delegate.tags))\n for k, v in delegate.metadata.items():\n if hasattr(c, k):\n setattr(c, k, v)\n if hasattr(c, \"timeout\"):\n c.timeout = comp_cfg.get(\"timeout\", c.timeout)\n if cname == name:\n break\n\n\ndef _load_context(path):\n if path is None:\n return\n\n if \".\" not in path:\n path = \".\".join([\"insights.core.context\", path])\n return dr.get_component(path)\n\n\ndef run(component=None, root=None, print_summary=False,\n context=None, inventory=None, print_component=None):\n\n from .core import dr\n dr.load_components(\"insights.specs.default\")\n dr.load_components(\"insights.specs.insights_archive\")\n dr.load_components(\"insights.specs.sos_archive\")\n dr.load_components(\"insights.specs.jdr_archive\")\n\n args = None\n formatter = None\n if print_summary:\n import argparse\n import logging\n p = argparse.ArgumentParser(add_help=False)\n p.add_argument(\"archive\", nargs=\"?\", help=\"Archive or directory to analyze.\")\n p.add_argument(\"-p\", \"--plugins\", default=\"\", help=\"Comma-separated list without spaces of package(s) or module(s) containing plugins.\")\n p.add_argument(\"-c\", \"--config\", help=\"Configure components.\")\n p.add_argument(\"-i\", \"--inventory\", help=\"Ansible inventory file for cluster analysis.\")\n p.add_argument(\"-v\", \"--verbose\", help=\"Verbose output.\", action=\"store_true\")\n p.add_argument(\"-f\", \"--format\", help=\"Output format.\", default=\"insights.formats.text\")\n p.add_argument(\"-D\", \"--debug\", 
help=\"Verbose debug output.\", action=\"store_true\")\n p.add_argument(\"--context\", help=\"Execution Context. Defaults to HostContext if an archive isn't passed.\")\n\n class Args(object):\n pass\n\n args = Args()\n p.parse_known_args(namespace=args)\n p = argparse.ArgumentParser(parents=[p])\n args.format = \"insights.formats._json\" if args.format == \"json\" else args.format\n args.format = \"insights.formats._yaml\" if args.format == \"yaml\" else args.format\n fmt = args.format if \".\" in args.format else \"insights.formats.\" + args.format\n Formatter = dr.get_component(fmt)\n if not Formatter:\n dr.load_components(fmt, continue_on_error=False)\n Formatter = get_formatter(fmt)\n Formatter.configure(p)\n p.parse_args(namespace=args)\n formatter = Formatter(args)\n\n logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO if args.verbose else logging.ERROR)\n context = _load_context(args.context) or context\n inventory = args.inventory\n\n root = args.archive or root\n if root:\n root = os.path.realpath(root)\n\n plugins = []\n if args.plugins:\n for path in args.plugins.split(\",\"):\n path = path.strip()\n if path.endswith(\".py\"):\n path, _ = os.path.splitext(path)\n path = path.rstrip(\"/\").replace(\"/\", \".\")\n plugins.append(path)\n\n for p in plugins:\n dr.load_components(p, continue_on_error=False)\n\n if args.config:\n with open(args.config) as f:\n apply_configs(yaml.safe_load(f))\n\n if component is None:\n component = []\n plugins = tuple(plugins)\n for c in dr.DELEGATES:\n if c.__module__.startswith(plugins):\n component.append(c)\n\n if component:\n if not isinstance(component, (list, set)):\n component = [component]\n graph = {}\n for c in component:\n graph.update(dr.get_dependency_graph(c))\n else:\n graph = dr.COMPONENTS[dr.GROUPS.single]\n\n broker = dr.Broker()\n\n if formatter:\n formatter.preprocess(broker)\n broker = _run(broker, graph, root, context=context, inventory=inventory)\n formatter.postprocess(broker)\n elif print_component:\n broker = _run(broker, graph, root, context=context, inventory=inventory)\n broker.print_component(print_component)\n else:\n broker = _run(broker, graph, root, context=context, inventory=inventory)\n\n return broker\n\n\ndef main():\n if \"\" not in sys.path:\n sys.path.insert(0, \"\")\n run(print_summary=True)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "insights/__init__.py"}], "after_files": [{"content": "\"\"\"\nInsights Core is a data collection and analysis framework that is built for\nextensibility and rapid development. It includes a set of reusable components\nfor gathering data in myriad ways and providing a reliable object model for it.\n\n.. 
code-block: python\n\n >>> from insights import run\n >>> from insights.parsers import installed_rpms as rpm\n >>> lower = rpm.Rpm(\"bash-4.4.11-1.fc26\")\n >>> upper = rpm.Rpm(\"bash-4.4.22-1.fc26\")\n >>> results = run(rpm.Installed)\n >>> rpms = results[rpm.Installed]\n >>> rpms.newest(\"bash\")\n \"0:bash-4.4.12-7.fc26\"\n >>> lower <= rpms.newest(\"bash\") < upper\n True\n\"\"\"\nfrom __future__ import print_function\nimport logging\nimport pkgutil\nimport os\nimport sys\nimport yaml\n\nfrom .core import Scannable, LogFileOutput, Parser, IniConfigFile # noqa: F401\nfrom .core import FileListing, LegacyItemAccess, SysconfigOptions # noqa: F401\nfrom .core import YAMLParser, JSONParser, XMLParser, CommandParser # noqa: F401\nfrom .core import AttributeDict # noqa: F401\nfrom .core import Syslog # noqa: F401\nfrom .core.archives import COMPRESSION_TYPES, extract, InvalidArchive, InvalidContentType # noqa: F401\nfrom .core import dr # noqa: F401\nfrom .core.context import ClusterArchiveContext, HostContext, HostArchiveContext, SerializedArchiveContext # noqa: F401\nfrom .core.dr import SkipComponent # noqa: F401\nfrom .core.hydration import create_context\nfrom .core.plugins import combiner, fact, metadata, parser, rule # noqa: F401\nfrom .core.plugins import datasource, condition, incident # noqa: F401\nfrom .core.plugins import make_response, make_metadata, make_fingerprint # noqa: F401\nfrom .core.plugins import make_pass, make_fail # noqa: F401\nfrom .core.filters import add_filter, apply_filters, get_filters # noqa: F401\nfrom .core.serde import Hydration\nfrom .formats import get_formatter\nfrom .parsers import get_active_lines # noqa: F401\nfrom .util import defaults # noqa: F401\n\nlog = logging.getLogger(__name__)\n\n\npackage_info = dict((k, None) for k in [\"RELEASE\", \"COMMIT\", \"VERSION\", \"NAME\"])\n\n\nfor name in package_info:\n package_info[name] = pkgutil.get_data(__name__, name).strip().decode(\"utf-8\")\n\n\ndef get_nvr():\n return \"{0}-{1}-{2}\".format(package_info[\"NAME\"],\n package_info[\"VERSION\"],\n package_info[\"RELEASE\"])\n\n\nRULES_STATUS = {}\n\"\"\"\nMapping of dictionaries containing nvr and commitid for each rule repo included\nin this instance\n\n{\"rule_repo_1\": {\"version\": nvr(), \"commit\": sha1}}\n\"\"\"\n\n\ndef add_status(name, nvr, commit):\n \"\"\"\n Rule repositories should call this method in their package __init__ to\n register their version information.\n \"\"\"\n RULES_STATUS[name] = {\"version\": nvr, \"commit\": commit}\n\n\ndef process_dir(broker, root, graph, context, inventory=None):\n ctx = create_context(root, context)\n log.debug(\"Processing %s with %s\" % (root, ctx))\n\n if isinstance(ctx, ClusterArchiveContext):\n from .core.cluster import process_cluster\n archives = [f for f in ctx.all_files if f.endswith(COMPRESSION_TYPES)]\n return process_cluster(archives, broker=broker, inventory=inventory)\n\n broker[ctx.__class__] = ctx\n if isinstance(ctx, SerializedArchiveContext):\n h = Hydration(ctx.root)\n broker = h.hydrate(broker=broker)\n broker = dr.run(graph, broker=broker)\n return broker\n\n\ndef _run(broker, graph=None, root=None, context=None, inventory=None):\n \"\"\"\n run is a general interface that is meant for stand alone scripts to use\n when executing insights components.\n\n Args:\n root (str): None will causes a host collection in which command and\n file specs are run. 
A directory or archive path will cause\n collection from the directory or archive, and only file type specs\n or those that depend on `insights.core.context.HostArchiveContext`\n will execute.\n component (function or class): The component to execute. Will only execute\n the component and its dependency graph. If None, all components with\n met dependencies will execute.\n\n Returns:\n broker: object containing the result of the evaluation.\n \"\"\"\n\n if not root:\n context = context or HostContext\n broker[context] = context()\n return dr.run(graph, broker=broker)\n\n if os.path.isdir(root):\n return process_dir(broker, root, graph, context, inventory=inventory)\n else:\n with extract(root) as ex:\n return process_dir(broker, ex.tmp_dir, graph, context, inventory=inventory)\n\n\ndef apply_configs(configs):\n \"\"\"\n Configures components. They can be enabled or disabled, have timeouts set\n if applicable, and have metadata customized. Valid keys are name, enabled,\n metadata, and timeout.\n\n Args:\n configs (list): a list of dictionaries with the following keys:\n name, enabled, metadata, and timeout. All keys are optional except\n name.\n\n name is the prefix or exact name of any loaded component. Any\n component starting with name will have the associated configuration\n applied.\n\n enabled is whether the matching components will execute even if\n their dependencies are met. Defaults to True.\n\n timeout sets the class level timeout attribute of any component so\n long as the attribute already exists.\n\n metadata is any dictionary that you want to attach to the\n component. The dictionary can be retrieved by the component at\n runtime.\n \"\"\"\n delegate_keys = sorted(dr.DELEGATES, key=dr.get_name)\n for comp_cfg in configs:\n name = comp_cfg[\"name\"]\n for c in delegate_keys:\n delegate = dr.DELEGATES[c]\n cname = dr.get_name(c)\n if cname.startswith(name):\n dr.ENABLED[c] = comp_cfg.get(\"enabled\", True)\n delegate.metadata.update(comp_cfg.get(\"metadata\", {}))\n delegate.tags = set(comp_cfg.get(\"tags\", delegate.tags))\n for k, v in delegate.metadata.items():\n if hasattr(c, k):\n setattr(c, k, v)\n if hasattr(c, \"timeout\"):\n c.timeout = comp_cfg.get(\"timeout\", c.timeout)\n if cname == name:\n break\n\n\ndef _load_context(path):\n if path is None:\n return\n\n if \".\" not in path:\n path = \".\".join([\"insights.core.context\", path])\n return dr.get_component(path)\n\n\ndef run(component=None, root=None, print_summary=False,\n context=None, inventory=None, print_component=None):\n\n from .core import dr\n dr.load_components(\"insights.specs.default\")\n dr.load_components(\"insights.specs.insights_archive\")\n dr.load_components(\"insights.specs.sos_archive\")\n dr.load_components(\"insights.specs.jdr_archive\")\n\n args = None\n formatter = None\n if print_summary:\n import argparse\n import logging\n p = argparse.ArgumentParser(add_help=False)\n p.add_argument(\"archive\", nargs=\"?\", help=\"Archive or directory to analyze.\")\n p.add_argument(\"-p\", \"--plugins\", default=\"\", help=\"Comma-separated list without spaces of package(s) or module(s) containing plugins.\")\n p.add_argument(\"-c\", \"--config\", help=\"Configure components.\")\n p.add_argument(\"-i\", \"--inventory\", help=\"Ansible inventory file for cluster analysis.\")\n p.add_argument(\"-v\", \"--verbose\", help=\"Verbose output.\", action=\"store_true\")\n p.add_argument(\"-f\", \"--format\", help=\"Output format.\", default=\"insights.formats.text\")\n p.add_argument(\"-D\", \"--debug\", 
help=\"Verbose debug output.\", action=\"store_true\")\n p.add_argument(\"--context\", help=\"Execution Context. Defaults to HostContext if an archive isn't passed.\")\n\n class Args(object):\n pass\n\n args = Args()\n p.parse_known_args(namespace=args)\n p = argparse.ArgumentParser(parents=[p])\n args.format = \"insights.formats._json\" if args.format == \"json\" else args.format\n args.format = \"insights.formats._yaml\" if args.format == \"yaml\" else args.format\n fmt = args.format if \".\" in args.format else \"insights.formats.\" + args.format\n Formatter = dr.get_component(fmt)\n if not Formatter:\n dr.load_components(fmt, continue_on_error=False)\n Formatter = get_formatter(fmt)\n Formatter.configure(p)\n p.parse_args(namespace=args)\n formatter = Formatter(args)\n\n logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO if args.verbose else logging.ERROR)\n context = _load_context(args.context) or context\n inventory = args.inventory\n\n root = args.archive or root\n if root:\n root = os.path.realpath(root)\n\n plugins = []\n if args.plugins:\n for path in args.plugins.split(\",\"):\n path = path.strip()\n if path.endswith(\".py\"):\n path, _ = os.path.splitext(path)\n path = path.rstrip(\"/\").replace(\"/\", \".\")\n plugins.append(path)\n\n for p in plugins:\n dr.load_components(p, continue_on_error=False)\n\n if args.config:\n with open(args.config) as f:\n apply_configs(yaml.safe_load(f))\n\n if component is None:\n component = []\n plugins = tuple(plugins)\n for c in dr.DELEGATES:\n if c.__module__.startswith(plugins):\n component.append(c)\n\n if component:\n if not isinstance(component, (list, set)):\n component = [component]\n graph = {}\n for c in component:\n graph.update(dr.get_dependency_graph(c))\n else:\n graph = dr.COMPONENTS[dr.GROUPS.single]\n\n broker = dr.Broker()\n\n try:\n if formatter:\n formatter.preprocess(broker)\n broker = _run(broker, graph, root, context=context, inventory=inventory)\n formatter.postprocess(broker)\n elif print_component:\n broker = _run(broker, graph, root, context=context, inventory=inventory)\n broker.print_component(print_component)\n else:\n broker = _run(broker, graph, root, context=context, inventory=inventory)\n\n return broker\n except (InvalidContentType, InvalidArchive):\n if args and args.archive:\n path = args.archive\n msg = \"Invalid directory or archive. Did you mean to pass -p {p}?\"\n log.error(msg.format(p=path))\n else:\n raise\n\n\ndef main():\n if \"\" not in sys.path:\n sys.path.insert(0, \"\")\n run(print_summary=True)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "insights/__init__.py"}]}
3,478
517
gh_patches_debug_37383
rasdani/github-patches
git_diff
pyinstaller__pyinstaller-3568
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Python3.4 PyQt5 QML application requires environment variables With the PyQt5 fixes merged https://github.com/pyinstaller/pyinstaller/pull/3439 I decided to try creating an executable that I have been having trouble with. https://github.com/pyinstaller/pyinstaller/pull/3439#issuecomment-379064155 This is the source code https://github.com/Siecje/qml-testing/tree/PyInstaller I'm using - Windows 7 32-bit - Qt 5.10.1 - PyQt5 compiled from source - Python 3.4.4 - pip install https://github.com/pyinstaller/pyinstaller/archive/develop.zip When I run the .exe I get an error ``` QWindowsEGLStaticContext::create: Failed to load and resolve libEGL function Failed to load opengl32sw.dll (The specified module could not be found.) Failed to load and resolve WGL/OpenGL functions Failed to create OpenGL context for format QsurfaceFormat(version 2.0, options QFlags<QSurfaceFormat::FormatOption>(), depthBufferSize 24, ...<snip> This is most likely caused by not having the necessary graphics drivers installed. Install a driver providing OpenGL 2.0 or higher, or, if this is not possible, make sure the ANGLE Open GL ES 2.0 emulation libraries (libEGL.dll, libLESv2.dll and d3dcompiler_*.dll) are available in the application executabl's directory or in a location listed in PATH. ``` To run the application I can copy these four .dlls into the `dist\main\` directory. - libEGL.dll - libGLESv2.dll - d3dcompiler_47.dll - opengl32sw.dll When I run it I get Command Prompt window with this output. ``` QWindowsWGLStaticContext::create: Could not initialize EGL display: error 0x3001 QWindowsWGLStaticContext::create: When using ANGLE, check if d3dcompiler_4x.dll is available. ``` Instead of copying those .dll files I can add the Qt bin directory to my PATH. ``` set PATH=%PATH%;C:\Qt\5.10.1\msvc2015\bin call main.exe QWindowsWGLStaticContext::create: Could not initialize EGL display: error 0x3001 QWindowsWGLStaticContext::create: When using ANGLE, check if d3dcompiler_4x.dll is available. ``` When I copy the `dist\main\` to another computer (Windows 10). I have to set two environment variables before the application will work. ``` set QT_QPA_PLATFORM_PLUGIN_PATH=%exeDir%\PyQt5\Qt\plugins\platforms set QML2_IMPORT_PATH=%exeDir%\PyQt5\Qt\qml ``` There are no error messages on the Windows 10 computer with these two environment variables set. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `PyInstaller/hooks/hook-PyQt5.py` Content: ``` 1 #----------------------------------------------------------------------------- 2 # Copyright (c) 2005-2018, PyInstaller Development Team. 3 # 4 # Distributed under the terms of the GNU General Public License with exception 5 # for distributing bootloader. 6 # 7 # The full license is in the file COPYING.txt, distributed with this software. 8 #----------------------------------------------------------------------------- 9 import os 10 11 from PyInstaller.utils.hooks import pyqt5_library_info, collect_system_data_files 12 13 hiddenimports = ['sip'] 14 15 # Collect the ``qt.conf`` file. 16 datas = [x for x in 17 collect_system_data_files(pyqt5_library_info.location['PrefixPath'], 18 'PyQt5') 19 if os.path.basename(x[0]) == 'qt.conf'] 20 21 # Include ICU files, if they exist. See the "Deployment approach" section in 22 # ``PyInstaller/utils/hooks/qt.py``. 
23 [(os.path.join(pyqt5_library_info.location['BinariesPath'], dll), 24 os.path.join('PyQt5', 'Qt', 'bin', dll)) 25 for dll in ('icudt??.dll', 'icuin??.dll', 'icuuc??.dll')] 26 27 # TODO: Include software rendering for OpenGL. See the "Deployment approach". However, because the standard PyQt5 wheel `doesn't include <https://www.riverbankcomputing.com/pipermail/pyqt/2018-June/040387.html>`_ ``d3dcompiler_XX.dll``, this produces failures. When the wheel is updated, this code can be uncommented. 28 ##binaries = [] 29 ##for dll in ('libEGL.dll', 'libGLESv2.dll', 'd3dcompiler_??.dll', 'opengl32sw.dll'): 30 ## dll_path = os.path.join(pyqt5_library_info.location['BinariesPath'], dll) 31 ## # Only add files if they exist. 32 ## if glob(dll_path): 33 ## binaries += [(dll_path, os.path.join('PyQt5', 'Qt', 'bin', dll))] 34 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/PyInstaller/hooks/hook-PyQt5.py b/PyInstaller/hooks/hook-PyQt5.py --- a/PyInstaller/hooks/hook-PyQt5.py +++ b/PyInstaller/hooks/hook-PyQt5.py @@ -6,6 +6,7 @@ # # The full license is in the file COPYING.txt, distributed with this software. #----------------------------------------------------------------------------- +import glob import os from PyInstaller.utils.hooks import pyqt5_library_info, collect_system_data_files @@ -18,16 +19,42 @@ 'PyQt5') if os.path.basename(x[0]) == 'qt.conf'] -# Include ICU files, if they exist. See the "Deployment approach" section in -# ``PyInstaller/utils/hooks/qt.py``. -[(os.path.join(pyqt5_library_info.location['BinariesPath'], dll), - os.path.join('PyQt5', 'Qt', 'bin', dll)) - for dll in ('icudt??.dll', 'icuin??.dll', 'icuuc??.dll')] - -# TODO: Include software rendering for OpenGL. See the "Deployment approach". However, because the standard PyQt5 wheel `doesn't include <https://www.riverbankcomputing.com/pipermail/pyqt/2018-June/040387.html>`_ ``d3dcompiler_XX.dll``, this produces failures. When the wheel is updated, this code can be uncommented. -##binaries = [] -##for dll in ('libEGL.dll', 'libGLESv2.dll', 'd3dcompiler_??.dll', 'opengl32sw.dll'): -## dll_path = os.path.join(pyqt5_library_info.location['BinariesPath'], dll) -## # Only add files if they exist. -## if glob(dll_path): -## binaries += [(dll_path, os.path.join('PyQt5', 'Qt', 'bin', dll))] + +def find_all_or_none(globs_to_include, num_files): + """ + globs_to_include is a list of file name globs + If the number of found files does not match num_files + then no files will be included. + """ + # TODO: This function is required because CI is failing to include libEGL + # The error in AppVeyor is: + # [2312] LOADER: Running pyi_lib_PyQt5-uic.py + # Failed to load libEGL (Access is denied.) + # More info: https://github.com/pyinstaller/pyinstaller/pull/3568 + # Since the PyQt5 wheels do not include d3dcompiler_4?.dll, libEGL.dll and + # libGLESv2.dll will not be included for PyQt5 builds during CI. + to_include = [] + for dll in globs_to_include: + dll_path = os.path.join(pyqt5_library_info.location['BinariesPath'], + dll) + dll_file_paths = glob.glob(dll_path) + for dll_file_path in dll_file_paths: + file_name = os.path.basename(dll_file_path) + dst_dll_path = os.path.join('PyQt5', 'Qt', 'bin', file_name) + to_include.append((dll_file_path, dst_dll_path)) + if len(to_include) == num_files: + return to_include + return [] + + +binaries = [] +angle_files = ['libEGL.dll', 'libGLESv2.dll', 'd3dcompiler_??.dll'] +binaries += find_all_or_none(angle_files, 3) + +opengl_software_renderer = ['opengl32sw.dll'] +binaries += find_all_or_none(opengl_software_renderer, 1) + +# Include ICU files, if they exist. +# See the "Deployment approach" section in ``PyInstaller/utils/hooks/qt.py``. +icu_files = ['icudt??.dll', 'icuin??.dll', 'icuuc??.dll'] +binaries += find_all_or_none(icu_files, 3)
{"golden_diff": "diff --git a/PyInstaller/hooks/hook-PyQt5.py b/PyInstaller/hooks/hook-PyQt5.py\n--- a/PyInstaller/hooks/hook-PyQt5.py\n+++ b/PyInstaller/hooks/hook-PyQt5.py\n@@ -6,6 +6,7 @@\n #\n # The full license is in the file COPYING.txt, distributed with this software.\n #-----------------------------------------------------------------------------\n+import glob\n import os\n \n from PyInstaller.utils.hooks import pyqt5_library_info, collect_system_data_files\n@@ -18,16 +19,42 @@\n 'PyQt5')\n if os.path.basename(x[0]) == 'qt.conf']\n \n-# Include ICU files, if they exist. See the \"Deployment approach\" section in\n-# ``PyInstaller/utils/hooks/qt.py``.\n-[(os.path.join(pyqt5_library_info.location['BinariesPath'], dll),\n- os.path.join('PyQt5', 'Qt', 'bin', dll))\n- for dll in ('icudt??.dll', 'icuin??.dll', 'icuuc??.dll')]\n-\n-# TODO: Include software rendering for OpenGL. See the \"Deployment approach\". However, because the standard PyQt5 wheel `doesn't include <https://www.riverbankcomputing.com/pipermail/pyqt/2018-June/040387.html>`_ ``d3dcompiler_XX.dll``, this produces failures. When the wheel is updated, this code can be uncommented.\n-##binaries = []\n-##for dll in ('libEGL.dll', 'libGLESv2.dll', 'd3dcompiler_??.dll', 'opengl32sw.dll'):\n-## dll_path = os.path.join(pyqt5_library_info.location['BinariesPath'], dll)\n-## # Only add files if they exist.\n-## if glob(dll_path):\n-## binaries += [(dll_path, os.path.join('PyQt5', 'Qt', 'bin', dll))]\n+\n+def find_all_or_none(globs_to_include, num_files):\n+ \"\"\"\n+ globs_to_include is a list of file name globs\n+ If the number of found files does not match num_files\n+ then no files will be included.\n+ \"\"\"\n+ # TODO: This function is required because CI is failing to include libEGL\n+ # The error in AppVeyor is:\n+ # [2312] LOADER: Running pyi_lib_PyQt5-uic.py\n+ # Failed to load libEGL (Access is denied.)\n+ # More info: https://github.com/pyinstaller/pyinstaller/pull/3568\n+ # Since the PyQt5 wheels do not include d3dcompiler_4?.dll, libEGL.dll and\n+ # libGLESv2.dll will not be included for PyQt5 builds during CI.\n+ to_include = []\n+ for dll in globs_to_include:\n+ dll_path = os.path.join(pyqt5_library_info.location['BinariesPath'],\n+ dll)\n+ dll_file_paths = glob.glob(dll_path)\n+ for dll_file_path in dll_file_paths:\n+ file_name = os.path.basename(dll_file_path)\n+ dst_dll_path = os.path.join('PyQt5', 'Qt', 'bin', file_name)\n+ to_include.append((dll_file_path, dst_dll_path))\n+ if len(to_include) == num_files:\n+ return to_include\n+ return []\n+\n+\n+binaries = []\n+angle_files = ['libEGL.dll', 'libGLESv2.dll', 'd3dcompiler_??.dll']\n+binaries += find_all_or_none(angle_files, 3)\n+\n+opengl_software_renderer = ['opengl32sw.dll']\n+binaries += find_all_or_none(opengl_software_renderer, 1)\n+\n+# Include ICU files, if they exist.\n+# See the \"Deployment approach\" section in ``PyInstaller/utils/hooks/qt.py``.\n+icu_files = ['icudt??.dll', 'icuin??.dll', 'icuuc??.dll']\n+binaries += find_all_or_none(icu_files, 3)\n", "issue": "Python3.4 PyQt5 QML application requires environment variables\nWith the PyQt5 fixes merged https://github.com/pyinstaller/pyinstaller/pull/3439 I decided to try creating an executable that I have been having trouble with. 
https://github.com/pyinstaller/pyinstaller/pull/3439#issuecomment-379064155\r\n\r\nThis is the source code https://github.com/Siecje/qml-testing/tree/PyInstaller\r\n\r\nI'm using\r\n- Windows 7 32-bit\r\n- Qt 5.10.1\r\n- PyQt5 compiled from source\r\n- Python 3.4.4\r\n- pip install https://github.com/pyinstaller/pyinstaller/archive/develop.zip\r\nWhen I run the .exe I get an error\r\n\r\n```\r\nQWindowsEGLStaticContext::create: Failed to load and resolve libEGL function\r\nFailed to load opengl32sw.dll (The specified module could not be found.)\r\nFailed to load and resolve WGL/OpenGL functions\r\nFailed to create OpenGL context for format QsurfaceFormat(version 2.0, options QFlags<QSurfaceFormat::FormatOption>(), depthBufferSize 24, ...<snip>\r\nThis is most likely caused by not having the necessary graphics drivers installed.\r\n\r\nInstall a driver providing OpenGL 2.0 or higher, or, if this is not possible, make sure the ANGLE Open GL ES 2.0 emulation libraries (libEGL.dll, libLESv2.dll and d3dcompiler_*.dll) are available in the application executabl's directory or in a location listed in PATH.\r\n```\r\n\r\nTo run the application I can copy these four .dlls into the `dist\\main\\` directory.\r\n\r\n- libEGL.dll\r\n- libGLESv2.dll\r\n- d3dcompiler_47.dll\r\n- opengl32sw.dll\r\n\r\nWhen I run it I get Command Prompt window with this output.\r\n```\r\nQWindowsWGLStaticContext::create: Could not initialize EGL display: error 0x3001\r\nQWindowsWGLStaticContext::create: When using ANGLE, check if d3dcompiler_4x.dll is available.\r\n```\r\nInstead of copying those .dll files I can add the Qt bin directory to my PATH.\r\n\r\n```\r\nset PATH=%PATH%;C:\\Qt\\5.10.1\\msvc2015\\bin\r\ncall main.exe\r\nQWindowsWGLStaticContext::create: Could not initialize EGL display: error 0x3001\r\nQWindowsWGLStaticContext::create: When using ANGLE, check if d3dcompiler_4x.dll is available.\r\n```\r\n\r\nWhen I copy the `dist\\main\\` to another computer (Windows 10).\r\n\r\nI have to set two environment variables before the application will work.\r\n\r\n```\r\nset QT_QPA_PLATFORM_PLUGIN_PATH=%exeDir%\\PyQt5\\Qt\\plugins\\platforms\r\nset QML2_IMPORT_PATH=%exeDir%\\PyQt5\\Qt\\qml\r\n```\r\n\r\nThere are no error messages on the Windows 10 computer with these two environment variables set.\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2018, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\nimport os\n\nfrom PyInstaller.utils.hooks import pyqt5_library_info, collect_system_data_files\n\nhiddenimports = ['sip']\n\n# Collect the ``qt.conf`` file.\ndatas = [x for x in\n collect_system_data_files(pyqt5_library_info.location['PrefixPath'],\n 'PyQt5')\n if os.path.basename(x[0]) == 'qt.conf']\n\n# Include ICU files, if they exist. See the \"Deployment approach\" section in\n# ``PyInstaller/utils/hooks/qt.py``.\n[(os.path.join(pyqt5_library_info.location['BinariesPath'], dll),\n os.path.join('PyQt5', 'Qt', 'bin', dll))\n for dll in ('icudt??.dll', 'icuin??.dll', 'icuuc??.dll')]\n\n# TODO: Include software rendering for OpenGL. See the \"Deployment approach\". 
However, because the standard PyQt5 wheel `doesn't include <https://www.riverbankcomputing.com/pipermail/pyqt/2018-June/040387.html>`_ ``d3dcompiler_XX.dll``, this produces failures. When the wheel is updated, this code can be uncommented.\n##binaries = []\n##for dll in ('libEGL.dll', 'libGLESv2.dll', 'd3dcompiler_??.dll', 'opengl32sw.dll'):\n## dll_path = os.path.join(pyqt5_library_info.location['BinariesPath'], dll)\n## # Only add files if they exist.\n## if glob(dll_path):\n## binaries += [(dll_path, os.path.join('PyQt5', 'Qt', 'bin', dll))]\n", "path": "PyInstaller/hooks/hook-PyQt5.py"}], "after_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2018, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\nimport glob\nimport os\n\nfrom PyInstaller.utils.hooks import pyqt5_library_info, collect_system_data_files\n\nhiddenimports = ['sip']\n\n# Collect the ``qt.conf`` file.\ndatas = [x for x in\n collect_system_data_files(pyqt5_library_info.location['PrefixPath'],\n 'PyQt5')\n if os.path.basename(x[0]) == 'qt.conf']\n\n\ndef find_all_or_none(globs_to_include, num_files):\n \"\"\"\n globs_to_include is a list of file name globs\n If the number of found files does not match num_files\n then no files will be included.\n \"\"\"\n # TODO: This function is required because CI is failing to include libEGL\n # The error in AppVeyor is:\n # [2312] LOADER: Running pyi_lib_PyQt5-uic.py\n # Failed to load libEGL (Access is denied.)\n # More info: https://github.com/pyinstaller/pyinstaller/pull/3568\n # Since the PyQt5 wheels do not include d3dcompiler_4?.dll, libEGL.dll and\n # libGLESv2.dll will not be included for PyQt5 builds during CI.\n to_include = []\n for dll in globs_to_include:\n dll_path = os.path.join(pyqt5_library_info.location['BinariesPath'],\n dll)\n dll_file_paths = glob.glob(dll_path)\n for dll_file_path in dll_file_paths:\n file_name = os.path.basename(dll_file_path)\n dst_dll_path = os.path.join('PyQt5', 'Qt', 'bin', file_name)\n to_include.append((dll_file_path, dst_dll_path))\n if len(to_include) == num_files:\n return to_include\n return []\n\n\nbinaries = []\nangle_files = ['libEGL.dll', 'libGLESv2.dll', 'd3dcompiler_??.dll']\nbinaries += find_all_or_none(angle_files, 3)\n\nopengl_software_renderer = ['opengl32sw.dll']\nbinaries += find_all_or_none(opengl_software_renderer, 1)\n\n# Include ICU files, if they exist.\n# See the \"Deployment approach\" section in ``PyInstaller/utils/hooks/qt.py``.\nicu_files = ['icudt??.dll', 'icuin??.dll', 'icuuc??.dll']\nbinaries += find_all_or_none(icu_files, 3)\n", "path": "PyInstaller/hooks/hook-PyQt5.py"}]}
1,408
918
gh_patches_debug_6456
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-contrib-1427
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [opentelemetry-instrumentation-redis] Stop sending db.name argument or rename it to redis[0-15] Hi! I've been recently playing with OpenTelemetry for Python (Flask) application and noticed that for Redis **db.name** argument is send to OpenTelemetry collector which seems to be a number of database (integer). This seems to be incorrect as in Redis there is no db name concept (databases are numbered from 0 to 15). Technically, it shouldn't be any problem with that but it may break some OpenTelemetry backends which expects a real DB name not a number. I have done some additional debugging and found that for node.js and .NET **db.name** argument is not send to collector. Shouldn't we have some consistency here? **Describe your environment** $ python --version Python 3.8.13 $ $ pip list | grep 'opentelemetry\|redis' opentelemetry-api 1.13.0 opentelemetry-distro 0.34b0 opentelemetry-exporter-otlp 1.13.0 opentelemetry-exporter-otlp-proto-grpc 1.13.0 opentelemetry-exporter-otlp-proto-http 1.13.0 opentelemetry-instrumentation 0.34b0 opentelemetry-instrumentation-aws-lambda 0.34b0 opentelemetry-instrumentation-dbapi 0.34b0 opentelemetry-instrumentation-flask 0.34b0 opentelemetry-instrumentation-grpc 0.34b0 opentelemetry-instrumentation-jinja2 0.34b0 opentelemetry-instrumentation-logging 0.34b0 opentelemetry-instrumentation-redis 0.34b0 opentelemetry-instrumentation-requests 0.34b0 opentelemetry-instrumentation-sqlite3 0.34b0 opentelemetry-instrumentation-urllib 0.34b0 opentelemetry-instrumentation-urllib3 0.34b0 opentelemetry-instrumentation-wsgi 0.34b0 opentelemetry-propagator-aws-xray 1.0.1 opentelemetry-proto 1.13.0 opentelemetry-sdk 1.13.0 opentelemetry-semantic-conventions 0.34b0 opentelemetry-util-http 0.34b0 redis 4.3.4 **Steps to reproduce** Any Python app with connection to Redis will show this behavior. **What is the expected behavior?** Stop sending db.name argument or rename it to redis[0-15] **What is the actual behavior?** The db.name argument is send as a number of Redis database. **Additional context** Please see below some logs from OpenTelemetry collector for python and node.js to see a difference. 
===> PYTHON EXAMPLE ScopeSpans #0 ScopeSpans SchemaURL: InstrumentationScope opentelemetry.instrumentation.redis 0.34b0 Span #0 Trace ID : 4bc10b43ab0a0d3042f38ebbb32baef1 Parent ID : 79e2aed933827894 ID : 22f4fba607e73a33 Name : HMSET Kind : SPAN_KIND_CLIENT Start time : 2022-10-21 09:40:50.606962566 +0000 UTC End time : 2022-10-21 09:40:50.609568624 +0000 UTC Status code : STATUS_CODE_UNSET Status message : Attributes: -> db.statement: STRING(HMSET person1-hash name jane age 20) -> db.system: STRING(redis) -> db.name: INT(0) -> db.redis.database_index: INT(0) -> net.peer.name: STRING(redis-svc) -> net.peer.port: STRING(6379) -> net.transport: STRING(ip_tcp) -> db.redis.args_length: INT(6) ===> NODEJS EXAMPLE ScopeSpans #0 ScopeSpans SchemaURL: InstrumentationScope @opentelemetry/instrumentation-redis-4 0.33.0 Span #0 Trace ID : 21a071f4d1d7c860ecb758398d304f60 Parent ID : 1bbf5328c079ceda ID : 13dc47b2521f7f82 Name : redis-GET Kind : SPAN_KIND_CLIENT Start time : 2022-10-21 09:47:16.9553723 +0000 UTC End time : 2022-10-21 09:47:16.957585 +0000 UTC Status code : STATUS_CODE_UNSET Status message : Attributes: -> db.system: STRING(redis) -> net.peer.name: STRING(redis-svc) -> net.peer.port: INT(6379) -> db.statement: STRING(GET) ResourceSpans #4 Resource SchemaURL: Resource labels: -> service.name: STRING(nodejs-redis) -> telemetry.sdk.language: STRING(nodejs) -> telemetry.sdk.name: STRING(opentelemetry) -> telemetry.sdk.version: STRING(0.24.0) I am happy to contribute to it by reviewing the code fix and testing the behavior. @svrnm @sanketmehta28 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/util.py` Content: ``` 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 # 15 """ 16 Some utils used by the redis integration 17 """ 18 from opentelemetry.semconv.trace import ( 19 DbSystemValues, 20 NetTransportValues, 21 SpanAttributes, 22 ) 23 24 25 def _extract_conn_attributes(conn_kwargs): 26 """Transform redis conn info into dict""" 27 attributes = { 28 SpanAttributes.DB_SYSTEM: DbSystemValues.REDIS.value, 29 } 30 db = conn_kwargs.get("db", 0) 31 attributes[SpanAttributes.DB_NAME] = db 32 attributes[SpanAttributes.DB_REDIS_DATABASE_INDEX] = db 33 try: 34 attributes[SpanAttributes.NET_PEER_NAME] = conn_kwargs.get( 35 "host", "localhost" 36 ) 37 attributes[SpanAttributes.NET_PEER_PORT] = conn_kwargs.get( 38 "port", 6379 39 ) 40 attributes[ 41 SpanAttributes.NET_TRANSPORT 42 ] = NetTransportValues.IP_TCP.value 43 except KeyError: 44 attributes[SpanAttributes.NET_PEER_NAME] = conn_kwargs.get("path", "") 45 attributes[ 46 SpanAttributes.NET_TRANSPORT 47 ] = NetTransportValues.UNIX.value 48 49 return attributes 50 51 52 def _format_command_args(args): 53 """Format command arguments and trim them as needed""" 54 value_max_len = 100 55 value_too_long_mark = "..." 
56 cmd_max_len = 1000 57 length = 0 58 out = [] 59 for arg in args: 60 cmd = str(arg) 61 62 if len(cmd) > value_max_len: 63 cmd = cmd[:value_max_len] + value_too_long_mark 64 65 if length + len(cmd) > cmd_max_len: 66 prefix = cmd[: cmd_max_len - length] 67 out.append(f"{prefix}{value_too_long_mark}") 68 break 69 70 out.append(cmd) 71 length += len(cmd) 72 73 return " ".join(out) 74 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/util.py b/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/util.py --- a/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/util.py +++ b/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/util.py @@ -28,7 +28,6 @@ SpanAttributes.DB_SYSTEM: DbSystemValues.REDIS.value, } db = conn_kwargs.get("db", 0) - attributes[SpanAttributes.DB_NAME] = db attributes[SpanAttributes.DB_REDIS_DATABASE_INDEX] = db try: attributes[SpanAttributes.NET_PEER_NAME] = conn_kwargs.get(
{"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/util.py b/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/util.py\n--- a/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/util.py\n+++ b/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/util.py\n@@ -28,7 +28,6 @@\n SpanAttributes.DB_SYSTEM: DbSystemValues.REDIS.value,\n }\n db = conn_kwargs.get(\"db\", 0)\n- attributes[SpanAttributes.DB_NAME] = db\n attributes[SpanAttributes.DB_REDIS_DATABASE_INDEX] = db\n try:\n attributes[SpanAttributes.NET_PEER_NAME] = conn_kwargs.get(\n", "issue": "[opentelemetry-instrumentation-redis] Stop sending db.name argument or rename it to redis[0-15]\nHi! I've been recently playing with OpenTelemetry for Python (Flask) application and noticed that for Redis **db.name** argument is send to OpenTelemetry collector which seems to be a number of database (integer). This seems to be incorrect as in Redis there is no db name concept (databases are numbered from 0 to 15). Technically, it shouldn't be any problem with that but it may break some OpenTelemetry backends which expects a real DB name not a number. I have done some additional debugging and found that for node.js and .NET **db.name** argument is not send to collector. Shouldn't we have some consistency here?\r\n\r\n**Describe your environment** \r\n\r\n$ python --version\r\nPython 3.8.13\r\n$ \r\n$ pip list | grep 'opentelemetry\\|redis'\r\nopentelemetry-api 1.13.0\r\nopentelemetry-distro 0.34b0\r\nopentelemetry-exporter-otlp 1.13.0\r\nopentelemetry-exporter-otlp-proto-grpc 1.13.0\r\nopentelemetry-exporter-otlp-proto-http 1.13.0\r\nopentelemetry-instrumentation 0.34b0\r\nopentelemetry-instrumentation-aws-lambda 0.34b0\r\nopentelemetry-instrumentation-dbapi 0.34b0\r\nopentelemetry-instrumentation-flask 0.34b0\r\nopentelemetry-instrumentation-grpc 0.34b0\r\nopentelemetry-instrumentation-jinja2 0.34b0\r\nopentelemetry-instrumentation-logging 0.34b0\r\nopentelemetry-instrumentation-redis 0.34b0\r\nopentelemetry-instrumentation-requests 0.34b0\r\nopentelemetry-instrumentation-sqlite3 0.34b0\r\nopentelemetry-instrumentation-urllib 0.34b0\r\nopentelemetry-instrumentation-urllib3 0.34b0\r\nopentelemetry-instrumentation-wsgi 0.34b0\r\nopentelemetry-propagator-aws-xray 1.0.1\r\nopentelemetry-proto 1.13.0\r\nopentelemetry-sdk 1.13.0\r\nopentelemetry-semantic-conventions 0.34b0\r\nopentelemetry-util-http 0.34b0\r\nredis 4.3.4\r\n\r\n**Steps to reproduce**\r\nAny Python app with connection to Redis will show this behavior.\r\n\r\n**What is the expected behavior?**\r\nStop sending db.name argument or rename it to redis[0-15]\r\n\r\n**What is the actual behavior?**\r\nThe db.name argument is send as a number of Redis database.\r\n\r\n**Additional context**\r\n\r\nPlease see below some logs from OpenTelemetry collector for python and node.js to see a difference.\r\n\r\n===> PYTHON EXAMPLE\r\n\r\nScopeSpans #0\r\nScopeSpans SchemaURL: \r\nInstrumentationScope opentelemetry.instrumentation.redis 0.34b0\r\nSpan #0\r\n Trace ID : 4bc10b43ab0a0d3042f38ebbb32baef1\r\n Parent ID : 79e2aed933827894\r\n ID : 22f4fba607e73a33\r\n Name : HMSET\r\n Kind : SPAN_KIND_CLIENT\r\n Start time : 2022-10-21 09:40:50.606962566 +0000 UTC\r\n End time : 2022-10-21 09:40:50.609568624 +0000 UTC\r\n Status code : STATUS_CODE_UNSET\r\n Status message : \r\nAttributes:\r\n -> db.statement: STRING(HMSET 
person1-hash name jane age 20)\r\n -> db.system: STRING(redis)\r\n -> db.name: INT(0)\r\n -> db.redis.database_index: INT(0)\r\n -> net.peer.name: STRING(redis-svc)\r\n -> net.peer.port: STRING(6379)\r\n -> net.transport: STRING(ip_tcp)\r\n -> db.redis.args_length: INT(6)\r\n\r\n\r\n===> NODEJS EXAMPLE\r\n\r\nScopeSpans #0\r\nScopeSpans SchemaURL: \r\nInstrumentationScope @opentelemetry/instrumentation-redis-4 0.33.0\r\nSpan #0\r\n Trace ID : 21a071f4d1d7c860ecb758398d304f60\r\n Parent ID : 1bbf5328c079ceda\r\n ID : 13dc47b2521f7f82\r\n Name : redis-GET\r\n Kind : SPAN_KIND_CLIENT\r\n Start time : 2022-10-21 09:47:16.9553723 +0000 UTC\r\n End time : 2022-10-21 09:47:16.957585 +0000 UTC\r\n Status code : STATUS_CODE_UNSET\r\n Status message : \r\nAttributes:\r\n -> db.system: STRING(redis)\r\n -> net.peer.name: STRING(redis-svc)\r\n -> net.peer.port: INT(6379)\r\n -> db.statement: STRING(GET)\r\nResourceSpans #4\r\nResource SchemaURL: \r\nResource labels:\r\n -> service.name: STRING(nodejs-redis)\r\n -> telemetry.sdk.language: STRING(nodejs)\r\n -> telemetry.sdk.name: STRING(opentelemetry)\r\n -> telemetry.sdk.version: STRING(0.24.0)\r\n \r\nI am happy to contribute to it by reviewing the code fix and testing the behavior.\r\n\r\n@svrnm @sanketmehta28\r\n \n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"\nSome utils used by the redis integration\n\"\"\"\nfrom opentelemetry.semconv.trace import (\n DbSystemValues,\n NetTransportValues,\n SpanAttributes,\n)\n\n\ndef _extract_conn_attributes(conn_kwargs):\n \"\"\"Transform redis conn info into dict\"\"\"\n attributes = {\n SpanAttributes.DB_SYSTEM: DbSystemValues.REDIS.value,\n }\n db = conn_kwargs.get(\"db\", 0)\n attributes[SpanAttributes.DB_NAME] = db\n attributes[SpanAttributes.DB_REDIS_DATABASE_INDEX] = db\n try:\n attributes[SpanAttributes.NET_PEER_NAME] = conn_kwargs.get(\n \"host\", \"localhost\"\n )\n attributes[SpanAttributes.NET_PEER_PORT] = conn_kwargs.get(\n \"port\", 6379\n )\n attributes[\n SpanAttributes.NET_TRANSPORT\n ] = NetTransportValues.IP_TCP.value\n except KeyError:\n attributes[SpanAttributes.NET_PEER_NAME] = conn_kwargs.get(\"path\", \"\")\n attributes[\n SpanAttributes.NET_TRANSPORT\n ] = NetTransportValues.UNIX.value\n\n return attributes\n\n\ndef _format_command_args(args):\n \"\"\"Format command arguments and trim them as needed\"\"\"\n value_max_len = 100\n value_too_long_mark = \"...\"\n cmd_max_len = 1000\n length = 0\n out = []\n for arg in args:\n cmd = str(arg)\n\n if len(cmd) > value_max_len:\n cmd = cmd[:value_max_len] + value_too_long_mark\n\n if length + len(cmd) > cmd_max_len:\n prefix = cmd[: cmd_max_len - length]\n out.append(f\"{prefix}{value_too_long_mark}\")\n break\n\n out.append(cmd)\n length += len(cmd)\n\n return \" \".join(out)\n", "path": "instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/util.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry 
Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"\nSome utils used by the redis integration\n\"\"\"\nfrom opentelemetry.semconv.trace import (\n DbSystemValues,\n NetTransportValues,\n SpanAttributes,\n)\n\n\ndef _extract_conn_attributes(conn_kwargs):\n \"\"\"Transform redis conn info into dict\"\"\"\n attributes = {\n SpanAttributes.DB_SYSTEM: DbSystemValues.REDIS.value,\n }\n db = conn_kwargs.get(\"db\", 0)\n attributes[SpanAttributes.DB_REDIS_DATABASE_INDEX] = db\n try:\n attributes[SpanAttributes.NET_PEER_NAME] = conn_kwargs.get(\n \"host\", \"localhost\"\n )\n attributes[SpanAttributes.NET_PEER_PORT] = conn_kwargs.get(\n \"port\", 6379\n )\n attributes[\n SpanAttributes.NET_TRANSPORT\n ] = NetTransportValues.IP_TCP.value\n except KeyError:\n attributes[SpanAttributes.NET_PEER_NAME] = conn_kwargs.get(\"path\", \"\")\n attributes[\n SpanAttributes.NET_TRANSPORT\n ] = NetTransportValues.UNIX.value\n\n return attributes\n\n\ndef _format_command_args(args):\n \"\"\"Format command arguments and trim them as needed\"\"\"\n value_max_len = 100\n value_too_long_mark = \"...\"\n cmd_max_len = 1000\n length = 0\n out = []\n for arg in args:\n cmd = str(arg)\n\n if len(cmd) > value_max_len:\n cmd = cmd[:value_max_len] + value_too_long_mark\n\n if length + len(cmd) > cmd_max_len:\n prefix = cmd[: cmd_max_len - length]\n out.append(f\"{prefix}{value_too_long_mark}\")\n break\n\n out.append(cmd)\n length += len(cmd)\n\n return \" \".join(out)\n", "path": "instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/util.py"}]}
2,305
182
gh_patches_debug_24813
rasdani/github-patches
git_diff
scrapy__scrapy-2577
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- BrowserLikeContextFactory not available in some conditions While tracing the error that lead to #2555 I tried the workaround [mentioned in the documentation](https://doc.scrapy.org/en/latest/topics/settings.html#downloader-clientcontextfactory) without success. This code lives [incontextfactory.py](https://github.com/scrapy/scrapy/blob/c3411373e8a8ee2786588bdad7be469c69a25e2a/scrapy/core/downloader/contextfactory.py#L63) but was not reachable as the import was failing on my system due to #2555. This file is a large `try/except` block with many potential points of failure and it's likely to trip other users in the future. That said, could this be refactored to provide a fallback for `BrowserLikeContextFactory` or otherwise reduce the scope of the `try/except` to avoid breaking the API? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scrapy/core/downloader/contextfactory.py` Content: ``` 1 from OpenSSL import SSL 2 from twisted.internet.ssl import ClientContextFactory 3 4 try: 5 6 from zope.interface.declarations import implementer 7 8 # the following should be available from Twisted 14.0.0 9 from twisted.internet.ssl import (optionsForClientTLS, 10 CertificateOptions, 11 platformTrust) 12 13 from twisted.web.client import BrowserLikePolicyForHTTPS 14 from twisted.web.iweb import IPolicyForHTTPS 15 16 from scrapy.core.downloader.tls import ScrapyClientTLSOptions, DEFAULT_CIPHERS 17 18 19 @implementer(IPolicyForHTTPS) 20 class ScrapyClientContextFactory(BrowserLikePolicyForHTTPS): 21 """ 22 Non-peer-certificate verifying HTTPS context factory 23 24 Default OpenSSL method is TLS_METHOD (also called SSLv23_METHOD) 25 which allows TLS protocol negotiation 26 27 'A TLS/SSL connection established with [this method] may 28 understand the SSLv3, TLSv1, TLSv1.1 and TLSv1.2 protocols.' 29 """ 30 31 def __init__(self, method=SSL.SSLv23_METHOD, *args, **kwargs): 32 super(ScrapyClientContextFactory, self).__init__(*args, **kwargs) 33 self._ssl_method = method 34 35 def getCertificateOptions(self): 36 # setting verify=True will require you to provide CAs 37 # to verify against; in other words: it's not that simple 38 39 # backward-compatible SSL/TLS method: 40 # 41 # * this will respect `method` attribute in often recommended 42 # `ScrapyClientContextFactory` subclass 43 # (https://github.com/scrapy/scrapy/issues/1429#issuecomment-131782133) 44 # 45 # * getattr() for `_ssl_method` attribute for context factories 46 # not calling super(..., self).__init__ 47 return CertificateOptions(verify=False, 48 method=getattr(self, 'method', 49 getattr(self, '_ssl_method', None)), 50 fixBrokenPeers=True, 51 acceptableCiphers=DEFAULT_CIPHERS) 52 53 # kept for old-style HTTP/1.0 downloader context twisted calls, 54 # e.g. connectSSL() 55 def getContext(self, hostname=None, port=None): 56 return self.getCertificateOptions().getContext() 57 58 def creatorForNetloc(self, hostname, port): 59 return ScrapyClientTLSOptions(hostname.decode("ascii"), self.getContext()) 60 61 62 @implementer(IPolicyForHTTPS) 63 class BrowserLikeContextFactory(ScrapyClientContextFactory): 64 """ 65 Twisted-recommended context factory for web clients. 
66 67 Quoting http://twistedmatrix.com/documents/current/api/twisted.web.client.Agent.html: 68 "The default is to use a BrowserLikePolicyForHTTPS, 69 so unless you have special requirements you can leave this as-is." 70 71 creatorForNetloc() is the same as BrowserLikePolicyForHTTPS 72 except this context factory allows setting the TLS/SSL method to use. 73 74 Default OpenSSL method is TLS_METHOD (also called SSLv23_METHOD) 75 which allows TLS protocol negotiation. 76 """ 77 def creatorForNetloc(self, hostname, port): 78 79 # trustRoot set to platformTrust() will use the platform's root CAs. 80 # 81 # This means that a website like https://www.cacert.org will be rejected 82 # by default, since CAcert.org CA certificate is seldom shipped. 83 return optionsForClientTLS(hostname.decode("ascii"), 84 trustRoot=platformTrust(), 85 extraCertificateOptions={ 86 'method': self._ssl_method, 87 }) 88 89 except ImportError: 90 91 class ScrapyClientContextFactory(ClientContextFactory): 92 "A SSL context factory which is more permissive against SSL bugs." 93 # see https://github.com/scrapy/scrapy/issues/82 94 # and https://github.com/scrapy/scrapy/issues/26 95 # and https://github.com/scrapy/scrapy/issues/981 96 97 def __init__(self, method=SSL.SSLv23_METHOD): 98 self.method = method 99 100 def getContext(self, hostname=None, port=None): 101 ctx = ClientContextFactory.getContext(self) 102 # Enable all workarounds to SSL bugs as documented by 103 # http://www.openssl.org/docs/ssl/SSL_CTX_set_options.html 104 ctx.set_options(SSL.OP_ALL) 105 return ctx 106 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scrapy/core/downloader/contextfactory.py b/scrapy/core/downloader/contextfactory.py --- a/scrapy/core/downloader/contextfactory.py +++ b/scrapy/core/downloader/contextfactory.py @@ -1,15 +1,15 @@ from OpenSSL import SSL from twisted.internet.ssl import ClientContextFactory -try: +from scrapy import twisted_version + +if twisted_version >= (14, 0, 0): from zope.interface.declarations import implementer - # the following should be available from Twisted 14.0.0 from twisted.internet.ssl import (optionsForClientTLS, CertificateOptions, platformTrust) - from twisted.web.client import BrowserLikePolicyForHTTPS from twisted.web.iweb import IPolicyForHTTPS @@ -86,7 +86,7 @@ 'method': self._ssl_method, }) -except ImportError: +else: class ScrapyClientContextFactory(ClientContextFactory): "A SSL context factory which is more permissive against SSL bugs."
{"golden_diff": "diff --git a/scrapy/core/downloader/contextfactory.py b/scrapy/core/downloader/contextfactory.py\n--- a/scrapy/core/downloader/contextfactory.py\n+++ b/scrapy/core/downloader/contextfactory.py\n@@ -1,15 +1,15 @@\n from OpenSSL import SSL\n from twisted.internet.ssl import ClientContextFactory\n \n-try:\n+from scrapy import twisted_version\n+\n+if twisted_version >= (14, 0, 0):\n \n from zope.interface.declarations import implementer\n \n- # the following should be available from Twisted 14.0.0\n from twisted.internet.ssl import (optionsForClientTLS,\n CertificateOptions,\n platformTrust)\n-\n from twisted.web.client import BrowserLikePolicyForHTTPS\n from twisted.web.iweb import IPolicyForHTTPS\n \n@@ -86,7 +86,7 @@\n 'method': self._ssl_method,\n })\n \n-except ImportError:\n+else:\n \n class ScrapyClientContextFactory(ClientContextFactory):\n \"A SSL context factory which is more permissive against SSL bugs.\"\n", "issue": "BrowserLikeContextFactory not available in some conditions\nWhile tracing the error that lead to #2555 I tried the workaround [mentioned in the documentation](https://doc.scrapy.org/en/latest/topics/settings.html#downloader-clientcontextfactory) without success.\r\n\r\nThis code lives [incontextfactory.py](https://github.com/scrapy/scrapy/blob/c3411373e8a8ee2786588bdad7be469c69a25e2a/scrapy/core/downloader/contextfactory.py#L63) but was not reachable as the import was failing on my system due to #2555.\r\n\r\nThis file is a large `try/except` block with many potential points of failure and it's likely to trip other users in the future.\r\nThat said, could this be refactored to provide a fallback for `BrowserLikeContextFactory` or otherwise reduce the scope of the `try/except` to avoid breaking the API?\n", "before_files": [{"content": "from OpenSSL import SSL\nfrom twisted.internet.ssl import ClientContextFactory\n\ntry:\n\n from zope.interface.declarations import implementer\n\n # the following should be available from Twisted 14.0.0\n from twisted.internet.ssl import (optionsForClientTLS,\n CertificateOptions,\n platformTrust)\n\n from twisted.web.client import BrowserLikePolicyForHTTPS\n from twisted.web.iweb import IPolicyForHTTPS\n\n from scrapy.core.downloader.tls import ScrapyClientTLSOptions, DEFAULT_CIPHERS\n\n\n @implementer(IPolicyForHTTPS)\n class ScrapyClientContextFactory(BrowserLikePolicyForHTTPS):\n \"\"\"\n Non-peer-certificate verifying HTTPS context factory\n\n Default OpenSSL method is TLS_METHOD (also called SSLv23_METHOD)\n which allows TLS protocol negotiation\n\n 'A TLS/SSL connection established with [this method] may\n understand the SSLv3, TLSv1, TLSv1.1 and TLSv1.2 protocols.'\n \"\"\"\n\n def __init__(self, method=SSL.SSLv23_METHOD, *args, **kwargs):\n super(ScrapyClientContextFactory, self).__init__(*args, **kwargs)\n self._ssl_method = method\n\n def getCertificateOptions(self):\n # setting verify=True will require you to provide CAs\n # to verify against; in other words: it's not that simple\n\n # backward-compatible SSL/TLS method:\n #\n # * this will respect `method` attribute in often recommended\n # `ScrapyClientContextFactory` subclass\n # (https://github.com/scrapy/scrapy/issues/1429#issuecomment-131782133)\n #\n # * getattr() for `_ssl_method` attribute for context factories\n # not calling super(..., self).__init__\n return CertificateOptions(verify=False,\n method=getattr(self, 'method',\n getattr(self, '_ssl_method', None)),\n fixBrokenPeers=True,\n acceptableCiphers=DEFAULT_CIPHERS)\n\n # kept for old-style 
HTTP/1.0 downloader context twisted calls,\n # e.g. connectSSL()\n def getContext(self, hostname=None, port=None):\n return self.getCertificateOptions().getContext()\n\n def creatorForNetloc(self, hostname, port):\n return ScrapyClientTLSOptions(hostname.decode(\"ascii\"), self.getContext())\n\n\n @implementer(IPolicyForHTTPS)\n class BrowserLikeContextFactory(ScrapyClientContextFactory):\n \"\"\"\n Twisted-recommended context factory for web clients.\n\n Quoting http://twistedmatrix.com/documents/current/api/twisted.web.client.Agent.html:\n \"The default is to use a BrowserLikePolicyForHTTPS,\n so unless you have special requirements you can leave this as-is.\"\n\n creatorForNetloc() is the same as BrowserLikePolicyForHTTPS\n except this context factory allows setting the TLS/SSL method to use.\n\n Default OpenSSL method is TLS_METHOD (also called SSLv23_METHOD)\n which allows TLS protocol negotiation.\n \"\"\"\n def creatorForNetloc(self, hostname, port):\n\n # trustRoot set to platformTrust() will use the platform's root CAs.\n #\n # This means that a website like https://www.cacert.org will be rejected\n # by default, since CAcert.org CA certificate is seldom shipped.\n return optionsForClientTLS(hostname.decode(\"ascii\"),\n trustRoot=platformTrust(),\n extraCertificateOptions={\n 'method': self._ssl_method,\n })\n\nexcept ImportError:\n\n class ScrapyClientContextFactory(ClientContextFactory):\n \"A SSL context factory which is more permissive against SSL bugs.\"\n # see https://github.com/scrapy/scrapy/issues/82\n # and https://github.com/scrapy/scrapy/issues/26\n # and https://github.com/scrapy/scrapy/issues/981\n\n def __init__(self, method=SSL.SSLv23_METHOD):\n self.method = method\n\n def getContext(self, hostname=None, port=None):\n ctx = ClientContextFactory.getContext(self)\n # Enable all workarounds to SSL bugs as documented by\n # http://www.openssl.org/docs/ssl/SSL_CTX_set_options.html\n ctx.set_options(SSL.OP_ALL)\n return ctx\n", "path": "scrapy/core/downloader/contextfactory.py"}], "after_files": [{"content": "from OpenSSL import SSL\nfrom twisted.internet.ssl import ClientContextFactory\n\nfrom scrapy import twisted_version\n\nif twisted_version >= (14, 0, 0):\n\n from zope.interface.declarations import implementer\n\n from twisted.internet.ssl import (optionsForClientTLS,\n CertificateOptions,\n platformTrust)\n from twisted.web.client import BrowserLikePolicyForHTTPS\n from twisted.web.iweb import IPolicyForHTTPS\n\n from scrapy.core.downloader.tls import ScrapyClientTLSOptions, DEFAULT_CIPHERS\n\n\n @implementer(IPolicyForHTTPS)\n class ScrapyClientContextFactory(BrowserLikePolicyForHTTPS):\n \"\"\"\n Non-peer-certificate verifying HTTPS context factory\n\n Default OpenSSL method is TLS_METHOD (also called SSLv23_METHOD)\n which allows TLS protocol negotiation\n\n 'A TLS/SSL connection established with [this method] may\n understand the SSLv3, TLSv1, TLSv1.1 and TLSv1.2 protocols.'\n \"\"\"\n\n def __init__(self, method=SSL.SSLv23_METHOD, *args, **kwargs):\n super(ScrapyClientContextFactory, self).__init__(*args, **kwargs)\n self._ssl_method = method\n\n def getCertificateOptions(self):\n # setting verify=True will require you to provide CAs\n # to verify against; in other words: it's not that simple\n\n # backward-compatible SSL/TLS method:\n #\n # * this will respect `method` attribute in often recommended\n # `ScrapyClientContextFactory` subclass\n # (https://github.com/scrapy/scrapy/issues/1429#issuecomment-131782133)\n #\n # * getattr() for `_ssl_method` 
attribute for context factories\n # not calling super(..., self).__init__\n return CertificateOptions(verify=False,\n method=getattr(self, 'method',\n getattr(self, '_ssl_method', None)),\n fixBrokenPeers=True,\n acceptableCiphers=DEFAULT_CIPHERS)\n\n # kept for old-style HTTP/1.0 downloader context twisted calls,\n # e.g. connectSSL()\n def getContext(self, hostname=None, port=None):\n return self.getCertificateOptions().getContext()\n\n def creatorForNetloc(self, hostname, port):\n return ScrapyClientTLSOptions(hostname.decode(\"ascii\"), self.getContext())\n\n\n @implementer(IPolicyForHTTPS)\n class BrowserLikeContextFactory(ScrapyClientContextFactory):\n \"\"\"\n Twisted-recommended context factory for web clients.\n\n Quoting http://twistedmatrix.com/documents/current/api/twisted.web.client.Agent.html:\n \"The default is to use a BrowserLikePolicyForHTTPS,\n so unless you have special requirements you can leave this as-is.\"\n\n creatorForNetloc() is the same as BrowserLikePolicyForHTTPS\n except this context factory allows setting the TLS/SSL method to use.\n\n Default OpenSSL method is TLS_METHOD (also called SSLv23_METHOD)\n which allows TLS protocol negotiation.\n \"\"\"\n def creatorForNetloc(self, hostname, port):\n\n # trustRoot set to platformTrust() will use the platform's root CAs.\n #\n # This means that a website like https://www.cacert.org will be rejected\n # by default, since CAcert.org CA certificate is seldom shipped.\n return optionsForClientTLS(hostname.decode(\"ascii\"),\n trustRoot=platformTrust(),\n extraCertificateOptions={\n 'method': self._ssl_method,\n })\n\nelse:\n\n class ScrapyClientContextFactory(ClientContextFactory):\n \"A SSL context factory which is more permissive against SSL bugs.\"\n # see https://github.com/scrapy/scrapy/issues/82\n # and https://github.com/scrapy/scrapy/issues/26\n # and https://github.com/scrapy/scrapy/issues/981\n\n def __init__(self, method=SSL.SSLv23_METHOD):\n self.method = method\n\n def getContext(self, hostname=None, port=None):\n ctx = ClientContextFactory.getContext(self)\n # Enable all workarounds to SSL bugs as documented by\n # http://www.openssl.org/docs/ssl/SSL_CTX_set_options.html\n ctx.set_options(SSL.OP_ALL)\n return ctx\n", "path": "scrapy/core/downloader/contextfactory.py"}]}
1,612
230
gh_patches_debug_63313
rasdani/github-patches
git_diff
searxng__searxng-706
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `?` search syntax is broken The [user documentation says](https://docs.searxng.org/user/search_syntax.html): >**Prefix: `?`** >to add engines and categories to the currently selected categories But to me it is unclear how it is different from the `!` bang syntax. The following queries all yield exactly the same results: * `!bing test !images` * `!bing test ?images` * `?bing test !images` Only `?bing test ?images` for some reason only yields images??? :confused: Can somebody explain to me why somebody would want to use `?` over `!`? If there is no satisfying answer to that question, I vote that we remove (or at least deprecate) the `?` syntax. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `searx/query.py` Content: ``` 1 # SPDX-License-Identifier: AGPL-3.0-or-later 2 3 from abc import abstractmethod, ABC 4 import re 5 6 from searx import settings 7 from searx.languages import language_codes 8 from searx.engines import categories, engines, engine_shortcuts 9 from searx.external_bang import get_bang_definition_and_autocomplete 10 from searx.search import EngineRef 11 from searx.webutils import VALID_LANGUAGE_CODE 12 13 14 class QueryPartParser(ABC): 15 16 __slots__ = "raw_text_query", "enable_autocomplete" 17 18 @staticmethod 19 @abstractmethod 20 def check(raw_value): 21 """Check if raw_value can be parsed""" 22 23 def __init__(self, raw_text_query, enable_autocomplete): 24 self.raw_text_query = raw_text_query 25 self.enable_autocomplete = enable_autocomplete 26 27 @abstractmethod 28 def __call__(self, raw_value): 29 """Try to parse raw_value: set the self.raw_text_query properties 30 31 return True if raw_value has been parsed 32 33 self.raw_text_query.autocomplete_list is also modified 34 if self.enable_autocomplete is True 35 """ 36 37 def _add_autocomplete(self, value): 38 if value not in self.raw_text_query.autocomplete_list: 39 self.raw_text_query.autocomplete_list.append(value) 40 41 42 class TimeoutParser(QueryPartParser): 43 @staticmethod 44 def check(raw_value): 45 return raw_value[0] == '<' 46 47 def __call__(self, raw_value): 48 value = raw_value[1:] 49 found = self._parse(value) if len(value) > 0 else False 50 if self.enable_autocomplete and not value: 51 self._autocomplete() 52 return found 53 54 def _parse(self, value): 55 if not value.isdigit(): 56 return False 57 raw_timeout_limit = int(value) 58 if raw_timeout_limit < 100: 59 # below 100, the unit is the second ( <3 = 3 seconds timeout ) 60 self.raw_text_query.timeout_limit = float(raw_timeout_limit) 61 else: 62 # 100 or above, the unit is the millisecond ( <850 = 850 milliseconds timeout ) 63 self.raw_text_query.timeout_limit = raw_timeout_limit / 1000.0 64 return True 65 66 def _autocomplete(self): 67 for suggestion in ['<3', '<850']: 68 self._add_autocomplete(suggestion) 69 70 71 class LanguageParser(QueryPartParser): 72 @staticmethod 73 def check(raw_value): 74 return raw_value[0] == ':' 75 76 def __call__(self, raw_value): 77 value = raw_value[1:].lower().replace('_', '-') 78 found = self._parse(value) if len(value) > 0 else False 79 if self.enable_autocomplete and not found: 80 self._autocomplete(value) 81 return found 82 83 def _parse(self, value): 84 found = False 85 # check if any language-code is equal with 86 # declared language-codes 87 for lc in language_codes: 88 lang_id, lang_name, country, 
english_name = map(str.lower, lc) 89 90 # if correct language-code is found 91 # set it as new search-language 92 93 if ( 94 value == lang_id or value == lang_name or value == english_name or value.replace('-', ' ') == country 95 ) and value not in self.raw_text_query.languages: 96 found = True 97 lang_parts = lang_id.split('-') 98 if len(lang_parts) == 2: 99 self.raw_text_query.languages.append(lang_parts[0] + '-' + lang_parts[1].upper()) 100 else: 101 self.raw_text_query.languages.append(lang_id) 102 # to ensure best match (first match is not necessarily the best one) 103 if value == lang_id: 104 break 105 106 # user may set a valid, yet not selectable language 107 if VALID_LANGUAGE_CODE.match(value): 108 lang_parts = value.split('-') 109 if len(lang_parts) > 1: 110 value = lang_parts[0].lower() + '-' + lang_parts[1].upper() 111 if value not in self.raw_text_query.languages: 112 self.raw_text_query.languages.append(value) 113 found = True 114 115 return found 116 117 def _autocomplete(self, value): 118 if not value: 119 # show some example queries 120 if len(settings['search']['languages']) < 10: 121 for lang in settings['search']['languages']: 122 self.raw_text_query.autocomplete_list.append(':' + lang) 123 else: 124 for lang in [":en", ":en_us", ":english", ":united_kingdom"]: 125 self.raw_text_query.autocomplete_list.append(lang) 126 return 127 128 for lc in language_codes: 129 if lc[0] not in settings['search']['languages']: 130 continue 131 lang_id, lang_name, country, english_name = map(str.lower, lc) 132 133 # check if query starts with language-id 134 if lang_id.startswith(value): 135 if len(value) <= 2: 136 self._add_autocomplete(':' + lang_id.split('-')[0]) 137 else: 138 self._add_autocomplete(':' + lang_id) 139 140 # check if query starts with language name 141 if lang_name.startswith(value) or english_name.startswith(value): 142 self._add_autocomplete(':' + lang_name) 143 144 # check if query starts with country 145 # here "new_zealand" is "new-zealand" (see __call__) 146 if country.startswith(value.replace('-', ' ')): 147 self._add_autocomplete(':' + country.replace(' ', '_')) 148 149 150 class ExternalBangParser(QueryPartParser): 151 @staticmethod 152 def check(raw_value): 153 return raw_value.startswith('!!') 154 155 def __call__(self, raw_value): 156 value = raw_value[2:] 157 found, bang_ac_list = self._parse(value) if len(value) > 0 else (False, []) 158 if self.enable_autocomplete: 159 self._autocomplete(bang_ac_list) 160 return found 161 162 def _parse(self, value): 163 found = False 164 bang_definition, bang_ac_list = get_bang_definition_and_autocomplete(value) 165 if bang_definition is not None: 166 self.raw_text_query.external_bang = value 167 found = True 168 return found, bang_ac_list 169 170 def _autocomplete(self, bang_ac_list): 171 if not bang_ac_list: 172 bang_ac_list = ['g', 'ddg', 'bing'] 173 for external_bang in bang_ac_list: 174 self._add_autocomplete('!!' + external_bang) 175 176 177 class BangParser(QueryPartParser): 178 @staticmethod 179 def check(raw_value): 180 return raw_value[0] == '!' or raw_value[0] == '?' 
181 182 def __call__(self, raw_value): 183 value = raw_value[1:].replace('-', ' ').replace('_', ' ') 184 found = self._parse(value) if len(value) > 0 else False 185 if found and raw_value[0] == '!': 186 self.raw_text_query.specific = True 187 if self.enable_autocomplete: 188 self._autocomplete(raw_value[0], value) 189 return found 190 191 def _parse(self, value): 192 # check if prefix is equal with engine shortcut 193 if value in engine_shortcuts: 194 value = engine_shortcuts[value] 195 196 # check if prefix is equal with engine name 197 if value in engines: 198 self.raw_text_query.enginerefs.append(EngineRef(value, 'none')) 199 return True 200 201 # check if prefix is equal with categorie name 202 if value in categories: 203 # using all engines for that search, which 204 # are declared under that categorie name 205 self.raw_text_query.enginerefs.extend( 206 EngineRef(engine.name, value) 207 for engine in categories[value] 208 if (engine.name, value) not in self.raw_text_query.disabled_engines 209 ) 210 return True 211 212 return False 213 214 def _autocomplete(self, first_char, value): 215 if not value: 216 # show some example queries 217 for suggestion in ['images', 'wikipedia', 'osm']: 218 if suggestion not in self.raw_text_query.disabled_engines or suggestion in categories: 219 self._add_autocomplete(first_char + suggestion) 220 return 221 222 # check if query starts with categorie name 223 for category in categories: 224 if category.startswith(value): 225 self._add_autocomplete(first_char + category.replace(' ', '_')) 226 227 # check if query starts with engine name 228 for engine in engines: 229 if engine.startswith(value): 230 self._add_autocomplete(first_char + engine.replace(' ', '_')) 231 232 # check if query starts with engine shortcut 233 for engine_shortcut in engine_shortcuts: 234 if engine_shortcut.startswith(value): 235 self._add_autocomplete(first_char + engine_shortcut) 236 237 238 class RawTextQuery: 239 """parse raw text query (the value from the html input)""" 240 241 PARSER_CLASSES = [ 242 TimeoutParser, # this force the timeout 243 LanguageParser, # this force a language 244 ExternalBangParser, # external bang (must be before BangParser) 245 BangParser, # this force a engine or category 246 ] 247 248 def __init__(self, query, disabled_engines): 249 assert isinstance(query, str) 250 # input parameters 251 self.query = query 252 self.disabled_engines = disabled_engines if disabled_engines else [] 253 # parsed values 254 self.enginerefs = [] 255 self.languages = [] 256 self.timeout_limit = None 257 self.external_bang = None 258 self.specific = False 259 self.autocomplete_list = [] 260 # internal properties 261 self.query_parts = [] # use self.getFullQuery() 262 self.user_query_parts = [] # use self.getQuery() 263 self.autocomplete_location = None 264 self._parse_query() 265 266 def _parse_query(self): 267 """ 268 parse self.query, if tags are set, which 269 change the search engine or search-language 270 """ 271 272 # split query, including whitespaces 273 raw_query_parts = re.split(r'(\s+)', self.query) 274 275 last_index_location = None 276 autocomplete_index = len(raw_query_parts) - 1 277 278 for i, query_part in enumerate(raw_query_parts): 279 # part does only contain spaces, skip 280 if query_part.isspace() or query_part == '': 281 continue 282 283 # parse special commands 284 special_part = False 285 for parser_class in RawTextQuery.PARSER_CLASSES: 286 if parser_class.check(query_part): 287 special_part = parser_class(self, i == autocomplete_index)(query_part) 288 
break 289 290 # append query part to query_part list 291 qlist = self.query_parts if special_part else self.user_query_parts 292 qlist.append(query_part) 293 last_index_location = (qlist, len(qlist) - 1) 294 295 self.autocomplete_location = last_index_location 296 297 def get_autocomplete_full_query(self, text): 298 qlist, position = self.autocomplete_location 299 qlist[position] = text 300 return self.getFullQuery() 301 302 def changeQuery(self, query): 303 self.user_query_parts = query.strip().split() 304 self.query = self.getFullQuery() 305 self.autocomplete_location = (self.user_query_parts, len(self.user_query_parts) - 1) 306 self.autocomplete_list = [] 307 return self 308 309 def getQuery(self): 310 return ' '.join(self.user_query_parts) 311 312 def getFullQuery(self): 313 """ 314 get full querry including whitespaces 315 """ 316 return '{0} {1}'.format(' '.join(self.query_parts), self.getQuery()).strip() 317 318 def __str__(self): 319 return self.getFullQuery() 320 321 def __repr__(self): 322 return ( 323 f"<{self.__class__.__name__} " 324 + f"query={self.query!r} " 325 + f"disabled_engines={self.disabled_engines!r}\n " 326 + f"languages={self.languages!r} " 327 + f"timeout_limit={self.timeout_limit!r} " 328 + f"external_bang={self.external_bang!r} " 329 + f"specific={self.specific!r} " 330 + f"enginerefs={self.enginerefs!r}\n " 331 + f"autocomplete_list={self.autocomplete_list!r}\n " 332 + f"query_parts={self.query_parts!r}\n " 333 + f"user_query_parts={self.user_query_parts!r} >" 334 ) 335 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/searx/query.py b/searx/query.py --- a/searx/query.py +++ b/searx/query.py @@ -177,7 +177,7 @@ class BangParser(QueryPartParser): @staticmethod def check(raw_value): - return raw_value[0] == '!' or raw_value[0] == '?' + return raw_value[0] == '!' def __call__(self, raw_value): value = raw_value[1:].replace('-', ' ').replace('_', ' ')
{"golden_diff": "diff --git a/searx/query.py b/searx/query.py\n--- a/searx/query.py\n+++ b/searx/query.py\n@@ -177,7 +177,7 @@\n class BangParser(QueryPartParser):\n @staticmethod\n def check(raw_value):\n- return raw_value[0] == '!' or raw_value[0] == '?'\n+ return raw_value[0] == '!'\n \n def __call__(self, raw_value):\n value = raw_value[1:].replace('-', ' ').replace('_', ' ')\n", "issue": "`?` search syntax is broken\nThe [user documentation says](https://docs.searxng.org/user/search_syntax.html):\r\n \r\n>**Prefix: `?`**\r\n>to add engines and categories to the currently selected categories\r\n\r\nBut to me it is unclear how it is different from the `!` bang syntax. The following queries all yield exactly the same results:\r\n\r\n* `!bing test !images`\r\n* `!bing test ?images`\r\n* `?bing test !images`\r\n\r\nOnly `?bing test ?images` for some reason only yields images??? :confused: \r\n\r\nCan somebody explain to me why somebody would want to use `?` over `!`?\r\n\r\nIf there is no satisfying answer to that question, I vote that we remove (or at least deprecate) the `?` syntax.\n", "before_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n\nfrom abc import abstractmethod, ABC\nimport re\n\nfrom searx import settings\nfrom searx.languages import language_codes\nfrom searx.engines import categories, engines, engine_shortcuts\nfrom searx.external_bang import get_bang_definition_and_autocomplete\nfrom searx.search import EngineRef\nfrom searx.webutils import VALID_LANGUAGE_CODE\n\n\nclass QueryPartParser(ABC):\n\n __slots__ = \"raw_text_query\", \"enable_autocomplete\"\n\n @staticmethod\n @abstractmethod\n def check(raw_value):\n \"\"\"Check if raw_value can be parsed\"\"\"\n\n def __init__(self, raw_text_query, enable_autocomplete):\n self.raw_text_query = raw_text_query\n self.enable_autocomplete = enable_autocomplete\n\n @abstractmethod\n def __call__(self, raw_value):\n \"\"\"Try to parse raw_value: set the self.raw_text_query properties\n\n return True if raw_value has been parsed\n\n self.raw_text_query.autocomplete_list is also modified\n if self.enable_autocomplete is True\n \"\"\"\n\n def _add_autocomplete(self, value):\n if value not in self.raw_text_query.autocomplete_list:\n self.raw_text_query.autocomplete_list.append(value)\n\n\nclass TimeoutParser(QueryPartParser):\n @staticmethod\n def check(raw_value):\n return raw_value[0] == '<'\n\n def __call__(self, raw_value):\n value = raw_value[1:]\n found = self._parse(value) if len(value) > 0 else False\n if self.enable_autocomplete and not value:\n self._autocomplete()\n return found\n\n def _parse(self, value):\n if not value.isdigit():\n return False\n raw_timeout_limit = int(value)\n if raw_timeout_limit < 100:\n # below 100, the unit is the second ( <3 = 3 seconds timeout )\n self.raw_text_query.timeout_limit = float(raw_timeout_limit)\n else:\n # 100 or above, the unit is the millisecond ( <850 = 850 milliseconds timeout )\n self.raw_text_query.timeout_limit = raw_timeout_limit / 1000.0\n return True\n\n def _autocomplete(self):\n for suggestion in ['<3', '<850']:\n self._add_autocomplete(suggestion)\n\n\nclass LanguageParser(QueryPartParser):\n @staticmethod\n def check(raw_value):\n return raw_value[0] == ':'\n\n def __call__(self, raw_value):\n value = raw_value[1:].lower().replace('_', '-')\n found = self._parse(value) if len(value) > 0 else False\n if self.enable_autocomplete and not found:\n self._autocomplete(value)\n return found\n\n def _parse(self, value):\n found = False\n # check if any 
language-code is equal with\n # declared language-codes\n for lc in language_codes:\n lang_id, lang_name, country, english_name = map(str.lower, lc)\n\n # if correct language-code is found\n # set it as new search-language\n\n if (\n value == lang_id or value == lang_name or value == english_name or value.replace('-', ' ') == country\n ) and value not in self.raw_text_query.languages:\n found = True\n lang_parts = lang_id.split('-')\n if len(lang_parts) == 2:\n self.raw_text_query.languages.append(lang_parts[0] + '-' + lang_parts[1].upper())\n else:\n self.raw_text_query.languages.append(lang_id)\n # to ensure best match (first match is not necessarily the best one)\n if value == lang_id:\n break\n\n # user may set a valid, yet not selectable language\n if VALID_LANGUAGE_CODE.match(value):\n lang_parts = value.split('-')\n if len(lang_parts) > 1:\n value = lang_parts[0].lower() + '-' + lang_parts[1].upper()\n if value not in self.raw_text_query.languages:\n self.raw_text_query.languages.append(value)\n found = True\n\n return found\n\n def _autocomplete(self, value):\n if not value:\n # show some example queries\n if len(settings['search']['languages']) < 10:\n for lang in settings['search']['languages']:\n self.raw_text_query.autocomplete_list.append(':' + lang)\n else:\n for lang in [\":en\", \":en_us\", \":english\", \":united_kingdom\"]:\n self.raw_text_query.autocomplete_list.append(lang)\n return\n\n for lc in language_codes:\n if lc[0] not in settings['search']['languages']:\n continue\n lang_id, lang_name, country, english_name = map(str.lower, lc)\n\n # check if query starts with language-id\n if lang_id.startswith(value):\n if len(value) <= 2:\n self._add_autocomplete(':' + lang_id.split('-')[0])\n else:\n self._add_autocomplete(':' + lang_id)\n\n # check if query starts with language name\n if lang_name.startswith(value) or english_name.startswith(value):\n self._add_autocomplete(':' + lang_name)\n\n # check if query starts with country\n # here \"new_zealand\" is \"new-zealand\" (see __call__)\n if country.startswith(value.replace('-', ' ')):\n self._add_autocomplete(':' + country.replace(' ', '_'))\n\n\nclass ExternalBangParser(QueryPartParser):\n @staticmethod\n def check(raw_value):\n return raw_value.startswith('!!')\n\n def __call__(self, raw_value):\n value = raw_value[2:]\n found, bang_ac_list = self._parse(value) if len(value) > 0 else (False, [])\n if self.enable_autocomplete:\n self._autocomplete(bang_ac_list)\n return found\n\n def _parse(self, value):\n found = False\n bang_definition, bang_ac_list = get_bang_definition_and_autocomplete(value)\n if bang_definition is not None:\n self.raw_text_query.external_bang = value\n found = True\n return found, bang_ac_list\n\n def _autocomplete(self, bang_ac_list):\n if not bang_ac_list:\n bang_ac_list = ['g', 'ddg', 'bing']\n for external_bang in bang_ac_list:\n self._add_autocomplete('!!' + external_bang)\n\n\nclass BangParser(QueryPartParser):\n @staticmethod\n def check(raw_value):\n return raw_value[0] == '!' 
or raw_value[0] == '?'\n\n def __call__(self, raw_value):\n value = raw_value[1:].replace('-', ' ').replace('_', ' ')\n found = self._parse(value) if len(value) > 0 else False\n if found and raw_value[0] == '!':\n self.raw_text_query.specific = True\n if self.enable_autocomplete:\n self._autocomplete(raw_value[0], value)\n return found\n\n def _parse(self, value):\n # check if prefix is equal with engine shortcut\n if value in engine_shortcuts:\n value = engine_shortcuts[value]\n\n # check if prefix is equal with engine name\n if value in engines:\n self.raw_text_query.enginerefs.append(EngineRef(value, 'none'))\n return True\n\n # check if prefix is equal with categorie name\n if value in categories:\n # using all engines for that search, which\n # are declared under that categorie name\n self.raw_text_query.enginerefs.extend(\n EngineRef(engine.name, value)\n for engine in categories[value]\n if (engine.name, value) not in self.raw_text_query.disabled_engines\n )\n return True\n\n return False\n\n def _autocomplete(self, first_char, value):\n if not value:\n # show some example queries\n for suggestion in ['images', 'wikipedia', 'osm']:\n if suggestion not in self.raw_text_query.disabled_engines or suggestion in categories:\n self._add_autocomplete(first_char + suggestion)\n return\n\n # check if query starts with categorie name\n for category in categories:\n if category.startswith(value):\n self._add_autocomplete(first_char + category.replace(' ', '_'))\n\n # check if query starts with engine name\n for engine in engines:\n if engine.startswith(value):\n self._add_autocomplete(first_char + engine.replace(' ', '_'))\n\n # check if query starts with engine shortcut\n for engine_shortcut in engine_shortcuts:\n if engine_shortcut.startswith(value):\n self._add_autocomplete(first_char + engine_shortcut)\n\n\nclass RawTextQuery:\n \"\"\"parse raw text query (the value from the html input)\"\"\"\n\n PARSER_CLASSES = [\n TimeoutParser, # this force the timeout\n LanguageParser, # this force a language\n ExternalBangParser, # external bang (must be before BangParser)\n BangParser, # this force a engine or category\n ]\n\n def __init__(self, query, disabled_engines):\n assert isinstance(query, str)\n # input parameters\n self.query = query\n self.disabled_engines = disabled_engines if disabled_engines else []\n # parsed values\n self.enginerefs = []\n self.languages = []\n self.timeout_limit = None\n self.external_bang = None\n self.specific = False\n self.autocomplete_list = []\n # internal properties\n self.query_parts = [] # use self.getFullQuery()\n self.user_query_parts = [] # use self.getQuery()\n self.autocomplete_location = None\n self._parse_query()\n\n def _parse_query(self):\n \"\"\"\n parse self.query, if tags are set, which\n change the search engine or search-language\n \"\"\"\n\n # split query, including whitespaces\n raw_query_parts = re.split(r'(\\s+)', self.query)\n\n last_index_location = None\n autocomplete_index = len(raw_query_parts) - 1\n\n for i, query_part in enumerate(raw_query_parts):\n # part does only contain spaces, skip\n if query_part.isspace() or query_part == '':\n continue\n\n # parse special commands\n special_part = False\n for parser_class in RawTextQuery.PARSER_CLASSES:\n if parser_class.check(query_part):\n special_part = parser_class(self, i == autocomplete_index)(query_part)\n break\n\n # append query part to query_part list\n qlist = self.query_parts if special_part else self.user_query_parts\n qlist.append(query_part)\n last_index_location = (qlist, 
len(qlist) - 1)\n\n self.autocomplete_location = last_index_location\n\n def get_autocomplete_full_query(self, text):\n qlist, position = self.autocomplete_location\n qlist[position] = text\n return self.getFullQuery()\n\n def changeQuery(self, query):\n self.user_query_parts = query.strip().split()\n self.query = self.getFullQuery()\n self.autocomplete_location = (self.user_query_parts, len(self.user_query_parts) - 1)\n self.autocomplete_list = []\n return self\n\n def getQuery(self):\n return ' '.join(self.user_query_parts)\n\n def getFullQuery(self):\n \"\"\"\n get full querry including whitespaces\n \"\"\"\n return '{0} {1}'.format(' '.join(self.query_parts), self.getQuery()).strip()\n\n def __str__(self):\n return self.getFullQuery()\n\n def __repr__(self):\n return (\n f\"<{self.__class__.__name__} \"\n + f\"query={self.query!r} \"\n + f\"disabled_engines={self.disabled_engines!r}\\n \"\n + f\"languages={self.languages!r} \"\n + f\"timeout_limit={self.timeout_limit!r} \"\n + f\"external_bang={self.external_bang!r} \"\n + f\"specific={self.specific!r} \"\n + f\"enginerefs={self.enginerefs!r}\\n \"\n + f\"autocomplete_list={self.autocomplete_list!r}\\n \"\n + f\"query_parts={self.query_parts!r}\\n \"\n + f\"user_query_parts={self.user_query_parts!r} >\"\n )\n", "path": "searx/query.py"}], "after_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n\nfrom abc import abstractmethod, ABC\nimport re\n\nfrom searx import settings\nfrom searx.languages import language_codes\nfrom searx.engines import categories, engines, engine_shortcuts\nfrom searx.external_bang import get_bang_definition_and_autocomplete\nfrom searx.search import EngineRef\nfrom searx.webutils import VALID_LANGUAGE_CODE\n\n\nclass QueryPartParser(ABC):\n\n __slots__ = \"raw_text_query\", \"enable_autocomplete\"\n\n @staticmethod\n @abstractmethod\n def check(raw_value):\n \"\"\"Check if raw_value can be parsed\"\"\"\n\n def __init__(self, raw_text_query, enable_autocomplete):\n self.raw_text_query = raw_text_query\n self.enable_autocomplete = enable_autocomplete\n\n @abstractmethod\n def __call__(self, raw_value):\n \"\"\"Try to parse raw_value: set the self.raw_text_query properties\n\n return True if raw_value has been parsed\n\n self.raw_text_query.autocomplete_list is also modified\n if self.enable_autocomplete is True\n \"\"\"\n\n def _add_autocomplete(self, value):\n if value not in self.raw_text_query.autocomplete_list:\n self.raw_text_query.autocomplete_list.append(value)\n\n\nclass TimeoutParser(QueryPartParser):\n @staticmethod\n def check(raw_value):\n return raw_value[0] == '<'\n\n def __call__(self, raw_value):\n value = raw_value[1:]\n found = self._parse(value) if len(value) > 0 else False\n if self.enable_autocomplete and not value:\n self._autocomplete()\n return found\n\n def _parse(self, value):\n if not value.isdigit():\n return False\n raw_timeout_limit = int(value)\n if raw_timeout_limit < 100:\n # below 100, the unit is the second ( <3 = 3 seconds timeout )\n self.raw_text_query.timeout_limit = float(raw_timeout_limit)\n else:\n # 100 or above, the unit is the millisecond ( <850 = 850 milliseconds timeout )\n self.raw_text_query.timeout_limit = raw_timeout_limit / 1000.0\n return True\n\n def _autocomplete(self):\n for suggestion in ['<3', '<850']:\n self._add_autocomplete(suggestion)\n\n\nclass LanguageParser(QueryPartParser):\n @staticmethod\n def check(raw_value):\n return raw_value[0] == ':'\n\n def __call__(self, raw_value):\n value = raw_value[1:].lower().replace('_', '-')\n 
found = self._parse(value) if len(value) > 0 else False\n if self.enable_autocomplete and not found:\n self._autocomplete(value)\n return found\n\n def _parse(self, value):\n found = False\n # check if any language-code is equal with\n # declared language-codes\n for lc in language_codes:\n lang_id, lang_name, country, english_name = map(str.lower, lc)\n\n # if correct language-code is found\n # set it as new search-language\n\n if (\n value == lang_id or value == lang_name or value == english_name or value.replace('-', ' ') == country\n ) and value not in self.raw_text_query.languages:\n found = True\n lang_parts = lang_id.split('-')\n if len(lang_parts) == 2:\n self.raw_text_query.languages.append(lang_parts[0] + '-' + lang_parts[1].upper())\n else:\n self.raw_text_query.languages.append(lang_id)\n # to ensure best match (first match is not necessarily the best one)\n if value == lang_id:\n break\n\n # user may set a valid, yet not selectable language\n if VALID_LANGUAGE_CODE.match(value):\n lang_parts = value.split('-')\n if len(lang_parts) > 1:\n value = lang_parts[0].lower() + '-' + lang_parts[1].upper()\n if value not in self.raw_text_query.languages:\n self.raw_text_query.languages.append(value)\n found = True\n\n return found\n\n def _autocomplete(self, value):\n if not value:\n # show some example queries\n if len(settings['search']['languages']) < 10:\n for lang in settings['search']['languages']:\n self.raw_text_query.autocomplete_list.append(':' + lang)\n else:\n for lang in [\":en\", \":en_us\", \":english\", \":united_kingdom\"]:\n self.raw_text_query.autocomplete_list.append(lang)\n return\n\n for lc in language_codes:\n if lc[0] not in settings['search']['languages']:\n continue\n lang_id, lang_name, country, english_name = map(str.lower, lc)\n\n # check if query starts with language-id\n if lang_id.startswith(value):\n if len(value) <= 2:\n self._add_autocomplete(':' + lang_id.split('-')[0])\n else:\n self._add_autocomplete(':' + lang_id)\n\n # check if query starts with language name\n if lang_name.startswith(value) or english_name.startswith(value):\n self._add_autocomplete(':' + lang_name)\n\n # check if query starts with country\n # here \"new_zealand\" is \"new-zealand\" (see __call__)\n if country.startswith(value.replace('-', ' ')):\n self._add_autocomplete(':' + country.replace(' ', '_'))\n\n\nclass ExternalBangParser(QueryPartParser):\n @staticmethod\n def check(raw_value):\n return raw_value.startswith('!!')\n\n def __call__(self, raw_value):\n value = raw_value[2:]\n found, bang_ac_list = self._parse(value) if len(value) > 0 else (False, [])\n if self.enable_autocomplete:\n self._autocomplete(bang_ac_list)\n return found\n\n def _parse(self, value):\n found = False\n bang_definition, bang_ac_list = get_bang_definition_and_autocomplete(value)\n if bang_definition is not None:\n self.raw_text_query.external_bang = value\n found = True\n return found, bang_ac_list\n\n def _autocomplete(self, bang_ac_list):\n if not bang_ac_list:\n bang_ac_list = ['g', 'ddg', 'bing']\n for external_bang in bang_ac_list:\n self._add_autocomplete('!!' 
+ external_bang)\n\n\nclass BangParser(QueryPartParser):\n @staticmethod\n def check(raw_value):\n return raw_value[0] == '!'\n\n def __call__(self, raw_value):\n value = raw_value[1:].replace('-', ' ').replace('_', ' ')\n found = self._parse(value) if len(value) > 0 else False\n if found and raw_value[0] == '!':\n self.raw_text_query.specific = True\n if self.enable_autocomplete:\n self._autocomplete(raw_value[0], value)\n return found\n\n def _parse(self, value):\n # check if prefix is equal with engine shortcut\n if value in engine_shortcuts:\n value = engine_shortcuts[value]\n\n # check if prefix is equal with engine name\n if value in engines:\n self.raw_text_query.enginerefs.append(EngineRef(value, 'none'))\n return True\n\n # check if prefix is equal with categorie name\n if value in categories:\n # using all engines for that search, which\n # are declared under that categorie name\n self.raw_text_query.enginerefs.extend(\n EngineRef(engine.name, value)\n for engine in categories[value]\n if (engine.name, value) not in self.raw_text_query.disabled_engines\n )\n return True\n\n return False\n\n def _autocomplete(self, first_char, value):\n if not value:\n # show some example queries\n for suggestion in ['images', 'wikipedia', 'osm']:\n if suggestion not in self.raw_text_query.disabled_engines or suggestion in categories:\n self._add_autocomplete(first_char + suggestion)\n return\n\n # check if query starts with categorie name\n for category in categories:\n if category.startswith(value):\n self._add_autocomplete(first_char + category.replace(' ', '_'))\n\n # check if query starts with engine name\n for engine in engines:\n if engine.startswith(value):\n self._add_autocomplete(first_char + engine.replace(' ', '_'))\n\n # check if query starts with engine shortcut\n for engine_shortcut in engine_shortcuts:\n if engine_shortcut.startswith(value):\n self._add_autocomplete(first_char + engine_shortcut)\n\n\nclass RawTextQuery:\n \"\"\"parse raw text query (the value from the html input)\"\"\"\n\n PARSER_CLASSES = [\n TimeoutParser, # this force the timeout\n LanguageParser, # this force a language\n ExternalBangParser, # external bang (must be before BangParser)\n BangParser, # this force a engine or category\n ]\n\n def __init__(self, query, disabled_engines):\n assert isinstance(query, str)\n # input parameters\n self.query = query\n self.disabled_engines = disabled_engines if disabled_engines else []\n # parsed values\n self.enginerefs = []\n self.languages = []\n self.timeout_limit = None\n self.external_bang = None\n self.specific = False\n self.autocomplete_list = []\n # internal properties\n self.query_parts = [] # use self.getFullQuery()\n self.user_query_parts = [] # use self.getQuery()\n self.autocomplete_location = None\n self._parse_query()\n\n def _parse_query(self):\n \"\"\"\n parse self.query, if tags are set, which\n change the search engine or search-language\n \"\"\"\n\n # split query, including whitespaces\n raw_query_parts = re.split(r'(\\s+)', self.query)\n\n last_index_location = None\n autocomplete_index = len(raw_query_parts) - 1\n\n for i, query_part in enumerate(raw_query_parts):\n # part does only contain spaces, skip\n if query_part.isspace() or query_part == '':\n continue\n\n # parse special commands\n special_part = False\n for parser_class in RawTextQuery.PARSER_CLASSES:\n if parser_class.check(query_part):\n special_part = parser_class(self, i == autocomplete_index)(query_part)\n break\n\n # append query part to query_part list\n qlist = self.query_parts 
if special_part else self.user_query_parts\n qlist.append(query_part)\n last_index_location = (qlist, len(qlist) - 1)\n\n self.autocomplete_location = last_index_location\n\n def get_autocomplete_full_query(self, text):\n qlist, position = self.autocomplete_location\n qlist[position] = text\n return self.getFullQuery()\n\n def changeQuery(self, query):\n self.user_query_parts = query.strip().split()\n self.query = self.getFullQuery()\n self.autocomplete_location = (self.user_query_parts, len(self.user_query_parts) - 1)\n self.autocomplete_list = []\n return self\n\n def getQuery(self):\n return ' '.join(self.user_query_parts)\n\n def getFullQuery(self):\n \"\"\"\n get full querry including whitespaces\n \"\"\"\n return '{0} {1}'.format(' '.join(self.query_parts), self.getQuery()).strip()\n\n def __str__(self):\n return self.getFullQuery()\n\n def __repr__(self):\n return (\n f\"<{self.__class__.__name__} \"\n + f\"query={self.query!r} \"\n + f\"disabled_engines={self.disabled_engines!r}\\n \"\n + f\"languages={self.languages!r} \"\n + f\"timeout_limit={self.timeout_limit!r} \"\n + f\"external_bang={self.external_bang!r} \"\n + f\"specific={self.specific!r} \"\n + f\"enginerefs={self.enginerefs!r}\\n \"\n + f\"autocomplete_list={self.autocomplete_list!r}\\n \"\n + f\"query_parts={self.query_parts!r}\\n \"\n + f\"user_query_parts={self.user_query_parts!r} >\"\n )\n", "path": "searx/query.py"}]}
4,028
121
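The golden diff above for searxng__searxng-706 simply drops `?` from `BangParser.check`, so a leading `?` is no longer treated as an engine or category selector. Below is a minimal, hypothetical sketch of the resulting behaviour change; it assumes a configured searx checkout where `searx.query` is importable and an "images" category is defined, and the query string is purely illustrative.

```python
# Minimal sketch of the behaviour change, not a test taken from the repo.
# Assumes a configured searx checkout where searx.query is importable and
# an "images" category exists; the query string is illustrative.
from searx.query import RawTextQuery

q = RawTextQuery("?images test", disabled_engines=[])

# Unpatched: BangParser.check() also accepted '?', so "?images" was consumed
# as a category selector -> getQuery() == "test" and enginerefs is non-empty.
# Patched: '?' is no longer special -> "?images" stays in the plain user query.
print(q.getQuery())    # patched: "?images test"
print(q.enginerefs)    # patched: []  (no category engines were added)
print(q.specific)      # False either way; only a leading '!' ever set this flag
```

With the patch applied, a `?` prefix is handled like ordinary query text, which matches the issue's suggestion to remove (rather than keep) the `?` syntax.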
gh_patches_debug_500
rasdani/github-patches
git_diff
scikit-hep__uproot5-270
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Histogram protocol issue? I am confused about `PlottableAxisContinuous.__getitem__`: for uproot4 it seems to give the one-indexed values where I was expecting zero-indexed, since `__len__` gives the number of bins without flow. For example: ``` >>> h = uproot.open('https://raw.githubusercontent.com/CoffeaTeam/coffea/master/tests/samples/testSF2d.histo.root:scalefactors_Tight_Electron') >>> axis = h.axes[0] >>> len(axis) 10 >>> axis.edges() array([-2.5 , -2. , -1.566, -1.444, -0.8 , 0. , 0.8 , 1.444, 1.566, 2. , 2.5 ]) >>> axis.edges().shape (11,) >>> axis[0] (-2.0, -1.566) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `uproot/behaviors/TAxis.py` Content: ``` 1 # BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/main/LICENSE 2 3 """ 4 This module defines the behaviors of ``TAxis``, an axis of a histogram or profile plot. 5 """ 6 7 from __future__ import absolute_import 8 9 try: 10 from collections.abc import Sequence 11 except ImportError: 12 from collections import Sequence 13 14 import numpy 15 16 17 class AxisTraits(object): 18 """ 19 Describes read-only properties of a histogram axis. 20 21 For example, ``axis.traits.discrete`` is True if the histogram has 22 labels; False otherwise. 23 """ 24 25 def __init__(self, axis): 26 self._axis = axis 27 28 def __repr__(self): 29 return "AxisTraits({0})".format(repr(self._axis)) 30 31 @property 32 def circular(self): 33 """ 34 True if the axis "wraps around" (always False for ROOT histograms). 35 """ 36 return False 37 38 @property 39 def discrete(self): 40 """ 41 True if bins are discrete: if they have string-valued labels. 42 """ 43 fNbins = self._axis.member("fNbins") 44 fLabels = self._axis.member("fLabels", none_if_missing=True) 45 return fLabels is not None and len(fLabels) == fNbins 46 47 48 class TAxis(Sequence): 49 def __len__(self): 50 """ 51 The number of bins in the axis. 52 """ 53 return self.member("fNbins") 54 55 def __getitem__(self, where): 56 """ 57 Returns the label at ``where`` if it exists or the interval at ``where``. 58 59 The indexing assumes that ``flow=False``. 60 """ 61 fNbins = self.member("fNbins") 62 fXbins = self.member("fXbins", none_if_missing=True) 63 fLabels = self.member("fLabels", none_if_missing=True) 64 65 if fLabels is not None and len(fLabels) == fNbins: 66 return str(fLabels[where]) 67 68 elif fXbins is None or len(fXbins) != fNbins + 1: 69 fXmin, fXmax = self.member("fXmin"), self.member("fXmax") 70 low = (fXmax - fXmin) * (where) / float(fNbins) + fXmin 71 high = (fXmax - fXmin) * (where + 1) / float(fNbins) + fXmin 72 return low, high 73 74 else: 75 return fXbins[where + 1], fXbins[where + 2] 76 77 def __iter__(self): 78 """ 79 Iterate over the output of ``__getitem__``. 80 """ 81 fNbins = self.member("fNbins") 82 fLabels = self.member("fLabels", none_if_missing=True) 83 84 if fLabels is not None and len(fLabels) == fNbins: 85 for x in fLabels: 86 yield str(x) 87 else: 88 for low, high in self.intervals(): 89 yield low, high 90 91 def __eq__(self, other): 92 """ 93 Two axes are equal if they have the same type and 94 ``list(self) == list(other)``. 
95 """ 96 if type(self) is not type(other): 97 return False 98 99 self_fNbins = self.member("fNbins") 100 other_fNbins = other.member("fNbins") 101 if self_fNbins != other_fNbins: 102 return False 103 104 self_fLabels = self.member("fLabels", none_if_missing=True) 105 other_fLabels = other.member("fLabels", none_if_missing=True) 106 self_labeled = self_fLabels is not None and len(self_fLabels) == self_fNbins 107 other_labeled = other_fLabels is not None and len(other_fLabels) == other_fNbins 108 109 if self_labeled and other_labeled: 110 return all(x == y for x, y in zip(self_fLabels, other_fLabels)) 111 elif not self_labeled and not other_labeled: 112 return numpy.array_equal(self.edges(), other.edges()) 113 else: 114 return False 115 116 def __ne__(self, other): 117 """ 118 Some versions of Python don't automatically negate __eq__. 119 """ 120 return not self.__eq__(other) 121 122 @property 123 def traits(self): 124 """ 125 Describes read-only properties of a histogram axis. 126 127 For example, ``axis.traits.discrete`` is True if the histogram has 128 labels; False otherwise. 129 """ 130 return AxisTraits(self) 131 132 @property 133 def low(self): 134 """ 135 The low edge of the first normal (finite-width) bin. 136 137 For ROOT histograms, numerical edges exist even if the axis also has 138 string-valued labels. 139 """ 140 return self.member("fXmin") 141 142 @property 143 def high(self): 144 """ 145 The high edge of the last normal (finite-width) bin. 146 147 For ROOT histograms, numerical edges exist even if the axis also has 148 string-valued labels. 149 """ 150 return self.member("fXmax") 151 152 @property 153 def width(self): 154 """ 155 The average bin width (or only bin width if the binning is uniform). 156 """ 157 fNbins = self.member("fNbins") 158 fXbins = self.member("fXbins", none_if_missing=True) 159 160 if fXbins is None or len(fXbins) != fNbins + 1: 161 return (self.member("fXmax") - self.member("fXmin")) / fNbins 162 else: 163 return self.widths().mean() 164 165 def labels(self, flow=False): 166 """ 167 Args: 168 flow (bool): If True, include ``"underflow"`` and ``"overflow"`` 169 before and after the normal (finite-width) bin labels (if they 170 exist). 171 172 If string-valued labels exist, this returns them as a Python list of 173 Python strings. Otherwise, this returns None. 174 175 Setting ``flow=True`` increases the length of the output by two. 176 """ 177 fNbins = self.member("fNbins") 178 fLabels = self.member("fLabels", none_if_missing=True) 179 180 if fLabels is not None and len(fLabels) == fNbins: 181 out = [str(x) for x in fLabels] 182 if flow: 183 return ["underflow"] + out + ["overflow"] 184 else: 185 return out 186 else: 187 return None 188 189 def edges(self, flow=False): 190 """ 191 Args: 192 flow (bool): If True, include ``-inf`` and ``inf`` before and 193 after the normal (finite-width) bin edges. 194 195 Returns numerical edges between bins as a one-dimensional ``numpy.ndarray`` 196 of ``numpy.float64``. 197 198 Even with ``flow=False``, the number of edges is *one greater than* the 199 number of normal (finite-width) bins because they represent "fenceposts" 200 between the bins, including one below and one above the full range. 201 202 Setting ``flow=True`` increases the length of the output by two. 203 204 For ROOT histograms, numerical edges exist even if the axis also has 205 string-valued labels. 
206 """ 207 fNbins = self.member("fNbins") 208 fXbins = self.member("fXbins", none_if_missing=True) 209 210 if fXbins is None or len(fXbins) != fNbins + 1: 211 fXbins = numpy.linspace( 212 self.member("fXmin"), self.member("fXmax"), fNbins + 1 213 ) 214 215 if flow: 216 out = numpy.empty(fNbins + 3, dtype=numpy.float64) 217 out[0] = -numpy.inf 218 out[-1] = numpy.inf 219 out[1:-1] = fXbins 220 else: 221 out = numpy.asarray(fXbins, dtype=fXbins.dtype.newbyteorder("=")) 222 223 return out 224 225 def intervals(self, flow=False): 226 """ 227 Args: 228 flow (bool): If True, include ``[-inf, min]`` and ``[max, inf]`` 229 before and after the normal (finite-width) intervals. 230 231 Returns low, high pairs for each bin interval as a two-dimensional 232 ``numpy.ndarray`` of ``numpy.float64``. 233 234 With ``flow=False``, the number of intervals is equal to the number of 235 normal (finite-width) bins. 236 237 Setting ``flow=True`` increases the length of the output by two. 238 239 For ROOT histograms, numerical intervals exist even if the axis also has 240 string-valued labels. 241 """ 242 fNbins = self.member("fNbins") 243 fXbins = self.member("fXbins", none_if_missing=True) 244 245 if fXbins is None or len(fXbins) != fNbins + 1: 246 fXbins = numpy.linspace( 247 self.member("fXmin"), self.member("fXmax"), fNbins + 1 248 ) 249 250 if flow: 251 out = numpy.empty((fNbins + 2, 2), dtype=numpy.float64) 252 out[0, 0] = -numpy.inf 253 out[-1, 1] = numpy.inf 254 out[1:, 0] = fXbins 255 out[:-1, 1] = fXbins 256 else: 257 out = numpy.empty((fNbins, 2), dtype=numpy.float64) 258 out[:, 0] = fXbins[:-1] 259 out[:, 1] = fXbins[1:] 260 261 return out 262 263 def centers(self, flow=False): 264 """ 265 Args: 266 flow (bool): If True, include ``-inf`` and ``inf`` before and after 267 the normal (finite) bin centers. 268 269 Returns bin center positions as a one-dimensional ``numpy.ndarray`` of 270 ``numpy.float64``. 271 272 With ``flow=False``, the number of bin centers is equal to the number of 273 normal (finite-width) bins. 274 275 Setting ``flow=True`` increases the length of the output by two. 276 277 For ROOT histograms, numerical bin centers exist even if the axis also has 278 string-valued labels. 279 """ 280 edges = self.edges(flow=flow) 281 return (edges[1:] + edges[:-1]) / 2.0 282 283 def widths(self, flow=False): 284 """ 285 Args: 286 flow (bool): If True, include ``-inf`` and ``inf`` before and after 287 the normal (finite) bin widths. 288 289 Returns bin widths as a one-dimensional ``numpy.ndarray`` of 290 ``numpy.float64``. 291 292 With ``flow=False``, the number of bin widths is equal to the number of 293 normal (finite-width) bins. 294 295 Setting ``flow=True`` increases the length of the output by two. 296 297 For ROOT histograms, numerical bin widths exist even if the axis also has 298 string-valued labels. 299 """ 300 fNbins = self.member("fNbins") 301 fXbins = self.member("fXbins", none_if_missing=True) 302 303 if not flow and (fXbins is None or len(fXbins) != fNbins + 1): 304 width = (self.member("fXmax") - self.member("fXmin")) / fNbins 305 return numpy.broadcast_to(width, (fNbins,)) 306 else: 307 edges = self.edges(flow=flow) 308 return edges[1:] - edges[:-1] 309 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/uproot/behaviors/TAxis.py b/uproot/behaviors/TAxis.py --- a/uproot/behaviors/TAxis.py +++ b/uproot/behaviors/TAxis.py @@ -72,7 +72,7 @@ return low, high else: - return fXbins[where + 1], fXbins[where + 2] + return fXbins[where], fXbins[where + 1] def __iter__(self): """
{"golden_diff": "diff --git a/uproot/behaviors/TAxis.py b/uproot/behaviors/TAxis.py\n--- a/uproot/behaviors/TAxis.py\n+++ b/uproot/behaviors/TAxis.py\n@@ -72,7 +72,7 @@\n return low, high\n \n else:\n- return fXbins[where + 1], fXbins[where + 2]\n+ return fXbins[where], fXbins[where + 1]\n \n def __iter__(self):\n \"\"\"\n", "issue": "Histogram protocol issue?\nI am confused about `PlottableAxisContinuous.__getitem__`: for uproot4 it seems to give the one-indexed values where I was expecting zero-indexed, since `__len__` gives the number of bins without flow. For example:\r\n```\r\n>>> h = uproot.open('https://raw.githubusercontent.com/CoffeaTeam/coffea/master/tests/samples/testSF2d.histo.root:scalefactors_Tight_Electron')\r\n>>> axis = h.axes[0]\r\n>>> len(axis)\r\n10\r\n>>> axis.edges()\r\narray([-2.5 , -2. , -1.566, -1.444, -0.8 , 0. , 0.8 , 1.444,\r\n 1.566, 2. , 2.5 ])\r\n>>> axis.edges().shape\r\n(11,)\r\n>>> axis[0]\r\n(-2.0, -1.566)\r\n```\n", "before_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/main/LICENSE\n\n\"\"\"\nThis module defines the behaviors of ``TAxis``, an axis of a histogram or profile plot.\n\"\"\"\n\nfrom __future__ import absolute_import\n\ntry:\n from collections.abc import Sequence\nexcept ImportError:\n from collections import Sequence\n\nimport numpy\n\n\nclass AxisTraits(object):\n \"\"\"\n Describes read-only properties of a histogram axis.\n\n For example, ``axis.traits.discrete`` is True if the histogram has\n labels; False otherwise.\n \"\"\"\n\n def __init__(self, axis):\n self._axis = axis\n\n def __repr__(self):\n return \"AxisTraits({0})\".format(repr(self._axis))\n\n @property\n def circular(self):\n \"\"\"\n True if the axis \"wraps around\" (always False for ROOT histograms).\n \"\"\"\n return False\n\n @property\n def discrete(self):\n \"\"\"\n True if bins are discrete: if they have string-valued labels.\n \"\"\"\n fNbins = self._axis.member(\"fNbins\")\n fLabels = self._axis.member(\"fLabels\", none_if_missing=True)\n return fLabels is not None and len(fLabels) == fNbins\n\n\nclass TAxis(Sequence):\n def __len__(self):\n \"\"\"\n The number of bins in the axis.\n \"\"\"\n return self.member(\"fNbins\")\n\n def __getitem__(self, where):\n \"\"\"\n Returns the label at ``where`` if it exists or the interval at ``where``.\n\n The indexing assumes that ``flow=False``.\n \"\"\"\n fNbins = self.member(\"fNbins\")\n fXbins = self.member(\"fXbins\", none_if_missing=True)\n fLabels = self.member(\"fLabels\", none_if_missing=True)\n\n if fLabels is not None and len(fLabels) == fNbins:\n return str(fLabels[where])\n\n elif fXbins is None or len(fXbins) != fNbins + 1:\n fXmin, fXmax = self.member(\"fXmin\"), self.member(\"fXmax\")\n low = (fXmax - fXmin) * (where) / float(fNbins) + fXmin\n high = (fXmax - fXmin) * (where + 1) / float(fNbins) + fXmin\n return low, high\n\n else:\n return fXbins[where + 1], fXbins[where + 2]\n\n def __iter__(self):\n \"\"\"\n Iterate over the output of ``__getitem__``.\n \"\"\"\n fNbins = self.member(\"fNbins\")\n fLabels = self.member(\"fLabels\", none_if_missing=True)\n\n if fLabels is not None and len(fLabels) == fNbins:\n for x in fLabels:\n yield str(x)\n else:\n for low, high in self.intervals():\n yield low, high\n\n def __eq__(self, other):\n \"\"\"\n Two axes are equal if they have the same type and\n ``list(self) == list(other)``.\n \"\"\"\n if type(self) is not type(other):\n return False\n\n self_fNbins = self.member(\"fNbins\")\n other_fNbins = 
other.member(\"fNbins\")\n if self_fNbins != other_fNbins:\n return False\n\n self_fLabels = self.member(\"fLabels\", none_if_missing=True)\n other_fLabels = other.member(\"fLabels\", none_if_missing=True)\n self_labeled = self_fLabels is not None and len(self_fLabels) == self_fNbins\n other_labeled = other_fLabels is not None and len(other_fLabels) == other_fNbins\n\n if self_labeled and other_labeled:\n return all(x == y for x, y in zip(self_fLabels, other_fLabels))\n elif not self_labeled and not other_labeled:\n return numpy.array_equal(self.edges(), other.edges())\n else:\n return False\n\n def __ne__(self, other):\n \"\"\"\n Some versions of Python don't automatically negate __eq__.\n \"\"\"\n return not self.__eq__(other)\n\n @property\n def traits(self):\n \"\"\"\n Describes read-only properties of a histogram axis.\n\n For example, ``axis.traits.discrete`` is True if the histogram has\n labels; False otherwise.\n \"\"\"\n return AxisTraits(self)\n\n @property\n def low(self):\n \"\"\"\n The low edge of the first normal (finite-width) bin.\n\n For ROOT histograms, numerical edges exist even if the axis also has\n string-valued labels.\n \"\"\"\n return self.member(\"fXmin\")\n\n @property\n def high(self):\n \"\"\"\n The high edge of the last normal (finite-width) bin.\n\n For ROOT histograms, numerical edges exist even if the axis also has\n string-valued labels.\n \"\"\"\n return self.member(\"fXmax\")\n\n @property\n def width(self):\n \"\"\"\n The average bin width (or only bin width if the binning is uniform).\n \"\"\"\n fNbins = self.member(\"fNbins\")\n fXbins = self.member(\"fXbins\", none_if_missing=True)\n\n if fXbins is None or len(fXbins) != fNbins + 1:\n return (self.member(\"fXmax\") - self.member(\"fXmin\")) / fNbins\n else:\n return self.widths().mean()\n\n def labels(self, flow=False):\n \"\"\"\n Args:\n flow (bool): If True, include ``\"underflow\"`` and ``\"overflow\"``\n before and after the normal (finite-width) bin labels (if they\n exist).\n\n If string-valued labels exist, this returns them as a Python list of\n Python strings. 
Otherwise, this returns None.\n\n Setting ``flow=True`` increases the length of the output by two.\n \"\"\"\n fNbins = self.member(\"fNbins\")\n fLabels = self.member(\"fLabels\", none_if_missing=True)\n\n if fLabels is not None and len(fLabels) == fNbins:\n out = [str(x) for x in fLabels]\n if flow:\n return [\"underflow\"] + out + [\"overflow\"]\n else:\n return out\n else:\n return None\n\n def edges(self, flow=False):\n \"\"\"\n Args:\n flow (bool): If True, include ``-inf`` and ``inf`` before and\n after the normal (finite-width) bin edges.\n\n Returns numerical edges between bins as a one-dimensional ``numpy.ndarray``\n of ``numpy.float64``.\n\n Even with ``flow=False``, the number of edges is *one greater than* the\n number of normal (finite-width) bins because they represent \"fenceposts\"\n between the bins, including one below and one above the full range.\n\n Setting ``flow=True`` increases the length of the output by two.\n\n For ROOT histograms, numerical edges exist even if the axis also has\n string-valued labels.\n \"\"\"\n fNbins = self.member(\"fNbins\")\n fXbins = self.member(\"fXbins\", none_if_missing=True)\n\n if fXbins is None or len(fXbins) != fNbins + 1:\n fXbins = numpy.linspace(\n self.member(\"fXmin\"), self.member(\"fXmax\"), fNbins + 1\n )\n\n if flow:\n out = numpy.empty(fNbins + 3, dtype=numpy.float64)\n out[0] = -numpy.inf\n out[-1] = numpy.inf\n out[1:-1] = fXbins\n else:\n out = numpy.asarray(fXbins, dtype=fXbins.dtype.newbyteorder(\"=\"))\n\n return out\n\n def intervals(self, flow=False):\n \"\"\"\n Args:\n flow (bool): If True, include ``[-inf, min]`` and ``[max, inf]``\n before and after the normal (finite-width) intervals.\n\n Returns low, high pairs for each bin interval as a two-dimensional\n ``numpy.ndarray`` of ``numpy.float64``.\n\n With ``flow=False``, the number of intervals is equal to the number of\n normal (finite-width) bins.\n\n Setting ``flow=True`` increases the length of the output by two.\n\n For ROOT histograms, numerical intervals exist even if the axis also has\n string-valued labels.\n \"\"\"\n fNbins = self.member(\"fNbins\")\n fXbins = self.member(\"fXbins\", none_if_missing=True)\n\n if fXbins is None or len(fXbins) != fNbins + 1:\n fXbins = numpy.linspace(\n self.member(\"fXmin\"), self.member(\"fXmax\"), fNbins + 1\n )\n\n if flow:\n out = numpy.empty((fNbins + 2, 2), dtype=numpy.float64)\n out[0, 0] = -numpy.inf\n out[-1, 1] = numpy.inf\n out[1:, 0] = fXbins\n out[:-1, 1] = fXbins\n else:\n out = numpy.empty((fNbins, 2), dtype=numpy.float64)\n out[:, 0] = fXbins[:-1]\n out[:, 1] = fXbins[1:]\n\n return out\n\n def centers(self, flow=False):\n \"\"\"\n Args:\n flow (bool): If True, include ``-inf`` and ``inf`` before and after\n the normal (finite) bin centers.\n\n Returns bin center positions as a one-dimensional ``numpy.ndarray`` of\n ``numpy.float64``.\n\n With ``flow=False``, the number of bin centers is equal to the number of\n normal (finite-width) bins.\n\n Setting ``flow=True`` increases the length of the output by two.\n\n For ROOT histograms, numerical bin centers exist even if the axis also has\n string-valued labels.\n \"\"\"\n edges = self.edges(flow=flow)\n return (edges[1:] + edges[:-1]) / 2.0\n\n def widths(self, flow=False):\n \"\"\"\n Args:\n flow (bool): If True, include ``-inf`` and ``inf`` before and after\n the normal (finite) bin widths.\n\n Returns bin widths as a one-dimensional ``numpy.ndarray`` of\n ``numpy.float64``.\n\n With ``flow=False``, the number of bin widths is equal to the number of\n 
normal (finite-width) bins.\n\n Setting ``flow=True`` increases the length of the output by two.\n\n For ROOT histograms, numerical bin widths exist even if the axis also has\n string-valued labels.\n \"\"\"\n fNbins = self.member(\"fNbins\")\n fXbins = self.member(\"fXbins\", none_if_missing=True)\n\n if not flow and (fXbins is None or len(fXbins) != fNbins + 1):\n width = (self.member(\"fXmax\") - self.member(\"fXmin\")) / fNbins\n return numpy.broadcast_to(width, (fNbins,))\n else:\n edges = self.edges(flow=flow)\n return edges[1:] - edges[:-1]\n", "path": "uproot/behaviors/TAxis.py"}], "after_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/main/LICENSE\n\n\"\"\"\nThis module defines the behaviors of ``TAxis``, an axis of a histogram or profile plot.\n\"\"\"\n\nfrom __future__ import absolute_import\n\ntry:\n from collections.abc import Sequence\nexcept ImportError:\n from collections import Sequence\n\nimport numpy\n\n\nclass AxisTraits(object):\n \"\"\"\n Describes read-only properties of a histogram axis.\n\n For example, ``axis.traits.discrete`` is True if the histogram has\n labels; False otherwise.\n \"\"\"\n\n def __init__(self, axis):\n self._axis = axis\n\n def __repr__(self):\n return \"AxisTraits({0})\".format(repr(self._axis))\n\n @property\n def circular(self):\n \"\"\"\n True if the axis \"wraps around\" (always False for ROOT histograms).\n \"\"\"\n return False\n\n @property\n def discrete(self):\n \"\"\"\n True if bins are discrete: if they have string-valued labels.\n \"\"\"\n fNbins = self._axis.member(\"fNbins\")\n fLabels = self._axis.member(\"fLabels\", none_if_missing=True)\n return fLabels is not None and len(fLabels) == fNbins\n\n\nclass TAxis(Sequence):\n def __len__(self):\n \"\"\"\n The number of bins in the axis.\n \"\"\"\n return self.member(\"fNbins\")\n\n def __getitem__(self, where):\n \"\"\"\n Returns the label at ``where`` if it exists or the interval at ``where``.\n\n The indexing assumes that ``flow=False``.\n \"\"\"\n fNbins = self.member(\"fNbins\")\n fXbins = self.member(\"fXbins\", none_if_missing=True)\n fLabels = self.member(\"fLabels\", none_if_missing=True)\n\n if fLabels is not None and len(fLabels) == fNbins:\n return str(fLabels[where])\n\n elif fXbins is None or len(fXbins) != fNbins + 1:\n fXmin, fXmax = self.member(\"fXmin\"), self.member(\"fXmax\")\n low = (fXmax - fXmin) * (where) / float(fNbins) + fXmin\n high = (fXmax - fXmin) * (where + 1) / float(fNbins) + fXmin\n return low, high\n\n else:\n return fXbins[where], fXbins[where + 1]\n\n def __iter__(self):\n \"\"\"\n Iterate over the output of ``__getitem__``.\n \"\"\"\n fNbins = self.member(\"fNbins\")\n fLabels = self.member(\"fLabels\", none_if_missing=True)\n\n if fLabels is not None and len(fLabels) == fNbins:\n for x in fLabels:\n yield str(x)\n else:\n for low, high in self.intervals():\n yield low, high\n\n def __eq__(self, other):\n \"\"\"\n Two axes are equal if they have the same type and\n ``list(self) == list(other)``.\n \"\"\"\n if type(self) is not type(other):\n return False\n\n self_fNbins = self.member(\"fNbins\")\n other_fNbins = other.member(\"fNbins\")\n if self_fNbins != other_fNbins:\n return False\n\n self_fLabels = self.member(\"fLabels\", none_if_missing=True)\n other_fLabels = other.member(\"fLabels\", none_if_missing=True)\n self_labeled = self_fLabels is not None and len(self_fLabels) == self_fNbins\n other_labeled = other_fLabels is not None and len(other_fLabels) == other_fNbins\n\n if self_labeled and 
other_labeled:\n return all(x == y for x, y in zip(self_fLabels, other_fLabels))\n elif not self_labeled and not other_labeled:\n return numpy.array_equal(self.edges(), other.edges())\n else:\n return False\n\n def __ne__(self, other):\n \"\"\"\n Some versions of Python don't automatically negate __eq__.\n \"\"\"\n return not self.__eq__(other)\n\n @property\n def traits(self):\n \"\"\"\n Describes read-only properties of a histogram axis.\n\n For example, ``axis.traits.discrete`` is True if the histogram has\n labels; False otherwise.\n \"\"\"\n return AxisTraits(self)\n\n @property\n def low(self):\n \"\"\"\n The low edge of the first normal (finite-width) bin.\n\n For ROOT histograms, numerical edges exist even if the axis also has\n string-valued labels.\n \"\"\"\n return self.member(\"fXmin\")\n\n @property\n def high(self):\n \"\"\"\n The high edge of the last normal (finite-width) bin.\n\n For ROOT histograms, numerical edges exist even if the axis also has\n string-valued labels.\n \"\"\"\n return self.member(\"fXmax\")\n\n @property\n def width(self):\n \"\"\"\n The average bin width (or only bin width if the binning is uniform).\n \"\"\"\n fNbins = self.member(\"fNbins\")\n fXbins = self.member(\"fXbins\", none_if_missing=True)\n\n if fXbins is None or len(fXbins) != fNbins + 1:\n return (self.member(\"fXmax\") - self.member(\"fXmin\")) / fNbins\n else:\n return self.widths().mean()\n\n def labels(self, flow=False):\n \"\"\"\n Args:\n flow (bool): If True, include ``\"underflow\"`` and ``\"overflow\"``\n before and after the normal (finite-width) bin labels (if they\n exist).\n\n If string-valued labels exist, this returns them as a Python list of\n Python strings. Otherwise, this returns None.\n\n Setting ``flow=True`` increases the length of the output by two.\n \"\"\"\n fNbins = self.member(\"fNbins\")\n fLabels = self.member(\"fLabels\", none_if_missing=True)\n\n if fLabels is not None and len(fLabels) == fNbins:\n out = [str(x) for x in fLabels]\n if flow:\n return [\"underflow\"] + out + [\"overflow\"]\n else:\n return out\n else:\n return None\n\n def edges(self, flow=False):\n \"\"\"\n Args:\n flow (bool): If True, include ``-inf`` and ``inf`` before and\n after the normal (finite-width) bin edges.\n\n Returns numerical edges between bins as a one-dimensional ``numpy.ndarray``\n of ``numpy.float64``.\n\n Even with ``flow=False``, the number of edges is *one greater than* the\n number of normal (finite-width) bins because they represent \"fenceposts\"\n between the bins, including one below and one above the full range.\n\n Setting ``flow=True`` increases the length of the output by two.\n\n For ROOT histograms, numerical edges exist even if the axis also has\n string-valued labels.\n \"\"\"\n fNbins = self.member(\"fNbins\")\n fXbins = self.member(\"fXbins\", none_if_missing=True)\n\n if fXbins is None or len(fXbins) != fNbins + 1:\n fXbins = numpy.linspace(\n self.member(\"fXmin\"), self.member(\"fXmax\"), fNbins + 1\n )\n\n if flow:\n out = numpy.empty(fNbins + 3, dtype=numpy.float64)\n out[0] = -numpy.inf\n out[-1] = numpy.inf\n out[1:-1] = fXbins\n else:\n out = numpy.asarray(fXbins, dtype=fXbins.dtype.newbyteorder(\"=\"))\n\n return out\n\n def intervals(self, flow=False):\n \"\"\"\n Args:\n flow (bool): If True, include ``[-inf, min]`` and ``[max, inf]``\n before and after the normal (finite-width) intervals.\n\n Returns low, high pairs for each bin interval as a two-dimensional\n ``numpy.ndarray`` of ``numpy.float64``.\n\n With ``flow=False``, the number of 
intervals is equal to the number of\n normal (finite-width) bins.\n\n Setting ``flow=True`` increases the length of the output by two.\n\n For ROOT histograms, numerical intervals exist even if the axis also has\n string-valued labels.\n \"\"\"\n fNbins = self.member(\"fNbins\")\n fXbins = self.member(\"fXbins\", none_if_missing=True)\n\n if fXbins is None or len(fXbins) != fNbins + 1:\n fXbins = numpy.linspace(\n self.member(\"fXmin\"), self.member(\"fXmax\"), fNbins + 1\n )\n\n if flow:\n out = numpy.empty((fNbins + 2, 2), dtype=numpy.float64)\n out[0, 0] = -numpy.inf\n out[-1, 1] = numpy.inf\n out[1:, 0] = fXbins\n out[:-1, 1] = fXbins\n else:\n out = numpy.empty((fNbins, 2), dtype=numpy.float64)\n out[:, 0] = fXbins[:-1]\n out[:, 1] = fXbins[1:]\n\n return out\n\n def centers(self, flow=False):\n \"\"\"\n Args:\n flow (bool): If True, include ``-inf`` and ``inf`` before and after\n the normal (finite) bin centers.\n\n Returns bin center positions as a one-dimensional ``numpy.ndarray`` of\n ``numpy.float64``.\n\n With ``flow=False``, the number of bin centers is equal to the number of\n normal (finite-width) bins.\n\n Setting ``flow=True`` increases the length of the output by two.\n\n For ROOT histograms, numerical bin centers exist even if the axis also has\n string-valued labels.\n \"\"\"\n edges = self.edges(flow=flow)\n return (edges[1:] + edges[:-1]) / 2.0\n\n def widths(self, flow=False):\n \"\"\"\n Args:\n flow (bool): If True, include ``-inf`` and ``inf`` before and after\n the normal (finite) bin widths.\n\n Returns bin widths as a one-dimensional ``numpy.ndarray`` of\n ``numpy.float64``.\n\n With ``flow=False``, the number of bin widths is equal to the number of\n normal (finite-width) bins.\n\n Setting ``flow=True`` increases the length of the output by two.\n\n For ROOT histograms, numerical bin widths exist even if the axis also has\n string-valued labels.\n \"\"\"\n fNbins = self.member(\"fNbins\")\n fXbins = self.member(\"fXbins\", none_if_missing=True)\n\n if not flow and (fXbins is None or len(fXbins) != fNbins + 1):\n width = (self.member(\"fXmax\") - self.member(\"fXmin\")) / fNbins\n return numpy.broadcast_to(width, (fNbins,))\n else:\n edges = self.edges(flow=flow)\n return edges[1:] - edges[:-1]\n", "path": "uproot/behaviors/TAxis.py"}]}
3,811
114
gh_patches_debug_63531
rasdani/github-patches
git_diff
MongoEngine__mongoengine-2224
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- New release Hi, When is coming new release, because I can't update to mongodb 4.2 because of this: https://github.com/MongoEngine/mongoengine/pull/2160/commits/47f8a126ca167cb8fe020e3cc5604b155dfcdebc. Thanks --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mongoengine/__init__.py` Content: ``` 1 # Import submodules so that we can expose their __all__ 2 from mongoengine import connection 3 from mongoengine import document 4 from mongoengine import errors 5 from mongoengine import fields 6 from mongoengine import queryset 7 from mongoengine import signals 8 9 # Import everything from each submodule so that it can be accessed via 10 # mongoengine, e.g. instead of `from mongoengine.connection import connect`, 11 # users can simply use `from mongoengine import connect`, or even 12 # `from mongoengine import *` and then `connect('testdb')`. 13 from mongoengine.connection import * 14 from mongoengine.document import * 15 from mongoengine.errors import * 16 from mongoengine.fields import * 17 from mongoengine.queryset import * 18 from mongoengine.signals import * 19 20 21 __all__ = ( 22 list(document.__all__) 23 + list(fields.__all__) 24 + list(connection.__all__) 25 + list(queryset.__all__) 26 + list(signals.__all__) 27 + list(errors.__all__) 28 ) 29 30 31 VERSION = (0, 18, 2) 32 33 34 def get_version(): 35 """Return the VERSION as a string. 36 37 For example, if `VERSION == (0, 10, 7)`, return '0.10.7'. 38 """ 39 return ".".join(map(str, VERSION)) 40 41 42 __version__ = get_version() 43 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mongoengine/__init__.py b/mongoengine/__init__.py --- a/mongoengine/__init__.py +++ b/mongoengine/__init__.py @@ -28,7 +28,7 @@ ) -VERSION = (0, 18, 2) +VERSION = (0, 19, 0) def get_version():
{"golden_diff": "diff --git a/mongoengine/__init__.py b/mongoengine/__init__.py\n--- a/mongoengine/__init__.py\n+++ b/mongoengine/__init__.py\n@@ -28,7 +28,7 @@\n )\n \n \n-VERSION = (0, 18, 2)\n+VERSION = (0, 19, 0)\n \n \n def get_version():\n", "issue": "New release\nHi,\r\n\r\nWhen is coming new release, because I can't update to mongodb 4.2 because of this: https://github.com/MongoEngine/mongoengine/pull/2160/commits/47f8a126ca167cb8fe020e3cc5604b155dfcdebc.\r\n\r\nThanks\n", "before_files": [{"content": "# Import submodules so that we can expose their __all__\nfrom mongoengine import connection\nfrom mongoengine import document\nfrom mongoengine import errors\nfrom mongoengine import fields\nfrom mongoengine import queryset\nfrom mongoengine import signals\n\n# Import everything from each submodule so that it can be accessed via\n# mongoengine, e.g. instead of `from mongoengine.connection import connect`,\n# users can simply use `from mongoengine import connect`, or even\n# `from mongoengine import *` and then `connect('testdb')`.\nfrom mongoengine.connection import *\nfrom mongoengine.document import *\nfrom mongoengine.errors import *\nfrom mongoengine.fields import *\nfrom mongoengine.queryset import *\nfrom mongoengine.signals import *\n\n\n__all__ = (\n list(document.__all__)\n + list(fields.__all__)\n + list(connection.__all__)\n + list(queryset.__all__)\n + list(signals.__all__)\n + list(errors.__all__)\n)\n\n\nVERSION = (0, 18, 2)\n\n\ndef get_version():\n \"\"\"Return the VERSION as a string.\n\n For example, if `VERSION == (0, 10, 7)`, return '0.10.7'.\n \"\"\"\n return \".\".join(map(str, VERSION))\n\n\n__version__ = get_version()\n", "path": "mongoengine/__init__.py"}], "after_files": [{"content": "# Import submodules so that we can expose their __all__\nfrom mongoengine import connection\nfrom mongoengine import document\nfrom mongoengine import errors\nfrom mongoengine import fields\nfrom mongoengine import queryset\nfrom mongoengine import signals\n\n# Import everything from each submodule so that it can be accessed via\n# mongoengine, e.g. instead of `from mongoengine.connection import connect`,\n# users can simply use `from mongoengine import connect`, or even\n# `from mongoengine import *` and then `connect('testdb')`.\nfrom mongoengine.connection import *\nfrom mongoengine.document import *\nfrom mongoengine.errors import *\nfrom mongoengine.fields import *\nfrom mongoengine.queryset import *\nfrom mongoengine.signals import *\n\n\n__all__ = (\n list(document.__all__)\n + list(fields.__all__)\n + list(connection.__all__)\n + list(queryset.__all__)\n + list(signals.__all__)\n + list(errors.__all__)\n)\n\n\nVERSION = (0, 19, 0)\n\n\ndef get_version():\n \"\"\"Return the VERSION as a string.\n\n For example, if `VERSION == (0, 10, 7)`, return '0.10.7'.\n \"\"\"\n return \".\".join(map(str, VERSION))\n\n\n__version__ = get_version()\n", "path": "mongoengine/__init__.py"}]}
700
85
gh_patches_debug_10162
rasdani/github-patches
git_diff
pytorch__text-76
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Consistency with sorting: `sort=True` **Problem:** ``` train_iter, dev_iter, test_iter = data.BucketIterator.splits( (train, dev, test), batch_sizes=(32, 256, 256), sort_key=lambda x: len(x.input), sort=True, device=-1) # Use CPU ``` If `sort=True` and `train=True`, then the `train_iter` batches are shuffled. This behavior is unexpected. **Cause:** Because by default `self.shuffle=True` is `train=True`. Then https://github.com/pytorch/text/blob/master/torchtext/data/iterator.py#L113 `shuffle` overrides `sort`. **Possible Solution:** `sort=True` should override `shuffle=None and train=True`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `torchtext/data/iterator.py` Content: ``` 1 import math 2 import random 3 from contextlib import contextmanager 4 from copy import deepcopy 5 6 from .batch import Batch 7 from .dataset import Dataset 8 9 10 class RandomShuffler(object): 11 """Use random functions while keeping track of the random state to make it 12 reproducible and deterministic.""" 13 14 def __init__(self, random_state=None): 15 self._random_state = random_state 16 if self._random_state is None: 17 self._random_state = random.getstate() 18 19 @contextmanager 20 def use_internal_state(self): 21 """Use a specific RNG state.""" 22 old_state = random.getstate() 23 random.setstate(self._random_state) 24 yield 25 self._random_state = random.getstate() 26 random.setstate(old_state) 27 28 @property 29 def random_state(self): 30 return deepcopy(self._random_state) 31 32 @random_state.setter 33 def random_state(self, s): 34 self._random_state = s 35 36 def __call__(self, data): 37 """Shuffle and return a new list.""" 38 with self.use_internal_state(): 39 return random.sample(data, len(data)) 40 41 42 class Iterator(object): 43 """Defines an iterator that loads batches of data from a Dataset. 44 45 Attributes: 46 dataset: The Dataset object to load Examples from. 47 batch_size: Batch size. 48 batch_size_fn: Function of three arguments (new example to add, current 49 count of examples in the batch, and current effective batch size) 50 that returns the new effective batch size resulting from adding 51 that example to a batch. This is useful for dynamic batching, where 52 this function would add to the current effective batch size the 53 number of tokens in the new example. 54 sort_key: A key to use for sorting examples in order to batch together 55 examples with similar lengths and minimize padding. The sort_key 56 provided to the Iterator constructor overrides the sort_key 57 attribute of the Dataset, or defers to it if None. 58 train: Whether the iterator represents a train set. 59 repeat: Whether to repeat the iterator for multiple epochs. 60 shuffle: Whether to shuffle examples between epochs. 61 sort: Whether to sort examples according to self.sort_key. 62 Note that repeat, shuffle, and sort default to train, train, and 63 (not train). 64 device: Device to create batches on. Use -1 for CPU and None for the 65 currently active GPU device. 
66 """ 67 68 def __init__(self, dataset, batch_size, sort_key=None, device=None, 69 batch_size_fn=lambda new, count, sofar: count, train=True, 70 repeat=None, shuffle=None, sort=None): 71 self.batch_size, self.train, self.dataset = batch_size, train, dataset 72 self.batch_size_fn = batch_size_fn 73 self.iterations = 0 74 self.repeat = train if repeat is None else repeat 75 self.shuffle = train if shuffle is None else shuffle 76 self.sort = not train if sort is None else sort 77 if sort_key is None: 78 self.sort_key = dataset.sort_key 79 else: 80 self.sort_key = sort_key 81 self.device = device 82 83 self.random_shuffler = RandomShuffler() 84 85 # For state loading/saving only 86 self._iterations_this_epoch = 0 87 self._random_state_this_epoch = None 88 self._restored_from_state = False 89 90 @classmethod 91 def splits(cls, datasets, batch_sizes=None, **kwargs): 92 """Create Iterator objects for multiple splits of a dataset. 93 94 Arguments: 95 datasets: Tuple of Dataset objects corresponding to the splits. The 96 first such object should be the train set. 97 batch_sizes: Tuple of batch sizes to use for the different splits, 98 or None to use the same batch_size for all splits. 99 Remaining keyword arguments: Passed to the constructor of the 100 iterator class being used. 101 """ 102 if batch_sizes is None: 103 batch_sizes = [kwargs.pop('batch_size')] * len(datasets) 104 ret = [] 105 for i in range(len(datasets)): 106 train = i == 0 107 ret.append(cls( 108 datasets[i], batch_size=batch_sizes[i], train=train, **kwargs)) 109 return tuple(ret) 110 111 def data(self): 112 """Return the examples in the dataset in order, sorted, or shuffled.""" 113 if self.shuffle: 114 xs = [self.dataset[i] for i in self.random_shuffler(range(len(self.dataset)))] 115 elif self.sort: 116 xs = sorted(self.dataset, key=self.sort_key) 117 else: 118 xs = self.dataset 119 return xs 120 121 def init_epoch(self): 122 """Set up the batch generator for a new epoch.""" 123 124 if self._restored_from_state: 125 self.random_shuffler.random_state = self._random_state_this_epoch 126 else: 127 self._random_state_this_epoch = self.random_shuffler.random_state 128 129 self.create_batches() 130 131 if self._restored_from_state: 132 self._restored_from_state = False 133 else: 134 self._iterations_this_epoch = 0 135 136 if not self.repeat: 137 self.iterations = 0 138 139 def create_batches(self): 140 self.batches = batch(self.data(), self.batch_size, self.batch_size_fn) 141 142 @property 143 def epoch(self): 144 return self.iterations / len(self) 145 146 def __len__(self): 147 return math.ceil(len(self.dataset) / self.batch_size) 148 149 def __iter__(self): 150 while True: 151 self.init_epoch() 152 for idx, minibatch in enumerate(self.batches): 153 # fast-forward if loaded from state 154 if self._iterations_this_epoch > idx: 155 continue 156 self.iterations += 1 157 self._iterations_this_epoch += 1 158 yield Batch(minibatch, self.dataset, self.device, 159 self.train) 160 if not self.repeat: 161 raise StopIteration 162 163 def state_dict(self): 164 return { 165 "iterations": self.iterations, 166 "iterations_this_epoch": self._iterations_this_epoch, 167 "random_state_this_epoch": self._random_state_this_epoch} 168 169 def load_state_dict(self, state_dict): 170 self.iterations = state_dict["iterations"] 171 self._iterations_this_epoch = state_dict["iterations_this_epoch"] 172 self._random_state_this_epoch = state_dict["random_state_this_epoch"] 173 self._restored_from_state = True 174 175 176 class BPTTIterator(Iterator): 177 """Defines 
an iterator for language modeling tasks that use BPTT. 178 179 Provides contiguous streams of examples together with targets that are 180 one timestep further forward, for language modeling training with 181 backpropagation through time (BPTT). Expects a Dataset with a single 182 example and a single field called 'text' and produces Batches with text and 183 target attributes. 184 185 Attributes: 186 dataset: The Dataset object to load Examples from. 187 batch_size: Batch size. 188 bptt_len: Length of sequences for backpropagation through time. 189 sort_key: A key to use for sorting examples in order to batch together 190 examples with similar lengths and minimize padding. The sort_key 191 provided to the Iterator constructor overrides the sort_key 192 attribute of the Dataset, or defers to it if None. 193 train: Whether the iterator represents a train set. 194 repeat: Whether to repeat the iterator for multiple epochs. 195 shuffle: Whether to shuffle examples between epochs. 196 sort: Whether to sort examples according to self.sort_key. 197 Note that repeat, shuffle, and sort default to train, train, and 198 (not train). 199 device: Device to create batches on. Use -1 for CPU and None for the 200 currently active GPU device. 201 """ 202 203 def __init__(self, dataset, batch_size, bptt_len, **kwargs): 204 self.bptt_len = bptt_len 205 super(BPTTIterator, self).__init__(dataset, batch_size, **kwargs) 206 207 def __len__(self): 208 return math.ceil(len(self.dataset[0].text) / 209 (self.batch_size * self.bptt_len)) 210 211 def __iter__(self): 212 text = self.dataset[0].text 213 TEXT = self.dataset.fields['text'] 214 TEXT.eos_token = None 215 text = text + ([TEXT.pad_token] * (math.ceil(len(text) / self.batch_size) * 216 self.batch_size - len(text))) 217 data = TEXT.numericalize( 218 [text], device=self.device, train=self.train) 219 data = data.view(self.batch_size, -1).t().contiguous() 220 dataset = Dataset(examples=self.dataset.examples, fields=[ 221 ('text', TEXT), ('target', TEXT)]) 222 while True: 223 for i in range(0, len(self) * self.bptt_len, self.bptt_len): 224 seq_len = min(self.bptt_len, len(data) - 1 - i) 225 yield Batch.fromvars( 226 dataset, self.batch_size, train=self.train, 227 text=data[i:i + seq_len], 228 target=data[i + 1:i + 1 + seq_len]) 229 if not self.repeat: 230 raise StopIteration 231 232 233 class BucketIterator(Iterator): 234 """Defines an iterator that batches examples of similar lengths together. 235 236 Minimizes amount of padding needed while producing freshly shuffled 237 batches for each new epoch. See pool for the bucketing procedure used. 
238 """ 239 240 def create_batches(self): 241 if self.sort: 242 self.batches = batch(self.data(), self.batch_size, 243 self.batch_size_fn) 244 else: 245 self.batches = pool(self.data(), self.batch_size, 246 self.sort_key, self.batch_size_fn, 247 random_shuffler=self.random_shuffler) 248 249 250 def batch(data, batch_size, batch_size_fn=lambda new, count, sofar: count): 251 """Yield elements from data in chunks of batch_size.""" 252 minibatch, size_so_far = [], 0 253 for ex in data: 254 minibatch.append(ex) 255 size_so_far = batch_size_fn(ex, len(minibatch), size_so_far) 256 if size_so_far == batch_size: 257 yield minibatch 258 minibatch, size_so_far = [], 0 259 elif size_so_far > batch_size: 260 yield minibatch[:-1] 261 minibatch, size_so_far = minibatch[:-1], batch_size_fn(ex, 1, 0) 262 if minibatch: 263 yield minibatch 264 265 266 def pool(data, batch_size, key, batch_size_fn=lambda new, count, sofar: count, 267 random_shuffler=None): 268 """Sort within buckets, then batch, then shuffle batches. 269 270 Partitions data into chunks of size 100*batch_size, sorts examples within 271 each chunk using sort_key, then batch these examples and shuffle the 272 batches. 273 """ 274 if random_shuffler is None: 275 random_shuffler = random.shuffle 276 for p in batch(data, batch_size * 100, batch_size_fn): 277 p_batch = batch(sorted(p, key=key), batch_size, batch_size_fn) 278 for b in random_shuffler(list(p_batch)): 279 yield b 280 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/torchtext/data/iterator.py b/torchtext/data/iterator.py --- a/torchtext/data/iterator.py +++ b/torchtext/data/iterator.py @@ -110,10 +110,10 @@ def data(self): """Return the examples in the dataset in order, sorted, or shuffled.""" - if self.shuffle: - xs = [self.dataset[i] for i in self.random_shuffler(range(len(self.dataset)))] - elif self.sort: + if self.sort: xs = sorted(self.dataset, key=self.sort_key) + elif self.shuffle: + xs = [self.dataset[i] for i in self.random_shuffler(range(len(self.dataset)))] else: xs = self.dataset return xs
{"golden_diff": "diff --git a/torchtext/data/iterator.py b/torchtext/data/iterator.py\n--- a/torchtext/data/iterator.py\n+++ b/torchtext/data/iterator.py\n@@ -110,10 +110,10 @@\n \n def data(self):\n \"\"\"Return the examples in the dataset in order, sorted, or shuffled.\"\"\"\n- if self.shuffle:\n- xs = [self.dataset[i] for i in self.random_shuffler(range(len(self.dataset)))]\n- elif self.sort:\n+ if self.sort:\n xs = sorted(self.dataset, key=self.sort_key)\n+ elif self.shuffle:\n+ xs = [self.dataset[i] for i in self.random_shuffler(range(len(self.dataset)))]\n else:\n xs = self.dataset\n return xs\n", "issue": "Consistency with sorting: `sort=True`\n**Problem:**\r\n```\r\n train_iter, dev_iter, test_iter = data.BucketIterator.splits(\r\n (train, dev, test),\r\n batch_sizes=(32, 256, 256),\r\n sort_key=lambda x: len(x.input),\r\n sort=True,\r\n device=-1) # Use CPU\r\n```\r\n\r\nIf `sort=True` and `train=True`, then the `train_iter` batches are shuffled. This behavior is unexpected.\r\n\r\n**Cause:**\r\nBecause by default `self.shuffle=True` is `train=True`. Then https://github.com/pytorch/text/blob/master/torchtext/data/iterator.py#L113 `shuffle` overrides `sort`.\r\n\r\n**Possible Solution:**\r\n`sort=True` should override `shuffle=None and train=True`.\n", "before_files": [{"content": "import math\nimport random\nfrom contextlib import contextmanager\nfrom copy import deepcopy\n\nfrom .batch import Batch\nfrom .dataset import Dataset\n\n\nclass RandomShuffler(object):\n \"\"\"Use random functions while keeping track of the random state to make it\n reproducible and deterministic.\"\"\"\n\n def __init__(self, random_state=None):\n self._random_state = random_state\n if self._random_state is None:\n self._random_state = random.getstate()\n\n @contextmanager\n def use_internal_state(self):\n \"\"\"Use a specific RNG state.\"\"\"\n old_state = random.getstate()\n random.setstate(self._random_state)\n yield\n self._random_state = random.getstate()\n random.setstate(old_state)\n\n @property\n def random_state(self):\n return deepcopy(self._random_state)\n\n @random_state.setter\n def random_state(self, s):\n self._random_state = s\n\n def __call__(self, data):\n \"\"\"Shuffle and return a new list.\"\"\"\n with self.use_internal_state():\n return random.sample(data, len(data))\n\n\nclass Iterator(object):\n \"\"\"Defines an iterator that loads batches of data from a Dataset.\n\n Attributes:\n dataset: The Dataset object to load Examples from.\n batch_size: Batch size.\n batch_size_fn: Function of three arguments (new example to add, current\n count of examples in the batch, and current effective batch size)\n that returns the new effective batch size resulting from adding\n that example to a batch. This is useful for dynamic batching, where\n this function would add to the current effective batch size the\n number of tokens in the new example.\n sort_key: A key to use for sorting examples in order to batch together\n examples with similar lengths and minimize padding. The sort_key\n provided to the Iterator constructor overrides the sort_key\n attribute of the Dataset, or defers to it if None.\n train: Whether the iterator represents a train set.\n repeat: Whether to repeat the iterator for multiple epochs.\n shuffle: Whether to shuffle examples between epochs.\n sort: Whether to sort examples according to self.sort_key.\n Note that repeat, shuffle, and sort default to train, train, and\n (not train).\n device: Device to create batches on. 
Use -1 for CPU and None for the\n currently active GPU device.\n \"\"\"\n\n def __init__(self, dataset, batch_size, sort_key=None, device=None,\n batch_size_fn=lambda new, count, sofar: count, train=True,\n repeat=None, shuffle=None, sort=None):\n self.batch_size, self.train, self.dataset = batch_size, train, dataset\n self.batch_size_fn = batch_size_fn\n self.iterations = 0\n self.repeat = train if repeat is None else repeat\n self.shuffle = train if shuffle is None else shuffle\n self.sort = not train if sort is None else sort\n if sort_key is None:\n self.sort_key = dataset.sort_key\n else:\n self.sort_key = sort_key\n self.device = device\n\n self.random_shuffler = RandomShuffler()\n\n # For state loading/saving only\n self._iterations_this_epoch = 0\n self._random_state_this_epoch = None\n self._restored_from_state = False\n\n @classmethod\n def splits(cls, datasets, batch_sizes=None, **kwargs):\n \"\"\"Create Iterator objects for multiple splits of a dataset.\n\n Arguments:\n datasets: Tuple of Dataset objects corresponding to the splits. The\n first such object should be the train set.\n batch_sizes: Tuple of batch sizes to use for the different splits,\n or None to use the same batch_size for all splits.\n Remaining keyword arguments: Passed to the constructor of the\n iterator class being used.\n \"\"\"\n if batch_sizes is None:\n batch_sizes = [kwargs.pop('batch_size')] * len(datasets)\n ret = []\n for i in range(len(datasets)):\n train = i == 0\n ret.append(cls(\n datasets[i], batch_size=batch_sizes[i], train=train, **kwargs))\n return tuple(ret)\n\n def data(self):\n \"\"\"Return the examples in the dataset in order, sorted, or shuffled.\"\"\"\n if self.shuffle:\n xs = [self.dataset[i] for i in self.random_shuffler(range(len(self.dataset)))]\n elif self.sort:\n xs = sorted(self.dataset, key=self.sort_key)\n else:\n xs = self.dataset\n return xs\n\n def init_epoch(self):\n \"\"\"Set up the batch generator for a new epoch.\"\"\"\n\n if self._restored_from_state:\n self.random_shuffler.random_state = self._random_state_this_epoch\n else:\n self._random_state_this_epoch = self.random_shuffler.random_state\n\n self.create_batches()\n\n if self._restored_from_state:\n self._restored_from_state = False\n else:\n self._iterations_this_epoch = 0\n\n if not self.repeat:\n self.iterations = 0\n\n def create_batches(self):\n self.batches = batch(self.data(), self.batch_size, self.batch_size_fn)\n\n @property\n def epoch(self):\n return self.iterations / len(self)\n\n def __len__(self):\n return math.ceil(len(self.dataset) / self.batch_size)\n\n def __iter__(self):\n while True:\n self.init_epoch()\n for idx, minibatch in enumerate(self.batches):\n # fast-forward if loaded from state\n if self._iterations_this_epoch > idx:\n continue\n self.iterations += 1\n self._iterations_this_epoch += 1\n yield Batch(minibatch, self.dataset, self.device,\n self.train)\n if not self.repeat:\n raise StopIteration\n\n def state_dict(self):\n return {\n \"iterations\": self.iterations,\n \"iterations_this_epoch\": self._iterations_this_epoch,\n \"random_state_this_epoch\": self._random_state_this_epoch}\n\n def load_state_dict(self, state_dict):\n self.iterations = state_dict[\"iterations\"]\n self._iterations_this_epoch = state_dict[\"iterations_this_epoch\"]\n self._random_state_this_epoch = state_dict[\"random_state_this_epoch\"]\n self._restored_from_state = True\n\n\nclass BPTTIterator(Iterator):\n \"\"\"Defines an iterator for language modeling tasks that use BPTT.\n\n Provides contiguous streams of 
examples together with targets that are\n one timestep further forward, for language modeling training with\n backpropagation through time (BPTT). Expects a Dataset with a single\n example and a single field called 'text' and produces Batches with text and\n target attributes.\n\n Attributes:\n dataset: The Dataset object to load Examples from.\n batch_size: Batch size.\n bptt_len: Length of sequences for backpropagation through time.\n sort_key: A key to use for sorting examples in order to batch together\n examples with similar lengths and minimize padding. The sort_key\n provided to the Iterator constructor overrides the sort_key\n attribute of the Dataset, or defers to it if None.\n train: Whether the iterator represents a train set.\n repeat: Whether to repeat the iterator for multiple epochs.\n shuffle: Whether to shuffle examples between epochs.\n sort: Whether to sort examples according to self.sort_key.\n Note that repeat, shuffle, and sort default to train, train, and\n (not train).\n device: Device to create batches on. Use -1 for CPU and None for the\n currently active GPU device.\n \"\"\"\n\n def __init__(self, dataset, batch_size, bptt_len, **kwargs):\n self.bptt_len = bptt_len\n super(BPTTIterator, self).__init__(dataset, batch_size, **kwargs)\n\n def __len__(self):\n return math.ceil(len(self.dataset[0].text) /\n (self.batch_size * self.bptt_len))\n\n def __iter__(self):\n text = self.dataset[0].text\n TEXT = self.dataset.fields['text']\n TEXT.eos_token = None\n text = text + ([TEXT.pad_token] * (math.ceil(len(text) / self.batch_size) *\n self.batch_size - len(text)))\n data = TEXT.numericalize(\n [text], device=self.device, train=self.train)\n data = data.view(self.batch_size, -1).t().contiguous()\n dataset = Dataset(examples=self.dataset.examples, fields=[\n ('text', TEXT), ('target', TEXT)])\n while True:\n for i in range(0, len(self) * self.bptt_len, self.bptt_len):\n seq_len = min(self.bptt_len, len(data) - 1 - i)\n yield Batch.fromvars(\n dataset, self.batch_size, train=self.train,\n text=data[i:i + seq_len],\n target=data[i + 1:i + 1 + seq_len])\n if not self.repeat:\n raise StopIteration\n\n\nclass BucketIterator(Iterator):\n \"\"\"Defines an iterator that batches examples of similar lengths together.\n\n Minimizes amount of padding needed while producing freshly shuffled\n batches for each new epoch. 
See pool for the bucketing procedure used.\n \"\"\"\n\n def create_batches(self):\n if self.sort:\n self.batches = batch(self.data(), self.batch_size,\n self.batch_size_fn)\n else:\n self.batches = pool(self.data(), self.batch_size,\n self.sort_key, self.batch_size_fn,\n random_shuffler=self.random_shuffler)\n\n\ndef batch(data, batch_size, batch_size_fn=lambda new, count, sofar: count):\n \"\"\"Yield elements from data in chunks of batch_size.\"\"\"\n minibatch, size_so_far = [], 0\n for ex in data:\n minibatch.append(ex)\n size_so_far = batch_size_fn(ex, len(minibatch), size_so_far)\n if size_so_far == batch_size:\n yield minibatch\n minibatch, size_so_far = [], 0\n elif size_so_far > batch_size:\n yield minibatch[:-1]\n minibatch, size_so_far = minibatch[:-1], batch_size_fn(ex, 1, 0)\n if minibatch:\n yield minibatch\n\n\ndef pool(data, batch_size, key, batch_size_fn=lambda new, count, sofar: count,\n random_shuffler=None):\n \"\"\"Sort within buckets, then batch, then shuffle batches.\n\n Partitions data into chunks of size 100*batch_size, sorts examples within\n each chunk using sort_key, then batch these examples and shuffle the\n batches.\n \"\"\"\n if random_shuffler is None:\n random_shuffler = random.shuffle\n for p in batch(data, batch_size * 100, batch_size_fn):\n p_batch = batch(sorted(p, key=key), batch_size, batch_size_fn)\n for b in random_shuffler(list(p_batch)):\n yield b\n", "path": "torchtext/data/iterator.py"}], "after_files": [{"content": "import math\nimport random\nfrom contextlib import contextmanager\nfrom copy import deepcopy\n\nfrom .batch import Batch\nfrom .dataset import Dataset\n\n\nclass RandomShuffler(object):\n \"\"\"Use random functions while keeping track of the random state to make it\n reproducible and deterministic.\"\"\"\n\n def __init__(self, random_state=None):\n self._random_state = random_state\n if self._random_state is None:\n self._random_state = random.getstate()\n\n @contextmanager\n def use_internal_state(self):\n \"\"\"Use a specific RNG state.\"\"\"\n old_state = random.getstate()\n random.setstate(self._random_state)\n yield\n self._random_state = random.getstate()\n random.setstate(old_state)\n\n @property\n def random_state(self):\n return deepcopy(self._random_state)\n\n @random_state.setter\n def random_state(self, s):\n self._random_state = s\n\n def __call__(self, data):\n \"\"\"Shuffle and return a new list.\"\"\"\n with self.use_internal_state():\n return random.sample(data, len(data))\n\n\nclass Iterator(object):\n \"\"\"Defines an iterator that loads batches of data from a Dataset.\n\n Attributes:\n dataset: The Dataset object to load Examples from.\n batch_size: Batch size.\n batch_size_fn: Function of three arguments (new example to add, current\n count of examples in the batch, and current effective batch size)\n that returns the new effective batch size resulting from adding\n that example to a batch. This is useful for dynamic batching, where\n this function would add to the current effective batch size the\n number of tokens in the new example.\n sort_key: A key to use for sorting examples in order to batch together\n examples with similar lengths and minimize padding. 
The sort_key\n provided to the Iterator constructor overrides the sort_key\n attribute of the Dataset, or defers to it if None.\n train: Whether the iterator represents a train set.\n repeat: Whether to repeat the iterator for multiple epochs.\n shuffle: Whether to shuffle examples between epochs.\n sort: Whether to sort examples according to self.sort_key.\n Note that repeat, shuffle, and sort default to train, train, and\n (not train).\n device: Device to create batches on. Use -1 for CPU and None for the\n currently active GPU device.\n \"\"\"\n\n def __init__(self, dataset, batch_size, sort_key=None, device=None,\n batch_size_fn=lambda new, count, sofar: count, train=True,\n repeat=None, shuffle=None, sort=None):\n self.batch_size, self.train, self.dataset = batch_size, train, dataset\n self.batch_size_fn = batch_size_fn\n self.iterations = 0\n self.repeat = train if repeat is None else repeat\n self.shuffle = train if shuffle is None else shuffle\n self.sort = not train if sort is None else sort\n if sort_key is None:\n self.sort_key = dataset.sort_key\n else:\n self.sort_key = sort_key\n self.device = device\n\n self.random_shuffler = RandomShuffler()\n\n # For state loading/saving only\n self._iterations_this_epoch = 0\n self._random_state_this_epoch = None\n self._restored_from_state = False\n\n @classmethod\n def splits(cls, datasets, batch_sizes=None, **kwargs):\n \"\"\"Create Iterator objects for multiple splits of a dataset.\n\n Arguments:\n datasets: Tuple of Dataset objects corresponding to the splits. The\n first such object should be the train set.\n batch_sizes: Tuple of batch sizes to use for the different splits,\n or None to use the same batch_size for all splits.\n Remaining keyword arguments: Passed to the constructor of the\n iterator class being used.\n \"\"\"\n if batch_sizes is None:\n batch_sizes = [kwargs.pop('batch_size')] * len(datasets)\n ret = []\n for i in range(len(datasets)):\n train = i == 0\n ret.append(cls(\n datasets[i], batch_size=batch_sizes[i], train=train, **kwargs))\n return tuple(ret)\n\n def data(self):\n \"\"\"Return the examples in the dataset in order, sorted, or shuffled.\"\"\"\n if self.sort:\n xs = sorted(self.dataset, key=self.sort_key)\n elif self.shuffle:\n xs = [self.dataset[i] for i in self.random_shuffler(range(len(self.dataset)))]\n else:\n xs = self.dataset\n return xs\n\n def init_epoch(self):\n \"\"\"Set up the batch generator for a new epoch.\"\"\"\n\n if self._restored_from_state:\n self.random_shuffler.random_state = self._random_state_this_epoch\n else:\n self._random_state_this_epoch = self.random_shuffler.random_state\n\n self.create_batches()\n\n if self._restored_from_state:\n self._restored_from_state = False\n else:\n self._iterations_this_epoch = 0\n\n if not self.repeat:\n self.iterations = 0\n\n def create_batches(self):\n self.batches = batch(self.data(), self.batch_size, self.batch_size_fn)\n\n @property\n def epoch(self):\n return self.iterations / len(self)\n\n def __len__(self):\n return math.ceil(len(self.dataset) / self.batch_size)\n\n def __iter__(self):\n while True:\n self.init_epoch()\n for idx, minibatch in enumerate(self.batches):\n # fast-forward if loaded from state\n if self._iterations_this_epoch > idx:\n continue\n self.iterations += 1\n self._iterations_this_epoch += 1\n yield Batch(minibatch, self.dataset, self.device,\n self.train)\n if not self.repeat:\n raise StopIteration\n\n def state_dict(self):\n return {\n \"iterations\": self.iterations,\n \"iterations_this_epoch\": 
self._iterations_this_epoch,\n \"random_state_this_epoch\": self._random_state_this_epoch}\n\n def load_state_dict(self, state_dict):\n self.iterations = state_dict[\"iterations\"]\n self._iterations_this_epoch = state_dict[\"iterations_this_epoch\"]\n self._random_state_this_epoch = state_dict[\"random_state_this_epoch\"]\n self._restored_from_state = True\n\n\nclass BPTTIterator(Iterator):\n \"\"\"Defines an iterator for language modeling tasks that use BPTT.\n\n Provides contiguous streams of examples together with targets that are\n one timestep further forward, for language modeling training with\n backpropagation through time (BPTT). Expects a Dataset with a single\n example and a single field called 'text' and produces Batches with text and\n target attributes.\n\n Attributes:\n dataset: The Dataset object to load Examples from.\n batch_size: Batch size.\n bptt_len: Length of sequences for backpropagation through time.\n sort_key: A key to use for sorting examples in order to batch together\n examples with similar lengths and minimize padding. The sort_key\n provided to the Iterator constructor overrides the sort_key\n attribute of the Dataset, or defers to it if None.\n train: Whether the iterator represents a train set.\n repeat: Whether to repeat the iterator for multiple epochs.\n shuffle: Whether to shuffle examples between epochs.\n sort: Whether to sort examples according to self.sort_key.\n Note that repeat, shuffle, and sort default to train, train, and\n (not train).\n device: Device to create batches on. Use -1 for CPU and None for the\n currently active GPU device.\n \"\"\"\n\n def __init__(self, dataset, batch_size, bptt_len, **kwargs):\n self.bptt_len = bptt_len\n super(BPTTIterator, self).__init__(dataset, batch_size, **kwargs)\n\n def __len__(self):\n return math.ceil(len(self.dataset[0].text) /\n (self.batch_size * self.bptt_len))\n\n def __iter__(self):\n text = self.dataset[0].text\n TEXT = self.dataset.fields['text']\n TEXT.eos_token = None\n text = text + ([TEXT.pad_token] * (math.ceil(len(text) / self.batch_size) *\n self.batch_size - len(text)))\n data = TEXT.numericalize(\n [text], device=self.device, train=self.train)\n data = data.view(self.batch_size, -1).t().contiguous()\n dataset = Dataset(examples=self.dataset.examples, fields=[\n ('text', TEXT), ('target', TEXT)])\n while True:\n for i in range(0, len(self) * self.bptt_len, self.bptt_len):\n seq_len = min(self.bptt_len, len(data) - 1 - i)\n yield Batch.fromvars(\n dataset, self.batch_size, train=self.train,\n text=data[i:i + seq_len],\n target=data[i + 1:i + 1 + seq_len])\n if not self.repeat:\n raise StopIteration\n\n\nclass BucketIterator(Iterator):\n \"\"\"Defines an iterator that batches examples of similar lengths together.\n\n Minimizes amount of padding needed while producing freshly shuffled\n batches for each new epoch. 
See pool for the bucketing procedure used.\n \"\"\"\n\n def create_batches(self):\n if self.sort:\n self.batches = batch(self.data(), self.batch_size,\n self.batch_size_fn)\n else:\n self.batches = pool(self.data(), self.batch_size,\n self.sort_key, self.batch_size_fn,\n random_shuffler=self.random_shuffler)\n\n\ndef batch(data, batch_size, batch_size_fn=lambda new, count, sofar: count):\n \"\"\"Yield elements from data in chunks of batch_size.\"\"\"\n minibatch, size_so_far = [], 0\n for ex in data:\n minibatch.append(ex)\n size_so_far = batch_size_fn(ex, len(minibatch), size_so_far)\n if size_so_far == batch_size:\n yield minibatch\n minibatch, size_so_far = [], 0\n elif size_so_far > batch_size:\n yield minibatch[:-1]\n minibatch, size_so_far = minibatch[:-1], batch_size_fn(ex, 1, 0)\n if minibatch:\n yield minibatch\n\n\ndef pool(data, batch_size, key, batch_size_fn=lambda new, count, sofar: count,\n random_shuffler=None):\n \"\"\"Sort within buckets, then batch, then shuffle batches.\n\n Partitions data into chunks of size 100*batch_size, sorts examples within\n each chunk using sort_key, then batch these examples and shuffle the\n batches.\n \"\"\"\n if random_shuffler is None:\n random_shuffler = random.shuffle\n for p in batch(data, batch_size * 100, batch_size_fn):\n p_batch = batch(sorted(p, key=key), batch_size, batch_size_fn)\n for b in random_shuffler(list(p_batch)):\n yield b\n", "path": "torchtext/data/iterator.py"}]}
3,545
173
gh_patches_debug_38649
rasdani/github-patches
git_diff
sql-machine-learning__elasticdl-1594
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- May need to use lock in async Optimizer Wrapper Currently, `OptimizerWrapper` is not thread-safe for embedding table. However, in async mode, many threads call `OptimizerWrapper.apply_gradients` without using lock. After implementing PS, we should discuss whether we need to add a lock here. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `elasticdl/python/ps/optimizer_wrapper.py` Content: ``` 1 """Optimizer Wrapper for ElasticDL""" 2 3 import threading 4 5 import tensorflow as tf 6 from tensorflow.keras.optimizers import ( 7 SGD, 8 Adadelta, 9 Adagrad, 10 Adam, 11 Adamax, 12 Ftrl, 13 Nadam, 14 RMSprop, 15 ) 16 17 from elasticdl.python.common.log_utils import default_logger as logger 18 from elasticdl.python.ps.embedding_table import get_slot_table_name 19 20 21 def _get_embedding_layer_name_from_var(var): 22 """Get name for ElasticDL embedding layer from variable.""" 23 # Assumes that for ElasticDL embedding layer, variable will be a 24 # string representing its layer name 25 if isinstance(var, str): 26 return var 27 return None 28 29 30 # This function is taken from `tensorflow.keras.optimizers.Optimizer._var_key`. 31 # https://github.com/tensorflow/tensorflow/blob/71d73e56a2e66e4a6805d967cfa48ea 32 # 594f8c54e/tensorflow/python/keras/optimizer_v2/optimizer_v2.py#L1033 33 def _var_key(var): 34 """Key for representing a primary variable, for looking up slots. 35 36 In graph mode the name is derived from the var shared name. 37 In eager mode the name is derived from the var unique id. 38 If distribution strategy exists, get the primary variable first. 39 40 Arguments: 41 var: the variable. 42 43 Returns: 44 the unique name of the variable. 45 """ 46 47 # pylint: disable=protected-access 48 # Get the distributed variable if it exists. 49 if getattr(var, "_distributed_container", None) is not None: 50 var = var._distributed_container() 51 if var._in_graph_mode: 52 return var._shared_name 53 return var._unique_id 54 55 56 class OptimizerWrapper(object): 57 """ ElasticDL optimizer wrapper. 58 59 If model does not use ElasticDL embedding layer, `OptimizerWrapper` 60 does nothing but calls `apply_gradients` function of TensorFlow optimizer. 61 Otherwise, `OptimizerWrapper` looks up embedding vectors and slot values 62 from external kv store before updating variables, and updates embedding 63 vectors and slot values in kv store after updating variables. 64 """ 65 66 def __init__( 67 self, 68 opt, 69 use_async=False, 70 lookup_embedding_func=None, 71 update_embedding_func=None, 72 ): 73 """ 74 Note: 75 We need to support Redis and ElasticDL parameter server at the 76 same time. If `lookup_embedding_func`/`update_embedding_func` 77 is not None, use parameter server to lookup/update embedding. 78 Otherwise use Redis. 79 80 Arguments: 81 opt: A TensorFlow optimizer instance. 82 kv_store_endpoint: The endpoint to kv store. 83 embedding_dims: A python dictionary of 84 {layer name: `embedding_dim`} where layer name is the 85 name of ElasticDL embedding layer and `embedding_dim` 86 is the output dimension of corresponding embedding layer. 87 use_async: A python bool. True if using asynchronous updates. When 88 using asynchronoues updates, `OptimizerWrapper` is thread-safe 89 for non-embedding variables and is not thread-safe for 90 embedding table. 
91 lookup_embedding_func: The function to lookup embeddings. The 92 argument of this function is a list of keys. 93 update_embedding_func: The function to update embeddings. The 94 arguments of this function is a key list and a value list. 95 """ 96 self._opt = opt 97 self._use_async = use_async 98 self._lookup_embedding_func = lookup_embedding_func 99 self._update_embedding_func = update_embedding_func 100 self._slot_initial_value = {} 101 102 self._opt_weights_delete_lock = threading.Lock() 103 self._tls = threading.local() 104 self._init_thread_local() 105 106 # "-" in slot name is not supported 107 if isinstance(opt, SGD): 108 self._allowed_slot_names = [] 109 if opt._momentum: 110 self._allowed_slot_names.append("momentum") 111 112 elif isinstance(opt, (Adam, Adamax, Nadam)): 113 self._allowed_slot_names = ["m", "v"] 114 if isinstance(opt, Adam) and self._opt.amsgrad: 115 self._allowed_slot_names.append("vhat") 116 117 elif isinstance(opt, Adadelta): 118 self._allowed_slot_names = ["accum_grad", "accum_var"] 119 120 elif isinstance(opt, (Adagrad, Ftrl)): 121 self._allowed_slot_names = ["accumulator"] 122 if isinstance(opt, Ftrl): 123 self._allowed_slot_names.append("linear") 124 accumu_init = opt._initial_accumulator_value 125 self._slot_initial_value["accumulator"] = accumu_init 126 127 elif isinstance(opt, RMSprop): 128 self._allowed_slot_names = ["rms"] 129 if self._opt._momentum: 130 self._allowed_slot_names.append("momentum") 131 if self._opt.centered: 132 self._allowed_slot_names.append("mg") 133 134 else: 135 raise NotImplementedError( 136 "Optimizer %s is not supported in ElasticDL." % type(opt) 137 ) 138 139 for slot in self._allowed_slot_names: 140 self._slot_initial_value.setdefault(slot, 0.0) 141 142 def _init_thread_local(self): 143 self._tls._unique_ids_all_layers = {} 144 self._tls._embed_variables = {} 145 self._tls._slot_variables = {} 146 147 def apply_gradients(self, grads_and_vars): 148 """Update variable values. 149 150 Args: 151 grads_and_vars: A list of (gradient, variable) pairs. If the 152 variable is from ElasticDL embedding layer, it should be a 153 ElasticDL `Tensor` object. Otherwise it is a TensorFlow 154 variable. 155 """ 156 # TODO (#1255): Discuss whether `OptimizerWrapper` needs a lock after 157 # implementing PS. 158 self._init_thread_local() 159 160 grads_and_vars_new = [] 161 for grad, var in grads_and_vars: 162 # If var is a string, this grad var pair is for ElasticDL embedding 163 if isinstance(var, str): 164 grads_and_vars_new.append( 165 self._get_embedding_var_and_grad(grad, var) 166 ) 167 else: 168 grads_and_vars_new.append((grad, var)) 169 170 self._opt.apply_gradients(grads_and_vars_new) 171 self._update_embedding_param() 172 self._delete_variables() 173 174 def _get_embedding_var_and_grad(self, grad, layer_name): 175 unique_ids, indices = tf.unique(grad.indices) 176 unique_ids = unique_ids.numpy() 177 if layer_name in self._tls._unique_ids_all_layers: 178 # TODO: support grads_and_vars with duplicated layer name 179 logger.warning( 180 "grads_and_vars has duplicated layer name %s." 
% layer_name 181 ) 182 self._tls._unique_ids_all_layers[layer_name] = unique_ids 183 new_grad = tf.IndexedSlices(values=grad.values, indices=indices) 184 185 embed_value = self._lookup_embedding_func(layer_name, unique_ids) 186 embed_var = self._create_embedding_variable(layer_name, embed_value) 187 self._get_slot_and_set_to_optimizer(layer_name) 188 return new_grad, embed_var 189 190 def _create_embedding_variable(self, name, initial_value): 191 """Creates a TensorFlow variable using given initial value. 192 193 Note that this function saves the created variable to 194 `self._tls._embed_variables`. 195 """ 196 embed_var = tf.Variable( 197 initial_value, 198 name=name + str(threading.get_ident()), 199 shape=initial_value.shape, 200 dtype=tf.float32, 201 trainable=False, 202 ) 203 self._tls._embed_variables[name] = embed_var 204 return embed_var 205 206 def _get_slot_and_set_to_optimizer(self, layer_name): 207 """Looks up slot value and set it to TensorFlow optimizer.""" 208 for slot_name in self._allowed_slot_names: 209 param_name = get_slot_table_name(layer_name, slot_name) 210 indices = self._tls._unique_ids_all_layers[layer_name] 211 slot_value = self._lookup_embedding_func(param_name, indices) 212 # self._create_slot_variable creates a slot variable in tf 213 # optimizer and set slot_value to it. 214 self._create_slot_variable(layer_name, slot_name, slot_value) 215 216 def _get_slot_variable(self, layer_name, slot_name): 217 """Get the variable for specified slot.""" 218 return self._tls._slot_variables.get(layer_name, {}).get( 219 slot_name, None 220 ) 221 222 def _get_embedding_variable(self, layer_name): 223 """Get the variable for the specified ElasticDL embedding layer.""" 224 return self._tls._embed_variables.get(layer_name, None) 225 226 def _create_slot_variable(self, layer_name, slot_name, initial_value): 227 """Creates a slot variable in TensorFlow optimizer using given 228 value. 229 """ 230 embed_var = self._get_embedding_variable(layer_name) 231 if embed_var is None: 232 raise RuntimeError( 233 "Embedding variable for layer %s should be already created." 234 % (layer_name) 235 ) 236 slot_var = self._create_slot_variable_in_optimizer( 237 embed_var, slot_name, initial_value.shape, initial_value 238 ) 239 slot_variables_dict = self._tls._slot_variables.setdefault( 240 layer_name, {} 241 ) 242 slot_variables_dict[slot_name] = slot_var 243 return slot_var 244 245 # This is a function modified from TensorFlow optimizers. 246 # https://github.com/tensorflow/tensorflow/blob/ 247 # 69b1feac62276edcc509ac88af229c6236e645fe/tensorflow/python 248 # /keras/optimizer_v2/optimizer_v2.py#L567 249 def _create_slot_variable_in_optimizer( 250 self, embed_var, slot_name, shape, initial_value 251 ): 252 """Create variable for a slot and save it in TensorFlow optimizer.""" 253 if slot_name not in self._opt._slot_names: 254 self._opt._slot_names.append(slot_name) 255 var_key = _var_key(embed_var) 256 slot_dict = self._opt._slots.setdefault(var_key, {}) 257 slot_var = slot_dict.get(slot_name, None) 258 if slot_var is None: 259 slot_var_name = "%s/%s" % (embed_var._shared_name, slot_name) 260 slot_var = self._opt.add_weight( 261 name=slot_var_name, 262 shape=shape, 263 dtype=embed_var.dtype, 264 initializer=initial_value, 265 trainable=False, 266 ) 267 slot_dict[slot_name] = slot_var 268 self._opt._weights.append(slot_var) 269 return slot_var 270 else: 271 raise RuntimeError( 272 "Variable with var_key %s and slot_name %s is not expected to " 273 "be in self._opt." 
% (var_key, slot_name) 274 ) 275 276 def _update_embedding_param(self): 277 """Report updated embedding vectors and slots to kv store.""" 278 for layer, ids in self._tls._unique_ids_all_layers.items(): 279 value = self._get_embedding_variable(layer).numpy() 280 self._update_embedding_func(layer, ids, value) 281 282 for slot in self._allowed_slot_names: 283 value = self._get_slot_variable(layer, slot).numpy() 284 slot_table_name = get_slot_table_name(layer, slot) 285 self._update_embedding_func(slot_table_name, ids, value) 286 287 def _delete_variables(self): 288 # Slot variable access in optimizer requires corresponding embedding 289 # variable information. Delete slot variables first. 290 for layer_name, slots in self._tls._slot_variables.items(): 291 embed_var = self._get_embedding_variable(layer_name) 292 embed_var_key = _var_key(embed_var) 293 del self._opt._slots[embed_var_key] 294 for _, var in slots.items(): 295 opt_weight_iter = 0 296 with self._opt_weights_delete_lock: 297 while opt_weight_iter < len(self._opt._weights): 298 if var is self._opt._weights[opt_weight_iter]: 299 self._opt._weights.pop(opt_weight_iter) 300 break 301 else: 302 opt_weight_iter += 1 303 for key in list(self._tls._slot_variables.keys()): 304 del self._tls._slot_variables[key] 305 306 # Delete variables in embed_variables. 307 for key in list(self._tls._embed_variables.keys()): 308 del self._tls._embed_variables[key] 309 310 # Delete variables in unique_ids_all_layers. 311 for key in list(self._tls._unique_ids_all_layers.keys()): 312 del self._tls._unique_ids_all_layers[key] 313 314 @property 315 def allowed_slot_names(self): 316 return self._allowed_slot_names 317 318 # TODO(yunjian.lmh): Do not need to save slot_initial_value in 319 # optimizer wrapper after we do not need to support Redis. 320 @property 321 def slot_initial_value(self): 322 return self._slot_initial_value 323 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/elasticdl/python/ps/optimizer_wrapper.py b/elasticdl/python/ps/optimizer_wrapper.py --- a/elasticdl/python/ps/optimizer_wrapper.py +++ b/elasticdl/python/ps/optimizer_wrapper.py @@ -99,7 +99,7 @@ self._update_embedding_func = update_embedding_func self._slot_initial_value = {} - self._opt_weights_delete_lock = threading.Lock() + self._update_gradient_lock = threading.Lock() self._tls = threading.local() self._init_thread_local() @@ -153,23 +153,23 @@ ElasticDL `Tensor` object. Otherwise it is a TensorFlow variable. """ - # TODO (#1255): Discuss whether `OptimizerWrapper` needs a lock after - # implementing PS. self._init_thread_local() - grads_and_vars_new = [] - for grad, var in grads_and_vars: - # If var is a string, this grad var pair is for ElasticDL embedding - if isinstance(var, str): - grads_and_vars_new.append( - self._get_embedding_var_and_grad(grad, var) - ) - else: - grads_and_vars_new.append((grad, var)) - - self._opt.apply_gradients(grads_and_vars_new) - self._update_embedding_param() - self._delete_variables() + with self._update_gradient_lock: + grads_and_vars_new = [] + for grad, var in grads_and_vars: + # If var is a string, create the grad var pair for + # ElasticDL embedding + if isinstance(var, str): + grads_and_vars_new.append( + self._get_embedding_var_and_grad(grad, var) + ) + else: + grads_and_vars_new.append((grad, var)) + + self._opt.apply_gradients(grads_and_vars_new) + self._update_embedding_param() + self._delete_variables() def _get_embedding_var_and_grad(self, grad, layer_name): unique_ids, indices = tf.unique(grad.indices) @@ -293,13 +293,12 @@ del self._opt._slots[embed_var_key] for _, var in slots.items(): opt_weight_iter = 0 - with self._opt_weights_delete_lock: - while opt_weight_iter < len(self._opt._weights): - if var is self._opt._weights[opt_weight_iter]: - self._opt._weights.pop(opt_weight_iter) - break - else: - opt_weight_iter += 1 + while opt_weight_iter < len(self._opt._weights): + if var is self._opt._weights[opt_weight_iter]: + self._opt._weights.pop(opt_weight_iter) + break + else: + opt_weight_iter += 1 for key in list(self._tls._slot_variables.keys()): del self._tls._slot_variables[key]
{"golden_diff": "diff --git a/elasticdl/python/ps/optimizer_wrapper.py b/elasticdl/python/ps/optimizer_wrapper.py\n--- a/elasticdl/python/ps/optimizer_wrapper.py\n+++ b/elasticdl/python/ps/optimizer_wrapper.py\n@@ -99,7 +99,7 @@\n self._update_embedding_func = update_embedding_func\n self._slot_initial_value = {}\n \n- self._opt_weights_delete_lock = threading.Lock()\n+ self._update_gradient_lock = threading.Lock()\n self._tls = threading.local()\n self._init_thread_local()\n \n@@ -153,23 +153,23 @@\n ElasticDL `Tensor` object. Otherwise it is a TensorFlow\n variable.\n \"\"\"\n- # TODO (#1255): Discuss whether `OptimizerWrapper` needs a lock after\n- # implementing PS.\n self._init_thread_local()\n \n- grads_and_vars_new = []\n- for grad, var in grads_and_vars:\n- # If var is a string, this grad var pair is for ElasticDL embedding\n- if isinstance(var, str):\n- grads_and_vars_new.append(\n- self._get_embedding_var_and_grad(grad, var)\n- )\n- else:\n- grads_and_vars_new.append((grad, var))\n-\n- self._opt.apply_gradients(grads_and_vars_new)\n- self._update_embedding_param()\n- self._delete_variables()\n+ with self._update_gradient_lock:\n+ grads_and_vars_new = []\n+ for grad, var in grads_and_vars:\n+ # If var is a string, create the grad var pair for\n+ # ElasticDL embedding\n+ if isinstance(var, str):\n+ grads_and_vars_new.append(\n+ self._get_embedding_var_and_grad(grad, var)\n+ )\n+ else:\n+ grads_and_vars_new.append((grad, var))\n+\n+ self._opt.apply_gradients(grads_and_vars_new)\n+ self._update_embedding_param()\n+ self._delete_variables()\n \n def _get_embedding_var_and_grad(self, grad, layer_name):\n unique_ids, indices = tf.unique(grad.indices)\n@@ -293,13 +293,12 @@\n del self._opt._slots[embed_var_key]\n for _, var in slots.items():\n opt_weight_iter = 0\n- with self._opt_weights_delete_lock:\n- while opt_weight_iter < len(self._opt._weights):\n- if var is self._opt._weights[opt_weight_iter]:\n- self._opt._weights.pop(opt_weight_iter)\n- break\n- else:\n- opt_weight_iter += 1\n+ while opt_weight_iter < len(self._opt._weights):\n+ if var is self._opt._weights[opt_weight_iter]:\n+ self._opt._weights.pop(opt_weight_iter)\n+ break\n+ else:\n+ opt_weight_iter += 1\n for key in list(self._tls._slot_variables.keys()):\n del self._tls._slot_variables[key]\n", "issue": "May need to use lock in async Optimizer Wrapper\nCurrently, `OptimizerWrapper` is not thread-safe for embedding table. However, in async mode, many threads call `OptimizerWrapper.apply_gradients` without using lock. \r\n\r\nAfter implementing PS, we should discuss whether we need to add a lock here. 
\n", "before_files": [{"content": "\"\"\"Optimizer Wrapper for ElasticDL\"\"\"\n\nimport threading\n\nimport tensorflow as tf\nfrom tensorflow.keras.optimizers import (\n SGD,\n Adadelta,\n Adagrad,\n Adam,\n Adamax,\n Ftrl,\n Nadam,\n RMSprop,\n)\n\nfrom elasticdl.python.common.log_utils import default_logger as logger\nfrom elasticdl.python.ps.embedding_table import get_slot_table_name\n\n\ndef _get_embedding_layer_name_from_var(var):\n \"\"\"Get name for ElasticDL embedding layer from variable.\"\"\"\n # Assumes that for ElasticDL embedding layer, variable will be a\n # string representing its layer name\n if isinstance(var, str):\n return var\n return None\n\n\n# This function is taken from `tensorflow.keras.optimizers.Optimizer._var_key`.\n# https://github.com/tensorflow/tensorflow/blob/71d73e56a2e66e4a6805d967cfa48ea\n# 594f8c54e/tensorflow/python/keras/optimizer_v2/optimizer_v2.py#L1033\ndef _var_key(var):\n \"\"\"Key for representing a primary variable, for looking up slots.\n\n In graph mode the name is derived from the var shared name.\n In eager mode the name is derived from the var unique id.\n If distribution strategy exists, get the primary variable first.\n\n Arguments:\n var: the variable.\n\n Returns:\n the unique name of the variable.\n \"\"\"\n\n # pylint: disable=protected-access\n # Get the distributed variable if it exists.\n if getattr(var, \"_distributed_container\", None) is not None:\n var = var._distributed_container()\n if var._in_graph_mode:\n return var._shared_name\n return var._unique_id\n\n\nclass OptimizerWrapper(object):\n \"\"\" ElasticDL optimizer wrapper.\n\n If model does not use ElasticDL embedding layer, `OptimizerWrapper`\n does nothing but calls `apply_gradients` function of TensorFlow optimizer.\n Otherwise, `OptimizerWrapper` looks up embedding vectors and slot values\n from external kv store before updating variables, and updates embedding\n vectors and slot values in kv store after updating variables.\n \"\"\"\n\n def __init__(\n self,\n opt,\n use_async=False,\n lookup_embedding_func=None,\n update_embedding_func=None,\n ):\n \"\"\"\n Note:\n We need to support Redis and ElasticDL parameter server at the\n same time. If `lookup_embedding_func`/`update_embedding_func`\n is not None, use parameter server to lookup/update embedding.\n Otherwise use Redis.\n\n Arguments:\n opt: A TensorFlow optimizer instance.\n kv_store_endpoint: The endpoint to kv store.\n embedding_dims: A python dictionary of\n {layer name: `embedding_dim`} where layer name is the\n name of ElasticDL embedding layer and `embedding_dim`\n is the output dimension of corresponding embedding layer.\n use_async: A python bool. True if using asynchronous updates. When\n using asynchronoues updates, `OptimizerWrapper` is thread-safe\n for non-embedding variables and is not thread-safe for\n embedding table.\n lookup_embedding_func: The function to lookup embeddings. The\n argument of this function is a list of keys.\n update_embedding_func: The function to update embeddings. 
The\n arguments of this function is a key list and a value list.\n \"\"\"\n self._opt = opt\n self._use_async = use_async\n self._lookup_embedding_func = lookup_embedding_func\n self._update_embedding_func = update_embedding_func\n self._slot_initial_value = {}\n\n self._opt_weights_delete_lock = threading.Lock()\n self._tls = threading.local()\n self._init_thread_local()\n\n # \"-\" in slot name is not supported\n if isinstance(opt, SGD):\n self._allowed_slot_names = []\n if opt._momentum:\n self._allowed_slot_names.append(\"momentum\")\n\n elif isinstance(opt, (Adam, Adamax, Nadam)):\n self._allowed_slot_names = [\"m\", \"v\"]\n if isinstance(opt, Adam) and self._opt.amsgrad:\n self._allowed_slot_names.append(\"vhat\")\n\n elif isinstance(opt, Adadelta):\n self._allowed_slot_names = [\"accum_grad\", \"accum_var\"]\n\n elif isinstance(opt, (Adagrad, Ftrl)):\n self._allowed_slot_names = [\"accumulator\"]\n if isinstance(opt, Ftrl):\n self._allowed_slot_names.append(\"linear\")\n accumu_init = opt._initial_accumulator_value\n self._slot_initial_value[\"accumulator\"] = accumu_init\n\n elif isinstance(opt, RMSprop):\n self._allowed_slot_names = [\"rms\"]\n if self._opt._momentum:\n self._allowed_slot_names.append(\"momentum\")\n if self._opt.centered:\n self._allowed_slot_names.append(\"mg\")\n\n else:\n raise NotImplementedError(\n \"Optimizer %s is not supported in ElasticDL.\" % type(opt)\n )\n\n for slot in self._allowed_slot_names:\n self._slot_initial_value.setdefault(slot, 0.0)\n\n def _init_thread_local(self):\n self._tls._unique_ids_all_layers = {}\n self._tls._embed_variables = {}\n self._tls._slot_variables = {}\n\n def apply_gradients(self, grads_and_vars):\n \"\"\"Update variable values.\n\n Args:\n grads_and_vars: A list of (gradient, variable) pairs. If the\n variable is from ElasticDL embedding layer, it should be a\n ElasticDL `Tensor` object. 
Otherwise it is a TensorFlow\n variable.\n \"\"\"\n # TODO (#1255): Discuss whether `OptimizerWrapper` needs a lock after\n # implementing PS.\n self._init_thread_local()\n\n grads_and_vars_new = []\n for grad, var in grads_and_vars:\n # If var is a string, this grad var pair is for ElasticDL embedding\n if isinstance(var, str):\n grads_and_vars_new.append(\n self._get_embedding_var_and_grad(grad, var)\n )\n else:\n grads_and_vars_new.append((grad, var))\n\n self._opt.apply_gradients(grads_and_vars_new)\n self._update_embedding_param()\n self._delete_variables()\n\n def _get_embedding_var_and_grad(self, grad, layer_name):\n unique_ids, indices = tf.unique(grad.indices)\n unique_ids = unique_ids.numpy()\n if layer_name in self._tls._unique_ids_all_layers:\n # TODO: support grads_and_vars with duplicated layer name\n logger.warning(\n \"grads_and_vars has duplicated layer name %s.\" % layer_name\n )\n self._tls._unique_ids_all_layers[layer_name] = unique_ids\n new_grad = tf.IndexedSlices(values=grad.values, indices=indices)\n\n embed_value = self._lookup_embedding_func(layer_name, unique_ids)\n embed_var = self._create_embedding_variable(layer_name, embed_value)\n self._get_slot_and_set_to_optimizer(layer_name)\n return new_grad, embed_var\n\n def _create_embedding_variable(self, name, initial_value):\n \"\"\"Creates a TensorFlow variable using given initial value.\n\n Note that this function saves the created variable to\n `self._tls._embed_variables`.\n \"\"\"\n embed_var = tf.Variable(\n initial_value,\n name=name + str(threading.get_ident()),\n shape=initial_value.shape,\n dtype=tf.float32,\n trainable=False,\n )\n self._tls._embed_variables[name] = embed_var\n return embed_var\n\n def _get_slot_and_set_to_optimizer(self, layer_name):\n \"\"\"Looks up slot value and set it to TensorFlow optimizer.\"\"\"\n for slot_name in self._allowed_slot_names:\n param_name = get_slot_table_name(layer_name, slot_name)\n indices = self._tls._unique_ids_all_layers[layer_name]\n slot_value = self._lookup_embedding_func(param_name, indices)\n # self._create_slot_variable creates a slot variable in tf\n # optimizer and set slot_value to it.\n self._create_slot_variable(layer_name, slot_name, slot_value)\n\n def _get_slot_variable(self, layer_name, slot_name):\n \"\"\"Get the variable for specified slot.\"\"\"\n return self._tls._slot_variables.get(layer_name, {}).get(\n slot_name, None\n )\n\n def _get_embedding_variable(self, layer_name):\n \"\"\"Get the variable for the specified ElasticDL embedding layer.\"\"\"\n return self._tls._embed_variables.get(layer_name, None)\n\n def _create_slot_variable(self, layer_name, slot_name, initial_value):\n \"\"\"Creates a slot variable in TensorFlow optimizer using given\n value.\n \"\"\"\n embed_var = self._get_embedding_variable(layer_name)\n if embed_var is None:\n raise RuntimeError(\n \"Embedding variable for layer %s should be already created.\"\n % (layer_name)\n )\n slot_var = self._create_slot_variable_in_optimizer(\n embed_var, slot_name, initial_value.shape, initial_value\n )\n slot_variables_dict = self._tls._slot_variables.setdefault(\n layer_name, {}\n )\n slot_variables_dict[slot_name] = slot_var\n return slot_var\n\n # This is a function modified from TensorFlow optimizers.\n # https://github.com/tensorflow/tensorflow/blob/\n # 69b1feac62276edcc509ac88af229c6236e645fe/tensorflow/python\n # /keras/optimizer_v2/optimizer_v2.py#L567\n def _create_slot_variable_in_optimizer(\n self, embed_var, slot_name, shape, initial_value\n ):\n \"\"\"Create variable 
for a slot and save it in TensorFlow optimizer.\"\"\"\n if slot_name not in self._opt._slot_names:\n self._opt._slot_names.append(slot_name)\n var_key = _var_key(embed_var)\n slot_dict = self._opt._slots.setdefault(var_key, {})\n slot_var = slot_dict.get(slot_name, None)\n if slot_var is None:\n slot_var_name = \"%s/%s\" % (embed_var._shared_name, slot_name)\n slot_var = self._opt.add_weight(\n name=slot_var_name,\n shape=shape,\n dtype=embed_var.dtype,\n initializer=initial_value,\n trainable=False,\n )\n slot_dict[slot_name] = slot_var\n self._opt._weights.append(slot_var)\n return slot_var\n else:\n raise RuntimeError(\n \"Variable with var_key %s and slot_name %s is not expected to \"\n \"be in self._opt.\" % (var_key, slot_name)\n )\n\n def _update_embedding_param(self):\n \"\"\"Report updated embedding vectors and slots to kv store.\"\"\"\n for layer, ids in self._tls._unique_ids_all_layers.items():\n value = self._get_embedding_variable(layer).numpy()\n self._update_embedding_func(layer, ids, value)\n\n for slot in self._allowed_slot_names:\n value = self._get_slot_variable(layer, slot).numpy()\n slot_table_name = get_slot_table_name(layer, slot)\n self._update_embedding_func(slot_table_name, ids, value)\n\n def _delete_variables(self):\n # Slot variable access in optimizer requires corresponding embedding\n # variable information. Delete slot variables first.\n for layer_name, slots in self._tls._slot_variables.items():\n embed_var = self._get_embedding_variable(layer_name)\n embed_var_key = _var_key(embed_var)\n del self._opt._slots[embed_var_key]\n for _, var in slots.items():\n opt_weight_iter = 0\n with self._opt_weights_delete_lock:\n while opt_weight_iter < len(self._opt._weights):\n if var is self._opt._weights[opt_weight_iter]:\n self._opt._weights.pop(opt_weight_iter)\n break\n else:\n opt_weight_iter += 1\n for key in list(self._tls._slot_variables.keys()):\n del self._tls._slot_variables[key]\n\n # Delete variables in embed_variables.\n for key in list(self._tls._embed_variables.keys()):\n del self._tls._embed_variables[key]\n\n # Delete variables in unique_ids_all_layers.\n for key in list(self._tls._unique_ids_all_layers.keys()):\n del self._tls._unique_ids_all_layers[key]\n\n @property\n def allowed_slot_names(self):\n return self._allowed_slot_names\n\n # TODO(yunjian.lmh): Do not need to save slot_initial_value in\n # optimizer wrapper after we do not need to support Redis.\n @property\n def slot_initial_value(self):\n return self._slot_initial_value\n", "path": "elasticdl/python/ps/optimizer_wrapper.py"}], "after_files": [{"content": "\"\"\"Optimizer Wrapper for ElasticDL\"\"\"\n\nimport threading\n\nimport tensorflow as tf\nfrom tensorflow.keras.optimizers import (\n SGD,\n Adadelta,\n Adagrad,\n Adam,\n Adamax,\n Ftrl,\n Nadam,\n RMSprop,\n)\n\nfrom elasticdl.python.common.log_utils import default_logger as logger\nfrom elasticdl.python.ps.embedding_table import get_slot_table_name\n\n\ndef _get_embedding_layer_name_from_var(var):\n \"\"\"Get name for ElasticDL embedding layer from variable.\"\"\"\n # Assumes that for ElasticDL embedding layer, variable will be a\n # string representing its layer name\n if isinstance(var, str):\n return var\n return None\n\n\n# This function is taken from `tensorflow.keras.optimizers.Optimizer._var_key`.\n# https://github.com/tensorflow/tensorflow/blob/71d73e56a2e66e4a6805d967cfa48ea\n# 594f8c54e/tensorflow/python/keras/optimizer_v2/optimizer_v2.py#L1033\ndef _var_key(var):\n \"\"\"Key for representing a primary variable, for 
looking up slots.\n\n In graph mode the name is derived from the var shared name.\n In eager mode the name is derived from the var unique id.\n If distribution strategy exists, get the primary variable first.\n\n Arguments:\n var: the variable.\n\n Returns:\n the unique name of the variable.\n \"\"\"\n\n # pylint: disable=protected-access\n # Get the distributed variable if it exists.\n if getattr(var, \"_distributed_container\", None) is not None:\n var = var._distributed_container()\n if var._in_graph_mode:\n return var._shared_name\n return var._unique_id\n\n\nclass OptimizerWrapper(object):\n \"\"\" ElasticDL optimizer wrapper.\n\n If model does not use ElasticDL embedding layer, `OptimizerWrapper`\n does nothing but calls `apply_gradients` function of TensorFlow optimizer.\n Otherwise, `OptimizerWrapper` looks up embedding vectors and slot values\n from external kv store before updating variables, and updates embedding\n vectors and slot values in kv store after updating variables.\n \"\"\"\n\n def __init__(\n self,\n opt,\n use_async=False,\n lookup_embedding_func=None,\n update_embedding_func=None,\n ):\n \"\"\"\n Note:\n We need to support Redis and ElasticDL parameter server at the\n same time. If `lookup_embedding_func`/`update_embedding_func`\n is not None, use parameter server to lookup/update embedding.\n Otherwise use Redis.\n\n Arguments:\n opt: A TensorFlow optimizer instance.\n kv_store_endpoint: The endpoint to kv store.\n embedding_dims: A python dictionary of\n {layer name: `embedding_dim`} where layer name is the\n name of ElasticDL embedding layer and `embedding_dim`\n is the output dimension of corresponding embedding layer.\n use_async: A python bool. True if using asynchronous updates. When\n using asynchronoues updates, `OptimizerWrapper` is thread-safe\n for non-embedding variables and is not thread-safe for\n embedding table.\n lookup_embedding_func: The function to lookup embeddings. The\n argument of this function is a list of keys.\n update_embedding_func: The function to update embeddings. 
The\n arguments of this function is a key list and a value list.\n \"\"\"\n self._opt = opt\n self._use_async = use_async\n self._lookup_embedding_func = lookup_embedding_func\n self._update_embedding_func = update_embedding_func\n self._slot_initial_value = {}\n\n self._update_gradient_lock = threading.Lock()\n self._tls = threading.local()\n self._init_thread_local()\n\n # \"-\" in slot name is not supported\n if isinstance(opt, SGD):\n self._allowed_slot_names = []\n if opt._momentum:\n self._allowed_slot_names.append(\"momentum\")\n\n elif isinstance(opt, (Adam, Adamax, Nadam)):\n self._allowed_slot_names = [\"m\", \"v\"]\n if isinstance(opt, Adam) and self._opt.amsgrad:\n self._allowed_slot_names.append(\"vhat\")\n\n elif isinstance(opt, Adadelta):\n self._allowed_slot_names = [\"accum_grad\", \"accum_var\"]\n\n elif isinstance(opt, (Adagrad, Ftrl)):\n self._allowed_slot_names = [\"accumulator\"]\n if isinstance(opt, Ftrl):\n self._allowed_slot_names.append(\"linear\")\n accumu_init = opt._initial_accumulator_value\n self._slot_initial_value[\"accumulator\"] = accumu_init\n\n elif isinstance(opt, RMSprop):\n self._allowed_slot_names = [\"rms\"]\n if self._opt._momentum:\n self._allowed_slot_names.append(\"momentum\")\n if self._opt.centered:\n self._allowed_slot_names.append(\"mg\")\n\n else:\n raise NotImplementedError(\n \"Optimizer %s is not supported in ElasticDL.\" % type(opt)\n )\n\n for slot in self._allowed_slot_names:\n self._slot_initial_value.setdefault(slot, 0.0)\n\n def _init_thread_local(self):\n self._tls._unique_ids_all_layers = {}\n self._tls._embed_variables = {}\n self._tls._slot_variables = {}\n\n def apply_gradients(self, grads_and_vars):\n \"\"\"Update variable values.\n\n Args:\n grads_and_vars: A list of (gradient, variable) pairs. If the\n variable is from ElasticDL embedding layer, it should be a\n ElasticDL `Tensor` object. 
Otherwise it is a TensorFlow\n variable.\n \"\"\"\n self._init_thread_local()\n\n with self._update_gradient_lock:\n grads_and_vars_new = []\n for grad, var in grads_and_vars:\n # If var is a string, create the grad var pair for\n # ElasticDL embedding\n if isinstance(var, str):\n grads_and_vars_new.append(\n self._get_embedding_var_and_grad(grad, var)\n )\n else:\n grads_and_vars_new.append((grad, var))\n\n self._opt.apply_gradients(grads_and_vars_new)\n self._update_embedding_param()\n self._delete_variables()\n\n def _get_embedding_var_and_grad(self, grad, layer_name):\n unique_ids, indices = tf.unique(grad.indices)\n unique_ids = unique_ids.numpy()\n if layer_name in self._tls._unique_ids_all_layers:\n # TODO: support grads_and_vars with duplicated layer name\n logger.warning(\n \"grads_and_vars has duplicated layer name %s.\" % layer_name\n )\n self._tls._unique_ids_all_layers[layer_name] = unique_ids\n new_grad = tf.IndexedSlices(values=grad.values, indices=indices)\n\n embed_value = self._lookup_embedding_func(layer_name, unique_ids)\n embed_var = self._create_embedding_variable(layer_name, embed_value)\n self._get_slot_and_set_to_optimizer(layer_name)\n return new_grad, embed_var\n\n def _create_embedding_variable(self, name, initial_value):\n \"\"\"Creates a TensorFlow variable using given initial value.\n\n Note that this function saves the created variable to\n `self._tls._embed_variables`.\n \"\"\"\n embed_var = tf.Variable(\n initial_value,\n name=name + str(threading.get_ident()),\n shape=initial_value.shape,\n dtype=tf.float32,\n trainable=False,\n )\n self._tls._embed_variables[name] = embed_var\n return embed_var\n\n def _get_slot_and_set_to_optimizer(self, layer_name):\n \"\"\"Looks up slot value and set it to TensorFlow optimizer.\"\"\"\n for slot_name in self._allowed_slot_names:\n param_name = get_slot_table_name(layer_name, slot_name)\n indices = self._tls._unique_ids_all_layers[layer_name]\n slot_value = self._lookup_embedding_func(param_name, indices)\n # self._create_slot_variable creates a slot variable in tf\n # optimizer and set slot_value to it.\n self._create_slot_variable(layer_name, slot_name, slot_value)\n\n def _get_slot_variable(self, layer_name, slot_name):\n \"\"\"Get the variable for specified slot.\"\"\"\n return self._tls._slot_variables.get(layer_name, {}).get(\n slot_name, None\n )\n\n def _get_embedding_variable(self, layer_name):\n \"\"\"Get the variable for the specified ElasticDL embedding layer.\"\"\"\n return self._tls._embed_variables.get(layer_name, None)\n\n def _create_slot_variable(self, layer_name, slot_name, initial_value):\n \"\"\"Creates a slot variable in TensorFlow optimizer using given\n value.\n \"\"\"\n embed_var = self._get_embedding_variable(layer_name)\n if embed_var is None:\n raise RuntimeError(\n \"Embedding variable for layer %s should be already created.\"\n % (layer_name)\n )\n slot_var = self._create_slot_variable_in_optimizer(\n embed_var, slot_name, initial_value.shape, initial_value\n )\n slot_variables_dict = self._tls._slot_variables.setdefault(\n layer_name, {}\n )\n slot_variables_dict[slot_name] = slot_var\n return slot_var\n\n # This is a function modified from TensorFlow optimizers.\n # https://github.com/tensorflow/tensorflow/blob/\n # 69b1feac62276edcc509ac88af229c6236e645fe/tensorflow/python\n # /keras/optimizer_v2/optimizer_v2.py#L567\n def _create_slot_variable_in_optimizer(\n self, embed_var, slot_name, shape, initial_value\n ):\n \"\"\"Create variable for a slot and save it in TensorFlow 
optimizer.\"\"\"\n if slot_name not in self._opt._slot_names:\n self._opt._slot_names.append(slot_name)\n var_key = _var_key(embed_var)\n slot_dict = self._opt._slots.setdefault(var_key, {})\n slot_var = slot_dict.get(slot_name, None)\n if slot_var is None:\n slot_var_name = \"%s/%s\" % (embed_var._shared_name, slot_name)\n slot_var = self._opt.add_weight(\n name=slot_var_name,\n shape=shape,\n dtype=embed_var.dtype,\n initializer=initial_value,\n trainable=False,\n )\n slot_dict[slot_name] = slot_var\n self._opt._weights.append(slot_var)\n return slot_var\n else:\n raise RuntimeError(\n \"Variable with var_key %s and slot_name %s is not expected to \"\n \"be in self._opt.\" % (var_key, slot_name)\n )\n\n def _update_embedding_param(self):\n \"\"\"Report updated embedding vectors and slots to kv store.\"\"\"\n for layer, ids in self._tls._unique_ids_all_layers.items():\n value = self._get_embedding_variable(layer).numpy()\n self._update_embedding_func(layer, ids, value)\n\n for slot in self._allowed_slot_names:\n value = self._get_slot_variable(layer, slot).numpy()\n slot_table_name = get_slot_table_name(layer, slot)\n self._update_embedding_func(slot_table_name, ids, value)\n\n def _delete_variables(self):\n # Slot variable access in optimizer requires corresponding embedding\n # variable information. Delete slot variables first.\n for layer_name, slots in self._tls._slot_variables.items():\n embed_var = self._get_embedding_variable(layer_name)\n embed_var_key = _var_key(embed_var)\n del self._opt._slots[embed_var_key]\n for _, var in slots.items():\n opt_weight_iter = 0\n while opt_weight_iter < len(self._opt._weights):\n if var is self._opt._weights[opt_weight_iter]:\n self._opt._weights.pop(opt_weight_iter)\n break\n else:\n opt_weight_iter += 1\n for key in list(self._tls._slot_variables.keys()):\n del self._tls._slot_variables[key]\n\n # Delete variables in embed_variables.\n for key in list(self._tls._embed_variables.keys()):\n del self._tls._embed_variables[key]\n\n # Delete variables in unique_ids_all_layers.\n for key in list(self._tls._unique_ids_all_layers.keys()):\n del self._tls._unique_ids_all_layers[key]\n\n @property\n def allowed_slot_names(self):\n return self._allowed_slot_names\n\n # TODO(yunjian.lmh): Do not need to save slot_initial_value in\n # optimizer wrapper after we do not need to support Redis.\n @property\n def slot_initial_value(self):\n return self._slot_initial_value\n", "path": "elasticdl/python/ps/optimizer_wrapper.py"}]}
3,924
654
gh_patches_debug_2165
rasdani/github-patches
git_diff
huggingface__huggingface_hub-234
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Error when creating a repository This error happens on huggingface_hub version 0.0.14 (current version) when creating a new dataset repository ```bash (py38) bash-3.2$ huggingface-cli repo create --type dataset codeparrot-train git version 2.29.2 git-lfs/2.13.3 (GitHub; darwin amd64; go 1.16.2; git a5e65851) Traceback (most recent call last): File "/Users/thomwolf/miniconda2/envs/py38/bin/huggingface-cli", line 8, in <module> sys.exit(main()) File "/Users/thomwolf/miniconda2/envs/py38/lib/python3.8/site-packages/huggingface_hub/commands/huggingface_cli.py", line 41, in main service.run() File "/Users/thomwolf/miniconda2/envs/py38/lib/python3.8/site-packages/huggingface_hub/commands/user.py", line 228, in run user, _ = self._api.whoami(token) ValueError: too many values to unpack (expected 2) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/huggingface_hub/commands/user.py` Content: ``` 1 # Copyright 2020 The HuggingFace Team. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import subprocess 16 from argparse import ArgumentParser 17 from getpass import getpass 18 from typing import List, Union 19 20 from huggingface_hub.commands import BaseHuggingfaceCLICommand 21 from huggingface_hub.constants import REPO_TYPES, REPO_TYPES_URL_PREFIXES 22 from huggingface_hub.hf_api import HfApi, HfFolder 23 from requests.exceptions import HTTPError 24 25 26 class UserCommands(BaseHuggingfaceCLICommand): 27 @staticmethod 28 def register_subcommand(parser: ArgumentParser): 29 login_parser = parser.add_parser( 30 "login", help="Log in using the same credentials as on huggingface.co" 31 ) 32 login_parser.set_defaults(func=lambda args: LoginCommand(args)) 33 whoami_parser = parser.add_parser( 34 "whoami", help="Find out which huggingface.co account you are logged in as." 35 ) 36 whoami_parser.set_defaults(func=lambda args: WhoamiCommand(args)) 37 logout_parser = parser.add_parser("logout", help="Log out") 38 logout_parser.set_defaults(func=lambda args: LogoutCommand(args)) 39 40 # new system: git-based repo system 41 repo_parser = parser.add_parser( 42 "repo", 43 help="{create, ls-files} Commands to interact with your huggingface.co repos.", 44 ) 45 repo_subparsers = repo_parser.add_subparsers( 46 help="huggingface.co repos related commands" 47 ) 48 ls_parser = repo_subparsers.add_parser( 49 "ls-files", help="List all your files on huggingface.co" 50 ) 51 ls_parser.add_argument( 52 "--organization", type=str, help="Optional: organization namespace." 53 ) 54 ls_parser.set_defaults(func=lambda args: ListReposObjsCommand(args)) 55 repo_create_parser = repo_subparsers.add_parser( 56 "create", help="Create a new repo on huggingface.co" 57 ) 58 repo_create_parser.add_argument( 59 "name", 60 type=str, 61 help="Name for your repo. 
Will be namespaced under your username to build the repo id.", 62 ) 63 repo_create_parser.add_argument( 64 "--type", 65 type=str, 66 help='Optional: repo_type: set to "dataset" or "space" if creating a dataset or space, default is model.', 67 ) 68 repo_create_parser.add_argument( 69 "--organization", type=str, help="Optional: organization namespace." 70 ) 71 repo_create_parser.add_argument( 72 "-y", 73 "--yes", 74 action="store_true", 75 help="Optional: answer Yes to the prompt", 76 ) 77 repo_create_parser.set_defaults(func=lambda args: RepoCreateCommand(args)) 78 79 80 class ANSI: 81 """ 82 Helper for en.wikipedia.org/wiki/ANSI_escape_code 83 """ 84 85 _bold = "\u001b[1m" 86 _red = "\u001b[31m" 87 _gray = "\u001b[90m" 88 _reset = "\u001b[0m" 89 90 @classmethod 91 def bold(cls, s): 92 return "{}{}{}".format(cls._bold, s, cls._reset) 93 94 @classmethod 95 def red(cls, s): 96 return "{}{}{}".format(cls._bold + cls._red, s, cls._reset) 97 98 @classmethod 99 def gray(cls, s): 100 return "{}{}{}".format(cls._gray, s, cls._reset) 101 102 103 def tabulate(rows: List[List[Union[str, int]]], headers: List[str]) -> str: 104 """ 105 Inspired by: 106 107 - stackoverflow.com/a/8356620/593036 108 - stackoverflow.com/questions/9535954/printing-lists-as-tabular-data 109 """ 110 col_widths = [max(len(str(x)) for x in col) for col in zip(*rows, headers)] 111 row_format = ("{{:{}}} " * len(headers)).format(*col_widths) 112 lines = [] 113 lines.append(row_format.format(*headers)) 114 lines.append(row_format.format(*["-" * w for w in col_widths])) 115 for row in rows: 116 lines.append(row_format.format(*row)) 117 return "\n".join(lines) 118 119 120 class BaseUserCommand: 121 def __init__(self, args): 122 self.args = args 123 self._api = HfApi() 124 125 126 class LoginCommand(BaseUserCommand): 127 def run(self): 128 print( # docstyle-ignore 129 """ 130 _| _| _| _| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _|_|_|_| _|_| _|_|_| _|_|_|_| 131 _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _| 132 _|_|_|_| _| _| _| _|_| _| _|_| _| _| _| _| _| _|_| _|_|_| _|_|_|_| _| _|_|_| 133 _| _| _| _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _| 134 _| _| _|_| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _| _| _| _|_|_| _|_|_|_| 135 136 """ 137 ) 138 username = input("Username: ") 139 password = getpass() 140 try: 141 token = self._api.login(username, password) 142 except HTTPError as e: 143 # probably invalid credentials, display error message. 
144 print(e) 145 print(ANSI.red(e.response.text)) 146 exit(1) 147 HfFolder.save_token(token) 148 print("Login successful") 149 print("Your token has been saved to", HfFolder.path_token) 150 151 152 class WhoamiCommand(BaseUserCommand): 153 def run(self): 154 token = HfFolder.get_token() 155 if token is None: 156 print("Not logged in") 157 exit() 158 try: 159 info = self._api.whoami(token) 160 print(info["name"]) 161 orgs = [org["name"] for org in info["orgs"]] 162 if orgs: 163 print(ANSI.bold("orgs: "), ",".join(orgs)) 164 except HTTPError as e: 165 print(e) 166 print(ANSI.red(e.response.text)) 167 exit(1) 168 169 170 class LogoutCommand(BaseUserCommand): 171 def run(self): 172 token = HfFolder.get_token() 173 if token is None: 174 print("Not logged in") 175 exit() 176 HfFolder.delete_token() 177 self._api.logout(token) 178 print("Successfully logged out.") 179 180 181 class ListReposObjsCommand(BaseUserCommand): 182 def run(self): 183 token = HfFolder.get_token() 184 if token is None: 185 print("Not logged in") 186 exit(1) 187 try: 188 objs = self._api.list_repos_objs(token, organization=self.args.organization) 189 except HTTPError as e: 190 print(e) 191 print(ANSI.red(e.response.text)) 192 exit(1) 193 if len(objs) == 0: 194 print("No shared file yet") 195 exit() 196 rows = [[obj.filename, obj.lastModified, obj.commit, obj.size] for obj in objs] 197 print( 198 tabulate(rows, headers=["Filename", "LastModified", "Commit-Sha", "Size"]) 199 ) 200 201 202 class RepoCreateCommand(BaseUserCommand): 203 def run(self): 204 token = HfFolder.get_token() 205 if token is None: 206 print("Not logged in") 207 exit(1) 208 try: 209 stdout = subprocess.check_output(["git", "--version"]).decode("utf-8") 210 print(ANSI.gray(stdout.strip())) 211 except FileNotFoundError: 212 print("Looks like you do not have git installed, please install.") 213 214 try: 215 stdout = subprocess.check_output(["git-lfs", "--version"]).decode("utf-8") 216 print(ANSI.gray(stdout.strip())) 217 except FileNotFoundError: 218 print( 219 ANSI.red( 220 "Looks like you do not have git-lfs installed, please install." 221 " You can install from https://git-lfs.github.com/." 222 " Then run `git lfs install` (you only have to do this once)." 223 ) 224 ) 225 print("") 226 227 user, _ = self._api.whoami(token) 228 namespace = ( 229 self.args.organization if self.args.organization is not None else user 230 ) 231 232 repo_id = f"{namespace}/{self.args.name}" 233 234 if self.args.type not in REPO_TYPES: 235 print("Invalid repo --type") 236 exit(1) 237 238 if self.args.type in REPO_TYPES_URL_PREFIXES: 239 repo_id = REPO_TYPES_URL_PREFIXES[self.args.type] + repo_id 240 241 print("You are about to create {}".format(ANSI.bold(repo_id))) 242 243 if not self.args.yes: 244 choice = input("Proceed? [Y/n] ").lower() 245 if not (choice == "" or choice == "y" or choice == "yes"): 246 print("Abort") 247 exit() 248 try: 249 url = self._api.create_repo( 250 token, 251 name=self.args.name, 252 organization=self.args.organization, 253 repo_type=self.args.type, 254 ) 255 except HTTPError as e: 256 print(e) 257 print(ANSI.red(e.response.text)) 258 exit(1) 259 print("\nYour repo now lives at:") 260 print(" {}".format(ANSI.bold(url))) 261 print( 262 "\nYou can clone it locally with the command below," 263 " and commit/push as usual." 264 ) 265 print(f"\n git clone {url}") 266 print("") 267 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/huggingface_hub/commands/user.py b/src/huggingface_hub/commands/user.py --- a/src/huggingface_hub/commands/user.py +++ b/src/huggingface_hub/commands/user.py @@ -224,7 +224,7 @@ ) print("") - user, _ = self._api.whoami(token) + user = self._api.whoami(token)["name"] namespace = ( self.args.organization if self.args.organization is not None else user )
{"golden_diff": "diff --git a/src/huggingface_hub/commands/user.py b/src/huggingface_hub/commands/user.py\n--- a/src/huggingface_hub/commands/user.py\n+++ b/src/huggingface_hub/commands/user.py\n@@ -224,7 +224,7 @@\n )\n print(\"\")\n \n- user, _ = self._api.whoami(token)\n+ user = self._api.whoami(token)[\"name\"]\n namespace = (\n self.args.organization if self.args.organization is not None else user\n )\n", "issue": "Error when creating a repository\nThis error happens on huggingface_hub version 0.0.14 (current version) when creating a new dataset repository\r\n\r\n```bash\r\n(py38) bash-3.2$ huggingface-cli repo create --type dataset codeparrot-train\r\ngit version 2.29.2\r\ngit-lfs/2.13.3 (GitHub; darwin amd64; go 1.16.2; git a5e65851)\r\n\r\nTraceback (most recent call last):\r\n File \"/Users/thomwolf/miniconda2/envs/py38/bin/huggingface-cli\", line 8, in <module>\r\n sys.exit(main())\r\n File \"/Users/thomwolf/miniconda2/envs/py38/lib/python3.8/site-packages/huggingface_hub/commands/huggingface_cli.py\", line 41, in main\r\n service.run()\r\n File \"/Users/thomwolf/miniconda2/envs/py38/lib/python3.8/site-packages/huggingface_hub/commands/user.py\", line 228, in run\r\n user, _ = self._api.whoami(token)\r\nValueError: too many values to unpack (expected 2)\r\n```\n", "before_files": [{"content": "# Copyright 2020 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport subprocess\nfrom argparse import ArgumentParser\nfrom getpass import getpass\nfrom typing import List, Union\n\nfrom huggingface_hub.commands import BaseHuggingfaceCLICommand\nfrom huggingface_hub.constants import REPO_TYPES, REPO_TYPES_URL_PREFIXES\nfrom huggingface_hub.hf_api import HfApi, HfFolder\nfrom requests.exceptions import HTTPError\n\n\nclass UserCommands(BaseHuggingfaceCLICommand):\n @staticmethod\n def register_subcommand(parser: ArgumentParser):\n login_parser = parser.add_parser(\n \"login\", help=\"Log in using the same credentials as on huggingface.co\"\n )\n login_parser.set_defaults(func=lambda args: LoginCommand(args))\n whoami_parser = parser.add_parser(\n \"whoami\", help=\"Find out which huggingface.co account you are logged in as.\"\n )\n whoami_parser.set_defaults(func=lambda args: WhoamiCommand(args))\n logout_parser = parser.add_parser(\"logout\", help=\"Log out\")\n logout_parser.set_defaults(func=lambda args: LogoutCommand(args))\n\n # new system: git-based repo system\n repo_parser = parser.add_parser(\n \"repo\",\n help=\"{create, ls-files} Commands to interact with your huggingface.co repos.\",\n )\n repo_subparsers = repo_parser.add_subparsers(\n help=\"huggingface.co repos related commands\"\n )\n ls_parser = repo_subparsers.add_parser(\n \"ls-files\", help=\"List all your files on huggingface.co\"\n )\n ls_parser.add_argument(\n \"--organization\", type=str, help=\"Optional: organization namespace.\"\n )\n ls_parser.set_defaults(func=lambda args: ListReposObjsCommand(args))\n repo_create_parser = repo_subparsers.add_parser(\n \"create\", 
help=\"Create a new repo on huggingface.co\"\n )\n repo_create_parser.add_argument(\n \"name\",\n type=str,\n help=\"Name for your repo. Will be namespaced under your username to build the repo id.\",\n )\n repo_create_parser.add_argument(\n \"--type\",\n type=str,\n help='Optional: repo_type: set to \"dataset\" or \"space\" if creating a dataset or space, default is model.',\n )\n repo_create_parser.add_argument(\n \"--organization\", type=str, help=\"Optional: organization namespace.\"\n )\n repo_create_parser.add_argument(\n \"-y\",\n \"--yes\",\n action=\"store_true\",\n help=\"Optional: answer Yes to the prompt\",\n )\n repo_create_parser.set_defaults(func=lambda args: RepoCreateCommand(args))\n\n\nclass ANSI:\n \"\"\"\n Helper for en.wikipedia.org/wiki/ANSI_escape_code\n \"\"\"\n\n _bold = \"\\u001b[1m\"\n _red = \"\\u001b[31m\"\n _gray = \"\\u001b[90m\"\n _reset = \"\\u001b[0m\"\n\n @classmethod\n def bold(cls, s):\n return \"{}{}{}\".format(cls._bold, s, cls._reset)\n\n @classmethod\n def red(cls, s):\n return \"{}{}{}\".format(cls._bold + cls._red, s, cls._reset)\n\n @classmethod\n def gray(cls, s):\n return \"{}{}{}\".format(cls._gray, s, cls._reset)\n\n\ndef tabulate(rows: List[List[Union[str, int]]], headers: List[str]) -> str:\n \"\"\"\n Inspired by:\n\n - stackoverflow.com/a/8356620/593036\n - stackoverflow.com/questions/9535954/printing-lists-as-tabular-data\n \"\"\"\n col_widths = [max(len(str(x)) for x in col) for col in zip(*rows, headers)]\n row_format = (\"{{:{}}} \" * len(headers)).format(*col_widths)\n lines = []\n lines.append(row_format.format(*headers))\n lines.append(row_format.format(*[\"-\" * w for w in col_widths]))\n for row in rows:\n lines.append(row_format.format(*row))\n return \"\\n\".join(lines)\n\n\nclass BaseUserCommand:\n def __init__(self, args):\n self.args = args\n self._api = HfApi()\n\n\nclass LoginCommand(BaseUserCommand):\n def run(self):\n print( # docstyle-ignore\n \"\"\"\n _| _| _| _| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _|_|_|_| _|_| _|_|_| _|_|_|_|\n _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _|\n _|_|_|_| _| _| _| _|_| _| _|_| _| _| _| _| _| _|_| _|_|_| _|_|_|_| _| _|_|_|\n _| _| _| _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _|\n _| _| _|_| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _| _| _| _|_|_| _|_|_|_|\n\n \"\"\"\n )\n username = input(\"Username: \")\n password = getpass()\n try:\n token = self._api.login(username, password)\n except HTTPError as e:\n # probably invalid credentials, display error message.\n print(e)\n print(ANSI.red(e.response.text))\n exit(1)\n HfFolder.save_token(token)\n print(\"Login successful\")\n print(\"Your token has been saved to\", HfFolder.path_token)\n\n\nclass WhoamiCommand(BaseUserCommand):\n def run(self):\n token = HfFolder.get_token()\n if token is None:\n print(\"Not logged in\")\n exit()\n try:\n info = self._api.whoami(token)\n print(info[\"name\"])\n orgs = [org[\"name\"] for org in info[\"orgs\"]]\n if orgs:\n print(ANSI.bold(\"orgs: \"), \",\".join(orgs))\n except HTTPError as e:\n print(e)\n print(ANSI.red(e.response.text))\n exit(1)\n\n\nclass LogoutCommand(BaseUserCommand):\n def run(self):\n token = HfFolder.get_token()\n if token is None:\n print(\"Not logged in\")\n exit()\n HfFolder.delete_token()\n self._api.logout(token)\n print(\"Successfully logged out.\")\n\n\nclass ListReposObjsCommand(BaseUserCommand):\n def run(self):\n token = HfFolder.get_token()\n if token is None:\n print(\"Not logged in\")\n exit(1)\n try:\n objs = self._api.list_repos_objs(token, 
organization=self.args.organization)\n except HTTPError as e:\n print(e)\n print(ANSI.red(e.response.text))\n exit(1)\n if len(objs) == 0:\n print(\"No shared file yet\")\n exit()\n rows = [[obj.filename, obj.lastModified, obj.commit, obj.size] for obj in objs]\n print(\n tabulate(rows, headers=[\"Filename\", \"LastModified\", \"Commit-Sha\", \"Size\"])\n )\n\n\nclass RepoCreateCommand(BaseUserCommand):\n def run(self):\n token = HfFolder.get_token()\n if token is None:\n print(\"Not logged in\")\n exit(1)\n try:\n stdout = subprocess.check_output([\"git\", \"--version\"]).decode(\"utf-8\")\n print(ANSI.gray(stdout.strip()))\n except FileNotFoundError:\n print(\"Looks like you do not have git installed, please install.\")\n\n try:\n stdout = subprocess.check_output([\"git-lfs\", \"--version\"]).decode(\"utf-8\")\n print(ANSI.gray(stdout.strip()))\n except FileNotFoundError:\n print(\n ANSI.red(\n \"Looks like you do not have git-lfs installed, please install.\"\n \" You can install from https://git-lfs.github.com/.\"\n \" Then run `git lfs install` (you only have to do this once).\"\n )\n )\n print(\"\")\n\n user, _ = self._api.whoami(token)\n namespace = (\n self.args.organization if self.args.organization is not None else user\n )\n\n repo_id = f\"{namespace}/{self.args.name}\"\n\n if self.args.type not in REPO_TYPES:\n print(\"Invalid repo --type\")\n exit(1)\n\n if self.args.type in REPO_TYPES_URL_PREFIXES:\n repo_id = REPO_TYPES_URL_PREFIXES[self.args.type] + repo_id\n\n print(\"You are about to create {}\".format(ANSI.bold(repo_id)))\n\n if not self.args.yes:\n choice = input(\"Proceed? [Y/n] \").lower()\n if not (choice == \"\" or choice == \"y\" or choice == \"yes\"):\n print(\"Abort\")\n exit()\n try:\n url = self._api.create_repo(\n token,\n name=self.args.name,\n organization=self.args.organization,\n repo_type=self.args.type,\n )\n except HTTPError as e:\n print(e)\n print(ANSI.red(e.response.text))\n exit(1)\n print(\"\\nYour repo now lives at:\")\n print(\" {}\".format(ANSI.bold(url)))\n print(\n \"\\nYou can clone it locally with the command below,\"\n \" and commit/push as usual.\"\n )\n print(f\"\\n git clone {url}\")\n print(\"\")\n", "path": "src/huggingface_hub/commands/user.py"}], "after_files": [{"content": "# Copyright 2020 The HuggingFace Team. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport subprocess\nfrom argparse import ArgumentParser\nfrom getpass import getpass\nfrom typing import List, Union\n\nfrom huggingface_hub.commands import BaseHuggingfaceCLICommand\nfrom huggingface_hub.constants import REPO_TYPES, REPO_TYPES_URL_PREFIXES\nfrom huggingface_hub.hf_api import HfApi, HfFolder\nfrom requests.exceptions import HTTPError\n\n\nclass UserCommands(BaseHuggingfaceCLICommand):\n @staticmethod\n def register_subcommand(parser: ArgumentParser):\n login_parser = parser.add_parser(\n \"login\", help=\"Log in using the same credentials as on huggingface.co\"\n )\n login_parser.set_defaults(func=lambda args: LoginCommand(args))\n whoami_parser = parser.add_parser(\n \"whoami\", help=\"Find out which huggingface.co account you are logged in as.\"\n )\n whoami_parser.set_defaults(func=lambda args: WhoamiCommand(args))\n logout_parser = parser.add_parser(\"logout\", help=\"Log out\")\n logout_parser.set_defaults(func=lambda args: LogoutCommand(args))\n\n # new system: git-based repo system\n repo_parser = parser.add_parser(\n \"repo\",\n help=\"{create, ls-files} Commands to interact with your huggingface.co repos.\",\n )\n repo_subparsers = repo_parser.add_subparsers(\n help=\"huggingface.co repos related commands\"\n )\n ls_parser = repo_subparsers.add_parser(\n \"ls-files\", help=\"List all your files on huggingface.co\"\n )\n ls_parser.add_argument(\n \"--organization\", type=str, help=\"Optional: organization namespace.\"\n )\n ls_parser.set_defaults(func=lambda args: ListReposObjsCommand(args))\n repo_create_parser = repo_subparsers.add_parser(\n \"create\", help=\"Create a new repo on huggingface.co\"\n )\n repo_create_parser.add_argument(\n \"name\",\n type=str,\n help=\"Name for your repo. 
Will be namespaced under your username to build the repo id.\",\n )\n repo_create_parser.add_argument(\n \"--type\",\n type=str,\n help='Optional: repo_type: set to \"dataset\" or \"space\" if creating a dataset or space, default is model.',\n )\n repo_create_parser.add_argument(\n \"--organization\", type=str, help=\"Optional: organization namespace.\"\n )\n repo_create_parser.add_argument(\n \"-y\",\n \"--yes\",\n action=\"store_true\",\n help=\"Optional: answer Yes to the prompt\",\n )\n repo_create_parser.set_defaults(func=lambda args: RepoCreateCommand(args))\n\n\nclass ANSI:\n \"\"\"\n Helper for en.wikipedia.org/wiki/ANSI_escape_code\n \"\"\"\n\n _bold = \"\\u001b[1m\"\n _red = \"\\u001b[31m\"\n _gray = \"\\u001b[90m\"\n _reset = \"\\u001b[0m\"\n\n @classmethod\n def bold(cls, s):\n return \"{}{}{}\".format(cls._bold, s, cls._reset)\n\n @classmethod\n def red(cls, s):\n return \"{}{}{}\".format(cls._bold + cls._red, s, cls._reset)\n\n @classmethod\n def gray(cls, s):\n return \"{}{}{}\".format(cls._gray, s, cls._reset)\n\n\ndef tabulate(rows: List[List[Union[str, int]]], headers: List[str]) -> str:\n \"\"\"\n Inspired by:\n\n - stackoverflow.com/a/8356620/593036\n - stackoverflow.com/questions/9535954/printing-lists-as-tabular-data\n \"\"\"\n col_widths = [max(len(str(x)) for x in col) for col in zip(*rows, headers)]\n row_format = (\"{{:{}}} \" * len(headers)).format(*col_widths)\n lines = []\n lines.append(row_format.format(*headers))\n lines.append(row_format.format(*[\"-\" * w for w in col_widths]))\n for row in rows:\n lines.append(row_format.format(*row))\n return \"\\n\".join(lines)\n\n\nclass BaseUserCommand:\n def __init__(self, args):\n self.args = args\n self._api = HfApi()\n\n\nclass LoginCommand(BaseUserCommand):\n def run(self):\n print( # docstyle-ignore\n \"\"\"\n _| _| _| _| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _|_|_|_| _|_| _|_|_| _|_|_|_|\n _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _|\n _|_|_|_| _| _| _| _|_| _| _|_| _| _| _| _| _| _|_| _|_|_| _|_|_|_| _| _|_|_|\n _| _| _| _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _|\n _| _| _|_| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _| _| _| _|_|_| _|_|_|_|\n\n \"\"\"\n )\n username = input(\"Username: \")\n password = getpass()\n try:\n token = self._api.login(username, password)\n except HTTPError as e:\n # probably invalid credentials, display error message.\n print(e)\n print(ANSI.red(e.response.text))\n exit(1)\n HfFolder.save_token(token)\n print(\"Login successful\")\n print(\"Your token has been saved to\", HfFolder.path_token)\n\n\nclass WhoamiCommand(BaseUserCommand):\n def run(self):\n token = HfFolder.get_token()\n if token is None:\n print(\"Not logged in\")\n exit()\n try:\n info = self._api.whoami(token)\n print(info[\"name\"])\n orgs = [org[\"name\"] for org in info[\"orgs\"]]\n if orgs:\n print(ANSI.bold(\"orgs: \"), \",\".join(orgs))\n except HTTPError as e:\n print(e)\n print(ANSI.red(e.response.text))\n exit(1)\n\n\nclass LogoutCommand(BaseUserCommand):\n def run(self):\n token = HfFolder.get_token()\n if token is None:\n print(\"Not logged in\")\n exit()\n HfFolder.delete_token()\n self._api.logout(token)\n print(\"Successfully logged out.\")\n\n\nclass ListReposObjsCommand(BaseUserCommand):\n def run(self):\n token = HfFolder.get_token()\n if token is None:\n print(\"Not logged in\")\n exit(1)\n try:\n objs = self._api.list_repos_objs(token, organization=self.args.organization)\n except HTTPError as e:\n print(e)\n print(ANSI.red(e.response.text))\n exit(1)\n if len(objs) == 0:\n print(\"No shared file yet\")\n 
exit()\n rows = [[obj.filename, obj.lastModified, obj.commit, obj.size] for obj in objs]\n print(\n tabulate(rows, headers=[\"Filename\", \"LastModified\", \"Commit-Sha\", \"Size\"])\n )\n\n\nclass RepoCreateCommand(BaseUserCommand):\n def run(self):\n token = HfFolder.get_token()\n if token is None:\n print(\"Not logged in\")\n exit(1)\n try:\n stdout = subprocess.check_output([\"git\", \"--version\"]).decode(\"utf-8\")\n print(ANSI.gray(stdout.strip()))\n except FileNotFoundError:\n print(\"Looks like you do not have git installed, please install.\")\n\n try:\n stdout = subprocess.check_output([\"git-lfs\", \"--version\"]).decode(\"utf-8\")\n print(ANSI.gray(stdout.strip()))\n except FileNotFoundError:\n print(\n ANSI.red(\n \"Looks like you do not have git-lfs installed, please install.\"\n \" You can install from https://git-lfs.github.com/.\"\n \" Then run `git lfs install` (you only have to do this once).\"\n )\n )\n print(\"\")\n\n user = self._api.whoami(token)[\"name\"]\n namespace = (\n self.args.organization if self.args.organization is not None else user\n )\n\n repo_id = f\"{namespace}/{self.args.name}\"\n\n if self.args.type not in REPO_TYPES:\n print(\"Invalid repo --type\")\n exit(1)\n\n if self.args.type in REPO_TYPES_URL_PREFIXES:\n repo_id = REPO_TYPES_URL_PREFIXES[self.args.type] + repo_id\n\n print(\"You are about to create {}\".format(ANSI.bold(repo_id)))\n\n if not self.args.yes:\n choice = input(\"Proceed? [Y/n] \").lower()\n if not (choice == \"\" or choice == \"y\" or choice == \"yes\"):\n print(\"Abort\")\n exit()\n try:\n url = self._api.create_repo(\n token,\n name=self.args.name,\n organization=self.args.organization,\n repo_type=self.args.type,\n )\n except HTTPError as e:\n print(e)\n print(ANSI.red(e.response.text))\n exit(1)\n print(\"\\nYour repo now lives at:\")\n print(\" {}\".format(ANSI.bold(url)))\n print(\n \"\\nYou can clone it locally with the command below,\"\n \" and commit/push as usual.\"\n )\n print(f\"\\n git clone {url}\")\n print(\"\")\n", "path": "src/huggingface_hub/commands/user.py"}]}
3,443
116
gh_patches_debug_3232
rasdani/github-patches
git_diff
ibis-project__ibis-4167
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- feat(sqlalchemy): allow non-primitive arrays SQLAlchemy type inference fails on arrays where the value type is not a primitive or string prevent use cases like `array<array<int>>` from working. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ibis/backends/base/sql/alchemy/datatypes.py` Content: ``` 1 from __future__ import annotations 2 3 import functools 4 from typing import Iterable 5 6 import sqlalchemy as sa 7 from sqlalchemy.dialects import mysql, postgresql, sqlite 8 from sqlalchemy.dialects.mysql.base import MySQLDialect 9 from sqlalchemy.dialects.postgresql.base import PGDialect 10 from sqlalchemy.dialects.sqlite.base import SQLiteDialect 11 from sqlalchemy.engine.interfaces import Dialect 12 from sqlalchemy.types import UserDefinedType 13 14 import ibis.expr.datatypes as dt 15 import ibis.expr.schema as sch 16 from ibis.backends.base.sql.alchemy.geospatial import geospatial_supported 17 18 if geospatial_supported: 19 import geoalchemy2 as ga 20 21 22 class StructType(UserDefinedType): 23 def __init__( 24 self, 25 pairs: Iterable[tuple[str, sa.types.TypeEngine]], 26 ): 27 self.pairs = [ 28 (name, sa.types.to_instance(type)) for name, type in pairs 29 ] 30 31 def get_col_spec(self, **_): 32 pairs = ", ".join(f"{k} {v}" for k, v in self.pairs) 33 return f"STRUCT({pairs})" 34 35 36 def table_from_schema(name, meta, schema, database: str | None = None): 37 # Convert Ibis schema to SQLA table 38 columns = [] 39 40 for colname, dtype in zip(schema.names, schema.types): 41 satype = to_sqla_type(dtype) 42 column = sa.Column(colname, satype, nullable=dtype.nullable) 43 columns.append(column) 44 45 return sa.Table(name, meta, schema=database, *columns) 46 47 48 # TODO(cleanup) 49 ibis_type_to_sqla = { 50 dt.Null: sa.types.NullType, 51 dt.Date: sa.Date, 52 dt.Time: sa.Time, 53 dt.Boolean: sa.Boolean, 54 dt.Binary: sa.LargeBinary, 55 dt.String: sa.Text, 56 dt.Decimal: sa.NUMERIC, 57 # Mantissa-based 58 dt.Float16: sa.REAL, 59 dt.Float32: sa.REAL, 60 dt.Float64: sa.FLOAT, 61 dt.Int8: sa.SmallInteger, 62 dt.Int16: sa.SmallInteger, 63 dt.Int32: sa.Integer, 64 dt.Int64: sa.BigInteger, 65 dt.JSON: sa.JSON, 66 } 67 68 69 @functools.singledispatch 70 def to_sqla_type(itype, type_map=None): 71 if type_map is None: 72 type_map = ibis_type_to_sqla 73 return type_map[type(itype)] 74 75 76 @to_sqla_type.register(dt.Decimal) 77 def _(itype, **kwargs): 78 return sa.types.NUMERIC(itype.precision, itype.scale) 79 80 81 @to_sqla_type.register(dt.Interval) 82 def _(itype, **kwargs): 83 return sa.types.Interval() 84 85 86 @to_sqla_type.register(dt.Date) 87 def _(itype, **kwargs): 88 return sa.Date() 89 90 91 @to_sqla_type.register(dt.Timestamp) 92 def _(itype, **kwargs): 93 return sa.TIMESTAMP(bool(itype.timezone)) 94 95 96 @to_sqla_type.register(dt.Array) 97 def _(itype, **kwargs): 98 ibis_type = itype.value_type 99 if not isinstance(ibis_type, (dt.Primitive, dt.String)): 100 raise TypeError(f'Type {ibis_type} is not a primitive or string type') 101 return sa.ARRAY(to_sqla_type(ibis_type, **kwargs)) 102 103 104 @to_sqla_type.register(dt.Struct) 105 def _(itype, **_): 106 return StructType( 107 [(name, to_sqla_type(type)) for name, type in itype.pairs.items()] 108 ) 109 110 111 @to_sqla_type.register(dt.GeoSpatial) 112 def _(itype, **kwargs): 113 if itype.geotype == 'geometry': 114 return ga.Geometry 115 
elif itype.geotype == 'geography': 116 return ga.Geography 117 else: 118 return ga.types._GISType 119 120 121 @dt.dtype.register(Dialect, sa.types.NullType) 122 def sa_null(_, satype, nullable=True): 123 return dt.null 124 125 126 @dt.dtype.register(Dialect, sa.types.Boolean) 127 def sa_boolean(_, satype, nullable=True): 128 return dt.Boolean(nullable=nullable) 129 130 131 @dt.dtype.register(MySQLDialect, mysql.NUMERIC) 132 def sa_mysql_numeric(_, satype, nullable=True): 133 # https://dev.mysql.com/doc/refman/8.0/en/fixed-point-types.html 134 return dt.Decimal( 135 satype.precision or 10, satype.scale or 0, nullable=nullable 136 ) 137 138 139 @dt.dtype.register(Dialect, sa.types.Numeric) 140 @dt.dtype.register(SQLiteDialect, sqlite.NUMERIC) 141 def sa_numeric(_, satype, nullable=True): 142 return dt.Decimal(satype.precision, satype.scale, nullable=nullable) 143 144 145 @dt.dtype.register(Dialect, sa.types.SmallInteger) 146 def sa_smallint(_, satype, nullable=True): 147 return dt.Int16(nullable=nullable) 148 149 150 @dt.dtype.register(Dialect, sa.types.Integer) 151 def sa_integer(_, satype, nullable=True): 152 return dt.Int32(nullable=nullable) 153 154 155 @dt.dtype.register(Dialect, mysql.TINYINT) 156 def sa_mysql_tinyint(_, satype, nullable=True): 157 return dt.Int8(nullable=nullable) 158 159 160 @dt.dtype.register(Dialect, sa.types.BigInteger) 161 def sa_bigint(_, satype, nullable=True): 162 return dt.Int64(nullable=nullable) 163 164 165 @dt.dtype.register(Dialect, sa.REAL) 166 def sa_float(_, satype, nullable=True): 167 return dt.Float32(nullable=nullable) 168 169 170 @dt.dtype.register(Dialect, sa.FLOAT) 171 @dt.dtype.register(SQLiteDialect, sa.REAL) 172 @dt.dtype.register(PGDialect, postgresql.DOUBLE_PRECISION) 173 def sa_double(_, satype, nullable=True): 174 return dt.Float64(nullable=nullable) 175 176 177 @dt.dtype.register(PGDialect, postgresql.UUID) 178 def sa_uuid(_, satype, nullable=True): 179 return dt.UUID(nullable=nullable) 180 181 182 @dt.dtype.register(PGDialect, postgresql.MACADDR) 183 def sa_macaddr(_, satype, nullable=True): 184 return dt.MACADDR(nullable=nullable) 185 186 187 @dt.dtype.register(PGDialect, postgresql.INET) 188 def sa_inet(_, satype, nullable=True): 189 return dt.INET(nullable=nullable) 190 191 192 @dt.dtype.register(PGDialect, postgresql.JSON) 193 def sa_json(_, satype, nullable=True): 194 return dt.JSON(nullable=nullable) 195 196 197 @dt.dtype.register(PGDialect, postgresql.JSONB) 198 def sa_jsonb(_, satype, nullable=True): 199 return dt.JSONB(nullable=nullable) 200 201 202 if geospatial_supported: 203 204 @dt.dtype.register(Dialect, (ga.Geometry, ga.types._GISType)) 205 def ga_geometry(_, gatype, nullable=True): 206 t = gatype.geometry_type 207 if t == 'POINT': 208 return dt.Point(nullable=nullable) 209 if t == 'LINESTRING': 210 return dt.LineString(nullable=nullable) 211 if t == 'POLYGON': 212 return dt.Polygon(nullable=nullable) 213 if t == 'MULTILINESTRING': 214 return dt.MultiLineString(nullable=nullable) 215 if t == 'MULTIPOINT': 216 return dt.MultiPoint(nullable=nullable) 217 if t == 'MULTIPOLYGON': 218 return dt.MultiPolygon(nullable=nullable) 219 if t == 'GEOMETRY': 220 return dt.Geometry(nullable=nullable) 221 else: 222 raise ValueError(f"Unrecognized geometry type: {t}") 223 224 225 POSTGRES_FIELD_TO_IBIS_UNIT = { 226 "YEAR": "Y", 227 "MONTH": "M", 228 "DAY": "D", 229 "HOUR": "h", 230 "MINUTE": "m", 231 "SECOND": "s", 232 "YEAR TO MONTH": "M", 233 "DAY TO HOUR": "h", 234 "DAY TO MINUTE": "m", 235 "DAY TO SECOND": "s", 236 "HOUR TO MINUTE": 
"m", 237 "HOUR TO SECOND": "s", 238 "MINUTE TO SECOND": "s", 239 } 240 241 242 @dt.dtype.register(PGDialect, postgresql.INTERVAL) 243 def sa_postgres_interval(_, satype, nullable=True): 244 field = satype.fields.upper() 245 unit = POSTGRES_FIELD_TO_IBIS_UNIT.get(field, None) 246 if unit is None: 247 raise ValueError(f"Unknown PostgreSQL interval field {field!r}") 248 elif unit in {"Y", "M"}: 249 raise ValueError( 250 "Variable length timedeltas are not yet supported with PostgreSQL" 251 ) 252 return dt.Interval(unit=unit, nullable=nullable) 253 254 255 @dt.dtype.register(MySQLDialect, mysql.DOUBLE) 256 def sa_mysql_double(_, satype, nullable=True): 257 # TODO: handle asdecimal=True 258 return dt.Float64(nullable=nullable) 259 260 261 @dt.dtype.register(Dialect, sa.types.String) 262 def sa_string(_, satype, nullable=True): 263 return dt.String(nullable=nullable) 264 265 266 @dt.dtype.register(Dialect, sa.LargeBinary) 267 def sa_binary(_, satype, nullable=True): 268 return dt.Binary(nullable=nullable) 269 270 271 @dt.dtype.register(Dialect, sa.Time) 272 def sa_time(_, satype, nullable=True): 273 return dt.Time(nullable=nullable) 274 275 276 @dt.dtype.register(Dialect, sa.Date) 277 def sa_date(_, satype, nullable=True): 278 return dt.Date(nullable=nullable) 279 280 281 @dt.dtype.register(Dialect, sa.DateTime) 282 def sa_datetime(_, satype, nullable=True, default_timezone='UTC'): 283 timezone = default_timezone if satype.timezone else None 284 return dt.Timestamp(timezone=timezone, nullable=nullable) 285 286 287 @dt.dtype.register(Dialect, sa.ARRAY) 288 def sa_array(dialect, satype, nullable=True): 289 dimensions = satype.dimensions 290 if dimensions is not None and dimensions != 1: 291 raise NotImplementedError('Nested array types not yet supported') 292 293 value_dtype = dt.dtype(dialect, satype.item_type) 294 return dt.Array(value_dtype, nullable=nullable) 295 296 297 @dt.dtype.register(Dialect, StructType) 298 def sa_struct(dialect, satype, nullable=True): 299 pairs = [(name, dt.dtype(dialect, typ)) for name, typ in satype.pairs] 300 return dt.Struct.from_tuples(pairs, nullable=nullable) 301 302 303 @sch.infer.register((sa.Table, sa.sql.TableClause)) 304 def schema_from_table(table, schema=None): 305 """Retrieve an ibis schema from a SQLAlchemy ``Table``. 306 307 Parameters 308 ---------- 309 table : sa.Table 310 311 Returns 312 ------- 313 schema : ibis.expr.datatypes.Schema 314 An ibis schema corresponding to the types of the columns in `table`. 315 """ 316 schema = schema if schema is not None else {} 317 pairs = [] 318 for name, column in zip(table.columns.keys(), table.columns): 319 if name in schema: 320 dtype = dt.dtype(schema[name]) 321 else: 322 dtype = dt.dtype( 323 getattr(table.bind, 'dialect', Dialect()), 324 column.type, 325 nullable=column.nullable, 326 ) 327 pairs.append((name, dtype)) 328 return sch.schema(pairs) 329 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ibis/backends/base/sql/alchemy/datatypes.py b/ibis/backends/base/sql/alchemy/datatypes.py --- a/ibis/backends/base/sql/alchemy/datatypes.py +++ b/ibis/backends/base/sql/alchemy/datatypes.py @@ -96,8 +96,6 @@ @to_sqla_type.register(dt.Array) def _(itype, **kwargs): ibis_type = itype.value_type - if not isinstance(ibis_type, (dt.Primitive, dt.String)): - raise TypeError(f'Type {ibis_type} is not a primitive or string type') return sa.ARRAY(to_sqla_type(ibis_type, **kwargs))
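As a rough illustration of what the patch above enables (a sketch under the assumption that the module path and constructors shown in the file listing are importable as-is; it is not part of the record): with the `isinstance` guard gone, the `dt.Array` handler simply recurses through `to_sqla_type`, so a nested element type such as `array<array<int64>>` resolves to nested `sa.ARRAY` instances instead of raising `TypeError`.

```python
# Sketch only: exercise the dt.Array -> sa.ARRAY dispatch once the guard is removed.
import sqlalchemy as sa
import ibis.expr.datatypes as dt
from ibis.backends.base.sql.alchemy.datatypes import to_sqla_type

nested = dt.Array(dt.Array(dt.int64))   # ibis type: array<array<int64>>
satype = to_sqla_type(nested)           # recurses to sa.ARRAY(sa.ARRAY(sa.BigInteger))

assert isinstance(satype, sa.ARRAY)
assert isinstance(satype.item_type, sa.ARRAY)  # inner array now survives the conversion
```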
{"golden_diff": "diff --git a/ibis/backends/base/sql/alchemy/datatypes.py b/ibis/backends/base/sql/alchemy/datatypes.py\n--- a/ibis/backends/base/sql/alchemy/datatypes.py\n+++ b/ibis/backends/base/sql/alchemy/datatypes.py\n@@ -96,8 +96,6 @@\n @to_sqla_type.register(dt.Array)\n def _(itype, **kwargs):\n ibis_type = itype.value_type\n- if not isinstance(ibis_type, (dt.Primitive, dt.String)):\n- raise TypeError(f'Type {ibis_type} is not a primitive or string type')\n return sa.ARRAY(to_sqla_type(ibis_type, **kwargs))\n", "issue": "feat(sqlalchemy): allow non-primitive arrays\nSQLAlchemy type inference fails on arrays where the value type is not a primitive or string prevent use cases like `array<array<int>>` from working.\n", "before_files": [{"content": "from __future__ import annotations\n\nimport functools\nfrom typing import Iterable\n\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql, postgresql, sqlite\nfrom sqlalchemy.dialects.mysql.base import MySQLDialect\nfrom sqlalchemy.dialects.postgresql.base import PGDialect\nfrom sqlalchemy.dialects.sqlite.base import SQLiteDialect\nfrom sqlalchemy.engine.interfaces import Dialect\nfrom sqlalchemy.types import UserDefinedType\n\nimport ibis.expr.datatypes as dt\nimport ibis.expr.schema as sch\nfrom ibis.backends.base.sql.alchemy.geospatial import geospatial_supported\n\nif geospatial_supported:\n import geoalchemy2 as ga\n\n\nclass StructType(UserDefinedType):\n def __init__(\n self,\n pairs: Iterable[tuple[str, sa.types.TypeEngine]],\n ):\n self.pairs = [\n (name, sa.types.to_instance(type)) for name, type in pairs\n ]\n\n def get_col_spec(self, **_):\n pairs = \", \".join(f\"{k} {v}\" for k, v in self.pairs)\n return f\"STRUCT({pairs})\"\n\n\ndef table_from_schema(name, meta, schema, database: str | None = None):\n # Convert Ibis schema to SQLA table\n columns = []\n\n for colname, dtype in zip(schema.names, schema.types):\n satype = to_sqla_type(dtype)\n column = sa.Column(colname, satype, nullable=dtype.nullable)\n columns.append(column)\n\n return sa.Table(name, meta, schema=database, *columns)\n\n\n# TODO(cleanup)\nibis_type_to_sqla = {\n dt.Null: sa.types.NullType,\n dt.Date: sa.Date,\n dt.Time: sa.Time,\n dt.Boolean: sa.Boolean,\n dt.Binary: sa.LargeBinary,\n dt.String: sa.Text,\n dt.Decimal: sa.NUMERIC,\n # Mantissa-based\n dt.Float16: sa.REAL,\n dt.Float32: sa.REAL,\n dt.Float64: sa.FLOAT,\n dt.Int8: sa.SmallInteger,\n dt.Int16: sa.SmallInteger,\n dt.Int32: sa.Integer,\n dt.Int64: sa.BigInteger,\n dt.JSON: sa.JSON,\n}\n\n\[email protected]\ndef to_sqla_type(itype, type_map=None):\n if type_map is None:\n type_map = ibis_type_to_sqla\n return type_map[type(itype)]\n\n\n@to_sqla_type.register(dt.Decimal)\ndef _(itype, **kwargs):\n return sa.types.NUMERIC(itype.precision, itype.scale)\n\n\n@to_sqla_type.register(dt.Interval)\ndef _(itype, **kwargs):\n return sa.types.Interval()\n\n\n@to_sqla_type.register(dt.Date)\ndef _(itype, **kwargs):\n return sa.Date()\n\n\n@to_sqla_type.register(dt.Timestamp)\ndef _(itype, **kwargs):\n return sa.TIMESTAMP(bool(itype.timezone))\n\n\n@to_sqla_type.register(dt.Array)\ndef _(itype, **kwargs):\n ibis_type = itype.value_type\n if not isinstance(ibis_type, (dt.Primitive, dt.String)):\n raise TypeError(f'Type {ibis_type} is not a primitive or string type')\n return sa.ARRAY(to_sqla_type(ibis_type, **kwargs))\n\n\n@to_sqla_type.register(dt.Struct)\ndef _(itype, **_):\n return StructType(\n [(name, to_sqla_type(type)) for name, type in itype.pairs.items()]\n 
)\n\n\n@to_sqla_type.register(dt.GeoSpatial)\ndef _(itype, **kwargs):\n if itype.geotype == 'geometry':\n return ga.Geometry\n elif itype.geotype == 'geography':\n return ga.Geography\n else:\n return ga.types._GISType\n\n\[email protected](Dialect, sa.types.NullType)\ndef sa_null(_, satype, nullable=True):\n return dt.null\n\n\[email protected](Dialect, sa.types.Boolean)\ndef sa_boolean(_, satype, nullable=True):\n return dt.Boolean(nullable=nullable)\n\n\[email protected](MySQLDialect, mysql.NUMERIC)\ndef sa_mysql_numeric(_, satype, nullable=True):\n # https://dev.mysql.com/doc/refman/8.0/en/fixed-point-types.html\n return dt.Decimal(\n satype.precision or 10, satype.scale or 0, nullable=nullable\n )\n\n\[email protected](Dialect, sa.types.Numeric)\[email protected](SQLiteDialect, sqlite.NUMERIC)\ndef sa_numeric(_, satype, nullable=True):\n return dt.Decimal(satype.precision, satype.scale, nullable=nullable)\n\n\[email protected](Dialect, sa.types.SmallInteger)\ndef sa_smallint(_, satype, nullable=True):\n return dt.Int16(nullable=nullable)\n\n\[email protected](Dialect, sa.types.Integer)\ndef sa_integer(_, satype, nullable=True):\n return dt.Int32(nullable=nullable)\n\n\[email protected](Dialect, mysql.TINYINT)\ndef sa_mysql_tinyint(_, satype, nullable=True):\n return dt.Int8(nullable=nullable)\n\n\[email protected](Dialect, sa.types.BigInteger)\ndef sa_bigint(_, satype, nullable=True):\n return dt.Int64(nullable=nullable)\n\n\[email protected](Dialect, sa.REAL)\ndef sa_float(_, satype, nullable=True):\n return dt.Float32(nullable=nullable)\n\n\[email protected](Dialect, sa.FLOAT)\[email protected](SQLiteDialect, sa.REAL)\[email protected](PGDialect, postgresql.DOUBLE_PRECISION)\ndef sa_double(_, satype, nullable=True):\n return dt.Float64(nullable=nullable)\n\n\[email protected](PGDialect, postgresql.UUID)\ndef sa_uuid(_, satype, nullable=True):\n return dt.UUID(nullable=nullable)\n\n\[email protected](PGDialect, postgresql.MACADDR)\ndef sa_macaddr(_, satype, nullable=True):\n return dt.MACADDR(nullable=nullable)\n\n\[email protected](PGDialect, postgresql.INET)\ndef sa_inet(_, satype, nullable=True):\n return dt.INET(nullable=nullable)\n\n\[email protected](PGDialect, postgresql.JSON)\ndef sa_json(_, satype, nullable=True):\n return dt.JSON(nullable=nullable)\n\n\[email protected](PGDialect, postgresql.JSONB)\ndef sa_jsonb(_, satype, nullable=True):\n return dt.JSONB(nullable=nullable)\n\n\nif geospatial_supported:\n\n @dt.dtype.register(Dialect, (ga.Geometry, ga.types._GISType))\n def ga_geometry(_, gatype, nullable=True):\n t = gatype.geometry_type\n if t == 'POINT':\n return dt.Point(nullable=nullable)\n if t == 'LINESTRING':\n return dt.LineString(nullable=nullable)\n if t == 'POLYGON':\n return dt.Polygon(nullable=nullable)\n if t == 'MULTILINESTRING':\n return dt.MultiLineString(nullable=nullable)\n if t == 'MULTIPOINT':\n return dt.MultiPoint(nullable=nullable)\n if t == 'MULTIPOLYGON':\n return dt.MultiPolygon(nullable=nullable)\n if t == 'GEOMETRY':\n return dt.Geometry(nullable=nullable)\n else:\n raise ValueError(f\"Unrecognized geometry type: {t}\")\n\n\nPOSTGRES_FIELD_TO_IBIS_UNIT = {\n \"YEAR\": \"Y\",\n \"MONTH\": \"M\",\n \"DAY\": \"D\",\n \"HOUR\": \"h\",\n \"MINUTE\": \"m\",\n \"SECOND\": \"s\",\n \"YEAR TO MONTH\": \"M\",\n \"DAY TO HOUR\": \"h\",\n \"DAY TO MINUTE\": \"m\",\n \"DAY TO SECOND\": \"s\",\n \"HOUR TO MINUTE\": \"m\",\n \"HOUR TO SECOND\": \"s\",\n \"MINUTE TO SECOND\": \"s\",\n}\n\n\[email protected](PGDialect, postgresql.INTERVAL)\ndef 
sa_postgres_interval(_, satype, nullable=True):\n field = satype.fields.upper()\n unit = POSTGRES_FIELD_TO_IBIS_UNIT.get(field, None)\n if unit is None:\n raise ValueError(f\"Unknown PostgreSQL interval field {field!r}\")\n elif unit in {\"Y\", \"M\"}:\n raise ValueError(\n \"Variable length timedeltas are not yet supported with PostgreSQL\"\n )\n return dt.Interval(unit=unit, nullable=nullable)\n\n\[email protected](MySQLDialect, mysql.DOUBLE)\ndef sa_mysql_double(_, satype, nullable=True):\n # TODO: handle asdecimal=True\n return dt.Float64(nullable=nullable)\n\n\[email protected](Dialect, sa.types.String)\ndef sa_string(_, satype, nullable=True):\n return dt.String(nullable=nullable)\n\n\[email protected](Dialect, sa.LargeBinary)\ndef sa_binary(_, satype, nullable=True):\n return dt.Binary(nullable=nullable)\n\n\[email protected](Dialect, sa.Time)\ndef sa_time(_, satype, nullable=True):\n return dt.Time(nullable=nullable)\n\n\[email protected](Dialect, sa.Date)\ndef sa_date(_, satype, nullable=True):\n return dt.Date(nullable=nullable)\n\n\[email protected](Dialect, sa.DateTime)\ndef sa_datetime(_, satype, nullable=True, default_timezone='UTC'):\n timezone = default_timezone if satype.timezone else None\n return dt.Timestamp(timezone=timezone, nullable=nullable)\n\n\[email protected](Dialect, sa.ARRAY)\ndef sa_array(dialect, satype, nullable=True):\n dimensions = satype.dimensions\n if dimensions is not None and dimensions != 1:\n raise NotImplementedError('Nested array types not yet supported')\n\n value_dtype = dt.dtype(dialect, satype.item_type)\n return dt.Array(value_dtype, nullable=nullable)\n\n\[email protected](Dialect, StructType)\ndef sa_struct(dialect, satype, nullable=True):\n pairs = [(name, dt.dtype(dialect, typ)) for name, typ in satype.pairs]\n return dt.Struct.from_tuples(pairs, nullable=nullable)\n\n\[email protected]((sa.Table, sa.sql.TableClause))\ndef schema_from_table(table, schema=None):\n \"\"\"Retrieve an ibis schema from a SQLAlchemy ``Table``.\n\n Parameters\n ----------\n table : sa.Table\n\n Returns\n -------\n schema : ibis.expr.datatypes.Schema\n An ibis schema corresponding to the types of the columns in `table`.\n \"\"\"\n schema = schema if schema is not None else {}\n pairs = []\n for name, column in zip(table.columns.keys(), table.columns):\n if name in schema:\n dtype = dt.dtype(schema[name])\n else:\n dtype = dt.dtype(\n getattr(table.bind, 'dialect', Dialect()),\n column.type,\n nullable=column.nullable,\n )\n pairs.append((name, dtype))\n return sch.schema(pairs)\n", "path": "ibis/backends/base/sql/alchemy/datatypes.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport functools\nfrom typing import Iterable\n\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql, postgresql, sqlite\nfrom sqlalchemy.dialects.mysql.base import MySQLDialect\nfrom sqlalchemy.dialects.postgresql.base import PGDialect\nfrom sqlalchemy.dialects.sqlite.base import SQLiteDialect\nfrom sqlalchemy.engine.interfaces import Dialect\nfrom sqlalchemy.types import UserDefinedType\n\nimport ibis.expr.datatypes as dt\nimport ibis.expr.schema as sch\nfrom ibis.backends.base.sql.alchemy.geospatial import geospatial_supported\n\nif geospatial_supported:\n import geoalchemy2 as ga\n\n\nclass StructType(UserDefinedType):\n def __init__(\n self,\n pairs: Iterable[tuple[str, sa.types.TypeEngine]],\n ):\n self.pairs = [\n (name, sa.types.to_instance(type)) for name, type in pairs\n ]\n\n def get_col_spec(self, **_):\n pairs = \", \".join(f\"{k} {v}\" 
for k, v in self.pairs)\n return f\"STRUCT({pairs})\"\n\n\ndef table_from_schema(name, meta, schema, database: str | None = None):\n # Convert Ibis schema to SQLA table\n columns = []\n\n for colname, dtype in zip(schema.names, schema.types):\n satype = to_sqla_type(dtype)\n column = sa.Column(colname, satype, nullable=dtype.nullable)\n columns.append(column)\n\n return sa.Table(name, meta, schema=database, *columns)\n\n\n# TODO(cleanup)\nibis_type_to_sqla = {\n dt.Null: sa.types.NullType,\n dt.Date: sa.Date,\n dt.Time: sa.Time,\n dt.Boolean: sa.Boolean,\n dt.Binary: sa.LargeBinary,\n dt.String: sa.Text,\n dt.Decimal: sa.NUMERIC,\n # Mantissa-based\n dt.Float16: sa.REAL,\n dt.Float32: sa.REAL,\n dt.Float64: sa.FLOAT,\n dt.Int8: sa.SmallInteger,\n dt.Int16: sa.SmallInteger,\n dt.Int32: sa.Integer,\n dt.Int64: sa.BigInteger,\n dt.JSON: sa.JSON,\n}\n\n\[email protected]\ndef to_sqla_type(itype, type_map=None):\n if type_map is None:\n type_map = ibis_type_to_sqla\n return type_map[type(itype)]\n\n\n@to_sqla_type.register(dt.Decimal)\ndef _(itype, **kwargs):\n return sa.types.NUMERIC(itype.precision, itype.scale)\n\n\n@to_sqla_type.register(dt.Interval)\ndef _(itype, **kwargs):\n return sa.types.Interval()\n\n\n@to_sqla_type.register(dt.Date)\ndef _(itype, **kwargs):\n return sa.Date()\n\n\n@to_sqla_type.register(dt.Timestamp)\ndef _(itype, **kwargs):\n return sa.TIMESTAMP(bool(itype.timezone))\n\n\n@to_sqla_type.register(dt.Array)\ndef _(itype, **kwargs):\n ibis_type = itype.value_type\n return sa.ARRAY(to_sqla_type(ibis_type, **kwargs))\n\n\n@to_sqla_type.register(dt.Struct)\ndef _(itype, **_):\n return StructType(\n [(name, to_sqla_type(type)) for name, type in itype.pairs.items()]\n )\n\n\n@to_sqla_type.register(dt.GeoSpatial)\ndef _(itype, **kwargs):\n if itype.geotype == 'geometry':\n return ga.Geometry\n elif itype.geotype == 'geography':\n return ga.Geography\n else:\n return ga.types._GISType\n\n\[email protected](Dialect, sa.types.NullType)\ndef sa_null(_, satype, nullable=True):\n return dt.null\n\n\[email protected](Dialect, sa.types.Boolean)\ndef sa_boolean(_, satype, nullable=True):\n return dt.Boolean(nullable=nullable)\n\n\[email protected](MySQLDialect, mysql.NUMERIC)\ndef sa_mysql_numeric(_, satype, nullable=True):\n # https://dev.mysql.com/doc/refman/8.0/en/fixed-point-types.html\n return dt.Decimal(\n satype.precision or 10, satype.scale or 0, nullable=nullable\n )\n\n\[email protected](Dialect, sa.types.Numeric)\[email protected](SQLiteDialect, sqlite.NUMERIC)\ndef sa_numeric(_, satype, nullable=True):\n return dt.Decimal(satype.precision, satype.scale, nullable=nullable)\n\n\[email protected](Dialect, sa.types.SmallInteger)\ndef sa_smallint(_, satype, nullable=True):\n return dt.Int16(nullable=nullable)\n\n\[email protected](Dialect, sa.types.Integer)\ndef sa_integer(_, satype, nullable=True):\n return dt.Int32(nullable=nullable)\n\n\[email protected](Dialect, mysql.TINYINT)\ndef sa_mysql_tinyint(_, satype, nullable=True):\n return dt.Int8(nullable=nullable)\n\n\[email protected](Dialect, sa.types.BigInteger)\ndef sa_bigint(_, satype, nullable=True):\n return dt.Int64(nullable=nullable)\n\n\[email protected](Dialect, sa.REAL)\ndef sa_float(_, satype, nullable=True):\n return dt.Float32(nullable=nullable)\n\n\[email protected](Dialect, sa.FLOAT)\[email protected](SQLiteDialect, sa.REAL)\[email protected](PGDialect, postgresql.DOUBLE_PRECISION)\ndef sa_double(_, satype, nullable=True):\n return dt.Float64(nullable=nullable)\n\n\[email protected](PGDialect, 
postgresql.UUID)\ndef sa_uuid(_, satype, nullable=True):\n return dt.UUID(nullable=nullable)\n\n\[email protected](PGDialect, postgresql.MACADDR)\ndef sa_macaddr(_, satype, nullable=True):\n return dt.MACADDR(nullable=nullable)\n\n\[email protected](PGDialect, postgresql.INET)\ndef sa_inet(_, satype, nullable=True):\n return dt.INET(nullable=nullable)\n\n\[email protected](PGDialect, postgresql.JSON)\ndef sa_json(_, satype, nullable=True):\n return dt.JSON(nullable=nullable)\n\n\[email protected](PGDialect, postgresql.JSONB)\ndef sa_jsonb(_, satype, nullable=True):\n return dt.JSONB(nullable=nullable)\n\n\nif geospatial_supported:\n\n @dt.dtype.register(Dialect, (ga.Geometry, ga.types._GISType))\n def ga_geometry(_, gatype, nullable=True):\n t = gatype.geometry_type\n if t == 'POINT':\n return dt.Point(nullable=nullable)\n if t == 'LINESTRING':\n return dt.LineString(nullable=nullable)\n if t == 'POLYGON':\n return dt.Polygon(nullable=nullable)\n if t == 'MULTILINESTRING':\n return dt.MultiLineString(nullable=nullable)\n if t == 'MULTIPOINT':\n return dt.MultiPoint(nullable=nullable)\n if t == 'MULTIPOLYGON':\n return dt.MultiPolygon(nullable=nullable)\n if t == 'GEOMETRY':\n return dt.Geometry(nullable=nullable)\n else:\n raise ValueError(f\"Unrecognized geometry type: {t}\")\n\n\nPOSTGRES_FIELD_TO_IBIS_UNIT = {\n \"YEAR\": \"Y\",\n \"MONTH\": \"M\",\n \"DAY\": \"D\",\n \"HOUR\": \"h\",\n \"MINUTE\": \"m\",\n \"SECOND\": \"s\",\n \"YEAR TO MONTH\": \"M\",\n \"DAY TO HOUR\": \"h\",\n \"DAY TO MINUTE\": \"m\",\n \"DAY TO SECOND\": \"s\",\n \"HOUR TO MINUTE\": \"m\",\n \"HOUR TO SECOND\": \"s\",\n \"MINUTE TO SECOND\": \"s\",\n}\n\n\[email protected](PGDialect, postgresql.INTERVAL)\ndef sa_postgres_interval(_, satype, nullable=True):\n field = satype.fields.upper()\n unit = POSTGRES_FIELD_TO_IBIS_UNIT.get(field, None)\n if unit is None:\n raise ValueError(f\"Unknown PostgreSQL interval field {field!r}\")\n elif unit in {\"Y\", \"M\"}:\n raise ValueError(\n \"Variable length timedeltas are not yet supported with PostgreSQL\"\n )\n return dt.Interval(unit=unit, nullable=nullable)\n\n\[email protected](MySQLDialect, mysql.DOUBLE)\ndef sa_mysql_double(_, satype, nullable=True):\n # TODO: handle asdecimal=True\n return dt.Float64(nullable=nullable)\n\n\[email protected](Dialect, sa.types.String)\ndef sa_string(_, satype, nullable=True):\n return dt.String(nullable=nullable)\n\n\[email protected](Dialect, sa.LargeBinary)\ndef sa_binary(_, satype, nullable=True):\n return dt.Binary(nullable=nullable)\n\n\[email protected](Dialect, sa.Time)\ndef sa_time(_, satype, nullable=True):\n return dt.Time(nullable=nullable)\n\n\[email protected](Dialect, sa.Date)\ndef sa_date(_, satype, nullable=True):\n return dt.Date(nullable=nullable)\n\n\[email protected](Dialect, sa.DateTime)\ndef sa_datetime(_, satype, nullable=True, default_timezone='UTC'):\n timezone = default_timezone if satype.timezone else None\n return dt.Timestamp(timezone=timezone, nullable=nullable)\n\n\[email protected](Dialect, sa.ARRAY)\ndef sa_array(dialect, satype, nullable=True):\n dimensions = satype.dimensions\n if dimensions is not None and dimensions != 1:\n raise NotImplementedError('Nested array types not yet supported')\n\n value_dtype = dt.dtype(dialect, satype.item_type)\n return dt.Array(value_dtype, nullable=nullable)\n\n\[email protected](Dialect, StructType)\ndef sa_struct(dialect, satype, nullable=True):\n pairs = [(name, dt.dtype(dialect, typ)) for name, typ in satype.pairs]\n return dt.Struct.from_tuples(pairs, 
nullable=nullable)\n\n\[email protected]((sa.Table, sa.sql.TableClause))\ndef schema_from_table(table, schema=None):\n \"\"\"Retrieve an ibis schema from a SQLAlchemy ``Table``.\n\n Parameters\n ----------\n table : sa.Table\n\n Returns\n -------\n schema : ibis.expr.datatypes.Schema\n An ibis schema corresponding to the types of the columns in `table`.\n \"\"\"\n schema = schema if schema is not None else {}\n pairs = []\n for name, column in zip(table.columns.keys(), table.columns):\n if name in schema:\n dtype = dt.dtype(schema[name])\n else:\n dtype = dt.dtype(\n getattr(table.bind, 'dialect', Dialect()),\n column.type,\n nullable=column.nullable,\n )\n pairs.append((name, dtype))\n return sch.schema(pairs)\n", "path": "ibis/backends/base/sql/alchemy/datatypes.py"}]}
3,570
152
gh_patches_debug_17045
rasdani/github-patches
git_diff
Lightning-AI__torchmetrics-899
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- The sdr metric in TM sometime gives NaN for some input ## 🐛 Bug This issue is related with `fast-bss-eval`'s torch version, see https://github.com/fakufaku/fast_bss_eval/issues/5 ### To Reproduce ``` import numpy as np import torch x = np.load('debug.npz') preds = torch.tensor(x['preds']) target = torch.tensor(x['target']) print(preds.shape, target.shape) from torchmetrics.functional.audio import signal_distortion_ratio sdr = signal_distortion_ratio(preds, target) print(sdr) from mir_eval.separation import bss_eval_sources sdr, _, _, _ = bss_eval_sources(target.numpy(), preds.numpy(), False) print(sdr) ``` outputs: ``` torch.Size([2, 64000]) torch.Size([2, 64000]) tensor([-2.6815, nan]) [-2.68156071 44.58523729] ``` unzip [data.zip](https://github.com/PyTorchLightning/metrics/files/8309638/data.zip) to get the `debug.npz` <!-- If you have a code sample, error messages, stack traces, please provide it here as well --> #### Code sample <!-- Ideally attach a minimal code sample to reproduce the decried issue. Minimal means having the shortest code but still preserving the bug. --> ### Expected behavior the results given by signal_distortion_ratio is close to the one given by `mir_eval` <!-- A clear and concise description of what you expected to happen. --> ### Environment - OS (e.g., Linux): - Python & PyTorch Version (e.g., 1.0): - How you installed PyTorch (`conda`, `pip`, build command if you used source): - Any other relevant information: ### Additional context <!-- Add any other context about the problem here. --> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `torchmetrics/functional/audio/sdr.py` Content: ``` 1 # Copyright The PyTorch Lightning team. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 15 from typing import Optional 16 17 import torch 18 19 from torchmetrics.utilities.imports import _FAST_BSS_EVAL_AVAILABLE, _TORCH_GREATER_EQUAL_1_8 20 21 if _FAST_BSS_EVAL_AVAILABLE: 22 if _TORCH_GREATER_EQUAL_1_8: 23 from fast_bss_eval.torch.cgd import toeplitz_conjugate_gradient 24 from fast_bss_eval.torch.helpers import _normalize 25 from fast_bss_eval.torch.linalg import toeplitz 26 from fast_bss_eval.torch.metrics import compute_stats 27 28 solve = torch.linalg.solve 29 else: 30 import numpy 31 from fast_bss_eval.numpy.cgd import toeplitz_conjugate_gradient 32 from fast_bss_eval.numpy.helpers import _normalize 33 from fast_bss_eval.numpy.linalg import toeplitz 34 from fast_bss_eval.numpy.metrics import compute_stats 35 36 solve = numpy.linalg.solve 37 else: 38 toeplitz = None 39 toeplitz_conjugate_gradient = None 40 compute_stats = None 41 _normalize = None 42 __doctest_skip__ = ["signal_distortion_ratio"] 43 44 from torch import Tensor 45 46 from torchmetrics.utilities import rank_zero_warn 47 from torchmetrics.utilities.checks import _check_same_shape 48 49 50 def signal_distortion_ratio( 51 preds: Tensor, 52 target: Tensor, 53 use_cg_iter: Optional[int] = None, 54 filter_length: int = 512, 55 zero_mean: bool = False, 56 load_diag: Optional[float] = None, 57 ) -> Tensor: 58 r"""Signal to Distortion Ratio (SDR) [1,2,3] 59 60 Args: 61 preds: 62 shape ``[..., time]`` 63 target: 64 shape ``[..., time]`` 65 use_cg_iter: 66 If provided, an iterative method is used to solve for the distortion 67 filter coefficients instead of direct Gaussian elimination. 68 This can speed up the computation of the metrics in case the filters 69 are long. Using a value of 10 here has been shown to provide 70 good accuracy in most cases and is sufficient when using this 71 loss to train neural separation networks. 72 filter_length: 73 The length of the distortion filter allowed 74 zero_mean: 75 When set to True, the mean of all signals is subtracted prior to computation of the metrics 76 load_diag: 77 If provided, this small value is added to the diagonal coefficients of 78 the system metrics when solving for the filter coefficients. 79 This can help stabilize the metric in the case where some of the reference 80 signals may sometimes be zero 81 82 Raises: 83 ModuleNotFoundError: 84 If ``fast-bss-eval`` package is not installed 85 86 Returns: 87 sdr value of shape ``[...]`` 88 89 Example: 90 91 >>> from torchmetrics.functional.audio import signal_distortion_ratio 92 >>> import torch 93 >>> g = torch.manual_seed(1) 94 >>> preds = torch.randn(8000) 95 >>> target = torch.randn(8000) 96 >>> signal_distortion_ratio(preds, target) 97 tensor(-12.0589) 98 >>> # use with permutation_invariant_training 99 >>> from torchmetrics.functional.audio import permutation_invariant_training 100 >>> preds = torch.randn(4, 2, 8000) # [batch, spk, time] 101 >>> target = torch.randn(4, 2, 8000) 102 >>> best_metric, best_perm = permutation_invariant_training(preds, target, signal_distortion_ratio, 'max') 103 >>> best_metric 104 tensor([-11.6375, -11.4358, -11.7148, -11.6325]) 105 >>> best_perm 106 tensor([[1, 0], 107 [0, 1], 108 [1, 0], 109 [0, 1]]) 110 111 .. note:: 112 1. when pytorch<1.8.0, numpy will be used to calculate this metric, which causes ``sdr`` to be 113 non-differentiable and slower to calculate 114 115 2. using this metrics requires you to have ``fast-bss-eval`` install. Either install as ``pip install 116 torchmetrics[audio]`` or ``pip install fast-bss-eval`` 117 118 3. 
preds and target need to have the same dtype, otherwise target will be converted to preds' dtype 119 120 121 References: 122 [1] Vincent, E., Gribonval, R., & Fevotte, C. (2006). Performance measurement in blind audio source separation. 123 IEEE Transactions on Audio, Speech and Language Processing, 14(4), 1462–1469. 124 125 [2] Scheibler, R. (2021). SDR -- Medium Rare with Fast Computations. 126 127 [3] https://github.com/fakufaku/fast_bss_eval 128 """ 129 if not _FAST_BSS_EVAL_AVAILABLE: 130 raise ModuleNotFoundError( 131 "SDR metric requires that `fast-bss-eval` is installed." 132 " Either install as `pip install torchmetrics[audio]` or `pip install fast-bss-eval`." 133 ) 134 _check_same_shape(preds, target) 135 136 if not preds.dtype.is_floating_point: 137 preds = preds.float() # for torch.norm 138 139 # half precision support 140 if preds.dtype == torch.float16: 141 preds = preds.to(torch.float32) 142 143 if preds.dtype != target.dtype: # for torch.linalg.solve 144 target = target.to(preds.dtype) 145 146 if zero_mean: 147 preds = preds - preds.mean(dim=-1, keepdim=True) 148 target = target - target.mean(dim=-1, keepdim=True) 149 150 # normalize along time-axis 151 if not _TORCH_GREATER_EQUAL_1_8: 152 # use numpy if torch<1.8 153 rank_zero_warn( 154 "Pytorch is under 1.8, thus SDR numpy version is used." 155 "For better performance and differentiability, you should change to Pytorch v1.8 or above." 156 ) 157 device = preds.device 158 preds = preds.detach().cpu().numpy() 159 target = target.detach().cpu().numpy() 160 161 preds = _normalize(preds, axis=-1) 162 target = _normalize(target, axis=-1) 163 else: 164 preds = _normalize(preds, dim=-1) 165 target = _normalize(target, dim=-1) 166 167 # solve for the optimal filter 168 # compute auto-correlation and cross-correlation 169 acf, xcorr = compute_stats(target, preds, length=filter_length, pairwise=False) 170 171 if load_diag is not None: 172 # the diagonal factor of the Toeplitz matrix is the first 173 # coefficient of the acf 174 acf[..., 0] += load_diag 175 176 if use_cg_iter is not None: 177 # use preconditioned conjugate gradient 178 sol = toeplitz_conjugate_gradient(acf, xcorr, n_iter=use_cg_iter) 179 else: 180 # regular matrix solver 181 r_mat = toeplitz(acf) 182 sol = solve(r_mat, xcorr) 183 184 # to tensor if torch<1.8 185 if not _TORCH_GREATER_EQUAL_1_8: 186 sol = torch.tensor(sol, device=device) 187 xcorr = torch.tensor(xcorr, device=device) 188 189 # compute the coherence 190 coh = torch.einsum("...l,...l->...", xcorr, sol) 191 192 # transform to decibels 193 ratio = coh / (1 - coh) 194 val = 10.0 * torch.log10(ratio) 195 return val 196 197 198 def scale_invariant_signal_distortion_ratio(preds: Tensor, target: Tensor, zero_mean: bool = False) -> Tensor: 199 """Calculates Scale-invariant signal-to-distortion ratio (SI-SDR) metric. The SI-SDR value is in general 200 considered an overall measure of how good a source sound. 201 202 Args: 203 preds: 204 shape ``[...,time]`` 205 target: 206 shape ``[...,time]`` 207 zero_mean: 208 If to zero mean target and preds or not 209 210 Returns: 211 si-sdr value of shape [...] 212 213 Example: 214 >>> from torchmetrics.functional.audio import scale_invariant_signal_distortion_ratio 215 >>> target = torch.tensor([3.0, -0.5, 2.0, 7.0]) 216 >>> preds = torch.tensor([2.5, 0.0, 2.0, 8.0]) 217 >>> scale_invariant_signal_distortion_ratio(preds, target) 218 tensor(18.4030) 219 220 References: 221 [1] Le Roux, Jonathan, et al. "SDR half-baked or well done." 
IEEE International Conference on Acoustics, Speech 222 and Signal Processing (ICASSP) 2019. 223 """ 224 _check_same_shape(preds, target) 225 eps = torch.finfo(preds.dtype).eps 226 227 if zero_mean: 228 target = target - torch.mean(target, dim=-1, keepdim=True) 229 preds = preds - torch.mean(preds, dim=-1, keepdim=True) 230 231 alpha = (torch.sum(preds * target, dim=-1, keepdim=True) + eps) / ( 232 torch.sum(target ** 2, dim=-1, keepdim=True) + eps 233 ) 234 target_scaled = alpha * target 235 236 noise = target_scaled - preds 237 238 val = (torch.sum(target_scaled ** 2, dim=-1) + eps) / (torch.sum(noise ** 2, dim=-1) + eps) 239 val = 10 * torch.log10(val) 240 241 return val 242 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/torchmetrics/functional/audio/sdr.py b/torchmetrics/functional/audio/sdr.py --- a/torchmetrics/functional/audio/sdr.py +++ b/torchmetrics/functional/audio/sdr.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import warnings from typing import Optional import torch @@ -192,6 +193,15 @@ # transform to decibels ratio = coh / (1 - coh) val = 10.0 * torch.log10(ratio) + + # recompute sdr in float64 if val is NaN or Inf + if (torch.isnan(val).any() or torch.isinf(val).any()) and preds.dtype != torch.float64: + warnings.warn( + "Detected `nan` or `inf` value in computed metric, retrying computation in double precision", + UserWarning, + ) + val = signal_distortion_ratio(preds.double(), target.double(), use_cg_iter, filter_length, zero_mean, load_diag) + return val
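The core of the change above is a precision fallback: the reported NaN is consistent with the coherence term rounding to 1 in float32 for a near-perfect estimate (mir_eval reports ~44.6 dB), so `1 - coh` collapses to zero or goes negative before the `log10`. A standalone sketch of the same guard, using a hypothetical helper name rather than code taken from the record:

```python
# Sketch of the fallback the patch adds around the SDR result: if float32 math
# produced nan/inf, redo the whole metric in float64 and return that value.
import warnings

import torch


def _retry_in_double(metric_fn, preds, target, **kwargs):
    val = metric_fn(preds, target, **kwargs)
    if (torch.isnan(val).any() or torch.isinf(val).any()) and preds.dtype != torch.float64:
        warnings.warn(
            "Detected `nan` or `inf` value in computed metric, retrying computation in double precision",
            UserWarning,
        )
        val = metric_fn(preds.double(), target.double(), **kwargs)
    return val


# e.g. _retry_in_double(signal_distortion_ratio, preds, target) mirrors the patched behaviour
```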
{"golden_diff": "diff --git a/torchmetrics/functional/audio/sdr.py b/torchmetrics/functional/audio/sdr.py\n--- a/torchmetrics/functional/audio/sdr.py\n+++ b/torchmetrics/functional/audio/sdr.py\n@@ -12,6 +12,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+import warnings\n from typing import Optional\n \n import torch\n@@ -192,6 +193,15 @@\n # transform to decibels\n ratio = coh / (1 - coh)\n val = 10.0 * torch.log10(ratio)\n+\n+ # recompute sdr in float64 if val is NaN or Inf\n+ if (torch.isnan(val).any() or torch.isinf(val).any()) and preds.dtype != torch.float64:\n+ warnings.warn(\n+ \"Detected `nan` or `inf` value in computed metric, retrying computation in double precision\",\n+ UserWarning,\n+ )\n+ val = signal_distortion_ratio(preds.double(), target.double(), use_cg_iter, filter_length, zero_mean, load_diag)\n+\n return val\n", "issue": "The sdr metric in TM sometime gives NaN for some input\n## \ud83d\udc1b Bug\r\n\r\nThis issue is related with `fast-bss-eval`'s torch version, see https://github.com/fakufaku/fast_bss_eval/issues/5\r\n\r\n### To Reproduce\r\n\r\n```\r\nimport numpy as np\r\nimport torch\r\n\r\nx = np.load('debug.npz')\r\npreds = torch.tensor(x['preds'])\r\ntarget = torch.tensor(x['target'])\r\nprint(preds.shape, target.shape)\r\n\r\nfrom torchmetrics.functional.audio import signal_distortion_ratio\r\nsdr = signal_distortion_ratio(preds, target)\r\nprint(sdr)\r\n\r\nfrom mir_eval.separation import bss_eval_sources\r\nsdr, _, _, _ = bss_eval_sources(target.numpy(), preds.numpy(), False)\r\nprint(sdr)\r\n```\r\n\r\noutputs:\r\n```\r\ntorch.Size([2, 64000]) torch.Size([2, 64000])\r\ntensor([-2.6815, nan])\r\n[-2.68156071 44.58523729]\r\n```\r\n\r\nunzip [data.zip](https://github.com/PyTorchLightning/metrics/files/8309638/data.zip) to get the `debug.npz`\r\n\r\n\r\n<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->\r\n\r\n#### Code sample\r\n\r\n<!-- Ideally attach a minimal code sample to reproduce the decried issue.\r\nMinimal means having the shortest code but still preserving the bug. -->\r\n\r\n### Expected behavior\r\n\r\nthe results given by signal_distortion_ratio is close to the one given by `mir_eval`\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\n### Environment\r\n\r\n- OS (e.g., Linux):\r\n- Python & PyTorch Version (e.g., 1.0):\r\n- How you installed PyTorch (`conda`, `pip`, build command if you used source):\r\n- Any other relevant information:\r\n\r\n### Additional context\r\n\r\n<!-- Add any other context about the problem here. 
-->\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Optional\n\nimport torch\n\nfrom torchmetrics.utilities.imports import _FAST_BSS_EVAL_AVAILABLE, _TORCH_GREATER_EQUAL_1_8\n\nif _FAST_BSS_EVAL_AVAILABLE:\n if _TORCH_GREATER_EQUAL_1_8:\n from fast_bss_eval.torch.cgd import toeplitz_conjugate_gradient\n from fast_bss_eval.torch.helpers import _normalize\n from fast_bss_eval.torch.linalg import toeplitz\n from fast_bss_eval.torch.metrics import compute_stats\n\n solve = torch.linalg.solve\n else:\n import numpy\n from fast_bss_eval.numpy.cgd import toeplitz_conjugate_gradient\n from fast_bss_eval.numpy.helpers import _normalize\n from fast_bss_eval.numpy.linalg import toeplitz\n from fast_bss_eval.numpy.metrics import compute_stats\n\n solve = numpy.linalg.solve\nelse:\n toeplitz = None\n toeplitz_conjugate_gradient = None\n compute_stats = None\n _normalize = None\n __doctest_skip__ = [\"signal_distortion_ratio\"]\n\nfrom torch import Tensor\n\nfrom torchmetrics.utilities import rank_zero_warn\nfrom torchmetrics.utilities.checks import _check_same_shape\n\n\ndef signal_distortion_ratio(\n preds: Tensor,\n target: Tensor,\n use_cg_iter: Optional[int] = None,\n filter_length: int = 512,\n zero_mean: bool = False,\n load_diag: Optional[float] = None,\n) -> Tensor:\n r\"\"\"Signal to Distortion Ratio (SDR) [1,2,3]\n\n Args:\n preds:\n shape ``[..., time]``\n target:\n shape ``[..., time]``\n use_cg_iter:\n If provided, an iterative method is used to solve for the distortion\n filter coefficients instead of direct Gaussian elimination.\n This can speed up the computation of the metrics in case the filters\n are long. 
Using a value of 10 here has been shown to provide\n good accuracy in most cases and is sufficient when using this\n loss to train neural separation networks.\n filter_length:\n The length of the distortion filter allowed\n zero_mean:\n When set to True, the mean of all signals is subtracted prior to computation of the metrics\n load_diag:\n If provided, this small value is added to the diagonal coefficients of\n the system metrics when solving for the filter coefficients.\n This can help stabilize the metric in the case where some of the reference\n signals may sometimes be zero\n\n Raises:\n ModuleNotFoundError:\n If ``fast-bss-eval`` package is not installed\n\n Returns:\n sdr value of shape ``[...]``\n\n Example:\n\n >>> from torchmetrics.functional.audio import signal_distortion_ratio\n >>> import torch\n >>> g = torch.manual_seed(1)\n >>> preds = torch.randn(8000)\n >>> target = torch.randn(8000)\n >>> signal_distortion_ratio(preds, target)\n tensor(-12.0589)\n >>> # use with permutation_invariant_training\n >>> from torchmetrics.functional.audio import permutation_invariant_training\n >>> preds = torch.randn(4, 2, 8000) # [batch, spk, time]\n >>> target = torch.randn(4, 2, 8000)\n >>> best_metric, best_perm = permutation_invariant_training(preds, target, signal_distortion_ratio, 'max')\n >>> best_metric\n tensor([-11.6375, -11.4358, -11.7148, -11.6325])\n >>> best_perm\n tensor([[1, 0],\n [0, 1],\n [1, 0],\n [0, 1]])\n\n .. note::\n 1. when pytorch<1.8.0, numpy will be used to calculate this metric, which causes ``sdr`` to be\n non-differentiable and slower to calculate\n\n 2. using this metrics requires you to have ``fast-bss-eval`` install. Either install as ``pip install\n torchmetrics[audio]`` or ``pip install fast-bss-eval``\n\n 3. preds and target need to have the same dtype, otherwise target will be converted to preds' dtype\n\n\n References:\n [1] Vincent, E., Gribonval, R., & Fevotte, C. (2006). Performance measurement in blind audio source separation.\n IEEE Transactions on Audio, Speech and Language Processing, 14(4), 1462\u20131469.\n\n [2] Scheibler, R. (2021). 
SDR -- Medium Rare with Fast Computations.\n\n [3] https://github.com/fakufaku/fast_bss_eval\n \"\"\"\n if not _FAST_BSS_EVAL_AVAILABLE:\n raise ModuleNotFoundError(\n \"SDR metric requires that `fast-bss-eval` is installed.\"\n \" Either install as `pip install torchmetrics[audio]` or `pip install fast-bss-eval`.\"\n )\n _check_same_shape(preds, target)\n\n if not preds.dtype.is_floating_point:\n preds = preds.float() # for torch.norm\n\n # half precision support\n if preds.dtype == torch.float16:\n preds = preds.to(torch.float32)\n\n if preds.dtype != target.dtype: # for torch.linalg.solve\n target = target.to(preds.dtype)\n\n if zero_mean:\n preds = preds - preds.mean(dim=-1, keepdim=True)\n target = target - target.mean(dim=-1, keepdim=True)\n\n # normalize along time-axis\n if not _TORCH_GREATER_EQUAL_1_8:\n # use numpy if torch<1.8\n rank_zero_warn(\n \"Pytorch is under 1.8, thus SDR numpy version is used.\"\n \"For better performance and differentiability, you should change to Pytorch v1.8 or above.\"\n )\n device = preds.device\n preds = preds.detach().cpu().numpy()\n target = target.detach().cpu().numpy()\n\n preds = _normalize(preds, axis=-1)\n target = _normalize(target, axis=-1)\n else:\n preds = _normalize(preds, dim=-1)\n target = _normalize(target, dim=-1)\n\n # solve for the optimal filter\n # compute auto-correlation and cross-correlation\n acf, xcorr = compute_stats(target, preds, length=filter_length, pairwise=False)\n\n if load_diag is not None:\n # the diagonal factor of the Toeplitz matrix is the first\n # coefficient of the acf\n acf[..., 0] += load_diag\n\n if use_cg_iter is not None:\n # use preconditioned conjugate gradient\n sol = toeplitz_conjugate_gradient(acf, xcorr, n_iter=use_cg_iter)\n else:\n # regular matrix solver\n r_mat = toeplitz(acf)\n sol = solve(r_mat, xcorr)\n\n # to tensor if torch<1.8\n if not _TORCH_GREATER_EQUAL_1_8:\n sol = torch.tensor(sol, device=device)\n xcorr = torch.tensor(xcorr, device=device)\n\n # compute the coherence\n coh = torch.einsum(\"...l,...l->...\", xcorr, sol)\n\n # transform to decibels\n ratio = coh / (1 - coh)\n val = 10.0 * torch.log10(ratio)\n return val\n\n\ndef scale_invariant_signal_distortion_ratio(preds: Tensor, target: Tensor, zero_mean: bool = False) -> Tensor:\n \"\"\"Calculates Scale-invariant signal-to-distortion ratio (SI-SDR) metric. The SI-SDR value is in general\n considered an overall measure of how good a source sound.\n\n Args:\n preds:\n shape ``[...,time]``\n target:\n shape ``[...,time]``\n zero_mean:\n If to zero mean target and preds or not\n\n Returns:\n si-sdr value of shape [...]\n\n Example:\n >>> from torchmetrics.functional.audio import scale_invariant_signal_distortion_ratio\n >>> target = torch.tensor([3.0, -0.5, 2.0, 7.0])\n >>> preds = torch.tensor([2.5, 0.0, 2.0, 8.0])\n >>> scale_invariant_signal_distortion_ratio(preds, target)\n tensor(18.4030)\n\n References:\n [1] Le Roux, Jonathan, et al. 
\"SDR half-baked or well done.\" IEEE International Conference on Acoustics, Speech\n and Signal Processing (ICASSP) 2019.\n \"\"\"\n _check_same_shape(preds, target)\n eps = torch.finfo(preds.dtype).eps\n\n if zero_mean:\n target = target - torch.mean(target, dim=-1, keepdim=True)\n preds = preds - torch.mean(preds, dim=-1, keepdim=True)\n\n alpha = (torch.sum(preds * target, dim=-1, keepdim=True) + eps) / (\n torch.sum(target ** 2, dim=-1, keepdim=True) + eps\n )\n target_scaled = alpha * target\n\n noise = target_scaled - preds\n\n val = (torch.sum(target_scaled ** 2, dim=-1) + eps) / (torch.sum(noise ** 2, dim=-1) + eps)\n val = 10 * torch.log10(val)\n\n return val\n", "path": "torchmetrics/functional/audio/sdr.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport warnings\nfrom typing import Optional\n\nimport torch\n\nfrom torchmetrics.utilities.imports import _FAST_BSS_EVAL_AVAILABLE, _TORCH_GREATER_EQUAL_1_8\n\nif _FAST_BSS_EVAL_AVAILABLE:\n if _TORCH_GREATER_EQUAL_1_8:\n from fast_bss_eval.torch.cgd import toeplitz_conjugate_gradient\n from fast_bss_eval.torch.helpers import _normalize\n from fast_bss_eval.torch.linalg import toeplitz\n from fast_bss_eval.torch.metrics import compute_stats\n\n solve = torch.linalg.solve\n else:\n import numpy\n from fast_bss_eval.numpy.cgd import toeplitz_conjugate_gradient\n from fast_bss_eval.numpy.helpers import _normalize\n from fast_bss_eval.numpy.linalg import toeplitz\n from fast_bss_eval.numpy.metrics import compute_stats\n\n solve = numpy.linalg.solve\nelse:\n toeplitz = None\n toeplitz_conjugate_gradient = None\n compute_stats = None\n _normalize = None\n __doctest_skip__ = [\"signal_distortion_ratio\"]\n\nfrom torch import Tensor\n\nfrom torchmetrics.utilities import rank_zero_warn\nfrom torchmetrics.utilities.checks import _check_same_shape\n\n\ndef signal_distortion_ratio(\n preds: Tensor,\n target: Tensor,\n use_cg_iter: Optional[int] = None,\n filter_length: int = 512,\n zero_mean: bool = False,\n load_diag: Optional[float] = None,\n) -> Tensor:\n r\"\"\"Signal to Distortion Ratio (SDR) [1,2,3]\n\n Args:\n preds:\n shape ``[..., time]``\n target:\n shape ``[..., time]``\n use_cg_iter:\n If provided, an iterative method is used to solve for the distortion\n filter coefficients instead of direct Gaussian elimination.\n This can speed up the computation of the metrics in case the filters\n are long. 
Using a value of 10 here has been shown to provide\n good accuracy in most cases and is sufficient when using this\n loss to train neural separation networks.\n filter_length:\n The length of the distortion filter allowed\n zero_mean:\n When set to True, the mean of all signals is subtracted prior to computation of the metrics\n load_diag:\n If provided, this small value is added to the diagonal coefficients of\n the system metrics when solving for the filter coefficients.\n This can help stabilize the metric in the case where some of the reference\n signals may sometimes be zero\n\n Raises:\n ModuleNotFoundError:\n If ``fast-bss-eval`` package is not installed\n\n Returns:\n sdr value of shape ``[...]``\n\n Example:\n\n >>> from torchmetrics.functional.audio import signal_distortion_ratio\n >>> import torch\n >>> g = torch.manual_seed(1)\n >>> preds = torch.randn(8000)\n >>> target = torch.randn(8000)\n >>> signal_distortion_ratio(preds, target)\n tensor(-12.0589)\n >>> # use with permutation_invariant_training\n >>> from torchmetrics.functional.audio import permutation_invariant_training\n >>> preds = torch.randn(4, 2, 8000) # [batch, spk, time]\n >>> target = torch.randn(4, 2, 8000)\n >>> best_metric, best_perm = permutation_invariant_training(preds, target, signal_distortion_ratio, 'max')\n >>> best_metric\n tensor([-11.6375, -11.4358, -11.7148, -11.6325])\n >>> best_perm\n tensor([[1, 0],\n [0, 1],\n [1, 0],\n [0, 1]])\n\n .. note::\n 1. when pytorch<1.8.0, numpy will be used to calculate this metric, which causes ``sdr`` to be\n non-differentiable and slower to calculate\n\n 2. using this metrics requires you to have ``fast-bss-eval`` install. Either install as ``pip install\n torchmetrics[audio]`` or ``pip install fast-bss-eval``\n\n 3. preds and target need to have the same dtype, otherwise target will be converted to preds' dtype\n\n\n References:\n [1] Vincent, E., Gribonval, R., & Fevotte, C. (2006). Performance measurement in blind audio source separation.\n IEEE Transactions on Audio, Speech and Language Processing, 14(4), 1462\u20131469.\n\n [2] Scheibler, R. (2021). 
SDR -- Medium Rare with Fast Computations.\n\n [3] https://github.com/fakufaku/fast_bss_eval\n \"\"\"\n if not _FAST_BSS_EVAL_AVAILABLE:\n raise ModuleNotFoundError(\n \"SDR metric requires that `fast-bss-eval` is installed.\"\n \" Either install as `pip install torchmetrics[audio]` or `pip install fast-bss-eval`.\"\n )\n _check_same_shape(preds, target)\n\n if not preds.dtype.is_floating_point:\n preds = preds.float() # for torch.norm\n\n # half precision support\n if preds.dtype == torch.float16:\n preds = preds.to(torch.float32)\n\n if preds.dtype != target.dtype: # for torch.linalg.solve\n target = target.to(preds.dtype)\n\n if zero_mean:\n preds = preds - preds.mean(dim=-1, keepdim=True)\n target = target - target.mean(dim=-1, keepdim=True)\n\n # normalize along time-axis\n if not _TORCH_GREATER_EQUAL_1_8:\n # use numpy if torch<1.8\n rank_zero_warn(\n \"Pytorch is under 1.8, thus SDR numpy version is used.\"\n \"For better performance and differentiability, you should change to Pytorch v1.8 or above.\"\n )\n device = preds.device\n preds = preds.detach().cpu().numpy()\n target = target.detach().cpu().numpy()\n\n preds = _normalize(preds, axis=-1)\n target = _normalize(target, axis=-1)\n else:\n preds = _normalize(preds, dim=-1)\n target = _normalize(target, dim=-1)\n\n # solve for the optimal filter\n # compute auto-correlation and cross-correlation\n acf, xcorr = compute_stats(target, preds, length=filter_length, pairwise=False)\n\n if load_diag is not None:\n # the diagonal factor of the Toeplitz matrix is the first\n # coefficient of the acf\n acf[..., 0] += load_diag\n\n if use_cg_iter is not None:\n # use preconditioned conjugate gradient\n sol = toeplitz_conjugate_gradient(acf, xcorr, n_iter=use_cg_iter)\n else:\n # regular matrix solver\n r_mat = toeplitz(acf)\n sol = solve(r_mat, xcorr)\n\n # to tensor if torch<1.8\n if not _TORCH_GREATER_EQUAL_1_8:\n sol = torch.tensor(sol, device=device)\n xcorr = torch.tensor(xcorr, device=device)\n\n # compute the coherence\n coh = torch.einsum(\"...l,...l->...\", xcorr, sol)\n\n # transform to decibels\n ratio = coh / (1 - coh)\n val = 10.0 * torch.log10(ratio)\n\n # recompute sdr in float64 if val is NaN or Inf\n if (torch.isnan(val).any() or torch.isinf(val).any()) and preds.dtype != torch.float64:\n warnings.warn(\n \"Detected `nan` or `inf` value in computed metric, retrying computation in double precision\",\n UserWarning,\n )\n val = signal_distortion_ratio(preds.double(), target.double(), use_cg_iter, filter_length, zero_mean, load_diag)\n\n return val\n\n\ndef scale_invariant_signal_distortion_ratio(preds: Tensor, target: Tensor, zero_mean: bool = False) -> Tensor:\n \"\"\"Calculates Scale-invariant signal-to-distortion ratio (SI-SDR) metric. The SI-SDR value is in general\n considered an overall measure of how good a source sound.\n\n Args:\n preds:\n shape ``[...,time]``\n target:\n shape ``[...,time]``\n zero_mean:\n If to zero mean target and preds or not\n\n Returns:\n si-sdr value of shape [...]\n\n Example:\n >>> from torchmetrics.functional.audio import scale_invariant_signal_distortion_ratio\n >>> target = torch.tensor([3.0, -0.5, 2.0, 7.0])\n >>> preds = torch.tensor([2.5, 0.0, 2.0, 8.0])\n >>> scale_invariant_signal_distortion_ratio(preds, target)\n tensor(18.4030)\n\n References:\n [1] Le Roux, Jonathan, et al. 
\"SDR half-baked or well done.\" IEEE International Conference on Acoustics, Speech\n and Signal Processing (ICASSP) 2019.\n \"\"\"\n _check_same_shape(preds, target)\n eps = torch.finfo(preds.dtype).eps\n\n if zero_mean:\n target = target - torch.mean(target, dim=-1, keepdim=True)\n preds = preds - torch.mean(preds, dim=-1, keepdim=True)\n\n alpha = (torch.sum(preds * target, dim=-1, keepdim=True) + eps) / (\n torch.sum(target ** 2, dim=-1, keepdim=True) + eps\n )\n target_scaled = alpha * target\n\n noise = target_scaled - preds\n\n val = (torch.sum(target_scaled ** 2, dim=-1) + eps) / (torch.sum(noise ** 2, dim=-1) + eps)\n val = 10 * torch.log10(val)\n\n return val\n", "path": "torchmetrics/functional/audio/sdr.py"}]}
3,566
256
gh_patches_debug_4281
rasdani/github-patches
git_diff
ocf__ocfweb-808
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- disk quota on Account commands page errors When trying to check disk quota usage on the commands page (https://www.ocf.berkeley.edu/account/commands/) this error appears `quota: Bad output format units for human readable output: vQ` ![image](https://user-images.githubusercontent.com/50010165/212457900-d267910d-2452-4483-9a05-291ee91a08f8.png) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ocfweb/account/commands.py` Content: ``` 1 from django import forms 2 from django.forms import widgets 3 from django.http import HttpRequest 4 from django.http import HttpResponse 5 from django.shortcuts import render 6 from paramiko import AuthenticationException 7 from paramiko import SSHClient 8 from paramiko.hostkeys import HostKeyEntry 9 10 from ocfweb.component.forms import Form 11 12 13 def commands(request: HttpRequest) -> HttpResponse: 14 command_to_run = '' 15 output = '' 16 error = '' 17 if request.method == 'POST': 18 form = CommandForm(request.POST) 19 if form.is_valid(): 20 username = form.cleaned_data['username'] 21 password = form.cleaned_data['password'] 22 23 command_to_run = form.cleaned_data['command_to_run'] 24 25 ssh = SSHClient() 26 27 host_keys = ssh.get_host_keys() 28 entry = HostKeyEntry.from_line( 29 'ssh.ocf.berkeley.edu ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAqMkHVVoMl8md25iky7e2Xe3ARaC4H1PbIpv5Y+xT4KOT17gGvFSmfjGyW9P8ZTyqxq560iWdyELIn7efaGPbkUo9retcnT6WLmuh9nRIYwb6w7BGEEvlblBmH27Fkgt7JQ6+1sr5teuABfIMg22WTQAeDQe1jg0XsPu36OjbC7HjA3BXsiNBpxKDolYIXWzOD+r9FxZLP0lawh8dl//O5FW4ha1IbHklq2i9Mgl79wAH3jxf66kQJTvLmalKnQ0Dbp2+vYGGhIjVFXlGSzKsHAVhuVD6TBXZbxWOYoXanS7CC43MrEtBYYnc6zMn/k/rH0V+WeRhuzTnr/OZGJbBBw==', # noqa 30 ) 31 assert entry is not None # should never be none as we are passing a static string above 32 host_keys.add( 33 'ssh.ocf.berkeley.edu', 34 'ssh-rsa', 35 entry.key, 36 ) 37 38 try: 39 ssh.connect( 40 'ssh.ocf.berkeley.edu', 41 username=username, 42 password=password, 43 ) 44 except AuthenticationException: 45 error = 'Authentication failed. Did you type the wrong username or password?' 
46 47 if not error: 48 _, ssh_stdout, ssh_stderr = ssh.exec_command(command_to_run, get_pty=True) 49 output = ssh_stdout.read().decode() 50 error = ssh_stderr.read().decode() 51 else: 52 form = CommandForm() 53 54 return render( 55 request, 56 'account/commands/index.html', { 57 'title': 'Account commands', 58 'form': form, 59 'command': command_to_run, 60 'output': output, 61 'error': error, 62 }, 63 ) 64 65 66 class CommandForm(Form): 67 username = forms.CharField( 68 label='OCF username', 69 min_length=3, 70 max_length=16, 71 ) 72 password = forms.CharField( 73 widget=forms.PasswordInput, 74 label='Password', 75 min_length=8, 76 max_length=256, 77 ) 78 79 COMMAND_CHOICES = ( 80 ( 81 '/opt/share/utils/bin/paper', 82 'paper quota -- how many pages you have remaining this semester', 83 ), 84 ( 85 '/usr/bin/quota -svQ', 86 'disk quota -- how much disk space you have used and how much you ' + 87 'have left', 88 ), 89 ( 90 '/opt/share/utils/bin/makehttp', 91 'makehttp -- set up the web space for your OCF account', 92 ), 93 ( 94 'echo yes | /opt/share/utils/bin/makemysql', 95 'makemysql -- reset your MySQL database password, or create a new ' + 96 'MySQL database (copy down the password somewhere secure)', 97 ), 98 ) 99 100 command_to_run = forms.ChoiceField( 101 choices=COMMAND_CHOICES, 102 label='Command to run', 103 widget=widgets.RadioSelect, 104 ) 105 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ocfweb/account/commands.py b/ocfweb/account/commands.py --- a/ocfweb/account/commands.py +++ b/ocfweb/account/commands.py @@ -82,7 +82,7 @@ 'paper quota -- how many pages you have remaining this semester', ), ( - '/usr/bin/quota -svQ', + '/usr/bin/quota -vQs', 'disk quota -- how much disk space you have used and how much you ' + 'have left', ),
{"golden_diff": "diff --git a/ocfweb/account/commands.py b/ocfweb/account/commands.py\n--- a/ocfweb/account/commands.py\n+++ b/ocfweb/account/commands.py\n@@ -82,7 +82,7 @@\n 'paper quota -- how many pages you have remaining this semester',\n ),\n (\n- '/usr/bin/quota -svQ',\n+ '/usr/bin/quota -vQs',\n 'disk quota -- how much disk space you have used and how much you ' +\n 'have left',\n ),\n", "issue": "disk quota on Account commands page errors\nWhen trying to check disk quota usage on the commands page (https://www.ocf.berkeley.edu/account/commands/)\r\nthis error appears\r\n\r\n`quota: Bad output format units for human readable output: vQ`\r\n\r\n![image](https://user-images.githubusercontent.com/50010165/212457900-d267910d-2452-4483-9a05-291ee91a08f8.png)\r\n\n", "before_files": [{"content": "from django import forms\nfrom django.forms import widgets\nfrom django.http import HttpRequest\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom paramiko import AuthenticationException\nfrom paramiko import SSHClient\nfrom paramiko.hostkeys import HostKeyEntry\n\nfrom ocfweb.component.forms import Form\n\n\ndef commands(request: HttpRequest) -> HttpResponse:\n command_to_run = ''\n output = ''\n error = ''\n if request.method == 'POST':\n form = CommandForm(request.POST)\n if form.is_valid():\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n\n command_to_run = form.cleaned_data['command_to_run']\n\n ssh = SSHClient()\n\n host_keys = ssh.get_host_keys()\n entry = HostKeyEntry.from_line(\n 'ssh.ocf.berkeley.edu ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAqMkHVVoMl8md25iky7e2Xe3ARaC4H1PbIpv5Y+xT4KOT17gGvFSmfjGyW9P8ZTyqxq560iWdyELIn7efaGPbkUo9retcnT6WLmuh9nRIYwb6w7BGEEvlblBmH27Fkgt7JQ6+1sr5teuABfIMg22WTQAeDQe1jg0XsPu36OjbC7HjA3BXsiNBpxKDolYIXWzOD+r9FxZLP0lawh8dl//O5FW4ha1IbHklq2i9Mgl79wAH3jxf66kQJTvLmalKnQ0Dbp2+vYGGhIjVFXlGSzKsHAVhuVD6TBXZbxWOYoXanS7CC43MrEtBYYnc6zMn/k/rH0V+WeRhuzTnr/OZGJbBBw==', # noqa\n )\n assert entry is not None # should never be none as we are passing a static string above\n host_keys.add(\n 'ssh.ocf.berkeley.edu',\n 'ssh-rsa',\n entry.key,\n )\n\n try:\n ssh.connect(\n 'ssh.ocf.berkeley.edu',\n username=username,\n password=password,\n )\n except AuthenticationException:\n error = 'Authentication failed. 
Did you type the wrong username or password?'\n\n if not error:\n _, ssh_stdout, ssh_stderr = ssh.exec_command(command_to_run, get_pty=True)\n output = ssh_stdout.read().decode()\n error = ssh_stderr.read().decode()\n else:\n form = CommandForm()\n\n return render(\n request,\n 'account/commands/index.html', {\n 'title': 'Account commands',\n 'form': form,\n 'command': command_to_run,\n 'output': output,\n 'error': error,\n },\n )\n\n\nclass CommandForm(Form):\n username = forms.CharField(\n label='OCF username',\n min_length=3,\n max_length=16,\n )\n password = forms.CharField(\n widget=forms.PasswordInput,\n label='Password',\n min_length=8,\n max_length=256,\n )\n\n COMMAND_CHOICES = (\n (\n '/opt/share/utils/bin/paper',\n 'paper quota -- how many pages you have remaining this semester',\n ),\n (\n '/usr/bin/quota -svQ',\n 'disk quota -- how much disk space you have used and how much you ' +\n 'have left',\n ),\n (\n '/opt/share/utils/bin/makehttp',\n 'makehttp -- set up the web space for your OCF account',\n ),\n (\n 'echo yes | /opt/share/utils/bin/makemysql',\n 'makemysql -- reset your MySQL database password, or create a new ' +\n 'MySQL database (copy down the password somewhere secure)',\n ),\n )\n\n command_to_run = forms.ChoiceField(\n choices=COMMAND_CHOICES,\n label='Command to run',\n widget=widgets.RadioSelect,\n )\n", "path": "ocfweb/account/commands.py"}], "after_files": [{"content": "from django import forms\nfrom django.forms import widgets\nfrom django.http import HttpRequest\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom paramiko import AuthenticationException\nfrom paramiko import SSHClient\nfrom paramiko.hostkeys import HostKeyEntry\n\nfrom ocfweb.component.forms import Form\n\n\ndef commands(request: HttpRequest) -> HttpResponse:\n command_to_run = ''\n output = ''\n error = ''\n if request.method == 'POST':\n form = CommandForm(request.POST)\n if form.is_valid():\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n\n command_to_run = form.cleaned_data['command_to_run']\n\n ssh = SSHClient()\n\n host_keys = ssh.get_host_keys()\n entry = HostKeyEntry.from_line(\n 'ssh.ocf.berkeley.edu ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAqMkHVVoMl8md25iky7e2Xe3ARaC4H1PbIpv5Y+xT4KOT17gGvFSmfjGyW9P8ZTyqxq560iWdyELIn7efaGPbkUo9retcnT6WLmuh9nRIYwb6w7BGEEvlblBmH27Fkgt7JQ6+1sr5teuABfIMg22WTQAeDQe1jg0XsPu36OjbC7HjA3BXsiNBpxKDolYIXWzOD+r9FxZLP0lawh8dl//O5FW4ha1IbHklq2i9Mgl79wAH3jxf66kQJTvLmalKnQ0Dbp2+vYGGhIjVFXlGSzKsHAVhuVD6TBXZbxWOYoXanS7CC43MrEtBYYnc6zMn/k/rH0V+WeRhuzTnr/OZGJbBBw==', # noqa\n )\n assert entry is not None # should never be none as we are passing a static string above\n host_keys.add(\n 'ssh.ocf.berkeley.edu',\n 'ssh-rsa',\n entry.key,\n )\n\n try:\n ssh.connect(\n 'ssh.ocf.berkeley.edu',\n username=username,\n password=password,\n )\n except AuthenticationException:\n error = 'Authentication failed. 
Did you type the wrong username or password?'\n\n if not error:\n _, ssh_stdout, ssh_stderr = ssh.exec_command(command_to_run, get_pty=True)\n output = ssh_stdout.read().decode()\n error = ssh_stderr.read().decode()\n else:\n form = CommandForm()\n\n return render(\n request,\n 'account/commands/index.html', {\n 'title': 'Account commands',\n 'form': form,\n 'command': command_to_run,\n 'output': output,\n 'error': error,\n },\n )\n\n\nclass CommandForm(Form):\n username = forms.CharField(\n label='OCF username',\n min_length=3,\n max_length=16,\n )\n password = forms.CharField(\n widget=forms.PasswordInput,\n label='Password',\n min_length=8,\n max_length=256,\n )\n\n COMMAND_CHOICES = (\n (\n '/opt/share/utils/bin/paper',\n 'paper quota -- how many pages you have remaining this semester',\n ),\n (\n '/usr/bin/quota -vQs',\n 'disk quota -- how much disk space you have used and how much you ' +\n 'have left',\n ),\n (\n '/opt/share/utils/bin/makehttp',\n 'makehttp -- set up the web space for your OCF account',\n ),\n (\n 'echo yes | /opt/share/utils/bin/makemysql',\n 'makemysql -- reset your MySQL database password, or create a new ' +\n 'MySQL database (copy down the password somewhere secure)',\n ),\n )\n\n command_to_run = forms.ChoiceField(\n choices=COMMAND_CHOICES,\n label='Command to run',\n widget=widgets.RadioSelect,\n )\n", "path": "ocfweb/account/commands.py"}]}
1,501
122
gh_patches_debug_15944
rasdani/github-patches
git_diff
aio-libs__aiohttp-1326
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Support load/save cookie_jar I found that cookie_jar do not provide any function to _load or save cookie_jar_ Is there any pythonic way to implement these two method? ``` session.cookie_jar.load('foo.cookie.txt') ....# do sth. session.cookie_jar.save() ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `aiohttp/cookiejar.py` Content: ``` 1 import datetime 2 import re 3 from collections import defaultdict 4 from collections.abc import Mapping 5 from http.cookies import Morsel, SimpleCookie 6 from math import ceil 7 8 from yarl import URL 9 10 from .abc import AbstractCookieJar 11 from .helpers import is_ip_address 12 13 14 class CookieJar(AbstractCookieJar): 15 """Implements cookie storage adhering to RFC 6265.""" 16 17 DATE_TOKENS_RE = re.compile( 18 "[\x09\x20-\x2F\x3B-\x40\x5B-\x60\x7B-\x7E]*" 19 "(?P<token>[\x00-\x08\x0A-\x1F\d:a-zA-Z\x7F-\xFF]+)") 20 21 DATE_HMS_TIME_RE = re.compile("(\d{1,2}):(\d{1,2}):(\d{1,2})") 22 23 DATE_DAY_OF_MONTH_RE = re.compile("(\d{1,2})") 24 25 DATE_MONTH_RE = re.compile("(jan)|(feb)|(mar)|(apr)|(may)|(jun)|(jul)|" 26 "(aug)|(sep)|(oct)|(nov)|(dec)", re.I) 27 28 DATE_YEAR_RE = re.compile("(\d{2,4})") 29 30 MAX_TIME = 2051215261.0 # so far in future (2035-01-01) 31 32 def __init__(self, *, unsafe=False, loop=None): 33 super().__init__(loop=loop) 34 self._cookies = defaultdict(SimpleCookie) 35 self._host_only_cookies = set() 36 self._unsafe = unsafe 37 self._next_expiration = ceil(self._loop.time()) 38 self._expirations = {} 39 40 def clear(self): 41 self._cookies.clear() 42 self._host_only_cookies.clear() 43 self._next_expiration = ceil(self._loop.time()) 44 self._expirations.clear() 45 46 def __iter__(self): 47 self._do_expiration() 48 for val in self._cookies.values(): 49 yield from val.values() 50 51 def __len__(self): 52 return sum(1 for i in self) 53 54 def _do_expiration(self): 55 now = self._loop.time() 56 if self._next_expiration > now: 57 return 58 if not self._expirations: 59 return 60 next_expiration = self.MAX_TIME 61 to_del = [] 62 cookies = self._cookies 63 expirations = self._expirations 64 for (domain, name), when in expirations.items(): 65 if when < now: 66 cookies[domain].pop(name, None) 67 to_del.append((domain, name)) 68 self._host_only_cookies.discard((domain, name)) 69 else: 70 next_expiration = min(next_expiration, when) 71 for key in to_del: 72 del expirations[key] 73 74 self._next_expiration = ceil(next_expiration) 75 76 def _expire_cookie(self, when, domain, name): 77 self._next_expiration = min(self._next_expiration, when) 78 self._expirations[(domain, name)] = when 79 80 def update_cookies(self, cookies, response_url=URL()): 81 """Update cookies.""" 82 hostname = response_url.host 83 84 if not self._unsafe and is_ip_address(hostname): 85 # Don't accept cookies from IPs 86 return 87 88 if isinstance(cookies, Mapping): 89 cookies = cookies.items() 90 91 for name, cookie in cookies: 92 if not isinstance(cookie, Morsel): 93 tmp = SimpleCookie() 94 tmp[name] = cookie 95 cookie = tmp[name] 96 97 domain = cookie["domain"] 98 99 # ignore domains with trailing dots 100 if domain.endswith('.'): 101 domain = "" 102 del cookie["domain"] 103 104 if not domain and hostname is not None: 105 # Set the cookie's domain to the response hostname 106 # and set its host-only-flag 107 self._host_only_cookies.add((hostname, name)) 108 domain = 
cookie["domain"] = hostname 109 110 if domain.startswith("."): 111 # Remove leading dot 112 domain = domain[1:] 113 cookie["domain"] = domain 114 115 if hostname and not self._is_domain_match(domain, hostname): 116 # Setting cookies for different domains is not allowed 117 continue 118 119 path = cookie["path"] 120 if not path or not path.startswith("/"): 121 # Set the cookie's path to the response path 122 path = response_url.path 123 if not path.startswith("/"): 124 path = "/" 125 else: 126 # Cut everything from the last slash to the end 127 path = "/" + path[1:path.rfind("/")] 128 cookie["path"] = path 129 130 max_age = cookie["max-age"] 131 if max_age: 132 try: 133 delta_seconds = int(max_age) 134 self._expire_cookie(self._loop.time() + delta_seconds, 135 domain, name) 136 except ValueError: 137 cookie["max-age"] = "" 138 139 else: 140 expires = cookie["expires"] 141 if expires: 142 expire_time = self._parse_date(expires) 143 if expire_time: 144 self._expire_cookie(expire_time.timestamp(), 145 domain, name) 146 else: 147 cookie["expires"] = "" 148 149 # use dict method because SimpleCookie class modifies value 150 # before Python 3.4.3 151 dict.__setitem__(self._cookies[domain], name, cookie) 152 153 self._do_expiration() 154 155 def filter_cookies(self, request_url=URL()): 156 """Returns this jar's cookies filtered by their attributes.""" 157 self._do_expiration() 158 filtered = SimpleCookie() 159 hostname = request_url.host or "" 160 is_not_secure = request_url.scheme not in ("https", "wss") 161 162 for cookie in self: 163 name = cookie.key 164 domain = cookie["domain"] 165 166 # Send shared cookies 167 if not domain: 168 filtered[name] = cookie.value 169 continue 170 171 if not self._unsafe and is_ip_address(hostname): 172 continue 173 174 if (domain, name) in self._host_only_cookies: 175 if domain != hostname: 176 continue 177 elif not self._is_domain_match(domain, hostname): 178 continue 179 180 if not self._is_path_match(request_url.path, cookie["path"]): 181 continue 182 183 if is_not_secure and cookie["secure"]: 184 continue 185 186 filtered[name] = cookie.value 187 188 return filtered 189 190 @staticmethod 191 def _is_domain_match(domain, hostname): 192 """Implements domain matching adhering to RFC 6265.""" 193 if hostname == domain: 194 return True 195 196 if not hostname.endswith(domain): 197 return False 198 199 non_matching = hostname[:-len(domain)] 200 201 if not non_matching.endswith("."): 202 return False 203 204 return not is_ip_address(hostname) 205 206 @staticmethod 207 def _is_path_match(req_path, cookie_path): 208 """Implements path matching adhering to RFC 6265.""" 209 if not req_path.startswith("/"): 210 req_path = "/" 211 212 if req_path == cookie_path: 213 return True 214 215 if not req_path.startswith(cookie_path): 216 return False 217 218 if cookie_path.endswith("/"): 219 return True 220 221 non_matching = req_path[len(cookie_path):] 222 223 return non_matching.startswith("/") 224 225 @classmethod 226 def _parse_date(cls, date_str): 227 """Implements date string parsing adhering to RFC 6265.""" 228 if not date_str: 229 return 230 231 found_time = False 232 found_day = False 233 found_month = False 234 found_year = False 235 236 hour = minute = second = 0 237 day = 0 238 month = 0 239 year = 0 240 241 for token_match in cls.DATE_TOKENS_RE.finditer(date_str): 242 243 token = token_match.group("token") 244 245 if not found_time: 246 time_match = cls.DATE_HMS_TIME_RE.match(token) 247 if time_match: 248 found_time = True 249 hour, minute, second = [ 250 int(s) for 
s in time_match.groups()] 251 continue 252 253 if not found_day: 254 day_match = cls.DATE_DAY_OF_MONTH_RE.match(token) 255 if day_match: 256 found_day = True 257 day = int(day_match.group()) 258 continue 259 260 if not found_month: 261 month_match = cls.DATE_MONTH_RE.match(token) 262 if month_match: 263 found_month = True 264 month = month_match.lastindex 265 continue 266 267 if not found_year: 268 year_match = cls.DATE_YEAR_RE.match(token) 269 if year_match: 270 found_year = True 271 year = int(year_match.group()) 272 273 if 70 <= year <= 99: 274 year += 1900 275 elif 0 <= year <= 69: 276 year += 2000 277 278 if False in (found_day, found_month, found_year, found_time): 279 return 280 281 if not 1 <= day <= 31: 282 return 283 284 if year < 1601 or hour > 23 or minute > 59 or second > 59: 285 return 286 287 return datetime.datetime(year, month, day, 288 hour, minute, second, 289 tzinfo=datetime.timezone.utc) 290 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/aiohttp/cookiejar.py b/aiohttp/cookiejar.py --- a/aiohttp/cookiejar.py +++ b/aiohttp/cookiejar.py @@ -1,4 +1,6 @@ import datetime +import pathlib +import pickle import re from collections import defaultdict from collections.abc import Mapping @@ -37,6 +39,16 @@ self._next_expiration = ceil(self._loop.time()) self._expirations = {} + def save(self, file_path): + file_path = pathlib.Path(file_path) + with file_path.open(mode='wb') as f: + pickle.dump(self._cookies, f, pickle.HIGHEST_PROTOCOL) + + def load(self, file_path): + file_path = pathlib.Path(file_path) + with file_path.open(mode='rb') as f: + self._cookies = pickle.load(f) + def clear(self): self._cookies.clear() self._host_only_cookies.clear()
{"golden_diff": "diff --git a/aiohttp/cookiejar.py b/aiohttp/cookiejar.py\n--- a/aiohttp/cookiejar.py\n+++ b/aiohttp/cookiejar.py\n@@ -1,4 +1,6 @@\n import datetime\n+import pathlib\n+import pickle\n import re\n from collections import defaultdict\n from collections.abc import Mapping\n@@ -37,6 +39,16 @@\n self._next_expiration = ceil(self._loop.time())\n self._expirations = {}\n \n+ def save(self, file_path):\n+ file_path = pathlib.Path(file_path)\n+ with file_path.open(mode='wb') as f:\n+ pickle.dump(self._cookies, f, pickle.HIGHEST_PROTOCOL)\n+\n+ def load(self, file_path):\n+ file_path = pathlib.Path(file_path)\n+ with file_path.open(mode='rb') as f:\n+ self._cookies = pickle.load(f)\n+\n def clear(self):\n self._cookies.clear()\n self._host_only_cookies.clear()\n", "issue": "Support load/save cookie_jar\nI found that cookie_jar do not provide any function to _load or save cookie_jar_\n\nIs there any pythonic way to implement these two method?\n\n```\nsession.cookie_jar.load('foo.cookie.txt')\n....# do sth.\nsession.cookie_jar.save()\n\n```\n\n", "before_files": [{"content": "import datetime\nimport re\nfrom collections import defaultdict\nfrom collections.abc import Mapping\nfrom http.cookies import Morsel, SimpleCookie\nfrom math import ceil\n\nfrom yarl import URL\n\nfrom .abc import AbstractCookieJar\nfrom .helpers import is_ip_address\n\n\nclass CookieJar(AbstractCookieJar):\n \"\"\"Implements cookie storage adhering to RFC 6265.\"\"\"\n\n DATE_TOKENS_RE = re.compile(\n \"[\\x09\\x20-\\x2F\\x3B-\\x40\\x5B-\\x60\\x7B-\\x7E]*\"\n \"(?P<token>[\\x00-\\x08\\x0A-\\x1F\\d:a-zA-Z\\x7F-\\xFF]+)\")\n\n DATE_HMS_TIME_RE = re.compile(\"(\\d{1,2}):(\\d{1,2}):(\\d{1,2})\")\n\n DATE_DAY_OF_MONTH_RE = re.compile(\"(\\d{1,2})\")\n\n DATE_MONTH_RE = re.compile(\"(jan)|(feb)|(mar)|(apr)|(may)|(jun)|(jul)|\"\n \"(aug)|(sep)|(oct)|(nov)|(dec)\", re.I)\n\n DATE_YEAR_RE = re.compile(\"(\\d{2,4})\")\n\n MAX_TIME = 2051215261.0 # so far in future (2035-01-01)\n\n def __init__(self, *, unsafe=False, loop=None):\n super().__init__(loop=loop)\n self._cookies = defaultdict(SimpleCookie)\n self._host_only_cookies = set()\n self._unsafe = unsafe\n self._next_expiration = ceil(self._loop.time())\n self._expirations = {}\n\n def clear(self):\n self._cookies.clear()\n self._host_only_cookies.clear()\n self._next_expiration = ceil(self._loop.time())\n self._expirations.clear()\n\n def __iter__(self):\n self._do_expiration()\n for val in self._cookies.values():\n yield from val.values()\n\n def __len__(self):\n return sum(1 for i in self)\n\n def _do_expiration(self):\n now = self._loop.time()\n if self._next_expiration > now:\n return\n if not self._expirations:\n return\n next_expiration = self.MAX_TIME\n to_del = []\n cookies = self._cookies\n expirations = self._expirations\n for (domain, name), when in expirations.items():\n if when < now:\n cookies[domain].pop(name, None)\n to_del.append((domain, name))\n self._host_only_cookies.discard((domain, name))\n else:\n next_expiration = min(next_expiration, when)\n for key in to_del:\n del expirations[key]\n\n self._next_expiration = ceil(next_expiration)\n\n def _expire_cookie(self, when, domain, name):\n self._next_expiration = min(self._next_expiration, when)\n self._expirations[(domain, name)] = when\n\n def update_cookies(self, cookies, response_url=URL()):\n \"\"\"Update cookies.\"\"\"\n hostname = response_url.host\n\n if not self._unsafe and is_ip_address(hostname):\n # Don't accept cookies from IPs\n return\n\n if isinstance(cookies, Mapping):\n cookies = 
cookies.items()\n\n for name, cookie in cookies:\n if not isinstance(cookie, Morsel):\n tmp = SimpleCookie()\n tmp[name] = cookie\n cookie = tmp[name]\n\n domain = cookie[\"domain\"]\n\n # ignore domains with trailing dots\n if domain.endswith('.'):\n domain = \"\"\n del cookie[\"domain\"]\n\n if not domain and hostname is not None:\n # Set the cookie's domain to the response hostname\n # and set its host-only-flag\n self._host_only_cookies.add((hostname, name))\n domain = cookie[\"domain\"] = hostname\n\n if domain.startswith(\".\"):\n # Remove leading dot\n domain = domain[1:]\n cookie[\"domain\"] = domain\n\n if hostname and not self._is_domain_match(domain, hostname):\n # Setting cookies for different domains is not allowed\n continue\n\n path = cookie[\"path\"]\n if not path or not path.startswith(\"/\"):\n # Set the cookie's path to the response path\n path = response_url.path\n if not path.startswith(\"/\"):\n path = \"/\"\n else:\n # Cut everything from the last slash to the end\n path = \"/\" + path[1:path.rfind(\"/\")]\n cookie[\"path\"] = path\n\n max_age = cookie[\"max-age\"]\n if max_age:\n try:\n delta_seconds = int(max_age)\n self._expire_cookie(self._loop.time() + delta_seconds,\n domain, name)\n except ValueError:\n cookie[\"max-age\"] = \"\"\n\n else:\n expires = cookie[\"expires\"]\n if expires:\n expire_time = self._parse_date(expires)\n if expire_time:\n self._expire_cookie(expire_time.timestamp(),\n domain, name)\n else:\n cookie[\"expires\"] = \"\"\n\n # use dict method because SimpleCookie class modifies value\n # before Python 3.4.3\n dict.__setitem__(self._cookies[domain], name, cookie)\n\n self._do_expiration()\n\n def filter_cookies(self, request_url=URL()):\n \"\"\"Returns this jar's cookies filtered by their attributes.\"\"\"\n self._do_expiration()\n filtered = SimpleCookie()\n hostname = request_url.host or \"\"\n is_not_secure = request_url.scheme not in (\"https\", \"wss\")\n\n for cookie in self:\n name = cookie.key\n domain = cookie[\"domain\"]\n\n # Send shared cookies\n if not domain:\n filtered[name] = cookie.value\n continue\n\n if not self._unsafe and is_ip_address(hostname):\n continue\n\n if (domain, name) in self._host_only_cookies:\n if domain != hostname:\n continue\n elif not self._is_domain_match(domain, hostname):\n continue\n\n if not self._is_path_match(request_url.path, cookie[\"path\"]):\n continue\n\n if is_not_secure and cookie[\"secure\"]:\n continue\n\n filtered[name] = cookie.value\n\n return filtered\n\n @staticmethod\n def _is_domain_match(domain, hostname):\n \"\"\"Implements domain matching adhering to RFC 6265.\"\"\"\n if hostname == domain:\n return True\n\n if not hostname.endswith(domain):\n return False\n\n non_matching = hostname[:-len(domain)]\n\n if not non_matching.endswith(\".\"):\n return False\n\n return not is_ip_address(hostname)\n\n @staticmethod\n def _is_path_match(req_path, cookie_path):\n \"\"\"Implements path matching adhering to RFC 6265.\"\"\"\n if not req_path.startswith(\"/\"):\n req_path = \"/\"\n\n if req_path == cookie_path:\n return True\n\n if not req_path.startswith(cookie_path):\n return False\n\n if cookie_path.endswith(\"/\"):\n return True\n\n non_matching = req_path[len(cookie_path):]\n\n return non_matching.startswith(\"/\")\n\n @classmethod\n def _parse_date(cls, date_str):\n \"\"\"Implements date string parsing adhering to RFC 6265.\"\"\"\n if not date_str:\n return\n\n found_time = False\n found_day = False\n found_month = False\n found_year = False\n\n hour = minute = second = 0\n day = 
0\n month = 0\n year = 0\n\n for token_match in cls.DATE_TOKENS_RE.finditer(date_str):\n\n token = token_match.group(\"token\")\n\n if not found_time:\n time_match = cls.DATE_HMS_TIME_RE.match(token)\n if time_match:\n found_time = True\n hour, minute, second = [\n int(s) for s in time_match.groups()]\n continue\n\n if not found_day:\n day_match = cls.DATE_DAY_OF_MONTH_RE.match(token)\n if day_match:\n found_day = True\n day = int(day_match.group())\n continue\n\n if not found_month:\n month_match = cls.DATE_MONTH_RE.match(token)\n if month_match:\n found_month = True\n month = month_match.lastindex\n continue\n\n if not found_year:\n year_match = cls.DATE_YEAR_RE.match(token)\n if year_match:\n found_year = True\n year = int(year_match.group())\n\n if 70 <= year <= 99:\n year += 1900\n elif 0 <= year <= 69:\n year += 2000\n\n if False in (found_day, found_month, found_year, found_time):\n return\n\n if not 1 <= day <= 31:\n return\n\n if year < 1601 or hour > 23 or minute > 59 or second > 59:\n return\n\n return datetime.datetime(year, month, day,\n hour, minute, second,\n tzinfo=datetime.timezone.utc)\n", "path": "aiohttp/cookiejar.py"}], "after_files": [{"content": "import datetime\nimport pathlib\nimport pickle\nimport re\nfrom collections import defaultdict\nfrom collections.abc import Mapping\nfrom http.cookies import Morsel, SimpleCookie\nfrom math import ceil\n\nfrom yarl import URL\n\nfrom .abc import AbstractCookieJar\nfrom .helpers import is_ip_address\n\n\nclass CookieJar(AbstractCookieJar):\n \"\"\"Implements cookie storage adhering to RFC 6265.\"\"\"\n\n DATE_TOKENS_RE = re.compile(\n \"[\\x09\\x20-\\x2F\\x3B-\\x40\\x5B-\\x60\\x7B-\\x7E]*\"\n \"(?P<token>[\\x00-\\x08\\x0A-\\x1F\\d:a-zA-Z\\x7F-\\xFF]+)\")\n\n DATE_HMS_TIME_RE = re.compile(\"(\\d{1,2}):(\\d{1,2}):(\\d{1,2})\")\n\n DATE_DAY_OF_MONTH_RE = re.compile(\"(\\d{1,2})\")\n\n DATE_MONTH_RE = re.compile(\"(jan)|(feb)|(mar)|(apr)|(may)|(jun)|(jul)|\"\n \"(aug)|(sep)|(oct)|(nov)|(dec)\", re.I)\n\n DATE_YEAR_RE = re.compile(\"(\\d{2,4})\")\n\n MAX_TIME = 2051215261.0 # so far in future (2035-01-01)\n\n def __init__(self, *, unsafe=False, loop=None):\n super().__init__(loop=loop)\n self._cookies = defaultdict(SimpleCookie)\n self._host_only_cookies = set()\n self._unsafe = unsafe\n self._next_expiration = ceil(self._loop.time())\n self._expirations = {}\n\n def save(self, file_path):\n file_path = pathlib.Path(file_path)\n with file_path.open(mode='wb') as f:\n pickle.dump(self._cookies, f, pickle.HIGHEST_PROTOCOL)\n\n def load(self, file_path):\n file_path = pathlib.Path(file_path)\n with file_path.open(mode='rb') as f:\n self._cookies = pickle.load(f)\n\n def clear(self):\n self._cookies.clear()\n self._host_only_cookies.clear()\n self._next_expiration = ceil(self._loop.time())\n self._expirations.clear()\n\n def __iter__(self):\n self._do_expiration()\n for val in self._cookies.values():\n yield from val.values()\n\n def __len__(self):\n return sum(1 for i in self)\n\n def _do_expiration(self):\n now = self._loop.time()\n if self._next_expiration > now:\n return\n if not self._expirations:\n return\n next_expiration = self.MAX_TIME\n to_del = []\n cookies = self._cookies\n expirations = self._expirations\n for (domain, name), when in expirations.items():\n if when < now:\n cookies[domain].pop(name, None)\n to_del.append((domain, name))\n self._host_only_cookies.discard((domain, name))\n else:\n next_expiration = min(next_expiration, when)\n for key in to_del:\n del expirations[key]\n\n self._next_expiration = 
ceil(next_expiration)\n\n def _expire_cookie(self, when, domain, name):\n self._next_expiration = min(self._next_expiration, when)\n self._expirations[(domain, name)] = when\n\n def update_cookies(self, cookies, response_url=URL()):\n \"\"\"Update cookies.\"\"\"\n hostname = response_url.host\n\n if not self._unsafe and is_ip_address(hostname):\n # Don't accept cookies from IPs\n return\n\n if isinstance(cookies, Mapping):\n cookies = cookies.items()\n\n for name, cookie in cookies:\n if not isinstance(cookie, Morsel):\n tmp = SimpleCookie()\n tmp[name] = cookie\n cookie = tmp[name]\n\n domain = cookie[\"domain\"]\n\n # ignore domains with trailing dots\n if domain.endswith('.'):\n domain = \"\"\n del cookie[\"domain\"]\n\n if not domain and hostname is not None:\n # Set the cookie's domain to the response hostname\n # and set its host-only-flag\n self._host_only_cookies.add((hostname, name))\n domain = cookie[\"domain\"] = hostname\n\n if domain.startswith(\".\"):\n # Remove leading dot\n domain = domain[1:]\n cookie[\"domain\"] = domain\n\n if hostname and not self._is_domain_match(domain, hostname):\n # Setting cookies for different domains is not allowed\n continue\n\n path = cookie[\"path\"]\n if not path or not path.startswith(\"/\"):\n # Set the cookie's path to the response path\n path = response_url.path\n if not path.startswith(\"/\"):\n path = \"/\"\n else:\n # Cut everything from the last slash to the end\n path = \"/\" + path[1:path.rfind(\"/\")]\n cookie[\"path\"] = path\n\n max_age = cookie[\"max-age\"]\n if max_age:\n try:\n delta_seconds = int(max_age)\n self._expire_cookie(self._loop.time() + delta_seconds,\n domain, name)\n except ValueError:\n cookie[\"max-age\"] = \"\"\n\n else:\n expires = cookie[\"expires\"]\n if expires:\n expire_time = self._parse_date(expires)\n if expire_time:\n self._expire_cookie(expire_time.timestamp(),\n domain, name)\n else:\n cookie[\"expires\"] = \"\"\n\n # use dict method because SimpleCookie class modifies value\n # before Python 3.4.3\n dict.__setitem__(self._cookies[domain], name, cookie)\n\n self._do_expiration()\n\n def filter_cookies(self, request_url=URL()):\n \"\"\"Returns this jar's cookies filtered by their attributes.\"\"\"\n self._do_expiration()\n filtered = SimpleCookie()\n hostname = request_url.host or \"\"\n is_not_secure = request_url.scheme not in (\"https\", \"wss\")\n\n for cookie in self:\n name = cookie.key\n domain = cookie[\"domain\"]\n\n # Send shared cookies\n if not domain:\n filtered[name] = cookie.value\n continue\n\n if not self._unsafe and is_ip_address(hostname):\n continue\n\n if (domain, name) in self._host_only_cookies:\n if domain != hostname:\n continue\n elif not self._is_domain_match(domain, hostname):\n continue\n\n if not self._is_path_match(request_url.path, cookie[\"path\"]):\n continue\n\n if is_not_secure and cookie[\"secure\"]:\n continue\n\n filtered[name] = cookie.value\n\n return filtered\n\n @staticmethod\n def _is_domain_match(domain, hostname):\n \"\"\"Implements domain matching adhering to RFC 6265.\"\"\"\n if hostname == domain:\n return True\n\n if not hostname.endswith(domain):\n return False\n\n non_matching = hostname[:-len(domain)]\n\n if not non_matching.endswith(\".\"):\n return False\n\n return not is_ip_address(hostname)\n\n @staticmethod\n def _is_path_match(req_path, cookie_path):\n \"\"\"Implements path matching adhering to RFC 6265.\"\"\"\n if not req_path.startswith(\"/\"):\n req_path = \"/\"\n\n if req_path == cookie_path:\n return True\n\n if not 
req_path.startswith(cookie_path):\n return False\n\n if cookie_path.endswith(\"/\"):\n return True\n\n non_matching = req_path[len(cookie_path):]\n\n return non_matching.startswith(\"/\")\n\n @classmethod\n def _parse_date(cls, date_str):\n \"\"\"Implements date string parsing adhering to RFC 6265.\"\"\"\n if not date_str:\n return\n\n found_time = False\n found_day = False\n found_month = False\n found_year = False\n\n hour = minute = second = 0\n day = 0\n month = 0\n year = 0\n\n for token_match in cls.DATE_TOKENS_RE.finditer(date_str):\n\n token = token_match.group(\"token\")\n\n if not found_time:\n time_match = cls.DATE_HMS_TIME_RE.match(token)\n if time_match:\n found_time = True\n hour, minute, second = [\n int(s) for s in time_match.groups()]\n continue\n\n if not found_day:\n day_match = cls.DATE_DAY_OF_MONTH_RE.match(token)\n if day_match:\n found_day = True\n day = int(day_match.group())\n continue\n\n if not found_month:\n month_match = cls.DATE_MONTH_RE.match(token)\n if month_match:\n found_month = True\n month = month_match.lastindex\n continue\n\n if not found_year:\n year_match = cls.DATE_YEAR_RE.match(token)\n if year_match:\n found_year = True\n year = int(year_match.group())\n\n if 70 <= year <= 99:\n year += 1900\n elif 0 <= year <= 69:\n year += 2000\n\n if False in (found_day, found_month, found_year, found_time):\n return\n\n if not 1 <= day <= 31:\n return\n\n if year < 1601 or hour > 23 or minute > 59 or second > 59:\n return\n\n return datetime.datetime(year, month, day,\n hour, minute, second,\n tzinfo=datetime.timezone.utc)\n", "path": "aiohttp/cookiejar.py"}]}
3,088
222
gh_patches_debug_23038
rasdani/github-patches
git_diff
aws__aws-cli-2702
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- cloudformation deploy does not honour tabs in JSON templates Most of our templates are using tabs for the indentation and everything was fine until we tried to feed the template to `aws cloudformation deploy` which did not like the template despite that every other function in AWS CLI works with the template just fine: ``` [galaxy@athena templates]$ aws cloudformation validate-template --template-body file://codepipeline.template { "CapabilitiesReason": "The following resource(s) require capabilities: [AWS::IAM::Role]", "Description": "Playing with CodeCommit, CodeBuild, and CodeDeploy", "Parameters": [ ``` and the only function that fails to parse the template is: ``` [galaxy@athena templates]$ aws cloudformation deploy --stack-name "galaxy-ccc" --template-file codepipeline.template --capabilities CAPABILITY_IAM while scanning for the next token found character '\t' that cannot start any token in "<string>", line 2, column 1: "AWSTemplateFormatVersion": "20 ... ^ ``` A quick fix is to replace tabs with spaces: ``` [galaxy@athena templates]$ sed 's,\t, ,g' codepipeline.template > c.template [galaxy@athena templates]$ aws cloudformation deploy --stack-name "galaxy-ccc" --template-file c.template --capabilities CAPABILITY_IAM Waiting for changeset to be created.. Waiting for stack create/update to complete ``` ... but it would mean that we would need to fix all our templates which are valid JSON just to workaround a bug in the tool! :( --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `awscli/customizations/cloudformation/yamlhelper.py` Content: ``` 1 # Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"). You 4 # may not use this file except in compliance with the License. A copy of 5 # the License is located at 6 # 7 # http://aws.amazon.com/apache2.0/ 8 # 9 # or in the "license" file accompanying this file. This file is 10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF 11 # ANY KIND, either express or implied. See the License for the specific 12 # language governing permissions and limitations under the License. 13 14 import yaml 15 from awscli.compat import six 16 from yaml.resolver import ScalarNode, SequenceNode 17 18 19 def intrinsics_multi_constructor(loader, tag_prefix, node): 20 """ 21 YAML constructor to parse CloudFormation intrinsics. 22 This will return a dictionary with key being the instrinsic name 23 """ 24 25 # Get the actual tag name excluding the first exclamation 26 tag = node.tag[1:] 27 28 # All CloudFormation intrinsics have prefix Fn:: except Ref 29 prefix = "Fn::" 30 if tag == "Ref": 31 prefix = "" 32 33 cfntag = prefix + tag 34 35 if tag == "GetAtt" and isinstance(node.value, six.string_types): 36 # ShortHand notation for !GetAtt accepts Resource.Attribute format 37 # while the standard notation is to use an array 38 # [Resource, Attribute]. 
Convert shorthand to standard format 39 value = node.value.split(".", 1) 40 41 elif isinstance(node, ScalarNode): 42 # Value of this node is scalar 43 value = loader.construct_scalar(node) 44 45 elif isinstance(node, SequenceNode): 46 # Value of this node is an array (Ex: [1,2]) 47 value = loader.construct_sequence(node) 48 49 else: 50 # Value of this node is an mapping (ex: {foo: bar}) 51 value = loader.construct_mapping(node) 52 53 return {cfntag: value} 54 55 56 def yaml_dump(dict_to_dump): 57 """ 58 Dumps the dictionary as a YAML document 59 :param dict_to_dump: 60 :return: 61 """ 62 return yaml.safe_dump(dict_to_dump, default_flow_style=False) 63 64 65 def yaml_parse(yamlstr): 66 67 yaml.SafeLoader.add_multi_constructor("!", intrinsics_multi_constructor) 68 69 return yaml.safe_load(yamlstr) 70 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/awscli/customizations/cloudformation/yamlhelper.py b/awscli/customizations/cloudformation/yamlhelper.py --- a/awscli/customizations/cloudformation/yamlhelper.py +++ b/awscli/customizations/cloudformation/yamlhelper.py @@ -10,11 +10,12 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. - +import json import yaml -from awscli.compat import six from yaml.resolver import ScalarNode, SequenceNode +from awscli.compat import six + def intrinsics_multi_constructor(loader, tag_prefix, node): """ @@ -63,7 +64,13 @@ def yaml_parse(yamlstr): - - yaml.SafeLoader.add_multi_constructor("!", intrinsics_multi_constructor) - - return yaml.safe_load(yamlstr) + """Parse a yaml string""" + try: + # PyYAML doesn't support json as well as it should, so if the input + # is actually just json it is better to parse it with the standard + # json parser. + return json.loads(yamlstr) + except ValueError: + yaml.SafeLoader.add_multi_constructor( + "!", intrinsics_multi_constructor) + return yaml.safe_load(yamlstr)
{"golden_diff": "diff --git a/awscli/customizations/cloudformation/yamlhelper.py b/awscli/customizations/cloudformation/yamlhelper.py\n--- a/awscli/customizations/cloudformation/yamlhelper.py\n+++ b/awscli/customizations/cloudformation/yamlhelper.py\n@@ -10,11 +10,12 @@\n # distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n # ANY KIND, either express or implied. See the License for the specific\n # language governing permissions and limitations under the License.\n-\n+import json\n import yaml\n-from awscli.compat import six\n from yaml.resolver import ScalarNode, SequenceNode\n \n+from awscli.compat import six\n+\n \n def intrinsics_multi_constructor(loader, tag_prefix, node):\n \"\"\"\n@@ -63,7 +64,13 @@\n \n \n def yaml_parse(yamlstr):\n-\n- yaml.SafeLoader.add_multi_constructor(\"!\", intrinsics_multi_constructor)\n-\n- return yaml.safe_load(yamlstr)\n+ \"\"\"Parse a yaml string\"\"\"\n+ try:\n+ # PyYAML doesn't support json as well as it should, so if the input\n+ # is actually just json it is better to parse it with the standard\n+ # json parser.\n+ return json.loads(yamlstr)\n+ except ValueError:\n+ yaml.SafeLoader.add_multi_constructor(\n+ \"!\", intrinsics_multi_constructor)\n+ return yaml.safe_load(yamlstr)\n", "issue": "cloudformation deploy does not honour tabs in JSON templates\nMost of our templates are using tabs for the indentation and everything was fine until we tried to feed the template to `aws cloudformation deploy` which did not like the template despite that every other function in AWS CLI works with the template just fine:\r\n\r\n```\r\n[galaxy@athena templates]$ aws cloudformation validate-template --template-body file://codepipeline.template\r\n{\r\n \"CapabilitiesReason\": \"The following resource(s) require capabilities: [AWS::IAM::Role]\", \r\n \"Description\": \"Playing with CodeCommit, CodeBuild, and CodeDeploy\", \r\n \"Parameters\": [\r\n```\r\nand the only function that fails to parse the template is:\r\n```\r\n[galaxy@athena templates]$ aws cloudformation deploy --stack-name \"galaxy-ccc\" --template-file codepipeline.template --capabilities CAPABILITY_IAM\r\n\r\nwhile scanning for the next token\r\nfound character '\\t' that cannot start any token\r\n in \"<string>\", line 2, column 1:\r\n \t\"AWSTemplateFormatVersion\": \"20 ... \r\n ^\r\n```\r\nA quick fix is to replace tabs with spaces:\r\n```\r\n[galaxy@athena templates]$ sed 's,\\t, ,g' codepipeline.template > c.template\r\n[galaxy@athena templates]$ aws cloudformation deploy --stack-name \"galaxy-ccc\" --template-file c.template --capabilities CAPABILITY_IAM\r\nWaiting for changeset to be created..\r\nWaiting for stack create/update to complete\r\n```\r\n\r\n... but it would mean that we would need to fix all our templates which are valid JSON just to workaround a bug in the tool! :(\n", "before_files": [{"content": "# Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\n\nimport yaml\nfrom awscli.compat import six\nfrom yaml.resolver import ScalarNode, SequenceNode\n\n\ndef intrinsics_multi_constructor(loader, tag_prefix, node):\n \"\"\"\n YAML constructor to parse CloudFormation intrinsics.\n This will return a dictionary with key being the instrinsic name\n \"\"\"\n\n # Get the actual tag name excluding the first exclamation\n tag = node.tag[1:]\n\n # All CloudFormation intrinsics have prefix Fn:: except Ref\n prefix = \"Fn::\"\n if tag == \"Ref\":\n prefix = \"\"\n\n cfntag = prefix + tag\n\n if tag == \"GetAtt\" and isinstance(node.value, six.string_types):\n # ShortHand notation for !GetAtt accepts Resource.Attribute format\n # while the standard notation is to use an array\n # [Resource, Attribute]. Convert shorthand to standard format\n value = node.value.split(\".\", 1)\n\n elif isinstance(node, ScalarNode):\n # Value of this node is scalar\n value = loader.construct_scalar(node)\n\n elif isinstance(node, SequenceNode):\n # Value of this node is an array (Ex: [1,2])\n value = loader.construct_sequence(node)\n\n else:\n # Value of this node is an mapping (ex: {foo: bar})\n value = loader.construct_mapping(node)\n\n return {cfntag: value}\n\n\ndef yaml_dump(dict_to_dump):\n \"\"\"\n Dumps the dictionary as a YAML document\n :param dict_to_dump:\n :return:\n \"\"\"\n return yaml.safe_dump(dict_to_dump, default_flow_style=False)\n\n\ndef yaml_parse(yamlstr):\n\n yaml.SafeLoader.add_multi_constructor(\"!\", intrinsics_multi_constructor)\n\n return yaml.safe_load(yamlstr)\n", "path": "awscli/customizations/cloudformation/yamlhelper.py"}], "after_files": [{"content": "# Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nimport json\nimport yaml\nfrom yaml.resolver import ScalarNode, SequenceNode\n\nfrom awscli.compat import six\n\n\ndef intrinsics_multi_constructor(loader, tag_prefix, node):\n \"\"\"\n YAML constructor to parse CloudFormation intrinsics.\n This will return a dictionary with key being the instrinsic name\n \"\"\"\n\n # Get the actual tag name excluding the first exclamation\n tag = node.tag[1:]\n\n # All CloudFormation intrinsics have prefix Fn:: except Ref\n prefix = \"Fn::\"\n if tag == \"Ref\":\n prefix = \"\"\n\n cfntag = prefix + tag\n\n if tag == \"GetAtt\" and isinstance(node.value, six.string_types):\n # ShortHand notation for !GetAtt accepts Resource.Attribute format\n # while the standard notation is to use an array\n # [Resource, Attribute]. 
Convert shorthand to standard format\n value = node.value.split(\".\", 1)\n\n elif isinstance(node, ScalarNode):\n # Value of this node is scalar\n value = loader.construct_scalar(node)\n\n elif isinstance(node, SequenceNode):\n # Value of this node is an array (Ex: [1,2])\n value = loader.construct_sequence(node)\n\n else:\n # Value of this node is an mapping (ex: {foo: bar})\n value = loader.construct_mapping(node)\n\n return {cfntag: value}\n\n\ndef yaml_dump(dict_to_dump):\n \"\"\"\n Dumps the dictionary as a YAML document\n :param dict_to_dump:\n :return:\n \"\"\"\n return yaml.safe_dump(dict_to_dump, default_flow_style=False)\n\n\ndef yaml_parse(yamlstr):\n \"\"\"Parse a yaml string\"\"\"\n try:\n # PyYAML doesn't support json as well as it should, so if the input\n # is actually just json it is better to parse it with the standard\n # json parser.\n return json.loads(yamlstr)\n except ValueError:\n yaml.SafeLoader.add_multi_constructor(\n \"!\", intrinsics_multi_constructor)\n return yaml.safe_load(yamlstr)\n", "path": "awscli/customizations/cloudformation/yamlhelper.py"}]}
1,257
304
gh_patches_debug_9712
rasdani/github-patches
git_diff
huggingface__dataset-viewer-2733
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- /search and /filter are currently broken Any search or filter is currently failing. https://datasets-server.huggingface.co/search?dataset=gsarti%2Fflores_101&config=afr&split=devtest&offset=0&length=100&query=a https://datasets-server.huggingface.co/filter?dataset=gsarti%2Fflores_101&config=afr&split=devtest&offset=0&length=100&where=id%3E%3D409+and+id%3C511 Logs: ``` FileNotFoundError: [Errno 2] No such file or directory: '/tmp/duckdb-index' ``` when doing `disk_stat = os.statvfs(path)` here: https://github.com/huggingface/dataset-viewer/blob/a489c0b6ad4e5a78b2670679abbfab93f9be5742/libs/libapi/src/libapi/duckdb.py#L78 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `libs/libapi/src/libapi/duckdb.py` Content: ``` 1 # SPDX-License-Identifier: Apache-2.0 2 # Copyright 2023 The HuggingFace Authors. 3 4 import errno 5 import json 6 import logging 7 import os 8 import re 9 from hashlib import sha1 10 from typing import Optional 11 12 import anyio 13 from anyio import Path 14 from libcommon.constants import DUCKDB_INDEX_DOWNLOADS_SUBDIRECTORY, SPLIT_DUCKDB_INDEX_KIND 15 from libcommon.parquet_utils import extract_split_name_from_parquet_url 16 from libcommon.prometheus import StepProfiler 17 from libcommon.simple_cache import CacheEntry 18 from libcommon.storage import StrPath, init_dir 19 from libcommon.storage_client import StorageClient 20 from libcommon.utils import download_file_from_hub 21 22 from libapi.exceptions import DownloadIndexError 23 from libapi.utils import get_cache_entry_from_step 24 25 REPO_TYPE = "dataset" 26 HUB_DOWNLOAD_CACHE_FOLDER = "cache" 27 28 29 async def get_index_file_location_and_download_if_missing( 30 duckdb_index_file_directory: StrPath, 31 dataset: str, 32 revision: str, 33 config: str, 34 split: str, 35 filename: str, 36 size_bytes: int, 37 url: str, 38 target_revision: str, 39 hf_token: Optional[str], 40 ) -> str: 41 with StepProfiler(method="get_index_file_location_and_download_if_missing", step="all"): 42 index_folder = get_download_folder(duckdb_index_file_directory, size_bytes, dataset, config, split, revision) 43 # For directories like "partial-train" for the file 44 # at "en/partial-train/0000.parquet" in the C4 dataset. 45 # Note that "-" is forbidden for split names, so it doesn't create directory names collisions. 
46 split_directory = extract_split_name_from_parquet_url(url) 47 repo_file_location = f"{config}/{split_directory}/{filename}" 48 index_file_location = f"{index_folder}/{repo_file_location}" 49 index_path = Path(index_file_location) 50 if not await index_path.is_file(): 51 with StepProfiler(method="get_index_file_location_and_download_if_missing", step="download index file"): 52 cache_folder = f"{duckdb_index_file_directory}/{HUB_DOWNLOAD_CACHE_FOLDER}" 53 await anyio.to_thread.run_sync( 54 download_index_file, 55 cache_folder, 56 index_folder, 57 target_revision, 58 dataset, 59 repo_file_location, 60 hf_token, 61 ) 62 # Update its modification time 63 await index_path.touch() 64 return index_file_location 65 66 67 def get_download_folder( 68 root_directory: StrPath, size_bytes: int, dataset: str, revision: str, config: str, split: str 69 ) -> str: 70 check_available_disk_space(root_directory, size_bytes) 71 payload = (dataset, config, split, revision) 72 hash_suffix = sha1(json.dumps(payload, sort_keys=True).encode(), usedforsecurity=False).hexdigest()[:8] 73 subdirectory = "".join([c if re.match(r"[\w-]", c) else "-" for c in f"{dataset}-{hash_suffix}"]) 74 return f"{root_directory}/{DUCKDB_INDEX_DOWNLOADS_SUBDIRECTORY}/{subdirectory}" 75 76 77 def check_available_disk_space(path: StrPath, required_space: int) -> None: 78 disk_stat = os.statvfs(path) 79 # Calculate free space in bytes 80 free_space = disk_stat.f_bavail * disk_stat.f_frsize 81 logging.debug(f"{free_space} available space, needed {required_space}") 82 if free_space < required_space: 83 raise DownloadIndexError( 84 "Cannot perform the search due to a lack of disk space on the server. Please report the issue." 85 ) 86 87 88 def download_index_file( 89 cache_folder: str, 90 index_folder: str, 91 target_revision: str, 92 dataset: str, 93 repo_file_location: str, 94 hf_token: Optional[str] = None, 95 ) -> None: 96 logging.info(f"init_dir {index_folder}") 97 try: 98 init_dir(index_folder) 99 download_file_from_hub( 100 repo_type=REPO_TYPE, 101 revision=target_revision, 102 repo_id=dataset, 103 filename=repo_file_location, 104 local_dir=index_folder, 105 hf_token=hf_token, 106 cache_dir=cache_folder, 107 ) 108 except OSError as err: 109 if err.errno == errno.ENOSPC: 110 raise DownloadIndexError( 111 "Cannot perform the operation due to a lack of disk space on the server. Please report the issue.", err 112 ) 113 114 115 def get_cache_entry_from_duckdb_index_job( 116 dataset: str, 117 config: str, 118 split: str, 119 hf_endpoint: str, 120 hf_token: Optional[str], 121 hf_timeout_seconds: Optional[float], 122 blocked_datasets: list[str], 123 storage_clients: Optional[list[StorageClient]] = None, 124 ) -> CacheEntry: 125 return get_cache_entry_from_step( 126 processing_step_name=SPLIT_DUCKDB_INDEX_KIND, 127 dataset=dataset, 128 config=config, 129 split=split, 130 hf_endpoint=hf_endpoint, 131 hf_token=hf_token, 132 hf_timeout_seconds=hf_timeout_seconds, 133 blocked_datasets=blocked_datasets, 134 storage_clients=storage_clients, 135 ) 136 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/libs/libapi/src/libapi/duckdb.py b/libs/libapi/src/libapi/duckdb.py --- a/libs/libapi/src/libapi/duckdb.py +++ b/libs/libapi/src/libapi/duckdb.py @@ -75,7 +75,12 @@ def check_available_disk_space(path: StrPath, required_space: int) -> None: - disk_stat = os.statvfs(path) + try: + disk_stat = os.statvfs(path) + except FileNotFoundError: + # The path does not exist, we create it and + init_dir(path) + disk_stat = os.statvfs(path) # Calculate free space in bytes free_space = disk_stat.f_bavail * disk_stat.f_frsize logging.debug(f"{free_space} available space, needed {required_space}")
{"golden_diff": "diff --git a/libs/libapi/src/libapi/duckdb.py b/libs/libapi/src/libapi/duckdb.py\n--- a/libs/libapi/src/libapi/duckdb.py\n+++ b/libs/libapi/src/libapi/duckdb.py\n@@ -75,7 +75,12 @@\n \n \n def check_available_disk_space(path: StrPath, required_space: int) -> None:\n- disk_stat = os.statvfs(path)\n+ try:\n+ disk_stat = os.statvfs(path)\n+ except FileNotFoundError:\n+ # The path does not exist, we create it and\n+ init_dir(path)\n+ disk_stat = os.statvfs(path)\n # Calculate free space in bytes\n free_space = disk_stat.f_bavail * disk_stat.f_frsize\n logging.debug(f\"{free_space} available space, needed {required_space}\")\n", "issue": "/search and /filter are currently broken\nAny search or filter is currently failing.\r\n\r\nhttps://datasets-server.huggingface.co/search?dataset=gsarti%2Fflores_101&config=afr&split=devtest&offset=0&length=100&query=a\r\nhttps://datasets-server.huggingface.co/filter?dataset=gsarti%2Fflores_101&config=afr&split=devtest&offset=0&length=100&where=id%3E%3D409+and+id%3C511\r\n\r\nLogs:\r\n\r\n```\r\nFileNotFoundError: [Errno 2] No such file or directory: '/tmp/duckdb-index'\r\n```\r\n\r\nwhen doing `disk_stat = os.statvfs(path)` here: \r\n\r\nhttps://github.com/huggingface/dataset-viewer/blob/a489c0b6ad4e5a78b2670679abbfab93f9be5742/libs/libapi/src/libapi/duckdb.py#L78\n", "before_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n# Copyright 2023 The HuggingFace Authors.\n\nimport errno\nimport json\nimport logging\nimport os\nimport re\nfrom hashlib import sha1\nfrom typing import Optional\n\nimport anyio\nfrom anyio import Path\nfrom libcommon.constants import DUCKDB_INDEX_DOWNLOADS_SUBDIRECTORY, SPLIT_DUCKDB_INDEX_KIND\nfrom libcommon.parquet_utils import extract_split_name_from_parquet_url\nfrom libcommon.prometheus import StepProfiler\nfrom libcommon.simple_cache import CacheEntry\nfrom libcommon.storage import StrPath, init_dir\nfrom libcommon.storage_client import StorageClient\nfrom libcommon.utils import download_file_from_hub\n\nfrom libapi.exceptions import DownloadIndexError\nfrom libapi.utils import get_cache_entry_from_step\n\nREPO_TYPE = \"dataset\"\nHUB_DOWNLOAD_CACHE_FOLDER = \"cache\"\n\n\nasync def get_index_file_location_and_download_if_missing(\n duckdb_index_file_directory: StrPath,\n dataset: str,\n revision: str,\n config: str,\n split: str,\n filename: str,\n size_bytes: int,\n url: str,\n target_revision: str,\n hf_token: Optional[str],\n) -> str:\n with StepProfiler(method=\"get_index_file_location_and_download_if_missing\", step=\"all\"):\n index_folder = get_download_folder(duckdb_index_file_directory, size_bytes, dataset, config, split, revision)\n # For directories like \"partial-train\" for the file\n # at \"en/partial-train/0000.parquet\" in the C4 dataset.\n # Note that \"-\" is forbidden for split names, so it doesn't create directory names collisions.\n split_directory = extract_split_name_from_parquet_url(url)\n repo_file_location = f\"{config}/{split_directory}/{filename}\"\n index_file_location = f\"{index_folder}/{repo_file_location}\"\n index_path = Path(index_file_location)\n if not await index_path.is_file():\n with StepProfiler(method=\"get_index_file_location_and_download_if_missing\", step=\"download index file\"):\n cache_folder = f\"{duckdb_index_file_directory}/{HUB_DOWNLOAD_CACHE_FOLDER}\"\n await anyio.to_thread.run_sync(\n download_index_file,\n cache_folder,\n index_folder,\n target_revision,\n dataset,\n repo_file_location,\n hf_token,\n )\n # Update its modification time\n await 
index_path.touch()\n return index_file_location\n\n\ndef get_download_folder(\n root_directory: StrPath, size_bytes: int, dataset: str, revision: str, config: str, split: str\n) -> str:\n check_available_disk_space(root_directory, size_bytes)\n payload = (dataset, config, split, revision)\n hash_suffix = sha1(json.dumps(payload, sort_keys=True).encode(), usedforsecurity=False).hexdigest()[:8]\n subdirectory = \"\".join([c if re.match(r\"[\\w-]\", c) else \"-\" for c in f\"{dataset}-{hash_suffix}\"])\n return f\"{root_directory}/{DUCKDB_INDEX_DOWNLOADS_SUBDIRECTORY}/{subdirectory}\"\n\n\ndef check_available_disk_space(path: StrPath, required_space: int) -> None:\n disk_stat = os.statvfs(path)\n # Calculate free space in bytes\n free_space = disk_stat.f_bavail * disk_stat.f_frsize\n logging.debug(f\"{free_space} available space, needed {required_space}\")\n if free_space < required_space:\n raise DownloadIndexError(\n \"Cannot perform the search due to a lack of disk space on the server. Please report the issue.\"\n )\n\n\ndef download_index_file(\n cache_folder: str,\n index_folder: str,\n target_revision: str,\n dataset: str,\n repo_file_location: str,\n hf_token: Optional[str] = None,\n) -> None:\n logging.info(f\"init_dir {index_folder}\")\n try:\n init_dir(index_folder)\n download_file_from_hub(\n repo_type=REPO_TYPE,\n revision=target_revision,\n repo_id=dataset,\n filename=repo_file_location,\n local_dir=index_folder,\n hf_token=hf_token,\n cache_dir=cache_folder,\n )\n except OSError as err:\n if err.errno == errno.ENOSPC:\n raise DownloadIndexError(\n \"Cannot perform the operation due to a lack of disk space on the server. Please report the issue.\", err\n )\n\n\ndef get_cache_entry_from_duckdb_index_job(\n dataset: str,\n config: str,\n split: str,\n hf_endpoint: str,\n hf_token: Optional[str],\n hf_timeout_seconds: Optional[float],\n blocked_datasets: list[str],\n storage_clients: Optional[list[StorageClient]] = None,\n) -> CacheEntry:\n return get_cache_entry_from_step(\n processing_step_name=SPLIT_DUCKDB_INDEX_KIND,\n dataset=dataset,\n config=config,\n split=split,\n hf_endpoint=hf_endpoint,\n hf_token=hf_token,\n hf_timeout_seconds=hf_timeout_seconds,\n blocked_datasets=blocked_datasets,\n storage_clients=storage_clients,\n )\n", "path": "libs/libapi/src/libapi/duckdb.py"}], "after_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n# Copyright 2023 The HuggingFace Authors.\n\nimport errno\nimport json\nimport logging\nimport os\nimport re\nfrom hashlib import sha1\nfrom typing import Optional\n\nimport anyio\nfrom anyio import Path\nfrom libcommon.constants import DUCKDB_INDEX_DOWNLOADS_SUBDIRECTORY, SPLIT_DUCKDB_INDEX_KIND\nfrom libcommon.parquet_utils import extract_split_name_from_parquet_url\nfrom libcommon.prometheus import StepProfiler\nfrom libcommon.simple_cache import CacheEntry\nfrom libcommon.storage import StrPath, init_dir\nfrom libcommon.storage_client import StorageClient\nfrom libcommon.utils import download_file_from_hub\n\nfrom libapi.exceptions import DownloadIndexError\nfrom libapi.utils import get_cache_entry_from_step\n\nREPO_TYPE = \"dataset\"\nHUB_DOWNLOAD_CACHE_FOLDER = \"cache\"\n\n\nasync def get_index_file_location_and_download_if_missing(\n duckdb_index_file_directory: StrPath,\n dataset: str,\n revision: str,\n config: str,\n split: str,\n filename: str,\n size_bytes: int,\n url: str,\n target_revision: str,\n hf_token: Optional[str],\n) -> str:\n with StepProfiler(method=\"get_index_file_location_and_download_if_missing\", 
step=\"all\"):\n index_folder = get_download_folder(duckdb_index_file_directory, size_bytes, dataset, config, split, revision)\n # For directories like \"partial-train\" for the file\n # at \"en/partial-train/0000.parquet\" in the C4 dataset.\n # Note that \"-\" is forbidden for split names, so it doesn't create directory names collisions.\n split_directory = extract_split_name_from_parquet_url(url)\n repo_file_location = f\"{config}/{split_directory}/{filename}\"\n index_file_location = f\"{index_folder}/{repo_file_location}\"\n index_path = Path(index_file_location)\n if not await index_path.is_file():\n with StepProfiler(method=\"get_index_file_location_and_download_if_missing\", step=\"download index file\"):\n cache_folder = f\"{duckdb_index_file_directory}/{HUB_DOWNLOAD_CACHE_FOLDER}\"\n await anyio.to_thread.run_sync(\n download_index_file,\n cache_folder,\n index_folder,\n target_revision,\n dataset,\n repo_file_location,\n hf_token,\n )\n # Update its modification time\n await index_path.touch()\n return index_file_location\n\n\ndef get_download_folder(\n root_directory: StrPath, size_bytes: int, dataset: str, revision: str, config: str, split: str\n) -> str:\n check_available_disk_space(root_directory, size_bytes)\n payload = (dataset, config, split, revision)\n hash_suffix = sha1(json.dumps(payload, sort_keys=True).encode(), usedforsecurity=False).hexdigest()[:8]\n subdirectory = \"\".join([c if re.match(r\"[\\w-]\", c) else \"-\" for c in f\"{dataset}-{hash_suffix}\"])\n return f\"{root_directory}/{DUCKDB_INDEX_DOWNLOADS_SUBDIRECTORY}/{subdirectory}\"\n\n\ndef check_available_disk_space(path: StrPath, required_space: int) -> None:\n try:\n disk_stat = os.statvfs(path)\n except FileNotFoundError:\n # The path does not exist, we create it and\n init_dir(path)\n disk_stat = os.statvfs(path)\n # Calculate free space in bytes\n free_space = disk_stat.f_bavail * disk_stat.f_frsize\n logging.debug(f\"{free_space} available space, needed {required_space}\")\n if free_space < required_space:\n raise DownloadIndexError(\n \"Cannot perform the search due to a lack of disk space on the server. Please report the issue.\"\n )\n\n\ndef download_index_file(\n cache_folder: str,\n index_folder: str,\n target_revision: str,\n dataset: str,\n repo_file_location: str,\n hf_token: Optional[str] = None,\n) -> None:\n logging.info(f\"init_dir {index_folder}\")\n try:\n init_dir(index_folder)\n download_file_from_hub(\n repo_type=REPO_TYPE,\n revision=target_revision,\n repo_id=dataset,\n filename=repo_file_location,\n local_dir=index_folder,\n hf_token=hf_token,\n cache_dir=cache_folder,\n )\n except OSError as err:\n if err.errno == errno.ENOSPC:\n raise DownloadIndexError(\n \"Cannot perform the operation due to a lack of disk space on the server. Please report the issue.\", err\n )\n\n\ndef get_cache_entry_from_duckdb_index_job(\n dataset: str,\n config: str,\n split: str,\n hf_endpoint: str,\n hf_token: Optional[str],\n hf_timeout_seconds: Optional[float],\n blocked_datasets: list[str],\n storage_clients: Optional[list[StorageClient]] = None,\n) -> CacheEntry:\n return get_cache_entry_from_step(\n processing_step_name=SPLIT_DUCKDB_INDEX_KIND,\n dataset=dataset,\n config=config,\n split=split,\n hf_endpoint=hf_endpoint,\n hf_token=hf_token,\n hf_timeout_seconds=hf_timeout_seconds,\n blocked_datasets=blocked_datasets,\n storage_clients=storage_clients,\n )\n", "path": "libs/libapi/src/libapi/duckdb.py"}]}
num_tokens: 1,880
num_tokens_diff: 183
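The record above fixes a `FileNotFoundError` raised by `os.statvfs` when the duckdb-index directory has not been created yet. A minimal, standalone sketch of that guard-and-create pattern (not the dataset-viewer code itself; `ensure_dir` below is a stand-in for the project's `init_dir` helper, and `os.statvfs` is POSIX-only):

```python
import os


def ensure_dir(path: str) -> None:
    """Stand-in for the project's init_dir(): create the directory if missing."""
    os.makedirs(path, exist_ok=True)


def free_space_bytes(path: str) -> int:
    """Return the available bytes at `path`, creating the directory first if needed."""
    try:
        stat = os.statvfs(path)
    except FileNotFoundError:
        # The path does not exist yet (e.g. /tmp/duckdb-index on a fresh deployment):
        # create it and retry instead of failing the whole /search or /filter request.
        ensure_dir(path)
        stat = os.statvfs(path)
    return stat.f_bavail * stat.f_frsize


if __name__ == "__main__":
    print(free_space_bytes("/tmp/duckdb-index-demo"))
```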
problem_id: gh_patches_debug_10519
source: rasdani/github-patches
task_type: git_diff
in_source_id: ansible__ansible-34845
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- aci_epg_to_domain: Module does not need to support OpenStack _From @nilloBE on October 10, 2017 9:11_ ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME aci_epg_to_domain ##### ANSIBLE VERSION v2.5 ##### SUMMARY The module aci_epg_to_domain allows mapping an EPG to a Physical or Virtual Domain in ACI. The Virtual Domain allows the user to specify the domain type selecting VMware, Microsoft or OpenStack. In this setting, OpenStack should be removed because the OpenStack EPGs should not be created or manipulated directly via Ansible, but instead via Neutron. _Copied from original issue: datacenter/aci-ansible#163_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lib/ansible/modules/network/aci/aci_epg_to_domain.py` Content: ``` 1 #!/usr/bin/python 2 # -*- coding: utf-8 -*- 3 4 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) 5 6 from __future__ import absolute_import, division, print_function 7 __metaclass__ = type 8 9 ANSIBLE_METADATA = {'metadata_version': '1.1', 10 'status': ['preview'], 11 'supported_by': 'community'} 12 13 DOCUMENTATION = r''' 14 --- 15 module: aci_epg_to_domain 16 short_description: Bind EPGs to Domains on Cisco ACI fabrics (fv:RsDomAtt) 17 description: 18 - Bind EPGs to Physical and Virtual Domains on Cisco ACI fabrics. 19 - More information from the internal APIC class I(fv:RsDomAtt) at 20 U(https://developer.cisco.com/media/mim-ref/MO-fvRsDomAtt.html). 21 author: 22 - Jacob McGill (@jmcgill298) 23 version_added: '2.4' 24 notes: 25 - The C(tenant), C(ap), C(epg), and C(domain) used must exist before using this module in your playbook. 26 The M(aci_tenant) M(aci_ap), M(aci_epg) M(aci_domain) modules can be used for this. 27 options: 28 allow_useg: 29 description: 30 - Allows micro-segmentation. 31 - The APIC defaults new EPG to Domain bindings to use C(encap). 32 choices: [ encap, useg ] 33 default: encap 34 ap: 35 description: 36 - Name of an existing application network profile, that will contain the EPGs. 37 aliases: [ app_profile, app_profile_name ] 38 deploy_immediacy: 39 description: 40 - Determines when the policy is pushed to hardware Policy CAM. 41 - The APIC defaults new EPG to Domain bindings to C(lazy). 42 choices: [ immediate, lazy ] 43 default: lazy 44 domain: 45 description: 46 - Name of the physical or virtual domain being associated with the EPG. 47 aliases: [ domain_name, domain_profile ] 48 domain_type: 49 description: 50 - Determines if the Domain is physical (phys) or virtual (vmm). 51 choices: [ phys, vmm ] 52 aliases: [ type ] 53 encap: 54 description: 55 - The VLAN encapsulation for the EPG when binding a VMM Domain with static encap_mode. 56 - This acts as the secondary encap when using useg. 57 choices: [ range from 1 to 4096 ] 58 encap_mode: 59 description: 60 - The ecapsulataion method to be used. 61 - The APIC defaults new EPG to Domain bindings to C(auto). 62 choices: [ auto, vlan, vxlan ] 63 default: auto 64 epg: 65 description: 66 - Name of the end point group. 67 aliases: [ epg_name ] 68 netflow: 69 description: 70 - Determines if netflow should be enabled. 71 - The APIC defaults new EPG to Domain binings to C(disabled). 72 choices: [ disabled, enabled ] 73 default: disabled 74 primary_encap: 75 description: 76 - Determines the primary VLAN ID when using useg. 
77 choices: [ range from 1 to 4096 ] 78 resolution_immediacy: 79 description: 80 - Determines when the policies should be resolved and available. 81 - The APIC defaults new EPG to Domain bindings to C(lazy). 82 choices: [ immediate, lazy, pre-provision ] 83 default: lazy 84 state: 85 description: 86 - Use C(present) or C(absent) for adding or removing. 87 - Use C(query) for listing an object or multiple objects. 88 choices: [ absent, present, query ] 89 default: present 90 tenant: 91 description: 92 - Name of an existing tenant. 93 aliases: [ tenant_name ] 94 vm_provider: 95 description: 96 - The VM platform for VMM Domains. 97 choices: [ microsoft, openstack, vmware ] 98 extends_documentation_fragment: aci 99 ''' 100 101 EXAMPLES = r''' # ''' 102 103 RETURN = r''' # ''' 104 105 from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec 106 from ansible.module_utils.basic import AnsibleModule 107 108 VM_PROVIDER_MAPPING = dict( 109 microsoft="uni/vmmp-Microsoft/dom-", 110 openstack="uni/vmmp-OpenStack/dom-", 111 vmware="uni/vmmp-VMware/dom-", 112 ) 113 114 115 def main(): 116 argument_spec = aci_argument_spec 117 argument_spec.update( 118 allow_useg=dict(type='str', choices=['encap', 'useg']), 119 ap=dict(type='str', aliases=['app_profile', 'app_profile_name']), 120 deploy_immediacy=dict(type='str', choices=['immediate', 'on-demand']), 121 domain=dict(type='str', aliases=['domain_name', 'domain_profile']), 122 domain_type=dict(type='str', choices=['phys', 'vmm'], aliases=['type']), 123 encap=dict(type='int'), 124 encap_mode=dict(type='str', choices=['auto', 'vlan', 'vxlan']), 125 epg=dict(type='str', aliases=['name', 'epg_name']), 126 netflow=dict(type='str', choices=['disabled', 'enabled']), 127 primary_encap=dict(type='int'), 128 resolution_immediacy=dict(type='str', choices=['immediate', 'lazy', 'pre-provision']), 129 state=dict(type='str', default='present', choices=['absent', 'present', 'query']), 130 tenant=dict(type='str', aliases=['tenant_name']), 131 vm_provider=dict(type='str', choices=['microsoft', 'openstack', 'vmware']), 132 method=dict(type='str', choices=['delete', 'get', 'post'], aliases=['action'], removed_in_version='2.6'), # Deprecated starting from v2.6 133 ) 134 135 module = AnsibleModule( 136 argument_spec=argument_spec, 137 supports_check_mode=True, 138 required_if=[ 139 ['domain_type', 'vmm', ['vm_provider']], 140 ['state', 'absent', ['ap', 'domain', 'domain_type', 'epg', 'tenant']], 141 ['state', 'present', ['ap', 'domain', 'domain_type', 'epg', 'tenant']], 142 ], 143 ) 144 145 allow_useg = module.params['allow_useg'] 146 ap = module.params['ap'] 147 deploy_immediacy = module.params['deploy_immediacy'] 148 domain = module.params['domain'] 149 domain_type = module.params['domain_type'] 150 vm_provider = module.params['vm_provider'] 151 encap = module.params['encap'] 152 if encap is not None: 153 if encap in range(1, 4097): 154 encap = 'vlan-{0}'.format(encap) 155 else: 156 module.fail_json(msg='Valid VLAN assigments are from 1 to 4096') 157 encap_mode = module.params['encap_mode'] 158 epg = module.params['epg'] 159 netflow = module.params['netflow'] 160 primary_encap = module.params['primary_encap'] 161 if primary_encap is not None: 162 if primary_encap in range(1, 4097): 163 primary_encap = 'vlan-{0}'.format(primary_encap) 164 else: 165 module.fail_json(msg='Valid VLAN assigments are from 1 to 4096') 166 resolution_immediacy = module.params['resolution_immediacy'] 167 state = module.params['state'] 168 tenant = module.params['tenant'] 169 170 if 
domain_type == 'phys' and vm_provider is not None: 171 module.fail_json(msg="Domain type 'phys' cannot have a 'vm_provider'") 172 173 # Compile the full domain for URL building 174 if domain_type == 'vmm': 175 epg_domain = '{0}{1}'.format(VM_PROVIDER_MAPPING[vm_provider], domain) 176 elif domain_type is not None: 177 epg_domain = 'uni/phys-{0}'.format(domain) 178 else: 179 epg_domain = None 180 181 aci = ACIModule(module) 182 aci.construct_url( 183 root_class=dict( 184 aci_class='fvTenant', 185 aci_rn='tn-{0}'.format(tenant), 186 filter_target='eq(fvTenant.name, "{0}")'.format(tenant), 187 module_object=tenant, 188 ), 189 subclass_1=dict( 190 aci_class='fvAp', 191 aci_rn='ap-{0}'.format(ap), 192 filter_target='eq(fvAp.name, "{0}")'.format(ap), 193 module_object=ap, 194 ), 195 subclass_2=dict( 196 aci_class='fvAEPg', 197 aci_rn='epg-{0}'.format(epg), 198 filter_target='eq(fvTenant.name, "{0}")'.format(epg), 199 module_object=epg, 200 ), 201 subclass_3=dict( 202 aci_class='fvRsDomAtt', 203 aci_rn='rsdomAtt-[{0}]'.format(epg_domain), 204 filter_target='eq(fvRsDomAtt.tDn, "{0}")'.format(epg_domain), 205 module_object=epg_domain, 206 ), 207 ) 208 209 aci.get_existing() 210 211 if state == 'present': 212 # Filter out module parameters with null values 213 aci.payload( 214 aci_class='fvRsDomAtt', 215 class_config=dict( 216 classPref=allow_useg, 217 encap=encap, 218 encapMode=encap_mode, 219 instrImedcy=deploy_immediacy, 220 netflowPref=netflow, 221 primaryEncap=primary_encap, 222 resImedcy=resolution_immediacy, 223 ), 224 ) 225 226 # Generate config diff which will be used as POST request body 227 aci.get_diff(aci_class='fvRsDomAtt') 228 229 # Submit changes if module not in check_mode and the proposed is different than existing 230 aci.post_config() 231 232 elif state == 'absent': 233 aci.delete_config() 234 235 module.exit_json(**aci.result) 236 237 238 if __name__ == "__main__": 239 main() 240 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lib/ansible/modules/network/aci/aci_epg_to_domain.py b/lib/ansible/modules/network/aci/aci_epg_to_domain.py --- a/lib/ansible/modules/network/aci/aci_epg_to_domain.py +++ b/lib/ansible/modules/network/aci/aci_epg_to_domain.py @@ -24,6 +24,9 @@ notes: - The C(tenant), C(ap), C(epg), and C(domain) used must exist before using this module in your playbook. The M(aci_tenant) M(aci_ap), M(aci_epg) M(aci_domain) modules can be used for this. +- OpenStack VMM domains must not be created using this module. The OpenStack VMM domain is created directly + by the Cisco APIC Neutron plugin as part of the installation and configuration. + This module can be used to query status of an OpenStack VMM domain. options: allow_useg: description:
{"golden_diff": "diff --git a/lib/ansible/modules/network/aci/aci_epg_to_domain.py b/lib/ansible/modules/network/aci/aci_epg_to_domain.py\n--- a/lib/ansible/modules/network/aci/aci_epg_to_domain.py\n+++ b/lib/ansible/modules/network/aci/aci_epg_to_domain.py\n@@ -24,6 +24,9 @@\n notes:\n - The C(tenant), C(ap), C(epg), and C(domain) used must exist before using this module in your playbook.\n The M(aci_tenant) M(aci_ap), M(aci_epg) M(aci_domain) modules can be used for this.\n+- OpenStack VMM domains must not be created using this module. The OpenStack VMM domain is created directly\n+ by the Cisco APIC Neutron plugin as part of the installation and configuration.\n+ This module can be used to query status of an OpenStack VMM domain.\n options:\n allow_useg:\n description:\n", "issue": "aci_epg_to_domain: Module does not need to support OpenStack\n_From @nilloBE on October 10, 2017 9:11_\r\n\r\n##### ISSUE TYPE\r\n - Bug Report\r\n\r\n##### COMPONENT NAME\r\naci_epg_to_domain\r\n\r\n##### ANSIBLE VERSION\r\nv2.5\r\n\r\n##### SUMMARY\r\nThe module aci_epg_to_domain allows mapping an EPG to a Physical or Virtual Domain in ACI. The Virtual Domain allows the user to specify the domain type selecting VMware, Microsoft or OpenStack.\r\n\r\nIn this setting, OpenStack should be removed because the OpenStack EPGs should not be created or manipulated directly via Ansible, but instead via Neutron. \r\n\r\n_Copied from original issue: datacenter/aci-ansible#163_\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\nDOCUMENTATION = r'''\n---\nmodule: aci_epg_to_domain\nshort_description: Bind EPGs to Domains on Cisco ACI fabrics (fv:RsDomAtt)\ndescription:\n- Bind EPGs to Physical and Virtual Domains on Cisco ACI fabrics.\n- More information from the internal APIC class I(fv:RsDomAtt) at\n U(https://developer.cisco.com/media/mim-ref/MO-fvRsDomAtt.html).\nauthor:\n- Jacob McGill (@jmcgill298)\nversion_added: '2.4'\nnotes:\n- The C(tenant), C(ap), C(epg), and C(domain) used must exist before using this module in your playbook.\n The M(aci_tenant) M(aci_ap), M(aci_epg) M(aci_domain) modules can be used for this.\noptions:\n allow_useg:\n description:\n - Allows micro-segmentation.\n - The APIC defaults new EPG to Domain bindings to use C(encap).\n choices: [ encap, useg ]\n default: encap\n ap:\n description:\n - Name of an existing application network profile, that will contain the EPGs.\n aliases: [ app_profile, app_profile_name ]\n deploy_immediacy:\n description:\n - Determines when the policy is pushed to hardware Policy CAM.\n - The APIC defaults new EPG to Domain bindings to C(lazy).\n choices: [ immediate, lazy ]\n default: lazy\n domain:\n description:\n - Name of the physical or virtual domain being associated with the EPG.\n aliases: [ domain_name, domain_profile ]\n domain_type:\n description:\n - Determines if the Domain is physical (phys) or virtual (vmm).\n choices: [ phys, vmm ]\n aliases: [ type ]\n encap:\n description:\n - The VLAN encapsulation for the EPG when binding a VMM Domain with static encap_mode.\n - This acts as the secondary encap when using useg.\n choices: [ range from 1 to 4096 ]\n encap_mode:\n description:\n - The ecapsulataion method to be used.\n - The APIC 
defaults new EPG to Domain bindings to C(auto).\n choices: [ auto, vlan, vxlan ]\n default: auto\n epg:\n description:\n - Name of the end point group.\n aliases: [ epg_name ]\n netflow:\n description:\n - Determines if netflow should be enabled.\n - The APIC defaults new EPG to Domain binings to C(disabled).\n choices: [ disabled, enabled ]\n default: disabled\n primary_encap:\n description:\n - Determines the primary VLAN ID when using useg.\n choices: [ range from 1 to 4096 ]\n resolution_immediacy:\n description:\n - Determines when the policies should be resolved and available.\n - The APIC defaults new EPG to Domain bindings to C(lazy).\n choices: [ immediate, lazy, pre-provision ]\n default: lazy\n state:\n description:\n - Use C(present) or C(absent) for adding or removing.\n - Use C(query) for listing an object or multiple objects.\n choices: [ absent, present, query ]\n default: present\n tenant:\n description:\n - Name of an existing tenant.\n aliases: [ tenant_name ]\n vm_provider:\n description:\n - The VM platform for VMM Domains.\n choices: [ microsoft, openstack, vmware ]\nextends_documentation_fragment: aci\n'''\n\nEXAMPLES = r''' # '''\n\nRETURN = r''' # '''\n\nfrom ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec\nfrom ansible.module_utils.basic import AnsibleModule\n\nVM_PROVIDER_MAPPING = dict(\n microsoft=\"uni/vmmp-Microsoft/dom-\",\n openstack=\"uni/vmmp-OpenStack/dom-\",\n vmware=\"uni/vmmp-VMware/dom-\",\n)\n\n\ndef main():\n argument_spec = aci_argument_spec\n argument_spec.update(\n allow_useg=dict(type='str', choices=['encap', 'useg']),\n ap=dict(type='str', aliases=['app_profile', 'app_profile_name']),\n deploy_immediacy=dict(type='str', choices=['immediate', 'on-demand']),\n domain=dict(type='str', aliases=['domain_name', 'domain_profile']),\n domain_type=dict(type='str', choices=['phys', 'vmm'], aliases=['type']),\n encap=dict(type='int'),\n encap_mode=dict(type='str', choices=['auto', 'vlan', 'vxlan']),\n epg=dict(type='str', aliases=['name', 'epg_name']),\n netflow=dict(type='str', choices=['disabled', 'enabled']),\n primary_encap=dict(type='int'),\n resolution_immediacy=dict(type='str', choices=['immediate', 'lazy', 'pre-provision']),\n state=dict(type='str', default='present', choices=['absent', 'present', 'query']),\n tenant=dict(type='str', aliases=['tenant_name']),\n vm_provider=dict(type='str', choices=['microsoft', 'openstack', 'vmware']),\n method=dict(type='str', choices=['delete', 'get', 'post'], aliases=['action'], removed_in_version='2.6'), # Deprecated starting from v2.6\n )\n\n module = AnsibleModule(\n argument_spec=argument_spec,\n supports_check_mode=True,\n required_if=[\n ['domain_type', 'vmm', ['vm_provider']],\n ['state', 'absent', ['ap', 'domain', 'domain_type', 'epg', 'tenant']],\n ['state', 'present', ['ap', 'domain', 'domain_type', 'epg', 'tenant']],\n ],\n )\n\n allow_useg = module.params['allow_useg']\n ap = module.params['ap']\n deploy_immediacy = module.params['deploy_immediacy']\n domain = module.params['domain']\n domain_type = module.params['domain_type']\n vm_provider = module.params['vm_provider']\n encap = module.params['encap']\n if encap is not None:\n if encap in range(1, 4097):\n encap = 'vlan-{0}'.format(encap)\n else:\n module.fail_json(msg='Valid VLAN assigments are from 1 to 4096')\n encap_mode = module.params['encap_mode']\n epg = module.params['epg']\n netflow = module.params['netflow']\n primary_encap = module.params['primary_encap']\n if primary_encap is not None:\n if primary_encap in 
range(1, 4097):\n primary_encap = 'vlan-{0}'.format(primary_encap)\n else:\n module.fail_json(msg='Valid VLAN assigments are from 1 to 4096')\n resolution_immediacy = module.params['resolution_immediacy']\n state = module.params['state']\n tenant = module.params['tenant']\n\n if domain_type == 'phys' and vm_provider is not None:\n module.fail_json(msg=\"Domain type 'phys' cannot have a 'vm_provider'\")\n\n # Compile the full domain for URL building\n if domain_type == 'vmm':\n epg_domain = '{0}{1}'.format(VM_PROVIDER_MAPPING[vm_provider], domain)\n elif domain_type is not None:\n epg_domain = 'uni/phys-{0}'.format(domain)\n else:\n epg_domain = None\n\n aci = ACIModule(module)\n aci.construct_url(\n root_class=dict(\n aci_class='fvTenant',\n aci_rn='tn-{0}'.format(tenant),\n filter_target='eq(fvTenant.name, \"{0}\")'.format(tenant),\n module_object=tenant,\n ),\n subclass_1=dict(\n aci_class='fvAp',\n aci_rn='ap-{0}'.format(ap),\n filter_target='eq(fvAp.name, \"{0}\")'.format(ap),\n module_object=ap,\n ),\n subclass_2=dict(\n aci_class='fvAEPg',\n aci_rn='epg-{0}'.format(epg),\n filter_target='eq(fvTenant.name, \"{0}\")'.format(epg),\n module_object=epg,\n ),\n subclass_3=dict(\n aci_class='fvRsDomAtt',\n aci_rn='rsdomAtt-[{0}]'.format(epg_domain),\n filter_target='eq(fvRsDomAtt.tDn, \"{0}\")'.format(epg_domain),\n module_object=epg_domain,\n ),\n )\n\n aci.get_existing()\n\n if state == 'present':\n # Filter out module parameters with null values\n aci.payload(\n aci_class='fvRsDomAtt',\n class_config=dict(\n classPref=allow_useg,\n encap=encap,\n encapMode=encap_mode,\n instrImedcy=deploy_immediacy,\n netflowPref=netflow,\n primaryEncap=primary_encap,\n resImedcy=resolution_immediacy,\n ),\n )\n\n # Generate config diff which will be used as POST request body\n aci.get_diff(aci_class='fvRsDomAtt')\n\n # Submit changes if module not in check_mode and the proposed is different than existing\n aci.post_config()\n\n elif state == 'absent':\n aci.delete_config()\n\n module.exit_json(**aci.result)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "lib/ansible/modules/network/aci/aci_epg_to_domain.py"}], "after_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\nDOCUMENTATION = r'''\n---\nmodule: aci_epg_to_domain\nshort_description: Bind EPGs to Domains on Cisco ACI fabrics (fv:RsDomAtt)\ndescription:\n- Bind EPGs to Physical and Virtual Domains on Cisco ACI fabrics.\n- More information from the internal APIC class I(fv:RsDomAtt) at\n U(https://developer.cisco.com/media/mim-ref/MO-fvRsDomAtt.html).\nauthor:\n- Jacob McGill (@jmcgill298)\nversion_added: '2.4'\nnotes:\n- The C(tenant), C(ap), C(epg), and C(domain) used must exist before using this module in your playbook.\n The M(aci_tenant) M(aci_ap), M(aci_epg) M(aci_domain) modules can be used for this.\n- OpenStack VMM domains must not be created using this module. 
The OpenStack VMM domain is created directly\n by the Cisco APIC Neutron plugin as part of the installation and configuration.\n This module can be used to query status of an OpenStack VMM domain.\noptions:\n allow_useg:\n description:\n - Allows micro-segmentation.\n - The APIC defaults new EPG to Domain bindings to use C(encap).\n choices: [ encap, useg ]\n default: encap\n ap:\n description:\n - Name of an existing application network profile, that will contain the EPGs.\n aliases: [ app_profile, app_profile_name ]\n deploy_immediacy:\n description:\n - Determines when the policy is pushed to hardware Policy CAM.\n - The APIC defaults new EPG to Domain bindings to C(lazy).\n choices: [ immediate, lazy ]\n default: lazy\n domain:\n description:\n - Name of the physical or virtual domain being associated with the EPG.\n aliases: [ domain_name, domain_profile ]\n domain_type:\n description:\n - Determines if the Domain is physical (phys) or virtual (vmm).\n choices: [ phys, vmm ]\n aliases: [ type ]\n encap:\n description:\n - The VLAN encapsulation for the EPG when binding a VMM Domain with static encap_mode.\n - This acts as the secondary encap when using useg.\n choices: [ range from 1 to 4096 ]\n encap_mode:\n description:\n - The ecapsulataion method to be used.\n - The APIC defaults new EPG to Domain bindings to C(auto).\n choices: [ auto, vlan, vxlan ]\n default: auto\n epg:\n description:\n - Name of the end point group.\n aliases: [ epg_name ]\n netflow:\n description:\n - Determines if netflow should be enabled.\n - The APIC defaults new EPG to Domain binings to C(disabled).\n choices: [ disabled, enabled ]\n default: disabled\n primary_encap:\n description:\n - Determines the primary VLAN ID when using useg.\n choices: [ range from 1 to 4096 ]\n resolution_immediacy:\n description:\n - Determines when the policies should be resolved and available.\n - The APIC defaults new EPG to Domain bindings to C(lazy).\n choices: [ immediate, lazy, pre-provision ]\n default: lazy\n state:\n description:\n - Use C(present) or C(absent) for adding or removing.\n - Use C(query) for listing an object or multiple objects.\n choices: [ absent, present, query ]\n default: present\n tenant:\n description:\n - Name of an existing tenant.\n aliases: [ tenant_name ]\n vm_provider:\n description:\n - The VM platform for VMM Domains.\n choices: [ microsoft, openstack, vmware ]\nextends_documentation_fragment: aci\n'''\n\nEXAMPLES = r''' # '''\n\nRETURN = r''' # '''\n\nfrom ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec\nfrom ansible.module_utils.basic import AnsibleModule\n\nVM_PROVIDER_MAPPING = dict(\n microsoft=\"uni/vmmp-Microsoft/dom-\",\n openstack=\"uni/vmmp-OpenStack/dom-\",\n vmware=\"uni/vmmp-VMware/dom-\",\n)\n\n\ndef main():\n argument_spec = aci_argument_spec\n argument_spec.update(\n allow_useg=dict(type='str', choices=['encap', 'useg']),\n ap=dict(type='str', aliases=['app_profile', 'app_profile_name']),\n deploy_immediacy=dict(type='str', choices=['immediate', 'on-demand']),\n domain=dict(type='str', aliases=['domain_name', 'domain_profile']),\n domain_type=dict(type='str', choices=['phys', 'vmm'], aliases=['type']),\n encap=dict(type='int'),\n encap_mode=dict(type='str', choices=['auto', 'vlan', 'vxlan']),\n epg=dict(type='str', aliases=['name', 'epg_name']),\n netflow=dict(type='str', choices=['disabled', 'enabled']),\n primary_encap=dict(type='int'),\n resolution_immediacy=dict(type='str', choices=['immediate', 'lazy', 'pre-provision']),\n 
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),\n tenant=dict(type='str', aliases=['tenant_name']),\n vm_provider=dict(type='str', choices=['microsoft', 'openstack', 'vmware']),\n method=dict(type='str', choices=['delete', 'get', 'post'], aliases=['action'], removed_in_version='2.6'), # Deprecated starting from v2.6\n )\n\n module = AnsibleModule(\n argument_spec=argument_spec,\n supports_check_mode=True,\n required_if=[\n ['domain_type', 'vmm', ['vm_provider']],\n ['state', 'absent', ['ap', 'domain', 'domain_type', 'epg', 'tenant']],\n ['state', 'present', ['ap', 'domain', 'domain_type', 'epg', 'tenant']],\n ],\n )\n\n allow_useg = module.params['allow_useg']\n ap = module.params['ap']\n deploy_immediacy = module.params['deploy_immediacy']\n domain = module.params['domain']\n domain_type = module.params['domain_type']\n vm_provider = module.params['vm_provider']\n encap = module.params['encap']\n if encap is not None:\n if encap in range(1, 4097):\n encap = 'vlan-{0}'.format(encap)\n else:\n module.fail_json(msg='Valid VLAN assigments are from 1 to 4096')\n encap_mode = module.params['encap_mode']\n epg = module.params['epg']\n netflow = module.params['netflow']\n primary_encap = module.params['primary_encap']\n if primary_encap is not None:\n if primary_encap in range(1, 4097):\n primary_encap = 'vlan-{0}'.format(primary_encap)\n else:\n module.fail_json(msg='Valid VLAN assigments are from 1 to 4096')\n resolution_immediacy = module.params['resolution_immediacy']\n state = module.params['state']\n tenant = module.params['tenant']\n\n if domain_type == 'phys' and vm_provider is not None:\n module.fail_json(msg=\"Domain type 'phys' cannot have a 'vm_provider'\")\n\n # Compile the full domain for URL building\n if domain_type == 'vmm':\n epg_domain = '{0}{1}'.format(VM_PROVIDER_MAPPING[vm_provider], domain)\n elif domain_type is not None:\n epg_domain = 'uni/phys-{0}'.format(domain)\n else:\n epg_domain = None\n\n aci = ACIModule(module)\n aci.construct_url(\n root_class=dict(\n aci_class='fvTenant',\n aci_rn='tn-{0}'.format(tenant),\n filter_target='eq(fvTenant.name, \"{0}\")'.format(tenant),\n module_object=tenant,\n ),\n subclass_1=dict(\n aci_class='fvAp',\n aci_rn='ap-{0}'.format(ap),\n filter_target='eq(fvAp.name, \"{0}\")'.format(ap),\n module_object=ap,\n ),\n subclass_2=dict(\n aci_class='fvAEPg',\n aci_rn='epg-{0}'.format(epg),\n filter_target='eq(fvTenant.name, \"{0}\")'.format(epg),\n module_object=epg,\n ),\n subclass_3=dict(\n aci_class='fvRsDomAtt',\n aci_rn='rsdomAtt-[{0}]'.format(epg_domain),\n filter_target='eq(fvRsDomAtt.tDn, \"{0}\")'.format(epg_domain),\n module_object=epg_domain,\n ),\n )\n\n aci.get_existing()\n\n if state == 'present':\n # Filter out module parameters with null values\n aci.payload(\n aci_class='fvRsDomAtt',\n class_config=dict(\n classPref=allow_useg,\n encap=encap,\n encapMode=encap_mode,\n instrImedcy=deploy_immediacy,\n netflowPref=netflow,\n primaryEncap=primary_encap,\n resImedcy=resolution_immediacy,\n ),\n )\n\n # Generate config diff which will be used as POST request body\n aci.get_diff(aci_class='fvRsDomAtt')\n\n # Submit changes if module not in check_mode and the proposed is different than existing\n aci.post_config()\n\n elif state == 'absent':\n aci.delete_config()\n\n module.exit_json(**aci.result)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "lib/ansible/modules/network/aci/aci_epg_to_domain.py"}]}
num_tokens: 3,226
num_tokens_diff: 211
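The record above only adds documentation: `openstack` stays in `VM_PROVIDER_MAPPING` so existing, Neutron-created VMM domains can still be queried, but such domains must not be created through the module. A standalone sketch of the DN-building logic that mapping feeds, lifted out of the module code quoted in the record and shown outside of Ansible purely for illustration:

```python
from typing import Optional

# Same provider-prefix mapping as in the record; the OpenStack entry remains
# so an existing, Neutron-managed VMM domain can still be referenced or queried.
VM_PROVIDER_MAPPING = {
    "microsoft": "uni/vmmp-Microsoft/dom-",
    "openstack": "uni/vmmp-OpenStack/dom-",
    "vmware": "uni/vmmp-VMware/dom-",
}


def build_epg_domain(domain_type: str, domain: str,
                     vm_provider: Optional[str] = None) -> str:
    """Compile the full domain DN used for URL building, mirroring the record's main()."""
    if domain_type == "phys":
        if vm_provider is not None:
            raise ValueError("Domain type 'phys' cannot have a 'vm_provider'")
        return "uni/phys-{0}".format(domain)
    if domain_type == "vmm":
        if vm_provider not in VM_PROVIDER_MAPPING:
            raise ValueError("vmm domains require a known vm_provider")
        return VM_PROVIDER_MAPPING[vm_provider] + domain
    raise ValueError("unsupported domain_type: {0}".format(domain_type))


if __name__ == "__main__":
    print(build_epg_domain("vmm", "my_vmware_dom", "vmware"))  # uni/vmmp-VMware/dom-my_vmware_dom
    print(build_epg_domain("phys", "my_phys_dom"))             # uni/phys-my_phys_dom
```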
problem_id: gh_patches_debug_15678
source: rasdani/github-patches
task_type: git_diff
in_source_id: celery__celery-8338
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- database backend does not store children The [`Task`](https://github.com/celery/celery/blob/main/celery/backends/database/models.py#L20-L27) and [`TaskExtended`](https://github.com/celery/celery/blob/main/celery/backends/database/models.py#L57-L62) models for the `database` backend do not include `children`. This means that when using any `database` backend, [`AsyncResult.children`](https://github.com/celery/celery/blob/main/celery/result.py#L424) is always empty, even if a task does have children. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `celery/backends/database/models.py` Content: ``` 1 """Database models used by the SQLAlchemy result store backend.""" 2 from datetime import datetime 3 4 import sqlalchemy as sa 5 from sqlalchemy.types import PickleType 6 7 from celery import states 8 9 from .session import ResultModelBase 10 11 __all__ = ('Task', 'TaskExtended', 'TaskSet') 12 13 14 class Task(ResultModelBase): 15 """Task result/status.""" 16 17 __tablename__ = 'celery_taskmeta' 18 __table_args__ = {'sqlite_autoincrement': True} 19 20 id = sa.Column(sa.Integer, sa.Sequence('task_id_sequence'), 21 primary_key=True, autoincrement=True) 22 task_id = sa.Column(sa.String(155), unique=True) 23 status = sa.Column(sa.String(50), default=states.PENDING) 24 result = sa.Column(PickleType, nullable=True) 25 date_done = sa.Column(sa.DateTime, default=datetime.utcnow, 26 onupdate=datetime.utcnow, nullable=True) 27 traceback = sa.Column(sa.Text, nullable=True) 28 29 def __init__(self, task_id): 30 self.task_id = task_id 31 32 def to_dict(self): 33 return { 34 'task_id': self.task_id, 35 'status': self.status, 36 'result': self.result, 37 'traceback': self.traceback, 38 'date_done': self.date_done, 39 } 40 41 def __repr__(self): 42 return '<Task {0.task_id} state: {0.status}>'.format(self) 43 44 @classmethod 45 def configure(cls, schema=None, name=None): 46 cls.__table__.schema = schema 47 cls.id.default.schema = schema 48 cls.__table__.name = name or cls.__tablename__ 49 50 51 class TaskExtended(Task): 52 """For the extend result.""" 53 54 __tablename__ = 'celery_taskmeta' 55 __table_args__ = {'sqlite_autoincrement': True, 'extend_existing': True} 56 57 name = sa.Column(sa.String(155), nullable=True) 58 args = sa.Column(sa.LargeBinary, nullable=True) 59 kwargs = sa.Column(sa.LargeBinary, nullable=True) 60 worker = sa.Column(sa.String(155), nullable=True) 61 retries = sa.Column(sa.Integer, nullable=True) 62 queue = sa.Column(sa.String(155), nullable=True) 63 64 def to_dict(self): 65 task_dict = super().to_dict() 66 task_dict.update({ 67 'name': self.name, 68 'args': self.args, 69 'kwargs': self.kwargs, 70 'worker': self.worker, 71 'retries': self.retries, 72 'queue': self.queue, 73 }) 74 return task_dict 75 76 77 class TaskSet(ResultModelBase): 78 """TaskSet result.""" 79 80 __tablename__ = 'celery_tasksetmeta' 81 __table_args__ = {'sqlite_autoincrement': True} 82 83 id = sa.Column(sa.Integer, sa.Sequence('taskset_id_sequence'), 84 autoincrement=True, primary_key=True) 85 taskset_id = sa.Column(sa.String(155), unique=True) 86 result = sa.Column(PickleType, nullable=True) 87 date_done = sa.Column(sa.DateTime, default=datetime.utcnow, 88 nullable=True) 89 90 def __init__(self, taskset_id, result): 91 self.taskset_id = taskset_id 92 self.result = result 93 94 def to_dict(self): 95 return { 96 
'taskset_id': self.taskset_id, 97 'result': self.result, 98 'date_done': self.date_done, 99 } 100 101 def __repr__(self): 102 return f'<TaskSet: {self.taskset_id}>' 103 104 @classmethod 105 def configure(cls, schema=None, name=None): 106 cls.__table__.schema = schema 107 cls.id.default.schema = schema 108 cls.__table__.name = name or cls.__tablename__ 109 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/celery/backends/database/models.py b/celery/backends/database/models.py --- a/celery/backends/database/models.py +++ b/celery/backends/database/models.py @@ -25,6 +25,7 @@ date_done = sa.Column(sa.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=True) traceback = sa.Column(sa.Text, nullable=True) + children = sa.Column(PickleType, nullable=True) def __init__(self, task_id): self.task_id = task_id @@ -36,6 +37,7 @@ 'result': self.result, 'traceback': self.traceback, 'date_done': self.date_done, + 'children': self.children, } def __repr__(self):
{"golden_diff": "diff --git a/celery/backends/database/models.py b/celery/backends/database/models.py\n--- a/celery/backends/database/models.py\n+++ b/celery/backends/database/models.py\n@@ -25,6 +25,7 @@\n date_done = sa.Column(sa.DateTime, default=datetime.utcnow,\n onupdate=datetime.utcnow, nullable=True)\n traceback = sa.Column(sa.Text, nullable=True)\n+ children = sa.Column(PickleType, nullable=True)\n \n def __init__(self, task_id):\n self.task_id = task_id\n@@ -36,6 +37,7 @@\n 'result': self.result,\n 'traceback': self.traceback,\n 'date_done': self.date_done,\n+ 'children': self.children,\n }\n \n def __repr__(self):\n", "issue": "database backend does not store children\nThe [`Task`](https://github.com/celery/celery/blob/main/celery/backends/database/models.py#L20-L27) and [`TaskExtended`](https://github.com/celery/celery/blob/main/celery/backends/database/models.py#L57-L62) models for the `database` backend do not include `children`. This means that when using any `database` backend, [`AsyncResult.children`](https://github.com/celery/celery/blob/main/celery/result.py#L424) is always empty, even if a task does have children.\n", "before_files": [{"content": "\"\"\"Database models used by the SQLAlchemy result store backend.\"\"\"\nfrom datetime import datetime\n\nimport sqlalchemy as sa\nfrom sqlalchemy.types import PickleType\n\nfrom celery import states\n\nfrom .session import ResultModelBase\n\n__all__ = ('Task', 'TaskExtended', 'TaskSet')\n\n\nclass Task(ResultModelBase):\n \"\"\"Task result/status.\"\"\"\n\n __tablename__ = 'celery_taskmeta'\n __table_args__ = {'sqlite_autoincrement': True}\n\n id = sa.Column(sa.Integer, sa.Sequence('task_id_sequence'),\n primary_key=True, autoincrement=True)\n task_id = sa.Column(sa.String(155), unique=True)\n status = sa.Column(sa.String(50), default=states.PENDING)\n result = sa.Column(PickleType, nullable=True)\n date_done = sa.Column(sa.DateTime, default=datetime.utcnow,\n onupdate=datetime.utcnow, nullable=True)\n traceback = sa.Column(sa.Text, nullable=True)\n\n def __init__(self, task_id):\n self.task_id = task_id\n\n def to_dict(self):\n return {\n 'task_id': self.task_id,\n 'status': self.status,\n 'result': self.result,\n 'traceback': self.traceback,\n 'date_done': self.date_done,\n }\n\n def __repr__(self):\n return '<Task {0.task_id} state: {0.status}>'.format(self)\n\n @classmethod\n def configure(cls, schema=None, name=None):\n cls.__table__.schema = schema\n cls.id.default.schema = schema\n cls.__table__.name = name or cls.__tablename__\n\n\nclass TaskExtended(Task):\n \"\"\"For the extend result.\"\"\"\n\n __tablename__ = 'celery_taskmeta'\n __table_args__ = {'sqlite_autoincrement': True, 'extend_existing': True}\n\n name = sa.Column(sa.String(155), nullable=True)\n args = sa.Column(sa.LargeBinary, nullable=True)\n kwargs = sa.Column(sa.LargeBinary, nullable=True)\n worker = sa.Column(sa.String(155), nullable=True)\n retries = sa.Column(sa.Integer, nullable=True)\n queue = sa.Column(sa.String(155), nullable=True)\n\n def to_dict(self):\n task_dict = super().to_dict()\n task_dict.update({\n 'name': self.name,\n 'args': self.args,\n 'kwargs': self.kwargs,\n 'worker': self.worker,\n 'retries': self.retries,\n 'queue': self.queue,\n })\n return task_dict\n\n\nclass TaskSet(ResultModelBase):\n \"\"\"TaskSet result.\"\"\"\n\n __tablename__ = 'celery_tasksetmeta'\n __table_args__ = {'sqlite_autoincrement': True}\n\n id = sa.Column(sa.Integer, sa.Sequence('taskset_id_sequence'),\n autoincrement=True, primary_key=True)\n taskset_id = 
sa.Column(sa.String(155), unique=True)\n result = sa.Column(PickleType, nullable=True)\n date_done = sa.Column(sa.DateTime, default=datetime.utcnow,\n nullable=True)\n\n def __init__(self, taskset_id, result):\n self.taskset_id = taskset_id\n self.result = result\n\n def to_dict(self):\n return {\n 'taskset_id': self.taskset_id,\n 'result': self.result,\n 'date_done': self.date_done,\n }\n\n def __repr__(self):\n return f'<TaskSet: {self.taskset_id}>'\n\n @classmethod\n def configure(cls, schema=None, name=None):\n cls.__table__.schema = schema\n cls.id.default.schema = schema\n cls.__table__.name = name or cls.__tablename__\n", "path": "celery/backends/database/models.py"}], "after_files": [{"content": "\"\"\"Database models used by the SQLAlchemy result store backend.\"\"\"\nfrom datetime import datetime\n\nimport sqlalchemy as sa\nfrom sqlalchemy.types import PickleType\n\nfrom celery import states\n\nfrom .session import ResultModelBase\n\n__all__ = ('Task', 'TaskExtended', 'TaskSet')\n\n\nclass Task(ResultModelBase):\n \"\"\"Task result/status.\"\"\"\n\n __tablename__ = 'celery_taskmeta'\n __table_args__ = {'sqlite_autoincrement': True}\n\n id = sa.Column(sa.Integer, sa.Sequence('task_id_sequence'),\n primary_key=True, autoincrement=True)\n task_id = sa.Column(sa.String(155), unique=True)\n status = sa.Column(sa.String(50), default=states.PENDING)\n result = sa.Column(PickleType, nullable=True)\n date_done = sa.Column(sa.DateTime, default=datetime.utcnow,\n onupdate=datetime.utcnow, nullable=True)\n traceback = sa.Column(sa.Text, nullable=True)\n children = sa.Column(PickleType, nullable=True)\n\n def __init__(self, task_id):\n self.task_id = task_id\n\n def to_dict(self):\n return {\n 'task_id': self.task_id,\n 'status': self.status,\n 'result': self.result,\n 'traceback': self.traceback,\n 'date_done': self.date_done,\n 'children': self.children,\n }\n\n def __repr__(self):\n return '<Task {0.task_id} state: {0.status}>'.format(self)\n\n @classmethod\n def configure(cls, schema=None, name=None):\n cls.__table__.schema = schema\n cls.id.default.schema = schema\n cls.__table__.name = name or cls.__tablename__\n\n\nclass TaskExtended(Task):\n \"\"\"For the extend result.\"\"\"\n\n __tablename__ = 'celery_taskmeta'\n __table_args__ = {'sqlite_autoincrement': True, 'extend_existing': True}\n\n name = sa.Column(sa.String(155), nullable=True)\n args = sa.Column(sa.LargeBinary, nullable=True)\n kwargs = sa.Column(sa.LargeBinary, nullable=True)\n worker = sa.Column(sa.String(155), nullable=True)\n retries = sa.Column(sa.Integer, nullable=True)\n queue = sa.Column(sa.String(155), nullable=True)\n\n def to_dict(self):\n task_dict = super().to_dict()\n task_dict.update({\n 'name': self.name,\n 'args': self.args,\n 'kwargs': self.kwargs,\n 'worker': self.worker,\n 'retries': self.retries,\n 'queue': self.queue,\n })\n return task_dict\n\n\nclass TaskSet(ResultModelBase):\n \"\"\"TaskSet result.\"\"\"\n\n __tablename__ = 'celery_tasksetmeta'\n __table_args__ = {'sqlite_autoincrement': True}\n\n id = sa.Column(sa.Integer, sa.Sequence('taskset_id_sequence'),\n autoincrement=True, primary_key=True)\n taskset_id = sa.Column(sa.String(155), unique=True)\n result = sa.Column(PickleType, nullable=True)\n date_done = sa.Column(sa.DateTime, default=datetime.utcnow,\n nullable=True)\n\n def __init__(self, taskset_id, result):\n self.taskset_id = taskset_id\n self.result = result\n\n def to_dict(self):\n return {\n 'taskset_id': self.taskset_id,\n 'result': self.result,\n 'date_done': self.date_done,\n 
}\n\n def __repr__(self):\n return f'<TaskSet: {self.taskset_id}>'\n\n @classmethod\n def configure(cls, schema=None, name=None):\n cls.__table__.schema = schema\n cls.id.default.schema = schema\n cls.__table__.name = name or cls.__tablename__\n", "path": "celery/backends/database/models.py"}]}
num_tokens: 1,411
num_tokens_diff: 176
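The record above adds a `children` column to the database backend's Task model so that serialized child results survive the round trip through `to_dict()`. A self-contained sketch of the same shape (the local declarative base and the `demo_taskmeta` table name are stand-ins for Celery's `ResultModelBase`; assumes SQLAlchemy 1.4+ for the import path):

```python
import sqlalchemy as sa
from sqlalchemy.orm import declarative_base  # SQLAlchemy 1.4+ import path
from sqlalchemy.types import PickleType

Base = declarative_base()


class TaskResult(Base):
    """Minimal stand-in for the backend's Task model."""

    __tablename__ = "demo_taskmeta"

    id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
    task_id = sa.Column(sa.String(155), unique=True)
    result = sa.Column(PickleType, nullable=True)
    children = sa.Column(PickleType, nullable=True)  # the field the fix introduces

    def to_dict(self):
        # Including 'children' here is what lets AsyncResult.children be
        # reconstructed from a database-backed result instead of coming back empty.
        return {
            "task_id": self.task_id,
            "result": self.result,
            "children": self.children,
        }
```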
problem_id: gh_patches_debug_19000
source: rasdani/github-patches
task_type: git_diff
in_source_id: open-telemetry__opentelemetry-python-contrib-478
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- FastAPI Instrumentor not populating attributes required for Azure exporter **Describe your environment** **Package Version - azure-monitor-opentelemetry-exporter = {version = "^1.0.0-beta.3", allow-prereleases = true} opentelemetry-instrumentation-fastapi = "^0.19b0" Operating System - macOS Mojave v10.14.6** **Python Version - v3.9.0** **Steps to reproduce** When using the azure-monitor-opentelemetry-exporter in conjunction with opentelemetry-instrumentation-fastapi the response code is not populated by the exporter, raised this issue with Azure monitor team and was redirected here; > Looking at the output, it seems like the spans are missing some attributes. This is probably an issue in the opentelemetry-instrumentation-asgi, could you file an issue here for this problem specifically? Basically, the exporter sees if http.status_code is populated in the span attributes to populate response_Code in app insights. Since the attribute is not there, this is why you are seeing 0s. Normally, it is up to the instrumentation to populate this. [Azure monitor issue#17839](https://github.com/Azure/azure-sdk-for-python/issues/17839) Spans obtained from console exporter: - one for successful - 201 request - one for failed - 422 request - one on exit [json.zip](https://github.com/open-telemetry/opentelemetry-python-contrib/files/6281056/json.zip) Screen grab from Azure App Insights indication reponse code as 0: ![image](https://user-images.githubusercontent.com/43657587/114087357-6bae9380-9879-11eb-98db-3d3b1a0c1055.png) **What is the expected behavior?** Compatibility with Azure monitor exporter **Additional context** Detailed context for this issue [Azure monitor issue#17839](https://github.com/Azure/azure-sdk-for-python/issues/17839) Please let me know in case of additional info required. Thanks. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py` Content: ``` 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """ 16 The opentelemetry-instrumentation-asgi package provides an ASGI middleware that can be used 17 on any ASGI framework (such as Django-channels / Quart) to track requests 18 timing through OpenTelemetry. 
19 """ 20 21 import typing 22 import urllib 23 from functools import wraps 24 from typing import Tuple 25 26 from asgiref.compatibility import guarantee_single_callable 27 28 from opentelemetry import context, trace 29 from opentelemetry.instrumentation.asgi.version import __version__ # noqa 30 from opentelemetry.instrumentation.utils import http_status_to_status_code 31 from opentelemetry.propagate import extract 32 from opentelemetry.propagators.textmap import Getter 33 from opentelemetry.semconv.trace import SpanAttributes 34 from opentelemetry.trace.status import Status, StatusCode 35 36 37 class ASGIGetter(Getter): 38 def get( 39 self, carrier: dict, key: str 40 ) -> typing.Optional[typing.List[str]]: 41 """Getter implementation to retrieve a HTTP header value from the ASGI 42 scope. 43 44 Args: 45 carrier: ASGI scope object 46 key: header name in scope 47 Returns: 48 A list with a single string with the header value if it exists, 49 else None. 50 """ 51 headers = carrier.get("headers") 52 if not headers: 53 return None 54 55 # asgi header keys are in lower case 56 key = key.lower() 57 decoded = [ 58 _value.decode("utf8") 59 for (_key, _value) in headers 60 if _key.decode("utf8") == key 61 ] 62 if not decoded: 63 return None 64 return decoded 65 66 def keys(self, carrier: dict) -> typing.List[str]: 67 return list(carrier.keys()) 68 69 70 asgi_getter = ASGIGetter() 71 72 73 def collect_request_attributes(scope): 74 """Collects HTTP request attributes from the ASGI scope and returns a 75 dictionary to be used as span creation attributes.""" 76 server_host, port, http_url = get_host_port_url_tuple(scope) 77 query_string = scope.get("query_string") 78 if query_string and http_url: 79 if isinstance(query_string, bytes): 80 query_string = query_string.decode("utf8") 81 http_url = http_url + ("?" + urllib.parse.unquote(query_string)) 82 83 result = { 84 SpanAttributes.HTTP_SCHEME: scope.get("scheme"), 85 SpanAttributes.HTTP_HOST: server_host, 86 SpanAttributes.NET_HOST_PORT: port, 87 SpanAttributes.HTTP_FLAVOR: scope.get("http_version"), 88 SpanAttributes.HTTP_TARGET: scope.get("path"), 89 SpanAttributes.HTTP_URL: http_url, 90 } 91 http_method = scope.get("method") 92 if http_method: 93 result[SpanAttributes.HTTP_METHOD] = http_method 94 95 http_host_value_list = asgi_getter.get(scope, "host") 96 if http_host_value_list: 97 result[SpanAttributes.HTTP_SERVER_NAME] = ",".join( 98 http_host_value_list 99 ) 100 http_user_agent = asgi_getter.get(scope, "user-agent") 101 if http_user_agent: 102 result[SpanAttributes.HTTP_USER_AGENT] = http_user_agent[0] 103 104 if "client" in scope and scope["client"] is not None: 105 result[SpanAttributes.NET_PEER_IP] = scope.get("client")[0] 106 result[SpanAttributes.NET_PEER_PORT] = scope.get("client")[1] 107 108 # remove None values 109 result = {k: v for k, v in result.items() if v is not None} 110 111 return result 112 113 114 def get_host_port_url_tuple(scope): 115 """Returns (host, port, full_url) tuple. 
116 """ 117 server = scope.get("server") or ["0.0.0.0", 80] 118 port = server[1] 119 server_host = server[0] + (":" + str(port) if port != 80 else "") 120 full_path = scope.get("root_path", "") + scope.get("path", "") 121 http_url = scope.get("scheme", "http") + "://" + server_host + full_path 122 return server_host, port, http_url 123 124 125 def set_status_code(span, status_code): 126 """Adds HTTP response attributes to span using the status_code argument.""" 127 if not span.is_recording(): 128 return 129 try: 130 status_code = int(status_code) 131 except ValueError: 132 span.set_status( 133 Status( 134 StatusCode.ERROR, 135 "Non-integer HTTP status: " + repr(status_code), 136 ) 137 ) 138 else: 139 span.set_attribute(SpanAttributes.HTTP_STATUS_CODE, status_code) 140 span.set_status(Status(http_status_to_status_code(status_code))) 141 142 143 def get_default_span_details(scope: dict) -> Tuple[str, dict]: 144 """Default implementation for span_details_callback 145 146 Args: 147 scope: the asgi scope dictionary 148 149 Returns: 150 a tuple of the span, and any attributes to attach to the 151 span. 152 """ 153 method_or_path = scope.get("method") or scope.get("path") 154 155 return method_or_path, {} 156 157 158 class OpenTelemetryMiddleware: 159 """The ASGI application middleware. 160 161 This class is an ASGI middleware that starts and annotates spans for any 162 requests it is invoked with. 163 164 Args: 165 app: The ASGI application callable to forward requests to. 166 span_details_callback: Callback which should return a string 167 and a tuple, representing the desired span name and a 168 dictionary with any additional span attributes to set. 169 Optional: Defaults to get_default_span_details. 170 tracer_provider: The optional tracer provider to use. If omitted 171 the current globally configured one is used. 172 """ 173 174 def __init__( 175 self, 176 app, 177 excluded_urls=None, 178 span_details_callback=None, 179 tracer_provider=None, 180 ): 181 self.app = guarantee_single_callable(app) 182 self.tracer = trace.get_tracer(__name__, __version__, tracer_provider) 183 self.span_details_callback = ( 184 span_details_callback or get_default_span_details 185 ) 186 self.excluded_urls = excluded_urls 187 188 async def __call__(self, scope, receive, send): 189 """The ASGI application 190 191 Args: 192 scope: A ASGI environment. 193 receive: An awaitable callable yielding dictionaries 194 send: An awaitable callable taking a single dictionary as argument. 195 """ 196 if scope["type"] not in ("http", "websocket"): 197 return await self.app(scope, receive, send) 198 199 _, _, url = get_host_port_url_tuple(scope) 200 if self.excluded_urls and self.excluded_urls.url_disabled(url): 201 return await self.app(scope, receive, send) 202 203 token = context.attach(extract(scope, getter=asgi_getter)) 204 span_name, additional_attributes = self.span_details_callback(scope) 205 206 try: 207 with self.tracer.start_as_current_span( 208 span_name + " asgi", kind=trace.SpanKind.SERVER, 209 ) as span: 210 if span.is_recording(): 211 attributes = collect_request_attributes(scope) 212 attributes.update(additional_attributes) 213 for key, value in attributes.items(): 214 span.set_attribute(key, value) 215 216 @wraps(receive) 217 async def wrapped_receive(): 218 with self.tracer.start_as_current_span( 219 span_name + " asgi." 
+ scope["type"] + ".receive" 220 ) as receive_span: 221 message = await receive() 222 if receive_span.is_recording(): 223 if message["type"] == "websocket.receive": 224 set_status_code(receive_span, 200) 225 receive_span.set_attribute("type", message["type"]) 226 return message 227 228 @wraps(send) 229 async def wrapped_send(message): 230 with self.tracer.start_as_current_span( 231 span_name + " asgi." + scope["type"] + ".send" 232 ) as send_span: 233 if send_span.is_recording(): 234 if message["type"] == "http.response.start": 235 status_code = message["status"] 236 set_status_code(send_span, status_code) 237 elif message["type"] == "websocket.send": 238 set_status_code(send_span, 200) 239 send_span.set_attribute("type", message["type"]) 240 await send(message) 241 242 await self.app(scope, wrapped_receive, wrapped_send) 243 finally: 244 context.detach(token) 245 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py b/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py --- a/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py @@ -233,8 +233,10 @@ if send_span.is_recording(): if message["type"] == "http.response.start": status_code = message["status"] + set_status_code(span, status_code) set_status_code(send_span, status_code) elif message["type"] == "websocket.send": + set_status_code(span, 200) set_status_code(send_span, 200) send_span.set_attribute("type", message["type"]) await send(message)
{"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py b/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py\n--- a/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py\n+++ b/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py\n@@ -233,8 +233,10 @@\n if send_span.is_recording():\n if message[\"type\"] == \"http.response.start\":\n status_code = message[\"status\"]\n+ set_status_code(span, status_code)\n set_status_code(send_span, status_code)\n elif message[\"type\"] == \"websocket.send\":\n+ set_status_code(span, 200)\n set_status_code(send_span, 200)\n send_span.set_attribute(\"type\", message[\"type\"])\n await send(message)\n", "issue": "FastAPI Instrumentor not populating attributes required for Azure exporter\n\r\n\r\n**Describe your environment** \r\n\r\n**Package Version - azure-monitor-opentelemetry-exporter = {version = \"^1.0.0-beta.3\", allow-prereleases = true}\r\nopentelemetry-instrumentation-fastapi = \"^0.19b0\"\r\nOperating System - macOS Mojave v10.14.6**\r\n**Python Version - v3.9.0**\r\n\r\n**Steps to reproduce**\r\nWhen using the azure-monitor-opentelemetry-exporter in conjunction with opentelemetry-instrumentation-fastapi \r\nthe response code is not populated by the exporter, raised this issue with Azure monitor team and was redirected here;\r\n\r\n> Looking at the output, it seems like the spans are missing some attributes. This is probably an issue in the opentelemetry-instrumentation-asgi, could you file an issue here for this problem specifically? Basically, the exporter sees if http.status_code is populated in the span attributes to populate response_Code in app insights. Since the attribute is not there, this is why you are seeing 0s. 
Normally, it is up to the instrumentation to populate this.\r\n\r\n[Azure monitor issue#17839](https://github.com/Azure/azure-sdk-for-python/issues/17839)\r\n\r\nSpans obtained from console exporter: \r\n\r\n- one for successful - 201 request\r\n- one for failed - 422 request\r\n- one on exit\r\n\r\n[json.zip](https://github.com/open-telemetry/opentelemetry-python-contrib/files/6281056/json.zip)\r\n\r\nScreen grab from Azure App Insights indication reponse code as 0:\r\n\r\n![image](https://user-images.githubusercontent.com/43657587/114087357-6bae9380-9879-11eb-98db-3d3b1a0c1055.png)\r\n\r\n**What is the expected behavior?**\r\nCompatibility with Azure monitor exporter\r\n\r\n**Additional context**\r\nDetailed context for this issue\r\n\r\n[Azure monitor issue#17839](https://github.com/Azure/azure-sdk-for-python/issues/17839)\r\n\r\nPlease let me know in case of additional info required.\r\n\r\nThanks.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThe opentelemetry-instrumentation-asgi package provides an ASGI middleware that can be used\non any ASGI framework (such as Django-channels / Quart) to track requests\ntiming through OpenTelemetry.\n\"\"\"\n\nimport typing\nimport urllib\nfrom functools import wraps\nfrom typing import Tuple\n\nfrom asgiref.compatibility import guarantee_single_callable\n\nfrom opentelemetry import context, trace\nfrom opentelemetry.instrumentation.asgi.version import __version__ # noqa\nfrom opentelemetry.instrumentation.utils import http_status_to_status_code\nfrom opentelemetry.propagate import extract\nfrom opentelemetry.propagators.textmap import Getter\nfrom opentelemetry.semconv.trace import SpanAttributes\nfrom opentelemetry.trace.status import Status, StatusCode\n\n\nclass ASGIGetter(Getter):\n def get(\n self, carrier: dict, key: str\n ) -> typing.Optional[typing.List[str]]:\n \"\"\"Getter implementation to retrieve a HTTP header value from the ASGI\n scope.\n\n Args:\n carrier: ASGI scope object\n key: header name in scope\n Returns:\n A list with a single string with the header value if it exists,\n else None.\n \"\"\"\n headers = carrier.get(\"headers\")\n if not headers:\n return None\n\n # asgi header keys are in lower case\n key = key.lower()\n decoded = [\n _value.decode(\"utf8\")\n for (_key, _value) in headers\n if _key.decode(\"utf8\") == key\n ]\n if not decoded:\n return None\n return decoded\n\n def keys(self, carrier: dict) -> typing.List[str]:\n return list(carrier.keys())\n\n\nasgi_getter = ASGIGetter()\n\n\ndef collect_request_attributes(scope):\n \"\"\"Collects HTTP request attributes from the ASGI scope and returns a\n dictionary to be used as span creation attributes.\"\"\"\n server_host, port, http_url = get_host_port_url_tuple(scope)\n query_string = scope.get(\"query_string\")\n if query_string and http_url:\n if isinstance(query_string, bytes):\n query_string = query_string.decode(\"utf8\")\n http_url = http_url + (\"?\" + 
urllib.parse.unquote(query_string))\n\n result = {\n SpanAttributes.HTTP_SCHEME: scope.get(\"scheme\"),\n SpanAttributes.HTTP_HOST: server_host,\n SpanAttributes.NET_HOST_PORT: port,\n SpanAttributes.HTTP_FLAVOR: scope.get(\"http_version\"),\n SpanAttributes.HTTP_TARGET: scope.get(\"path\"),\n SpanAttributes.HTTP_URL: http_url,\n }\n http_method = scope.get(\"method\")\n if http_method:\n result[SpanAttributes.HTTP_METHOD] = http_method\n\n http_host_value_list = asgi_getter.get(scope, \"host\")\n if http_host_value_list:\n result[SpanAttributes.HTTP_SERVER_NAME] = \",\".join(\n http_host_value_list\n )\n http_user_agent = asgi_getter.get(scope, \"user-agent\")\n if http_user_agent:\n result[SpanAttributes.HTTP_USER_AGENT] = http_user_agent[0]\n\n if \"client\" in scope and scope[\"client\"] is not None:\n result[SpanAttributes.NET_PEER_IP] = scope.get(\"client\")[0]\n result[SpanAttributes.NET_PEER_PORT] = scope.get(\"client\")[1]\n\n # remove None values\n result = {k: v for k, v in result.items() if v is not None}\n\n return result\n\n\ndef get_host_port_url_tuple(scope):\n \"\"\"Returns (host, port, full_url) tuple.\n \"\"\"\n server = scope.get(\"server\") or [\"0.0.0.0\", 80]\n port = server[1]\n server_host = server[0] + (\":\" + str(port) if port != 80 else \"\")\n full_path = scope.get(\"root_path\", \"\") + scope.get(\"path\", \"\")\n http_url = scope.get(\"scheme\", \"http\") + \"://\" + server_host + full_path\n return server_host, port, http_url\n\n\ndef set_status_code(span, status_code):\n \"\"\"Adds HTTP response attributes to span using the status_code argument.\"\"\"\n if not span.is_recording():\n return\n try:\n status_code = int(status_code)\n except ValueError:\n span.set_status(\n Status(\n StatusCode.ERROR,\n \"Non-integer HTTP status: \" + repr(status_code),\n )\n )\n else:\n span.set_attribute(SpanAttributes.HTTP_STATUS_CODE, status_code)\n span.set_status(Status(http_status_to_status_code(status_code)))\n\n\ndef get_default_span_details(scope: dict) -> Tuple[str, dict]:\n \"\"\"Default implementation for span_details_callback\n\n Args:\n scope: the asgi scope dictionary\n\n Returns:\n a tuple of the span, and any attributes to attach to the\n span.\n \"\"\"\n method_or_path = scope.get(\"method\") or scope.get(\"path\")\n\n return method_or_path, {}\n\n\nclass OpenTelemetryMiddleware:\n \"\"\"The ASGI application middleware.\n\n This class is an ASGI middleware that starts and annotates spans for any\n requests it is invoked with.\n\n Args:\n app: The ASGI application callable to forward requests to.\n span_details_callback: Callback which should return a string\n and a tuple, representing the desired span name and a\n dictionary with any additional span attributes to set.\n Optional: Defaults to get_default_span_details.\n tracer_provider: The optional tracer provider to use. 
If omitted\n the current globally configured one is used.\n \"\"\"\n\n def __init__(\n self,\n app,\n excluded_urls=None,\n span_details_callback=None,\n tracer_provider=None,\n ):\n self.app = guarantee_single_callable(app)\n self.tracer = trace.get_tracer(__name__, __version__, tracer_provider)\n self.span_details_callback = (\n span_details_callback or get_default_span_details\n )\n self.excluded_urls = excluded_urls\n\n async def __call__(self, scope, receive, send):\n \"\"\"The ASGI application\n\n Args:\n scope: A ASGI environment.\n receive: An awaitable callable yielding dictionaries\n send: An awaitable callable taking a single dictionary as argument.\n \"\"\"\n if scope[\"type\"] not in (\"http\", \"websocket\"):\n return await self.app(scope, receive, send)\n\n _, _, url = get_host_port_url_tuple(scope)\n if self.excluded_urls and self.excluded_urls.url_disabled(url):\n return await self.app(scope, receive, send)\n\n token = context.attach(extract(scope, getter=asgi_getter))\n span_name, additional_attributes = self.span_details_callback(scope)\n\n try:\n with self.tracer.start_as_current_span(\n span_name + \" asgi\", kind=trace.SpanKind.SERVER,\n ) as span:\n if span.is_recording():\n attributes = collect_request_attributes(scope)\n attributes.update(additional_attributes)\n for key, value in attributes.items():\n span.set_attribute(key, value)\n\n @wraps(receive)\n async def wrapped_receive():\n with self.tracer.start_as_current_span(\n span_name + \" asgi.\" + scope[\"type\"] + \".receive\"\n ) as receive_span:\n message = await receive()\n if receive_span.is_recording():\n if message[\"type\"] == \"websocket.receive\":\n set_status_code(receive_span, 200)\n receive_span.set_attribute(\"type\", message[\"type\"])\n return message\n\n @wraps(send)\n async def wrapped_send(message):\n with self.tracer.start_as_current_span(\n span_name + \" asgi.\" + scope[\"type\"] + \".send\"\n ) as send_span:\n if send_span.is_recording():\n if message[\"type\"] == \"http.response.start\":\n status_code = message[\"status\"]\n set_status_code(send_span, status_code)\n elif message[\"type\"] == \"websocket.send\":\n set_status_code(send_span, 200)\n send_span.set_attribute(\"type\", message[\"type\"])\n await send(message)\n\n await self.app(scope, wrapped_receive, wrapped_send)\n finally:\n context.detach(token)\n", "path": "instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThe opentelemetry-instrumentation-asgi package provides an ASGI middleware that can be used\non any ASGI framework (such as Django-channels / Quart) to track requests\ntiming through OpenTelemetry.\n\"\"\"\n\nimport typing\nimport urllib\nfrom functools import wraps\nfrom typing import Tuple\n\nfrom asgiref.compatibility import guarantee_single_callable\n\nfrom opentelemetry import context, trace\nfrom 
opentelemetry.instrumentation.asgi.version import __version__ # noqa\nfrom opentelemetry.instrumentation.utils import http_status_to_status_code\nfrom opentelemetry.propagate import extract\nfrom opentelemetry.propagators.textmap import Getter\nfrom opentelemetry.semconv.trace import SpanAttributes\nfrom opentelemetry.trace.status import Status, StatusCode\n\n\nclass ASGIGetter(Getter):\n def get(\n self, carrier: dict, key: str\n ) -> typing.Optional[typing.List[str]]:\n \"\"\"Getter implementation to retrieve a HTTP header value from the ASGI\n scope.\n\n Args:\n carrier: ASGI scope object\n key: header name in scope\n Returns:\n A list with a single string with the header value if it exists,\n else None.\n \"\"\"\n headers = carrier.get(\"headers\")\n if not headers:\n return None\n\n # asgi header keys are in lower case\n key = key.lower()\n decoded = [\n _value.decode(\"utf8\")\n for (_key, _value) in headers\n if _key.decode(\"utf8\") == key\n ]\n if not decoded:\n return None\n return decoded\n\n def keys(self, carrier: dict) -> typing.List[str]:\n return list(carrier.keys())\n\n\nasgi_getter = ASGIGetter()\n\n\ndef collect_request_attributes(scope):\n \"\"\"Collects HTTP request attributes from the ASGI scope and returns a\n dictionary to be used as span creation attributes.\"\"\"\n server_host, port, http_url = get_host_port_url_tuple(scope)\n query_string = scope.get(\"query_string\")\n if query_string and http_url:\n if isinstance(query_string, bytes):\n query_string = query_string.decode(\"utf8\")\n http_url = http_url + (\"?\" + urllib.parse.unquote(query_string))\n\n result = {\n SpanAttributes.HTTP_SCHEME: scope.get(\"scheme\"),\n SpanAttributes.HTTP_HOST: server_host,\n SpanAttributes.NET_HOST_PORT: port,\n SpanAttributes.HTTP_FLAVOR: scope.get(\"http_version\"),\n SpanAttributes.HTTP_TARGET: scope.get(\"path\"),\n SpanAttributes.HTTP_URL: http_url,\n }\n http_method = scope.get(\"method\")\n if http_method:\n result[SpanAttributes.HTTP_METHOD] = http_method\n\n http_host_value_list = asgi_getter.get(scope, \"host\")\n if http_host_value_list:\n result[SpanAttributes.HTTP_SERVER_NAME] = \",\".join(\n http_host_value_list\n )\n http_user_agent = asgi_getter.get(scope, \"user-agent\")\n if http_user_agent:\n result[SpanAttributes.HTTP_USER_AGENT] = http_user_agent[0]\n\n if \"client\" in scope and scope[\"client\"] is not None:\n result[SpanAttributes.NET_PEER_IP] = scope.get(\"client\")[0]\n result[SpanAttributes.NET_PEER_PORT] = scope.get(\"client\")[1]\n\n # remove None values\n result = {k: v for k, v in result.items() if v is not None}\n\n return result\n\n\ndef get_host_port_url_tuple(scope):\n \"\"\"Returns (host, port, full_url) tuple.\n \"\"\"\n server = scope.get(\"server\") or [\"0.0.0.0\", 80]\n port = server[1]\n server_host = server[0] + (\":\" + str(port) if port != 80 else \"\")\n full_path = scope.get(\"root_path\", \"\") + scope.get(\"path\", \"\")\n http_url = scope.get(\"scheme\", \"http\") + \"://\" + server_host + full_path\n return server_host, port, http_url\n\n\ndef set_status_code(span, status_code):\n \"\"\"Adds HTTP response attributes to span using the status_code argument.\"\"\"\n if not span.is_recording():\n return\n try:\n status_code = int(status_code)\n except ValueError:\n span.set_status(\n Status(\n StatusCode.ERROR,\n \"Non-integer HTTP status: \" + repr(status_code),\n )\n )\n else:\n span.set_attribute(SpanAttributes.HTTP_STATUS_CODE, status_code)\n span.set_status(Status(http_status_to_status_code(status_code)))\n\n\ndef 
get_default_span_details(scope: dict) -> Tuple[str, dict]:\n \"\"\"Default implementation for span_details_callback\n\n Args:\n scope: the asgi scope dictionary\n\n Returns:\n a tuple of the span, and any attributes to attach to the\n span.\n \"\"\"\n method_or_path = scope.get(\"method\") or scope.get(\"path\")\n\n return method_or_path, {}\n\n\nclass OpenTelemetryMiddleware:\n \"\"\"The ASGI application middleware.\n\n This class is an ASGI middleware that starts and annotates spans for any\n requests it is invoked with.\n\n Args:\n app: The ASGI application callable to forward requests to.\n span_details_callback: Callback which should return a string\n and a tuple, representing the desired span name and a\n dictionary with any additional span attributes to set.\n Optional: Defaults to get_default_span_details.\n tracer_provider: The optional tracer provider to use. If omitted\n the current globally configured one is used.\n \"\"\"\n\n def __init__(\n self,\n app,\n excluded_urls=None,\n span_details_callback=None,\n tracer_provider=None,\n ):\n self.app = guarantee_single_callable(app)\n self.tracer = trace.get_tracer(__name__, __version__, tracer_provider)\n self.span_details_callback = (\n span_details_callback or get_default_span_details\n )\n self.excluded_urls = excluded_urls\n\n async def __call__(self, scope, receive, send):\n \"\"\"The ASGI application\n\n Args:\n scope: A ASGI environment.\n receive: An awaitable callable yielding dictionaries\n send: An awaitable callable taking a single dictionary as argument.\n \"\"\"\n if scope[\"type\"] not in (\"http\", \"websocket\"):\n return await self.app(scope, receive, send)\n\n _, _, url = get_host_port_url_tuple(scope)\n if self.excluded_urls and self.excluded_urls.url_disabled(url):\n return await self.app(scope, receive, send)\n\n token = context.attach(extract(scope, getter=asgi_getter))\n span_name, additional_attributes = self.span_details_callback(scope)\n\n try:\n with self.tracer.start_as_current_span(\n span_name + \" asgi\", kind=trace.SpanKind.SERVER,\n ) as span:\n if span.is_recording():\n attributes = collect_request_attributes(scope)\n attributes.update(additional_attributes)\n for key, value in attributes.items():\n span.set_attribute(key, value)\n\n @wraps(receive)\n async def wrapped_receive():\n with self.tracer.start_as_current_span(\n span_name + \" asgi.\" + scope[\"type\"] + \".receive\"\n ) as receive_span:\n message = await receive()\n if receive_span.is_recording():\n if message[\"type\"] == \"websocket.receive\":\n set_status_code(receive_span, 200)\n receive_span.set_attribute(\"type\", message[\"type\"])\n return message\n\n @wraps(send)\n async def wrapped_send(message):\n with self.tracer.start_as_current_span(\n span_name + \" asgi.\" + scope[\"type\"] + \".send\"\n ) as send_span:\n if send_span.is_recording():\n if message[\"type\"] == \"http.response.start\":\n status_code = message[\"status\"]\n set_status_code(span, status_code)\n set_status_code(send_span, status_code)\n elif message[\"type\"] == \"websocket.send\":\n set_status_code(span, 200)\n set_status_code(send_span, 200)\n send_span.set_attribute(\"type\", message[\"type\"])\n await send(message)\n\n await self.app(scope, wrapped_receive, wrapped_send)\n finally:\n context.detach(token)\n", "path": "instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py"}]}
3,267
223
gh_patches_debug_2226
rasdani/github-patches
git_diff
pyinstaller__pyinstaller-5568
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Support matplotlib-3.4.0rc1 ## Description of the issue `matplotlib._get_data_path` no longer exists since 3.4.0rc1: https://github.com/matplotlib/matplotlib/commit/e1352c71f07aee7eab004b73dd9bda2a260ab31b. This is on schedule for the removal of the deprecations that occurred around the time of #5006. The missing function leads to a traceback output during build, and the whole `cwd` being crammed into `mpl-data`. Finally, `matplotlib` cannot be imported in the packaged app because it cannot find `mpl-data/matplotlibrc`. ## Context information (for bug reports) * Output of `pyinstaller --version`: ```4.2``` * Version of Python: `3.8` * Platform: `Windows` * Did you also try this on another platform? Does it work there? `Surely it is a cross platform bug` > * try the latest development version, using the following command: > > ```shell > pip install https://github.com/pyinstaller/pyinstaller/archive/develop.zip > ``` > > * follow *all* the instructions in our "If Things Go Wrong" Guide > (https://github.com/pyinstaller/pyinstaller/wiki/If-Things-Go-Wrong) and > > ### Make sure [everything is packaged correctly](https://github.com/pyinstaller/pyinstaller/wiki/How-to-Report-Bugs#make-sure-everything-is-packaged-correctly) > > * [ ] start with clean installation > * [ ] use the latest development version > * [ ] Run your frozen program **from a command window (shell)** — instead of double-clicking on it > * [ ] Package your program in **--onedir mode** > * [ ] Package **without UPX**, say: use the option `--noupx` or set `upx=False` in your .spec-file > * [ ] Repackage you application in **verbose/debug mode**. For this, pass the option `--debug` to `pyi-makespec` or `pyinstaller` or use `EXE(..., debug=1, ...)` in your .spec file. > This part of the template is irrelevant, as the responsible code is unchanged on `develop` ### A minimal example program which shows the error ``` pip install --pre matplotlib==3.4.0rc1 pyinstaller echo "import matplotlib" > test.py pyinstaller test.py ``` ### Stacktrace / full error message Building `pyinstaller test.py`: ``` 20391 INFO: Loading module hook 'hook-matplotlib.py' from 'XXXXXXX'... Traceback (most recent call last): File "<string>", line 1, in <module> AttributeError: module 'matplotlib' has no attribute '_get_data_path' ``` Running `test.exe`: ``` Traceback (most recent call last): File "test.py", line 1, in <module> import matplotlib File "<frozen importlib._bootstrap>", line 991, in _find_and_load File "<frozen importlib._bootstrap>", line 975, in _find_and_load_unlocked File "<frozen importlib._bootstrap>", line 671, in _load_unlocked File "PyInstaller\loader\pyimod03_importers.py", line 531, in exec_module File "matplotlib\__init__.py", line 820, in <module> File "matplotlib\__init__.py", line 725, in _rc_params_in_file File "contextlib.py", line 113, in __enter__ File "matplotlib\__init__.py", line 703, in _open_file_or_url FileNotFoundError: [Errno 2] No such file or directory: 'xxxxx\\matplotlib\\mpl-data\\matplotlibrc' [2688] Failed to execute script test ``` ## Possible resolution Simply remove the first underscore in `matplotlib._get_data_path` in https://github.com/pyinstaller/pyinstaller/blob/b9fcbbf86bc71addafc830debe289e7edb2a5697/PyInstaller/hooks/hook-matplotlib.py#L16 This works on my system. I'm a little confused as to why the private function was being used in the first place. 
`matplotlib.get_data_path` has been available for some time. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `PyInstaller/hooks/hook-matplotlib.py` Content: ``` 1 #----------------------------------------------------------------------------- 2 # Copyright (c) 2013-2021, PyInstaller Development Team. 3 # 4 # Distributed under the terms of the GNU General Public License (version 2 5 # or later) with exception for distributing the bootloader. 6 # 7 # The full license is in the file COPYING.txt, distributed with this software. 8 # 9 # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception) 10 #----------------------------------------------------------------------------- 11 12 13 from PyInstaller.utils.hooks import exec_statement 14 15 mpl_data_dir = exec_statement( 16 "import matplotlib; print(matplotlib._get_data_path())") 17 18 datas = [ 19 (mpl_data_dir, "matplotlib/mpl-data"), 20 ] 21 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/PyInstaller/hooks/hook-matplotlib.py b/PyInstaller/hooks/hook-matplotlib.py --- a/PyInstaller/hooks/hook-matplotlib.py +++ b/PyInstaller/hooks/hook-matplotlib.py @@ -13,7 +13,7 @@ from PyInstaller.utils.hooks import exec_statement mpl_data_dir = exec_statement( - "import matplotlib; print(matplotlib._get_data_path())") + "import matplotlib; print(matplotlib.get_data_path())") datas = [ (mpl_data_dir, "matplotlib/mpl-data"),
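For a quick sanity check of the patched hook, the statement it executes can be reproduced outside PyInstaller. The subprocess call below is only a stand-in for `exec_statement` (it runs the same one-liner in a fresh interpreter) and it assumes a matplotlib >= 3.4 installation is available.

```python
# Hedged sketch: run the same one-liner the patched hook passes to
# exec_statement, in a fresh interpreter, and print the resolved data path.
import subprocess
import sys

mpl_data_dir = subprocess.check_output(
    [sys.executable, "-c", "import matplotlib; print(matplotlib.get_data_path())"],
    text=True,
).strip()

# Expected to end in .../matplotlib/mpl-data and to contain matplotlibrc,
# the file the frozen application failed to locate.
print(mpl_data_dir)
```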
{"golden_diff": "diff --git a/PyInstaller/hooks/hook-matplotlib.py b/PyInstaller/hooks/hook-matplotlib.py\n--- a/PyInstaller/hooks/hook-matplotlib.py\n+++ b/PyInstaller/hooks/hook-matplotlib.py\n@@ -13,7 +13,7 @@\n from PyInstaller.utils.hooks import exec_statement\n \n mpl_data_dir = exec_statement(\n- \"import matplotlib; print(matplotlib._get_data_path())\")\n+ \"import matplotlib; print(matplotlib.get_data_path())\")\n \n datas = [\n (mpl_data_dir, \"matplotlib/mpl-data\"),\n", "issue": "Support matplotlib-3.4.0rc1\n## Description of the issue\r\n`matplotlib._get_data_path` no longer exists since 3.4.0rc1: https://github.com/matplotlib/matplotlib/commit/e1352c71f07aee7eab004b73dd9bda2a260ab31b.\r\nThis is on schedule for the removal of the deprecations that occurred around the time of #5006. The missing function leads to a traceback output during build, and the whole `cwd` being crammed into `mpl-data`. Finally, `matplotlib` cannot be imported in the packaged app because it cannot find `mpl-data/matplotlibrc`.\r\n## Context information (for bug reports)\r\n\r\n* Output of `pyinstaller --version`: ```4.2```\r\n* Version of Python: `3.8`\r\n* Platform: `Windows`\r\n* Did you also try this on another platform? Does it work there? `Surely it is a cross platform bug`\r\n\r\n\r\n> * try the latest development version, using the following command: \r\n> \r\n> ```shell\r\n> pip install https://github.com/pyinstaller/pyinstaller/archive/develop.zip\r\n> ```\r\n> \r\n> * follow *all* the instructions in our \"If Things Go Wrong\" Guide\r\n> (https://github.com/pyinstaller/pyinstaller/wiki/If-Things-Go-Wrong) and\r\n> \r\n> ### Make sure [everything is packaged correctly](https://github.com/pyinstaller/pyinstaller/wiki/How-to-Report-Bugs#make-sure-everything-is-packaged-correctly)\r\n> \r\n> * [ ] start with clean installation\r\n> * [ ] use the latest development version\r\n> * [ ] Run your frozen program **from a command window (shell)** \u2014 instead of double-clicking on it\r\n> * [ ] Package your program in **--onedir mode**\r\n> * [ ] Package **without UPX**, say: use the option `--noupx` or set `upx=False` in your .spec-file\r\n> * [ ] Repackage you application in **verbose/debug mode**. 
For this, pass the option `--debug` to `pyi-makespec` or `pyinstaller` or use `EXE(..., debug=1, ...)` in your .spec file.\r\n> \r\nThis part of the template is irrelevant, as the responsible code is unchanged on `develop`\r\n\r\n### A minimal example program which shows the error\r\n\r\n```\r\npip install --pre matplotlib==3.4.0rc1 pyinstaller\r\necho \"import matplotlib\" > test.py\r\npyinstaller test.py\r\n```\r\n\r\n### Stacktrace / full error message\r\nBuilding `pyinstaller test.py`:\r\n```\r\n20391 INFO: Loading module hook 'hook-matplotlib.py' from 'XXXXXXX'...\r\nTraceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\nAttributeError: module 'matplotlib' has no attribute '_get_data_path'\r\n\r\n```\r\nRunning `test.exe`:\r\n```\r\nTraceback (most recent call last):\r\n File \"test.py\", line 1, in <module>\r\n import matplotlib\r\n File \"<frozen importlib._bootstrap>\", line 991, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 975, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 671, in _load_unlocked\r\n File \"PyInstaller\\loader\\pyimod03_importers.py\", line 531, in exec_module\r\n File \"matplotlib\\__init__.py\", line 820, in <module>\r\n File \"matplotlib\\__init__.py\", line 725, in _rc_params_in_file\r\n File \"contextlib.py\", line 113, in __enter__\r\n File \"matplotlib\\__init__.py\", line 703, in _open_file_or_url\r\nFileNotFoundError: [Errno 2] No such file or directory: 'xxxxx\\\\matplotlib\\\\mpl-data\\\\matplotlibrc'\r\n[2688] Failed to execute script test\r\n\r\n```\r\n\r\n## Possible resolution\r\n\r\nSimply remove the first underscore in `matplotlib._get_data_path` in \r\n\r\nhttps://github.com/pyinstaller/pyinstaller/blob/b9fcbbf86bc71addafc830debe289e7edb2a5697/PyInstaller/hooks/hook-matplotlib.py#L16\r\n\r\nThis works on my system.\r\n\r\nI'm a little confused as to why the private function was being used in the first place. 
`matplotlib.get_data_path` has been available for some time.\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2013-2021, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\n\nfrom PyInstaller.utils.hooks import exec_statement\n\nmpl_data_dir = exec_statement(\n \"import matplotlib; print(matplotlib._get_data_path())\")\n\ndatas = [\n (mpl_data_dir, \"matplotlib/mpl-data\"),\n]\n", "path": "PyInstaller/hooks/hook-matplotlib.py"}], "after_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2013-2021, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\n\nfrom PyInstaller.utils.hooks import exec_statement\n\nmpl_data_dir = exec_statement(\n \"import matplotlib; print(matplotlib.get_data_path())\")\n\ndatas = [\n (mpl_data_dir, \"matplotlib/mpl-data\"),\n]\n", "path": "PyInstaller/hooks/hook-matplotlib.py"}]}
1,449
126
gh_patches_debug_22927
rasdani/github-patches
git_diff
beeware__toga-1193
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ImageView only works with square images I created a hello world app following this tutorial under Linux Mint 20: [https://docs.beeware.org/en/latest/tutorial/tutorial-0.html](https://docs.beeware.org/en/latest/tutorial/tutorial-0.html) My python version is 3.8.5 When trying to add images some work and some not. This code doesn't work: `image_from_url = toga.Image("https://dummyimage.com/100x67/000/fff") imageview_from_url = toga.ImageView(image=image_from_url, style=Pack(width=100, height=67))` And this is working fine: `image_from_url = toga.Image("https://dummyimage.com/100x100/000/fff") imageview_from_url = toga.ImageView(image=image_from_url, style=Pack(width=100, height=100))` The error I get is: > (__main__.py:67130): GdkPixbuf-CRITICAL **: 16:12:00.644: gdk_pixbuf_scale_simple: assertion 'dest_height > 0' failed > ... TypeError: Argument 0 does not allow None as a value --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/gtk/toga_gtk/widgets/imageview.py` Content: ``` 1 from ..libs import GdkPixbuf, Gtk, Gdk 2 from .base import Widget 3 4 5 class ImageView(Widget): 6 def create(self): 7 self.native = Gtk.Box(orientation=Gtk.Orientation.VERTICAL) 8 self._image = Gtk.Image() 9 self._pixbuf = None 10 self.native.add(self._image) 11 self.native.interface = self.interface 12 13 def set_image(self, image): 14 self._pixbuf = image._impl.native 15 16 def set_bounds(self, x, y, width, height): 17 super().set_bounds(x, y, width, height) 18 # rehint to update scaling of pixbuf 19 self.rehint() 20 21 def rehint(self): 22 if self._pixbuf: 23 height, width = self._resize_max( 24 original_height=self._pixbuf.get_height(), 25 original_width=self._pixbuf.get_width(), 26 max_height=self.native.get_allocated_height(), 27 max_width=self.native.get_allocated_width(), 28 ) 29 30 dpr = self.native.get_scale_factor() 31 32 scaled_pixbuf = self._pixbuf.scale_simple( 33 width * dpr, height * dpr, GdkPixbuf.InterpType.BILINEAR 34 ) 35 36 surface = Gdk.cairo_surface_create_from_pixbuf( 37 scaled_pixbuf, 0, self.native.get_window() # scale: 0 = same as window 38 ) 39 self._image.set_from_surface(surface) 40 41 @staticmethod 42 def _resize_max(original_height, original_width, max_height, max_width): 43 44 # Check to make sure all dimensions have valid sizes 45 if min(original_height, original_width, max_height, max_width) <= 0: 46 return 1, 1 47 48 width_ratio = max_width / original_width 49 height_ratio = max_height / original_height 50 51 height = original_height * width_ratio 52 if height <= max_height: 53 width = original_width * width_ratio 54 else: 55 height = original_height * height_ratio 56 width = original_width * height_ratio 57 58 return int(height), int(width) 59 ``` Path: `examples/imageview/imageview/app.py` Content: ``` 1 import toga 2 from toga.style.pack import CENTER, COLUMN 3 4 5 class ImageViewApp(toga.App): 6 def startup(self): 7 self.main_window = toga.MainWindow(title=self.name) 8 9 box = toga.Box() 10 box.style.padding = 40 11 box.style.update(alignment=CENTER) 12 box.style.update(direction=COLUMN) 13 14 # image from local path 15 # load brutus.png from the package 16 # We set the style width/height parameters for this one 17 image_from_path = toga.Image('resources/brutus.png') 18 imageview_from_path = toga.ImageView(image_from_path) 19 imageview_from_path.style.update(height=72) 
20 imageview_from_path.style.update(width=72) 21 box.add(imageview_from_path) 22 23 # image from remote URL 24 # no style parameters - we let Pack determine how to allocate 25 # the space 26 image_from_url = toga.Image('https://beeware.org/project/projects/libraries/toga/toga.png') 27 imageview_from_url = toga.ImageView(image_from_url) 28 box.add(imageview_from_url) 29 30 self.main_window.content = box 31 self.main_window.show() 32 33 34 def main(): 35 return ImageViewApp('ImageView', 'org.beeware.widgets.imageview') 36 37 38 if __name__ == '__main__': 39 app = main() 40 app.main_loop() 41 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/examples/imageview/imageview/app.py b/examples/imageview/imageview/app.py --- a/examples/imageview/imageview/app.py +++ b/examples/imageview/imageview/app.py @@ -14,10 +14,9 @@ # image from local path # load brutus.png from the package # We set the style width/height parameters for this one - image_from_path = toga.Image('resources/brutus.png') + image_from_path = toga.Image('resources/pride-brutus.png') imageview_from_path = toga.ImageView(image_from_path) imageview_from_path.style.update(height=72) - imageview_from_path.style.update(width=72) box.add(imageview_from_path) # image from remote URL diff --git a/src/gtk/toga_gtk/widgets/imageview.py b/src/gtk/toga_gtk/widgets/imageview.py --- a/src/gtk/toga_gtk/widgets/imageview.py +++ b/src/gtk/toga_gtk/widgets/imageview.py @@ -55,4 +55,8 @@ height = original_height * height_ratio width = original_width * height_ratio - return int(height), int(width) + # On the first display the allocated height/width will be 1x1. + # If the image isn't square, this will result in one of the dimensions + # scaling to 0, which breaks GTK. So; constraint the minimum height + # and width to 1. + return max(int(height), 1), max(int(width), 1)
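The clamping added above is easy to check in isolation. The sketch below copies `_resize_max` out of the widget as a plain function (renamed `resize_max` for the example, with no GTK dependency) and feeds it the 100x67 image from the issue report together with the 1x1 allocation GTK reports on first display, which previously produced a zero height.

```python
# Standalone copy of the patched _resize_max: both dimensions are clamped to
# at least 1, so a 1x1 first-pass allocation can no longer yield a 0 height.

def resize_max(original_height, original_width, max_height, max_width):
    # Check to make sure all dimensions have valid sizes
    if min(original_height, original_width, max_height, max_width) <= 0:
        return 1, 1

    width_ratio = max_width / original_width
    height_ratio = max_height / original_height

    height = original_height * width_ratio
    if height <= max_height:
        width = original_width * width_ratio
    else:
        height = original_height * height_ratio
        width = original_width * height_ratio

    return max(int(height), 1), max(int(width), 1)


# 100x67 image from the issue; GTK's first allocation is 1x1.
# Without the clamp this returned (0, 1) and triggered
# "gdk_pixbuf_scale_simple: assertion 'dest_height > 0' failed".
print(resize_max(original_height=67, original_width=100, max_height=1, max_width=1))  # (1, 1)
```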
{"golden_diff": "diff --git a/examples/imageview/imageview/app.py b/examples/imageview/imageview/app.py\n--- a/examples/imageview/imageview/app.py\n+++ b/examples/imageview/imageview/app.py\n@@ -14,10 +14,9 @@\n # image from local path\n # load brutus.png from the package\n # We set the style width/height parameters for this one\n- image_from_path = toga.Image('resources/brutus.png')\n+ image_from_path = toga.Image('resources/pride-brutus.png')\n imageview_from_path = toga.ImageView(image_from_path)\n imageview_from_path.style.update(height=72)\n- imageview_from_path.style.update(width=72)\n box.add(imageview_from_path)\n \n # image from remote URL\ndiff --git a/src/gtk/toga_gtk/widgets/imageview.py b/src/gtk/toga_gtk/widgets/imageview.py\n--- a/src/gtk/toga_gtk/widgets/imageview.py\n+++ b/src/gtk/toga_gtk/widgets/imageview.py\n@@ -55,4 +55,8 @@\n height = original_height * height_ratio\n width = original_width * height_ratio\n \n- return int(height), int(width)\n+ # On the first display the allocated height/width will be 1x1.\n+ # If the image isn't square, this will result in one of the dimensions\n+ # scaling to 0, which breaks GTK. So; constraint the minimum height\n+ # and width to 1.\n+ return max(int(height), 1), max(int(width), 1)\n", "issue": "ImageView only works with square images\nI created a hello world app following this tutorial under Linux Mint 20: [https://docs.beeware.org/en/latest/tutorial/tutorial-0.html](https://docs.beeware.org/en/latest/tutorial/tutorial-0.html)\r\nMy python version is 3.8.5\r\nWhen trying to add images some work and some not.\r\n\r\nThis code doesn't work:\r\n`image_from_url = toga.Image(\"https://dummyimage.com/100x67/000/fff\")\r\nimageview_from_url = toga.ImageView(image=image_from_url, style=Pack(width=100, height=67))`\r\n\r\nAnd this is working fine:\r\n`image_from_url = toga.Image(\"https://dummyimage.com/100x100/000/fff\")\r\nimageview_from_url = toga.ImageView(image=image_from_url, style=Pack(width=100, height=100))`\r\n\r\nThe error I get is:\r\n\r\n> (__main__.py:67130): GdkPixbuf-CRITICAL **: 16:12:00.644: gdk_pixbuf_scale_simple: assertion 'dest_height > 0' failed\r\n> ... 
TypeError: Argument 0 does not allow None as a value\r\n\n", "before_files": [{"content": "from ..libs import GdkPixbuf, Gtk, Gdk\nfrom .base import Widget\n\n\nclass ImageView(Widget):\n def create(self):\n self.native = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n self._image = Gtk.Image()\n self._pixbuf = None\n self.native.add(self._image)\n self.native.interface = self.interface\n\n def set_image(self, image):\n self._pixbuf = image._impl.native\n\n def set_bounds(self, x, y, width, height):\n super().set_bounds(x, y, width, height)\n # rehint to update scaling of pixbuf\n self.rehint()\n\n def rehint(self):\n if self._pixbuf:\n height, width = self._resize_max(\n original_height=self._pixbuf.get_height(),\n original_width=self._pixbuf.get_width(),\n max_height=self.native.get_allocated_height(),\n max_width=self.native.get_allocated_width(),\n )\n\n dpr = self.native.get_scale_factor()\n\n scaled_pixbuf = self._pixbuf.scale_simple(\n width * dpr, height * dpr, GdkPixbuf.InterpType.BILINEAR\n )\n\n surface = Gdk.cairo_surface_create_from_pixbuf(\n scaled_pixbuf, 0, self.native.get_window() # scale: 0 = same as window\n )\n self._image.set_from_surface(surface)\n\n @staticmethod\n def _resize_max(original_height, original_width, max_height, max_width):\n\n # Check to make sure all dimensions have valid sizes\n if min(original_height, original_width, max_height, max_width) <= 0:\n return 1, 1\n\n width_ratio = max_width / original_width\n height_ratio = max_height / original_height\n\n height = original_height * width_ratio\n if height <= max_height:\n width = original_width * width_ratio\n else:\n height = original_height * height_ratio\n width = original_width * height_ratio\n\n return int(height), int(width)\n", "path": "src/gtk/toga_gtk/widgets/imageview.py"}, {"content": "import toga\nfrom toga.style.pack import CENTER, COLUMN\n\n\nclass ImageViewApp(toga.App):\n def startup(self):\n self.main_window = toga.MainWindow(title=self.name)\n\n box = toga.Box()\n box.style.padding = 40\n box.style.update(alignment=CENTER)\n box.style.update(direction=COLUMN)\n\n # image from local path\n # load brutus.png from the package\n # We set the style width/height parameters for this one\n image_from_path = toga.Image('resources/brutus.png')\n imageview_from_path = toga.ImageView(image_from_path)\n imageview_from_path.style.update(height=72)\n imageview_from_path.style.update(width=72)\n box.add(imageview_from_path)\n\n # image from remote URL\n # no style parameters - we let Pack determine how to allocate\n # the space\n image_from_url = toga.Image('https://beeware.org/project/projects/libraries/toga/toga.png')\n imageview_from_url = toga.ImageView(image_from_url)\n box.add(imageview_from_url)\n\n self.main_window.content = box\n self.main_window.show()\n\n\ndef main():\n return ImageViewApp('ImageView', 'org.beeware.widgets.imageview')\n\n\nif __name__ == '__main__':\n app = main()\n app.main_loop()\n", "path": "examples/imageview/imageview/app.py"}], "after_files": [{"content": "from ..libs import GdkPixbuf, Gtk, Gdk\nfrom .base import Widget\n\n\nclass ImageView(Widget):\n def create(self):\n self.native = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n self._image = Gtk.Image()\n self._pixbuf = None\n self.native.add(self._image)\n self.native.interface = self.interface\n\n def set_image(self, image):\n self._pixbuf = image._impl.native\n\n def set_bounds(self, x, y, width, height):\n super().set_bounds(x, y, width, height)\n # rehint to update scaling of pixbuf\n self.rehint()\n\n def 
rehint(self):\n if self._pixbuf:\n height, width = self._resize_max(\n original_height=self._pixbuf.get_height(),\n original_width=self._pixbuf.get_width(),\n max_height=self.native.get_allocated_height(),\n max_width=self.native.get_allocated_width(),\n )\n\n dpr = self.native.get_scale_factor()\n\n scaled_pixbuf = self._pixbuf.scale_simple(\n width * dpr, height * dpr, GdkPixbuf.InterpType.BILINEAR\n )\n\n surface = Gdk.cairo_surface_create_from_pixbuf(\n scaled_pixbuf, 0, self.native.get_window() # scale: 0 = same as window\n )\n self._image.set_from_surface(surface)\n\n @staticmethod\n def _resize_max(original_height, original_width, max_height, max_width):\n\n # Check to make sure all dimensions have valid sizes\n if min(original_height, original_width, max_height, max_width) <= 0:\n return 1, 1\n\n width_ratio = max_width / original_width\n height_ratio = max_height / original_height\n\n height = original_height * width_ratio\n if height <= max_height:\n width = original_width * width_ratio\n else:\n height = original_height * height_ratio\n width = original_width * height_ratio\n\n # On the first display the allocated height/width will be 1x1.\n # If the image isn't square, this will result in one of the dimensions\n # scaling to 0, which breaks GTK. So; constraint the minimum height\n # and width to 1.\n return max(int(height), 1), max(int(width), 1)\n", "path": "src/gtk/toga_gtk/widgets/imageview.py"}, {"content": "import toga\nfrom toga.style.pack import CENTER, COLUMN\n\n\nclass ImageViewApp(toga.App):\n def startup(self):\n self.main_window = toga.MainWindow(title=self.name)\n\n box = toga.Box()\n box.style.padding = 40\n box.style.update(alignment=CENTER)\n box.style.update(direction=COLUMN)\n\n # image from local path\n # load brutus.png from the package\n # We set the style width/height parameters for this one\n image_from_path = toga.Image('resources/pride-brutus.png')\n imageview_from_path = toga.ImageView(image_from_path)\n imageview_from_path.style.update(height=72)\n box.add(imageview_from_path)\n\n # image from remote URL\n # no style parameters - we let Pack determine how to allocate\n # the space\n image_from_url = toga.Image('https://beeware.org/project/projects/libraries/toga/toga.png')\n imageview_from_url = toga.ImageView(image_from_url)\n box.add(imageview_from_url)\n\n self.main_window.content = box\n self.main_window.show()\n\n\ndef main():\n return ImageViewApp('ImageView', 'org.beeware.widgets.imageview')\n\n\nif __name__ == '__main__':\n app = main()\n app.main_loop()\n", "path": "examples/imageview/imageview/app.py"}]}
1,472
351
gh_patches_debug_32054
rasdani/github-patches
git_diff
mathesar-foundation__mathesar-2186
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Don't allow public schema to be edited We currently allow editing the public schema's name and description from the schema page. We should disable this. <img width="968" alt="Screen Shot 2022-11-18 at 12 03 58 PM" src="https://user-images.githubusercontent.com/287034/202761131-265da99a-6f76-4f02-b680-7a98851427b3.png"> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mathesar/api/exceptions/validation_exceptions/exceptions.py` Content: ``` 1 from mathesar.api.exceptions.error_codes import ErrorCodes 2 from mathesar.api.exceptions.validation_exceptions.base_exceptions import MathesarValidationException 3 4 5 class DistinctColumnRequiredAPIException(MathesarValidationException): 6 error_code = ErrorCodes.DistinctColumnNameRequired.value 7 8 def __init__( 9 self, 10 message="Column names must be distinct", 11 field=None, 12 details=None, 13 ): 14 super().__init__(None, self.error_code, message, field, details) 15 16 17 class ColumnSizeMismatchAPIException(MathesarValidationException): 18 error_code = ErrorCodes.ColumnSizeMismatch.value 19 20 def __init__( 21 self, 22 message="Incorrect number of columns in request.", 23 field=None, 24 details=None, 25 ): 26 super().__init__(None, self.error_code, message, field, details) 27 28 29 class InvalidLinkChoiceAPIException(MathesarValidationException): 30 error_code = ErrorCodes.InvalidLinkChoice.value 31 32 def __init__( 33 self, 34 message="Invalid Link type", 35 field=None, 36 details=None, 37 ): 38 super().__init__(None, self.error_code, message, field, details) 39 40 41 class MultipleDataFileAPIException(MathesarValidationException): 42 error_code = ErrorCodes.MultipleDataFiles.value 43 44 def __init__( 45 self, 46 message="Multiple data files are unsupported.", 47 field=None, 48 details=None, 49 ): 50 super().__init__(None, self.error_code, message, field, details) 51 52 53 class UnknownDatabaseTypeIdentifier(MathesarValidationException): 54 error_code = ErrorCodes.UnknownDBType.value 55 56 def __init__( 57 self, 58 db_type_id, 59 field=None, 60 details=None, 61 ): 62 message = f"Unknown database type identifier {db_type_id}." 
63 super().__init__(None, self.error_code, message, field, details) 64 65 66 class MoneyDisplayOptionValueConflictAPIException(MathesarValidationException): 67 error_code = ErrorCodes.MoneyDisplayOptionConflict.value 68 69 def __init__( 70 self, 71 message="Money type cannot specify a currency code display option as well as other display options.", 72 field=None, 73 details=None, 74 ): 75 super().__init__(None, self.error_code, message, field, details) 76 77 78 class IncompatibleFractionDigitValuesAPIException(MathesarValidationException): 79 error_code = ErrorCodes.IncompatibleFractionDigitValues.value 80 81 def __init__( 82 self, 83 message="maximum_fraction_digits cannot be less than minimum_fraction_digits.", 84 field=None, 85 details=None, 86 ): 87 super().__init__(None, self.error_code, message, field, details) 88 89 90 class UnsupportedConstraintAPIException(MathesarValidationException): 91 error_code = ErrorCodes.UnsupportedConstraint.value 92 93 def __init__( 94 self, 95 constraint_type, 96 field=None, 97 ): 98 message = f"Operations related to {constraint_type} constraint are currently not supported" 99 super().__init__(None, self.error_code, message, field, None) 100 101 102 class ConstraintColumnEmptyAPIException(MathesarValidationException): 103 error_code = ErrorCodes.ConstraintColumnEmpty.value 104 105 def __init__( 106 self, 107 field=None, 108 ): 109 message = "Constraint column field cannot be empty" 110 super().__init__(None, self.error_code, message, field, None) 111 112 113 class InvalidValueType(MathesarValidationException): 114 error_code = ErrorCodes.InvalidValueType.value 115 116 def __init__( 117 self, 118 message=None, 119 field=None, 120 ): 121 if message is None: 122 message = "Value's type is invalid." 123 super().__init__(None, self.error_code, message, field, None) 124 125 126 class DictHasBadKeys(MathesarValidationException): 127 error_code = ErrorCodes.DictHasBadKeys.value 128 129 def __init__( 130 self, 131 message=None, 132 field=None, 133 ): 134 if message is None: 135 message = "Dictionary's keys are invalid or obligatory keys are missing." 136 super().__init__(None, self.error_code, message, field, None) 137 138 139 class InvalidTableName(MathesarValidationException): 140 error_code = ErrorCodes.InvalidTableName.value 141 142 def __init__( 143 self, 144 table_name, 145 message=None, 146 field=None, 147 ): 148 message = f'Table name "{table_name}" is invalid.' 
149 super().__init__(None, self.error_code, message, field, None) 150 151 152 class IncorrectOldPassword(MathesarValidationException): 153 error_code = ErrorCodes.IncorrectOldPassword.value 154 155 def __init__( 156 self, 157 158 field=None, 159 ): 160 message = "Old password is not correct" 161 super().__init__(None, self.error_code, message, field, None) 162 ``` Path: `mathesar/api/exceptions/error_codes.py` Content: ``` 1 from enum import Enum, unique 2 3 4 @unique 5 class ErrorCodes(Enum): 6 # Matches with default code of drf-friendly-errors library 7 # API Error 8 MethodNotAllowed = 4006 9 NotFound = 4005 10 TableNotFound = 4041 11 RecordNotFound = 4042 12 UnknownError = 4999 13 # Generic Errors 14 ProgrammingError = 4101 15 TypeError = 4102 16 ValueError = 4103 17 18 # DB Error Codes 19 DuplicateTableError = 4205 20 DuplicateColumnError = 4206 21 ExclusionViolation = 4213 22 ForeignKeyViolation = 4212 23 InvalidTypeCast = 4203 24 InvalidTypeOption = 4210 25 InvalidDefault = 4211 26 NonClassifiedIntegrityError = 4201 27 NotNullViolation = 4204 28 RaiseException = 4202 29 TypeMismatchViolation = 4214 30 UndefinedFunction = 4207 31 UniqueViolation = 4208 32 UnsupportedType = 4209 33 34 # Data Imports error code 35 InvalidTableError = 4301 36 UniqueImportViolation = 4303 37 38 # Validation Error 39 ColumnSizeMismatch = 4401 40 DistinctColumnNameRequired = 4402 41 MappingsNotFound = 4417 42 MultipleDataFiles = 4400 43 MoneyDisplayOptionConflict = 4407 44 UnsupportedAlter = 4403 45 URLDownloadError = 4404 46 URLNotReachableError = 4405 47 URLInvalidContentType = 4406 48 UnknownDBType = 4408 49 InvalidDateError = 4413 50 InvalidDateFormatError = 4414 51 InvalidLinkChoice = 4409 52 InvalidTableName = 4420 53 IncompatibleFractionDigitValues = 4410 54 UnsupportedConstraint = 4411 55 ConstraintColumnEmpty = 4412 56 InvalidValueType = 4415 57 DictHasBadKeys = 4416 58 DeletedColumnAccess = 4418 59 IncorrectOldPassword = 4419 60 ``` Path: `mathesar/api/db/viewsets/schemas.py` Content: ``` 1 from django_filters import rest_framework as filters 2 from rest_access_policy import AccessViewSetMixin 3 from rest_framework import status, viewsets 4 from rest_framework.decorators import action 5 from rest_framework.mixins import ListModelMixin, RetrieveModelMixin 6 from rest_framework.response import Response 7 8 from mathesar.api.db.permissions.schema import SchemaAccessPolicy 9 from mathesar.api.dj_filters import SchemaFilter 10 from mathesar.api.pagination import DefaultLimitOffsetPagination 11 from mathesar.api.serializers.dependents import DependentSerializer, DependentFilterSerializer 12 from mathesar.api.serializers.schemas import SchemaSerializer 13 from mathesar.models.base import Schema 14 from mathesar.utils.schemas import create_schema_and_object 15 16 17 class SchemaViewSet(AccessViewSetMixin, viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin): 18 serializer_class = SchemaSerializer 19 pagination_class = DefaultLimitOffsetPagination 20 filter_backends = (filters.DjangoFilterBackend,) 21 filterset_class = SchemaFilter 22 access_policy = SchemaAccessPolicy 23 24 def get_queryset(self): 25 qs = Schema.objects.all().order_by('-created_at') 26 return self.access_policy.scope_viewset_queryset(self.request, qs) 27 28 def create(self, request): 29 serializer = SchemaSerializer(data=request.data, context={'request': request}) 30 serializer.is_valid(raise_exception=True) 31 database_name = serializer.validated_data['database'].name 32 schema = create_schema_and_object( 33 
serializer.validated_data['name'], 34 database_name, 35 comment=serializer.validated_data.get('description') 36 ) 37 serializer = SchemaSerializer(schema) 38 return Response(serializer.data, status=status.HTTP_201_CREATED) 39 40 def partial_update(self, request, pk=None): 41 serializer = SchemaSerializer( 42 data=request.data, context={'request': request}, partial=True 43 ) 44 serializer.is_valid(raise_exception=True) 45 46 schema = self.get_object() 47 schema.update_sa_schema(serializer.validated_data) 48 49 # Reload the schema to avoid cached properties 50 schema = self.get_object() 51 schema.clear_name_cache() 52 serializer = SchemaSerializer(schema, context={'request': request}) 53 return Response(serializer.data) 54 55 def destroy(self, request, pk=None): 56 schema = self.get_object() 57 schema.delete_sa_schema() 58 return Response(status=status.HTTP_204_NO_CONTENT) 59 60 @action(methods=['get'], detail=True) 61 def dependents(self, request, pk=None): 62 serializer = DependentFilterSerializer(data=request.GET) 63 serializer.is_valid(raise_exception=True) 64 types_exclude = serializer.validated_data['exclude'] 65 66 schema = self.get_object() 67 serializer = DependentSerializer(schema.get_dependents(types_exclude), many=True, context={'request': request}) 68 return Response(serializer.data) 69 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mathesar/api/db/viewsets/schemas.py b/mathesar/api/db/viewsets/schemas.py --- a/mathesar/api/db/viewsets/schemas.py +++ b/mathesar/api/db/viewsets/schemas.py @@ -12,6 +12,7 @@ from mathesar.api.serializers.schemas import SchemaSerializer from mathesar.models.base import Schema from mathesar.utils.schemas import create_schema_and_object +from mathesar.api.exceptions.validation_exceptions.exceptions import EditingPublicSchemaIsDisallowed class SchemaViewSet(AccessViewSetMixin, viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin): @@ -44,6 +45,11 @@ serializer.is_valid(raise_exception=True) schema = self.get_object() + + # We forbid editing the public schema + if schema.name == "public": + raise EditingPublicSchemaIsDisallowed() + schema.update_sa_schema(serializer.validated_data) # Reload the schema to avoid cached properties diff --git a/mathesar/api/exceptions/error_codes.py b/mathesar/api/exceptions/error_codes.py --- a/mathesar/api/exceptions/error_codes.py +++ b/mathesar/api/exceptions/error_codes.py @@ -57,3 +57,4 @@ DictHasBadKeys = 4416 DeletedColumnAccess = 4418 IncorrectOldPassword = 4419 + EditingPublicSchema = 4421 diff --git a/mathesar/api/exceptions/validation_exceptions/exceptions.py b/mathesar/api/exceptions/validation_exceptions/exceptions.py --- a/mathesar/api/exceptions/validation_exceptions/exceptions.py +++ b/mathesar/api/exceptions/validation_exceptions/exceptions.py @@ -154,8 +154,18 @@ def __init__( self, - field=None, ): - message = "Old password is not correct" + message = "Old password is not correct." super().__init__(None, self.error_code, message, field, None) + + +class EditingPublicSchemaIsDisallowed(MathesarValidationException): + error_code = ErrorCodes.EditingPublicSchema.value + + def __init__( + self, + message="Editing the public schema is disallowed.", + field=None, + ): + super().__init__(None, self.error_code, message, field)
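To make the new guard concrete without pulling in Django REST Framework, the sketch below isolates the check the patch adds to `partial_update`. The `update_schema` helper and the locally defined exception are stand-ins for the example; in the real code the exception lives in `mathesar.api.exceptions.validation_exceptions.exceptions` and the check runs against the `Schema` model instance's name.

```python
# Hedged sketch of the added guard: a schema named "public" cannot be edited.

class EditingPublicSchemaIsDisallowed(Exception):
    """Stand-in for the new Mathesar validation exception."""


def update_schema(schema_name, validated_data, apply_update):
    # Mirrors the check added to SchemaViewSet.partial_update.
    if schema_name == "public":
        raise EditingPublicSchemaIsDisallowed("Editing the public schema is disallowed.")
    apply_update(validated_data)


applied = []
update_schema("my_schema", {"name": "renamed"}, applied.append)
assert applied == [{"name": "renamed"}]

try:
    update_schema("public", {"name": "renamed"}, applied.append)
except EditingPublicSchemaIsDisallowed as exc:
    print(exc)  # Editing the public schema is disallowed.
```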
{"golden_diff": "diff --git a/mathesar/api/db/viewsets/schemas.py b/mathesar/api/db/viewsets/schemas.py\n--- a/mathesar/api/db/viewsets/schemas.py\n+++ b/mathesar/api/db/viewsets/schemas.py\n@@ -12,6 +12,7 @@\n from mathesar.api.serializers.schemas import SchemaSerializer\n from mathesar.models.base import Schema\n from mathesar.utils.schemas import create_schema_and_object\n+from mathesar.api.exceptions.validation_exceptions.exceptions import EditingPublicSchemaIsDisallowed\n \n \n class SchemaViewSet(AccessViewSetMixin, viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin):\n@@ -44,6 +45,11 @@\n serializer.is_valid(raise_exception=True)\n \n schema = self.get_object()\n+\n+ # We forbid editing the public schema\n+ if schema.name == \"public\":\n+ raise EditingPublicSchemaIsDisallowed()\n+\n schema.update_sa_schema(serializer.validated_data)\n \n # Reload the schema to avoid cached properties\ndiff --git a/mathesar/api/exceptions/error_codes.py b/mathesar/api/exceptions/error_codes.py\n--- a/mathesar/api/exceptions/error_codes.py\n+++ b/mathesar/api/exceptions/error_codes.py\n@@ -57,3 +57,4 @@\n DictHasBadKeys = 4416\n DeletedColumnAccess = 4418\n IncorrectOldPassword = 4419\n+ EditingPublicSchema = 4421\ndiff --git a/mathesar/api/exceptions/validation_exceptions/exceptions.py b/mathesar/api/exceptions/validation_exceptions/exceptions.py\n--- a/mathesar/api/exceptions/validation_exceptions/exceptions.py\n+++ b/mathesar/api/exceptions/validation_exceptions/exceptions.py\n@@ -154,8 +154,18 @@\n \n def __init__(\n self,\n-\n field=None,\n ):\n- message = \"Old password is not correct\"\n+ message = \"Old password is not correct.\"\n super().__init__(None, self.error_code, message, field, None)\n+\n+\n+class EditingPublicSchemaIsDisallowed(MathesarValidationException):\n+ error_code = ErrorCodes.EditingPublicSchema.value\n+\n+ def __init__(\n+ self,\n+ message=\"Editing the public schema is disallowed.\",\n+ field=None,\n+ ):\n+ super().__init__(None, self.error_code, message, field)\n", "issue": "Don't allow public schema to be edited\nWe currently allow editing the public schema's name and description from the schema page. 
We should disable this.\r\n\r\n\r\n<img width=\"968\" alt=\"Screen Shot 2022-11-18 at 12 03 58 PM\" src=\"https://user-images.githubusercontent.com/287034/202761131-265da99a-6f76-4f02-b680-7a98851427b3.png\">\r\n\n", "before_files": [{"content": "from mathesar.api.exceptions.error_codes import ErrorCodes\nfrom mathesar.api.exceptions.validation_exceptions.base_exceptions import MathesarValidationException\n\n\nclass DistinctColumnRequiredAPIException(MathesarValidationException):\n error_code = ErrorCodes.DistinctColumnNameRequired.value\n\n def __init__(\n self,\n message=\"Column names must be distinct\",\n field=None,\n details=None,\n ):\n super().__init__(None, self.error_code, message, field, details)\n\n\nclass ColumnSizeMismatchAPIException(MathesarValidationException):\n error_code = ErrorCodes.ColumnSizeMismatch.value\n\n def __init__(\n self,\n message=\"Incorrect number of columns in request.\",\n field=None,\n details=None,\n ):\n super().__init__(None, self.error_code, message, field, details)\n\n\nclass InvalidLinkChoiceAPIException(MathesarValidationException):\n error_code = ErrorCodes.InvalidLinkChoice.value\n\n def __init__(\n self,\n message=\"Invalid Link type\",\n field=None,\n details=None,\n ):\n super().__init__(None, self.error_code, message, field, details)\n\n\nclass MultipleDataFileAPIException(MathesarValidationException):\n error_code = ErrorCodes.MultipleDataFiles.value\n\n def __init__(\n self,\n message=\"Multiple data files are unsupported.\",\n field=None,\n details=None,\n ):\n super().__init__(None, self.error_code, message, field, details)\n\n\nclass UnknownDatabaseTypeIdentifier(MathesarValidationException):\n error_code = ErrorCodes.UnknownDBType.value\n\n def __init__(\n self,\n db_type_id,\n field=None,\n details=None,\n ):\n message = f\"Unknown database type identifier {db_type_id}.\"\n super().__init__(None, self.error_code, message, field, details)\n\n\nclass MoneyDisplayOptionValueConflictAPIException(MathesarValidationException):\n error_code = ErrorCodes.MoneyDisplayOptionConflict.value\n\n def __init__(\n self,\n message=\"Money type cannot specify a currency code display option as well as other display options.\",\n field=None,\n details=None,\n ):\n super().__init__(None, self.error_code, message, field, details)\n\n\nclass IncompatibleFractionDigitValuesAPIException(MathesarValidationException):\n error_code = ErrorCodes.IncompatibleFractionDigitValues.value\n\n def __init__(\n self,\n message=\"maximum_fraction_digits cannot be less than minimum_fraction_digits.\",\n field=None,\n details=None,\n ):\n super().__init__(None, self.error_code, message, field, details)\n\n\nclass UnsupportedConstraintAPIException(MathesarValidationException):\n error_code = ErrorCodes.UnsupportedConstraint.value\n\n def __init__(\n self,\n constraint_type,\n field=None,\n ):\n message = f\"Operations related to {constraint_type} constraint are currently not supported\"\n super().__init__(None, self.error_code, message, field, None)\n\n\nclass ConstraintColumnEmptyAPIException(MathesarValidationException):\n error_code = ErrorCodes.ConstraintColumnEmpty.value\n\n def __init__(\n self,\n field=None,\n ):\n message = \"Constraint column field cannot be empty\"\n super().__init__(None, self.error_code, message, field, None)\n\n\nclass InvalidValueType(MathesarValidationException):\n error_code = ErrorCodes.InvalidValueType.value\n\n def __init__(\n self,\n message=None,\n field=None,\n ):\n if message is None:\n message = \"Value's type is invalid.\"\n 
super().__init__(None, self.error_code, message, field, None)\n\n\nclass DictHasBadKeys(MathesarValidationException):\n error_code = ErrorCodes.DictHasBadKeys.value\n\n def __init__(\n self,\n message=None,\n field=None,\n ):\n if message is None:\n message = \"Dictionary's keys are invalid or obligatory keys are missing.\"\n super().__init__(None, self.error_code, message, field, None)\n\n\nclass InvalidTableName(MathesarValidationException):\n error_code = ErrorCodes.InvalidTableName.value\n\n def __init__(\n self,\n table_name,\n message=None,\n field=None,\n ):\n message = f'Table name \"{table_name}\" is invalid.'\n super().__init__(None, self.error_code, message, field, None)\n\n\nclass IncorrectOldPassword(MathesarValidationException):\n error_code = ErrorCodes.IncorrectOldPassword.value\n\n def __init__(\n self,\n\n field=None,\n ):\n message = \"Old password is not correct\"\n super().__init__(None, self.error_code, message, field, None)\n", "path": "mathesar/api/exceptions/validation_exceptions/exceptions.py"}, {"content": "from enum import Enum, unique\n\n\n@unique\nclass ErrorCodes(Enum):\n # Matches with default code of drf-friendly-errors library\n # API Error\n MethodNotAllowed = 4006\n NotFound = 4005\n TableNotFound = 4041\n RecordNotFound = 4042\n UnknownError = 4999\n # Generic Errors\n ProgrammingError = 4101\n TypeError = 4102\n ValueError = 4103\n\n # DB Error Codes\n DuplicateTableError = 4205\n DuplicateColumnError = 4206\n ExclusionViolation = 4213\n ForeignKeyViolation = 4212\n InvalidTypeCast = 4203\n InvalidTypeOption = 4210\n InvalidDefault = 4211\n NonClassifiedIntegrityError = 4201\n NotNullViolation = 4204\n RaiseException = 4202\n TypeMismatchViolation = 4214\n UndefinedFunction = 4207\n UniqueViolation = 4208\n UnsupportedType = 4209\n\n # Data Imports error code\n InvalidTableError = 4301\n UniqueImportViolation = 4303\n\n # Validation Error\n ColumnSizeMismatch = 4401\n DistinctColumnNameRequired = 4402\n MappingsNotFound = 4417\n MultipleDataFiles = 4400\n MoneyDisplayOptionConflict = 4407\n UnsupportedAlter = 4403\n URLDownloadError = 4404\n URLNotReachableError = 4405\n URLInvalidContentType = 4406\n UnknownDBType = 4408\n InvalidDateError = 4413\n InvalidDateFormatError = 4414\n InvalidLinkChoice = 4409\n InvalidTableName = 4420\n IncompatibleFractionDigitValues = 4410\n UnsupportedConstraint = 4411\n ConstraintColumnEmpty = 4412\n InvalidValueType = 4415\n DictHasBadKeys = 4416\n DeletedColumnAccess = 4418\n IncorrectOldPassword = 4419\n", "path": "mathesar/api/exceptions/error_codes.py"}, {"content": "from django_filters import rest_framework as filters\nfrom rest_access_policy import AccessViewSetMixin\nfrom rest_framework import status, viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.mixins import ListModelMixin, RetrieveModelMixin\nfrom rest_framework.response import Response\n\nfrom mathesar.api.db.permissions.schema import SchemaAccessPolicy\nfrom mathesar.api.dj_filters import SchemaFilter\nfrom mathesar.api.pagination import DefaultLimitOffsetPagination\nfrom mathesar.api.serializers.dependents import DependentSerializer, DependentFilterSerializer\nfrom mathesar.api.serializers.schemas import SchemaSerializer\nfrom mathesar.models.base import Schema\nfrom mathesar.utils.schemas import create_schema_and_object\n\n\nclass SchemaViewSet(AccessViewSetMixin, viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin):\n serializer_class = SchemaSerializer\n pagination_class = DefaultLimitOffsetPagination\n 
filter_backends = (filters.DjangoFilterBackend,)\n filterset_class = SchemaFilter\n access_policy = SchemaAccessPolicy\n\n def get_queryset(self):\n qs = Schema.objects.all().order_by('-created_at')\n return self.access_policy.scope_viewset_queryset(self.request, qs)\n\n def create(self, request):\n serializer = SchemaSerializer(data=request.data, context={'request': request})\n serializer.is_valid(raise_exception=True)\n database_name = serializer.validated_data['database'].name\n schema = create_schema_and_object(\n serializer.validated_data['name'],\n database_name,\n comment=serializer.validated_data.get('description')\n )\n serializer = SchemaSerializer(schema)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n def partial_update(self, request, pk=None):\n serializer = SchemaSerializer(\n data=request.data, context={'request': request}, partial=True\n )\n serializer.is_valid(raise_exception=True)\n\n schema = self.get_object()\n schema.update_sa_schema(serializer.validated_data)\n\n # Reload the schema to avoid cached properties\n schema = self.get_object()\n schema.clear_name_cache()\n serializer = SchemaSerializer(schema, context={'request': request})\n return Response(serializer.data)\n\n def destroy(self, request, pk=None):\n schema = self.get_object()\n schema.delete_sa_schema()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n @action(methods=['get'], detail=True)\n def dependents(self, request, pk=None):\n serializer = DependentFilterSerializer(data=request.GET)\n serializer.is_valid(raise_exception=True)\n types_exclude = serializer.validated_data['exclude']\n\n schema = self.get_object()\n serializer = DependentSerializer(schema.get_dependents(types_exclude), many=True, context={'request': request})\n return Response(serializer.data)\n", "path": "mathesar/api/db/viewsets/schemas.py"}], "after_files": [{"content": "from mathesar.api.exceptions.error_codes import ErrorCodes\nfrom mathesar.api.exceptions.validation_exceptions.base_exceptions import MathesarValidationException\n\n\nclass DistinctColumnRequiredAPIException(MathesarValidationException):\n error_code = ErrorCodes.DistinctColumnNameRequired.value\n\n def __init__(\n self,\n message=\"Column names must be distinct\",\n field=None,\n details=None,\n ):\n super().__init__(None, self.error_code, message, field, details)\n\n\nclass ColumnSizeMismatchAPIException(MathesarValidationException):\n error_code = ErrorCodes.ColumnSizeMismatch.value\n\n def __init__(\n self,\n message=\"Incorrect number of columns in request.\",\n field=None,\n details=None,\n ):\n super().__init__(None, self.error_code, message, field, details)\n\n\nclass InvalidLinkChoiceAPIException(MathesarValidationException):\n error_code = ErrorCodes.InvalidLinkChoice.value\n\n def __init__(\n self,\n message=\"Invalid Link type\",\n field=None,\n details=None,\n ):\n super().__init__(None, self.error_code, message, field, details)\n\n\nclass MultipleDataFileAPIException(MathesarValidationException):\n error_code = ErrorCodes.MultipleDataFiles.value\n\n def __init__(\n self,\n message=\"Multiple data files are unsupported.\",\n field=None,\n details=None,\n ):\n super().__init__(None, self.error_code, message, field, details)\n\n\nclass UnknownDatabaseTypeIdentifier(MathesarValidationException):\n error_code = ErrorCodes.UnknownDBType.value\n\n def __init__(\n self,\n db_type_id,\n field=None,\n details=None,\n ):\n message = f\"Unknown database type identifier {db_type_id}.\"\n super().__init__(None, self.error_code, message, 
field, details)\n\n\nclass MoneyDisplayOptionValueConflictAPIException(MathesarValidationException):\n error_code = ErrorCodes.MoneyDisplayOptionConflict.value\n\n def __init__(\n self,\n message=\"Money type cannot specify a currency code display option as well as other display options.\",\n field=None,\n details=None,\n ):\n super().__init__(None, self.error_code, message, field, details)\n\n\nclass IncompatibleFractionDigitValuesAPIException(MathesarValidationException):\n error_code = ErrorCodes.IncompatibleFractionDigitValues.value\n\n def __init__(\n self,\n message=\"maximum_fraction_digits cannot be less than minimum_fraction_digits.\",\n field=None,\n details=None,\n ):\n super().__init__(None, self.error_code, message, field, details)\n\n\nclass UnsupportedConstraintAPIException(MathesarValidationException):\n error_code = ErrorCodes.UnsupportedConstraint.value\n\n def __init__(\n self,\n constraint_type,\n field=None,\n ):\n message = f\"Operations related to {constraint_type} constraint are currently not supported\"\n super().__init__(None, self.error_code, message, field, None)\n\n\nclass ConstraintColumnEmptyAPIException(MathesarValidationException):\n error_code = ErrorCodes.ConstraintColumnEmpty.value\n\n def __init__(\n self,\n field=None,\n ):\n message = \"Constraint column field cannot be empty\"\n super().__init__(None, self.error_code, message, field, None)\n\n\nclass InvalidValueType(MathesarValidationException):\n error_code = ErrorCodes.InvalidValueType.value\n\n def __init__(\n self,\n message=None,\n field=None,\n ):\n if message is None:\n message = \"Value's type is invalid.\"\n super().__init__(None, self.error_code, message, field, None)\n\n\nclass DictHasBadKeys(MathesarValidationException):\n error_code = ErrorCodes.DictHasBadKeys.value\n\n def __init__(\n self,\n message=None,\n field=None,\n ):\n if message is None:\n message = \"Dictionary's keys are invalid or obligatory keys are missing.\"\n super().__init__(None, self.error_code, message, field, None)\n\n\nclass InvalidTableName(MathesarValidationException):\n error_code = ErrorCodes.InvalidTableName.value\n\n def __init__(\n self,\n table_name,\n message=None,\n field=None,\n ):\n message = f'Table name \"{table_name}\" is invalid.'\n super().__init__(None, self.error_code, message, field, None)\n\n\nclass IncorrectOldPassword(MathesarValidationException):\n error_code = ErrorCodes.IncorrectOldPassword.value\n\n def __init__(\n self,\n field=None,\n ):\n message = \"Old password is not correct.\"\n super().__init__(None, self.error_code, message, field, None)\n\n\nclass EditingPublicSchemaIsDisallowed(MathesarValidationException):\n error_code = ErrorCodes.EditingPublicSchema.value\n\n def __init__(\n self,\n message=\"Editing the public schema is disallowed.\",\n field=None,\n ):\n super().__init__(None, self.error_code, message, field)\n", "path": "mathesar/api/exceptions/validation_exceptions/exceptions.py"}, {"content": "from enum import Enum, unique\n\n\n@unique\nclass ErrorCodes(Enum):\n # Matches with default code of drf-friendly-errors library\n # API Error\n MethodNotAllowed = 4006\n NotFound = 4005\n TableNotFound = 4041\n RecordNotFound = 4042\n UnknownError = 4999\n # Generic Errors\n ProgrammingError = 4101\n TypeError = 4102\n ValueError = 4103\n\n # DB Error Codes\n DuplicateTableError = 4205\n DuplicateColumnError = 4206\n ExclusionViolation = 4213\n ForeignKeyViolation = 4212\n InvalidTypeCast = 4203\n InvalidTypeOption = 4210\n InvalidDefault = 4211\n NonClassifiedIntegrityError = 
4201\n NotNullViolation = 4204\n RaiseException = 4202\n TypeMismatchViolation = 4214\n UndefinedFunction = 4207\n UniqueViolation = 4208\n UnsupportedType = 4209\n\n # Data Imports error code\n InvalidTableError = 4301\n UniqueImportViolation = 4303\n\n # Validation Error\n ColumnSizeMismatch = 4401\n DistinctColumnNameRequired = 4402\n MappingsNotFound = 4417\n MultipleDataFiles = 4400\n MoneyDisplayOptionConflict = 4407\n UnsupportedAlter = 4403\n URLDownloadError = 4404\n URLNotReachableError = 4405\n URLInvalidContentType = 4406\n UnknownDBType = 4408\n InvalidDateError = 4413\n InvalidDateFormatError = 4414\n InvalidLinkChoice = 4409\n InvalidTableName = 4420\n IncompatibleFractionDigitValues = 4410\n UnsupportedConstraint = 4411\n ConstraintColumnEmpty = 4412\n InvalidValueType = 4415\n DictHasBadKeys = 4416\n DeletedColumnAccess = 4418\n IncorrectOldPassword = 4419\n EditingPublicSchema = 4421\n", "path": "mathesar/api/exceptions/error_codes.py"}, {"content": "from django_filters import rest_framework as filters\nfrom rest_access_policy import AccessViewSetMixin\nfrom rest_framework import status, viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.mixins import ListModelMixin, RetrieveModelMixin\nfrom rest_framework.response import Response\n\nfrom mathesar.api.db.permissions.schema import SchemaAccessPolicy\nfrom mathesar.api.dj_filters import SchemaFilter\nfrom mathesar.api.pagination import DefaultLimitOffsetPagination\nfrom mathesar.api.serializers.dependents import DependentSerializer, DependentFilterSerializer\nfrom mathesar.api.serializers.schemas import SchemaSerializer\nfrom mathesar.models.base import Schema\nfrom mathesar.utils.schemas import create_schema_and_object\nfrom mathesar.api.exceptions.validation_exceptions.exceptions import EditingPublicSchemaIsDisallowed\n\n\nclass SchemaViewSet(AccessViewSetMixin, viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin):\n serializer_class = SchemaSerializer\n pagination_class = DefaultLimitOffsetPagination\n filter_backends = (filters.DjangoFilterBackend,)\n filterset_class = SchemaFilter\n access_policy = SchemaAccessPolicy\n\n def get_queryset(self):\n qs = Schema.objects.all().order_by('-created_at')\n return self.access_policy.scope_viewset_queryset(self.request, qs)\n\n def create(self, request):\n serializer = SchemaSerializer(data=request.data, context={'request': request})\n serializer.is_valid(raise_exception=True)\n database_name = serializer.validated_data['database'].name\n schema = create_schema_and_object(\n serializer.validated_data['name'],\n database_name,\n comment=serializer.validated_data.get('description')\n )\n serializer = SchemaSerializer(schema)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n def partial_update(self, request, pk=None):\n serializer = SchemaSerializer(\n data=request.data, context={'request': request}, partial=True\n )\n serializer.is_valid(raise_exception=True)\n\n schema = self.get_object()\n\n # We forbid editing the public schema\n if schema.name == \"public\":\n raise EditingPublicSchemaIsDisallowed()\n\n schema.update_sa_schema(serializer.validated_data)\n\n # Reload the schema to avoid cached properties\n schema = self.get_object()\n schema.clear_name_cache()\n serializer = SchemaSerializer(schema, context={'request': request})\n return Response(serializer.data)\n\n def destroy(self, request, pk=None):\n schema = self.get_object()\n schema.delete_sa_schema()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n 
@action(methods=['get'], detail=True)\n def dependents(self, request, pk=None):\n serializer = DependentFilterSerializer(data=request.GET)\n serializer.is_valid(raise_exception=True)\n types_exclude = serializer.validated_data['exclude']\n\n schema = self.get_object()\n serializer = DependentSerializer(schema.get_dependents(types_exclude), many=True, context={'request': request})\n return Response(serializer.data)\n", "path": "mathesar/api/db/viewsets/schemas.py"}]}
3,148
512
gh_patches_debug_14182
rasdani/github-patches
git_diff
fonttools__fonttools-2376
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [ttLib] font.importXML() always sets sfntVersion from imported TTX When using `ttx -m` or `font.importXML()`, the font's `sfntVersion` is unconditionally taken from the imported TTX. That is correct if we're doing a full import (starting with an empty TTFont) but is dubious when importing some tables into an existing font. Importing (say) a name table from an .otf into an existing .ttf will set `sfntValue` to "OTTO", which is incorrect for TTF. Perhaps we should not set `sfntVersion` if `importXML()` is called on a non-empty font. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `Lib/fontTools/misc/xmlReader.py` Content: ``` 1 from fontTools import ttLib 2 from fontTools.misc.textTools import safeEval 3 from fontTools.ttLib.tables.DefaultTable import DefaultTable 4 import sys 5 import os 6 import logging 7 8 9 log = logging.getLogger(__name__) 10 11 class TTXParseError(Exception): pass 12 13 BUFSIZE = 0x4000 14 15 16 class XMLReader(object): 17 18 def __init__(self, fileOrPath, ttFont, progress=None, quiet=None, contentOnly=False): 19 if fileOrPath == '-': 20 fileOrPath = sys.stdin 21 if not hasattr(fileOrPath, "read"): 22 self.file = open(fileOrPath, "rb") 23 self._closeStream = True 24 else: 25 # assume readable file object 26 self.file = fileOrPath 27 self._closeStream = False 28 self.ttFont = ttFont 29 self.progress = progress 30 if quiet is not None: 31 from fontTools.misc.loggingTools import deprecateArgument 32 deprecateArgument("quiet", "configure logging instead") 33 self.quiet = quiet 34 self.root = None 35 self.contentStack = [] 36 self.contentOnly = contentOnly 37 self.stackSize = 0 38 39 def read(self, rootless=False): 40 if rootless: 41 self.stackSize += 1 42 if self.progress: 43 self.file.seek(0, 2) 44 fileSize = self.file.tell() 45 self.progress.set(0, fileSize // 100 or 1) 46 self.file.seek(0) 47 self._parseFile(self.file) 48 if self._closeStream: 49 self.close() 50 if rootless: 51 self.stackSize -= 1 52 53 def close(self): 54 self.file.close() 55 56 def _parseFile(self, file): 57 from xml.parsers.expat import ParserCreate 58 parser = ParserCreate() 59 parser.StartElementHandler = self._startElementHandler 60 parser.EndElementHandler = self._endElementHandler 61 parser.CharacterDataHandler = self._characterDataHandler 62 63 pos = 0 64 while True: 65 chunk = file.read(BUFSIZE) 66 if not chunk: 67 parser.Parse(chunk, 1) 68 break 69 pos = pos + len(chunk) 70 if self.progress: 71 self.progress.set(pos // 100) 72 parser.Parse(chunk, 0) 73 74 def _startElementHandler(self, name, attrs): 75 if self.stackSize == 1 and self.contentOnly: 76 # We already know the table we're parsing, skip 77 # parsing the table tag and continue to 78 # stack '2' which begins parsing content 79 self.contentStack.append([]) 80 self.stackSize = 2 81 return 82 stackSize = self.stackSize 83 self.stackSize = stackSize + 1 84 subFile = attrs.get("src") 85 if subFile is not None: 86 if hasattr(self.file, 'name'): 87 # if file has a name, get its parent directory 88 dirname = os.path.dirname(self.file.name) 89 else: 90 # else fall back to using the current working directory 91 dirname = os.getcwd() 92 subFile = os.path.join(dirname, subFile) 93 if not stackSize: 94 if name != "ttFont": 95 raise TTXParseError("illegal root tag: %s" % name) 96 sfntVersion = attrs.get("sfntVersion") 97 if sfntVersion is not 
None: 98 if len(sfntVersion) != 4: 99 sfntVersion = safeEval('"' + sfntVersion + '"') 100 self.ttFont.sfntVersion = sfntVersion 101 self.contentStack.append([]) 102 elif stackSize == 1: 103 if subFile is not None: 104 subReader = XMLReader(subFile, self.ttFont, self.progress) 105 subReader.read() 106 self.contentStack.append([]) 107 return 108 tag = ttLib.xmlToTag(name) 109 msg = "Parsing '%s' table..." % tag 110 if self.progress: 111 self.progress.setLabel(msg) 112 log.info(msg) 113 if tag == "GlyphOrder": 114 tableClass = ttLib.GlyphOrder 115 elif "ERROR" in attrs or ('raw' in attrs and safeEval(attrs['raw'])): 116 tableClass = DefaultTable 117 else: 118 tableClass = ttLib.getTableClass(tag) 119 if tableClass is None: 120 tableClass = DefaultTable 121 if tag == 'loca' and tag in self.ttFont: 122 # Special-case the 'loca' table as we need the 123 # original if the 'glyf' table isn't recompiled. 124 self.currentTable = self.ttFont[tag] 125 else: 126 self.currentTable = tableClass(tag) 127 self.ttFont[tag] = self.currentTable 128 self.contentStack.append([]) 129 elif stackSize == 2 and subFile is not None: 130 subReader = XMLReader(subFile, self.ttFont, self.progress, contentOnly=True) 131 subReader.read() 132 self.contentStack.append([]) 133 self.root = subReader.root 134 elif stackSize == 2: 135 self.contentStack.append([]) 136 self.root = (name, attrs, self.contentStack[-1]) 137 else: 138 l = [] 139 self.contentStack[-1].append((name, attrs, l)) 140 self.contentStack.append(l) 141 142 def _characterDataHandler(self, data): 143 if self.stackSize > 1: 144 self.contentStack[-1].append(data) 145 146 def _endElementHandler(self, name): 147 self.stackSize = self.stackSize - 1 148 del self.contentStack[-1] 149 if not self.contentOnly: 150 if self.stackSize == 1: 151 self.root = None 152 elif self.stackSize == 2: 153 name, attrs, content = self.root 154 self.currentTable.fromXML(name, attrs, content, self.ttFont) 155 self.root = None 156 157 158 class ProgressPrinter(object): 159 160 def __init__(self, title, maxval=100): 161 print(title) 162 163 def set(self, val, maxval=None): 164 pass 165 166 def increment(self, val=1): 167 pass 168 169 def setLabel(self, text): 170 print(text) 171 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/Lib/fontTools/misc/xmlReader.py b/Lib/fontTools/misc/xmlReader.py --- a/Lib/fontTools/misc/xmlReader.py +++ b/Lib/fontTools/misc/xmlReader.py @@ -93,11 +93,12 @@ if not stackSize: if name != "ttFont": raise TTXParseError("illegal root tag: %s" % name) - sfntVersion = attrs.get("sfntVersion") - if sfntVersion is not None: - if len(sfntVersion) != 4: - sfntVersion = safeEval('"' + sfntVersion + '"') - self.ttFont.sfntVersion = sfntVersion + if self.ttFont.reader is None and not self.ttFont.tables: + sfntVersion = attrs.get("sfntVersion") + if sfntVersion is not None: + if len(sfntVersion) != 4: + sfntVersion = safeEval('"' + sfntVersion + '"') + self.ttFont.sfntVersion = sfntVersion self.contentStack.append([]) elif stackSize == 1: if subFile is not None:
{"golden_diff": "diff --git a/Lib/fontTools/misc/xmlReader.py b/Lib/fontTools/misc/xmlReader.py\n--- a/Lib/fontTools/misc/xmlReader.py\n+++ b/Lib/fontTools/misc/xmlReader.py\n@@ -93,11 +93,12 @@\n \t\tif not stackSize:\n \t\t\tif name != \"ttFont\":\n \t\t\t\traise TTXParseError(\"illegal root tag: %s\" % name)\n-\t\t\tsfntVersion = attrs.get(\"sfntVersion\")\n-\t\t\tif sfntVersion is not None:\n-\t\t\t\tif len(sfntVersion) != 4:\n-\t\t\t\t\tsfntVersion = safeEval('\"' + sfntVersion + '\"')\n-\t\t\t\tself.ttFont.sfntVersion = sfntVersion\n+\t\t\tif self.ttFont.reader is None and not self.ttFont.tables:\n+\t\t\t\tsfntVersion = attrs.get(\"sfntVersion\")\n+\t\t\t\tif sfntVersion is not None:\n+\t\t\t\t\tif len(sfntVersion) != 4:\n+\t\t\t\t\t\tsfntVersion = safeEval('\"' + sfntVersion + '\"')\n+\t\t\t\t\tself.ttFont.sfntVersion = sfntVersion\n \t\t\tself.contentStack.append([])\n \t\telif stackSize == 1:\n \t\t\tif subFile is not None:\n", "issue": "[ttLib] font.importXML() always sets sfntVersion from imported TTX\nWhen using `ttx -m` or `font.importXML()`, the font's `sfntVersion` is unconditionally taken from the imported TTX. That is correct if we're doing a full import (starting with an empty TTFont) but is dubious when importing some tables into an existing font. Importing (say) a name table from an .otf into an existing .ttf will set `sfntValue` to \"OTTO\", which is incorrect for TTF.\r\n\r\nPerhaps we should not set `sfntVersion` if `importXML()` is called on a non-empty font.\n", "before_files": [{"content": "from fontTools import ttLib\nfrom fontTools.misc.textTools import safeEval\nfrom fontTools.ttLib.tables.DefaultTable import DefaultTable\nimport sys\nimport os\nimport logging\n\n\nlog = logging.getLogger(__name__)\n\nclass TTXParseError(Exception): pass\n\nBUFSIZE = 0x4000\n\n\nclass XMLReader(object):\n\n\tdef __init__(self, fileOrPath, ttFont, progress=None, quiet=None, contentOnly=False):\n\t\tif fileOrPath == '-':\n\t\t\tfileOrPath = sys.stdin\n\t\tif not hasattr(fileOrPath, \"read\"):\n\t\t\tself.file = open(fileOrPath, \"rb\")\n\t\t\tself._closeStream = True\n\t\telse:\n\t\t\t# assume readable file object\n\t\t\tself.file = fileOrPath\n\t\t\tself._closeStream = False\n\t\tself.ttFont = ttFont\n\t\tself.progress = progress\n\t\tif quiet is not None:\n\t\t\tfrom fontTools.misc.loggingTools import deprecateArgument\n\t\t\tdeprecateArgument(\"quiet\", \"configure logging instead\")\n\t\t\tself.quiet = quiet\n\t\tself.root = None\n\t\tself.contentStack = []\n\t\tself.contentOnly = contentOnly\n\t\tself.stackSize = 0\n\n\tdef read(self, rootless=False):\n\t\tif rootless:\n\t\t\tself.stackSize += 1\n\t\tif self.progress:\n\t\t\tself.file.seek(0, 2)\n\t\t\tfileSize = self.file.tell()\n\t\t\tself.progress.set(0, fileSize // 100 or 1)\n\t\t\tself.file.seek(0)\n\t\tself._parseFile(self.file)\n\t\tif self._closeStream:\n\t\t\tself.close()\n\t\tif rootless:\n\t\t\tself.stackSize -= 1\n\n\tdef close(self):\n\t\tself.file.close()\n\n\tdef _parseFile(self, file):\n\t\tfrom xml.parsers.expat import ParserCreate\n\t\tparser = ParserCreate()\n\t\tparser.StartElementHandler = self._startElementHandler\n\t\tparser.EndElementHandler = self._endElementHandler\n\t\tparser.CharacterDataHandler = self._characterDataHandler\n\n\t\tpos = 0\n\t\twhile True:\n\t\t\tchunk = file.read(BUFSIZE)\n\t\t\tif not chunk:\n\t\t\t\tparser.Parse(chunk, 1)\n\t\t\t\tbreak\n\t\t\tpos = pos + len(chunk)\n\t\t\tif self.progress:\n\t\t\t\tself.progress.set(pos // 100)\n\t\t\tparser.Parse(chunk, 0)\n\n\tdef 
_startElementHandler(self, name, attrs):\n\t\tif self.stackSize == 1 and self.contentOnly:\n\t\t\t# We already know the table we're parsing, skip\n\t\t\t# parsing the table tag and continue to\n\t\t\t# stack '2' which begins parsing content\n\t\t\tself.contentStack.append([])\n\t\t\tself.stackSize = 2\n\t\t\treturn\n\t\tstackSize = self.stackSize\n\t\tself.stackSize = stackSize + 1\n\t\tsubFile = attrs.get(\"src\")\n\t\tif subFile is not None:\n\t\t\tif hasattr(self.file, 'name'):\n\t\t\t\t# if file has a name, get its parent directory\n\t\t\t\tdirname = os.path.dirname(self.file.name)\n\t\t\telse:\n\t\t\t\t# else fall back to using the current working directory\n\t\t\t\tdirname = os.getcwd()\n\t\t\tsubFile = os.path.join(dirname, subFile)\n\t\tif not stackSize:\n\t\t\tif name != \"ttFont\":\n\t\t\t\traise TTXParseError(\"illegal root tag: %s\" % name)\n\t\t\tsfntVersion = attrs.get(\"sfntVersion\")\n\t\t\tif sfntVersion is not None:\n\t\t\t\tif len(sfntVersion) != 4:\n\t\t\t\t\tsfntVersion = safeEval('\"' + sfntVersion + '\"')\n\t\t\t\tself.ttFont.sfntVersion = sfntVersion\n\t\t\tself.contentStack.append([])\n\t\telif stackSize == 1:\n\t\t\tif subFile is not None:\n\t\t\t\tsubReader = XMLReader(subFile, self.ttFont, self.progress)\n\t\t\t\tsubReader.read()\n\t\t\t\tself.contentStack.append([])\n\t\t\t\treturn\n\t\t\ttag = ttLib.xmlToTag(name)\n\t\t\tmsg = \"Parsing '%s' table...\" % tag\n\t\t\tif self.progress:\n\t\t\t\tself.progress.setLabel(msg)\n\t\t\tlog.info(msg)\n\t\t\tif tag == \"GlyphOrder\":\n\t\t\t\ttableClass = ttLib.GlyphOrder\n\t\t\telif \"ERROR\" in attrs or ('raw' in attrs and safeEval(attrs['raw'])):\n\t\t\t\ttableClass = DefaultTable\n\t\t\telse:\n\t\t\t\ttableClass = ttLib.getTableClass(tag)\n\t\t\t\tif tableClass is None:\n\t\t\t\t\ttableClass = DefaultTable\n\t\t\tif tag == 'loca' and tag in self.ttFont:\n\t\t\t\t# Special-case the 'loca' table as we need the\n\t\t\t\t# original if the 'glyf' table isn't recompiled.\n\t\t\t\tself.currentTable = self.ttFont[tag]\n\t\t\telse:\n\t\t\t\tself.currentTable = tableClass(tag)\n\t\t\t\tself.ttFont[tag] = self.currentTable\n\t\t\tself.contentStack.append([])\n\t\telif stackSize == 2 and subFile is not None:\n\t\t\tsubReader = XMLReader(subFile, self.ttFont, self.progress, contentOnly=True)\n\t\t\tsubReader.read()\n\t\t\tself.contentStack.append([])\n\t\t\tself.root = subReader.root\n\t\telif stackSize == 2:\n\t\t\tself.contentStack.append([])\n\t\t\tself.root = (name, attrs, self.contentStack[-1])\n\t\telse:\n\t\t\tl = []\n\t\t\tself.contentStack[-1].append((name, attrs, l))\n\t\t\tself.contentStack.append(l)\n\n\tdef _characterDataHandler(self, data):\n\t\tif self.stackSize > 1:\n\t\t\tself.contentStack[-1].append(data)\n\n\tdef _endElementHandler(self, name):\n\t\tself.stackSize = self.stackSize - 1\n\t\tdel self.contentStack[-1]\n\t\tif not self.contentOnly:\n\t\t\tif self.stackSize == 1:\n\t\t\t\tself.root = None\n\t\t\telif self.stackSize == 2:\n\t\t\t\tname, attrs, content = self.root\n\t\t\t\tself.currentTable.fromXML(name, attrs, content, self.ttFont)\n\t\t\t\tself.root = None\n\n\nclass ProgressPrinter(object):\n\n\tdef __init__(self, title, maxval=100):\n\t\tprint(title)\n\n\tdef set(self, val, maxval=None):\n\t\tpass\n\n\tdef increment(self, val=1):\n\t\tpass\n\n\tdef setLabel(self, text):\n\t\tprint(text)\n", "path": "Lib/fontTools/misc/xmlReader.py"}], "after_files": [{"content": "from fontTools import ttLib\nfrom fontTools.misc.textTools import safeEval\nfrom fontTools.ttLib.tables.DefaultTable import 
DefaultTable\nimport sys\nimport os\nimport logging\n\n\nlog = logging.getLogger(__name__)\n\nclass TTXParseError(Exception): pass\n\nBUFSIZE = 0x4000\n\n\nclass XMLReader(object):\n\n\tdef __init__(self, fileOrPath, ttFont, progress=None, quiet=None, contentOnly=False):\n\t\tif fileOrPath == '-':\n\t\t\tfileOrPath = sys.stdin\n\t\tif not hasattr(fileOrPath, \"read\"):\n\t\t\tself.file = open(fileOrPath, \"rb\")\n\t\t\tself._closeStream = True\n\t\telse:\n\t\t\t# assume readable file object\n\t\t\tself.file = fileOrPath\n\t\t\tself._closeStream = False\n\t\tself.ttFont = ttFont\n\t\tself.progress = progress\n\t\tif quiet is not None:\n\t\t\tfrom fontTools.misc.loggingTools import deprecateArgument\n\t\t\tdeprecateArgument(\"quiet\", \"configure logging instead\")\n\t\t\tself.quiet = quiet\n\t\tself.root = None\n\t\tself.contentStack = []\n\t\tself.contentOnly = contentOnly\n\t\tself.stackSize = 0\n\n\tdef read(self, rootless=False):\n\t\tif rootless:\n\t\t\tself.stackSize += 1\n\t\tif self.progress:\n\t\t\tself.file.seek(0, 2)\n\t\t\tfileSize = self.file.tell()\n\t\t\tself.progress.set(0, fileSize // 100 or 1)\n\t\t\tself.file.seek(0)\n\t\tself._parseFile(self.file)\n\t\tif self._closeStream:\n\t\t\tself.close()\n\t\tif rootless:\n\t\t\tself.stackSize -= 1\n\n\tdef close(self):\n\t\tself.file.close()\n\n\tdef _parseFile(self, file):\n\t\tfrom xml.parsers.expat import ParserCreate\n\t\tparser = ParserCreate()\n\t\tparser.StartElementHandler = self._startElementHandler\n\t\tparser.EndElementHandler = self._endElementHandler\n\t\tparser.CharacterDataHandler = self._characterDataHandler\n\n\t\tpos = 0\n\t\twhile True:\n\t\t\tchunk = file.read(BUFSIZE)\n\t\t\tif not chunk:\n\t\t\t\tparser.Parse(chunk, 1)\n\t\t\t\tbreak\n\t\t\tpos = pos + len(chunk)\n\t\t\tif self.progress:\n\t\t\t\tself.progress.set(pos // 100)\n\t\t\tparser.Parse(chunk, 0)\n\n\tdef _startElementHandler(self, name, attrs):\n\t\tif self.stackSize == 1 and self.contentOnly:\n\t\t\t# We already know the table we're parsing, skip\n\t\t\t# parsing the table tag and continue to\n\t\t\t# stack '2' which begins parsing content\n\t\t\tself.contentStack.append([])\n\t\t\tself.stackSize = 2\n\t\t\treturn\n\t\tstackSize = self.stackSize\n\t\tself.stackSize = stackSize + 1\n\t\tsubFile = attrs.get(\"src\")\n\t\tif subFile is not None:\n\t\t\tif hasattr(self.file, 'name'):\n\t\t\t\t# if file has a name, get its parent directory\n\t\t\t\tdirname = os.path.dirname(self.file.name)\n\t\t\telse:\n\t\t\t\t# else fall back to using the current working directory\n\t\t\t\tdirname = os.getcwd()\n\t\t\tsubFile = os.path.join(dirname, subFile)\n\t\tif not stackSize:\n\t\t\tif name != \"ttFont\":\n\t\t\t\traise TTXParseError(\"illegal root tag: %s\" % name)\n\t\t\tif self.ttFont.reader is None and not self.ttFont.tables:\n\t\t\t\tsfntVersion = attrs.get(\"sfntVersion\")\n\t\t\t\tif sfntVersion is not None:\n\t\t\t\t\tif len(sfntVersion) != 4:\n\t\t\t\t\t\tsfntVersion = safeEval('\"' + sfntVersion + '\"')\n\t\t\t\t\tself.ttFont.sfntVersion = sfntVersion\n\t\t\tself.contentStack.append([])\n\t\telif stackSize == 1:\n\t\t\tif subFile is not None:\n\t\t\t\tsubReader = XMLReader(subFile, self.ttFont, self.progress)\n\t\t\t\tsubReader.read()\n\t\t\t\tself.contentStack.append([])\n\t\t\t\treturn\n\t\t\ttag = ttLib.xmlToTag(name)\n\t\t\tmsg = \"Parsing '%s' table...\" % tag\n\t\t\tif self.progress:\n\t\t\t\tself.progress.setLabel(msg)\n\t\t\tlog.info(msg)\n\t\t\tif tag == \"GlyphOrder\":\n\t\t\t\ttableClass = ttLib.GlyphOrder\n\t\t\telif \"ERROR\" in attrs or 
('raw' in attrs and safeEval(attrs['raw'])):\n\t\t\t\ttableClass = DefaultTable\n\t\t\telse:\n\t\t\t\ttableClass = ttLib.getTableClass(tag)\n\t\t\t\tif tableClass is None:\n\t\t\t\t\ttableClass = DefaultTable\n\t\t\tif tag == 'loca' and tag in self.ttFont:\n\t\t\t\t# Special-case the 'loca' table as we need the\n\t\t\t\t# original if the 'glyf' table isn't recompiled.\n\t\t\t\tself.currentTable = self.ttFont[tag]\n\t\t\telse:\n\t\t\t\tself.currentTable = tableClass(tag)\n\t\t\t\tself.ttFont[tag] = self.currentTable\n\t\t\tself.contentStack.append([])\n\t\telif stackSize == 2 and subFile is not None:\n\t\t\tsubReader = XMLReader(subFile, self.ttFont, self.progress, contentOnly=True)\n\t\t\tsubReader.read()\n\t\t\tself.contentStack.append([])\n\t\t\tself.root = subReader.root\n\t\telif stackSize == 2:\n\t\t\tself.contentStack.append([])\n\t\t\tself.root = (name, attrs, self.contentStack[-1])\n\t\telse:\n\t\t\tl = []\n\t\t\tself.contentStack[-1].append((name, attrs, l))\n\t\t\tself.contentStack.append(l)\n\n\tdef _characterDataHandler(self, data):\n\t\tif self.stackSize > 1:\n\t\t\tself.contentStack[-1].append(data)\n\n\tdef _endElementHandler(self, name):\n\t\tself.stackSize = self.stackSize - 1\n\t\tdel self.contentStack[-1]\n\t\tif not self.contentOnly:\n\t\t\tif self.stackSize == 1:\n\t\t\t\tself.root = None\n\t\t\telif self.stackSize == 2:\n\t\t\t\tname, attrs, content = self.root\n\t\t\t\tself.currentTable.fromXML(name, attrs, content, self.ttFont)\n\t\t\t\tself.root = None\n\n\nclass ProgressPrinter(object):\n\n\tdef __init__(self, title, maxval=100):\n\t\tprint(title)\n\n\tdef set(self, val, maxval=None):\n\t\tpass\n\n\tdef increment(self, val=1):\n\t\tpass\n\n\tdef setLabel(self, text):\n\t\tprint(text)\n", "path": "Lib/fontTools/misc/xmlReader.py"}]}
2,141
257
gh_patches_debug_40167
rasdani/github-patches
git_diff
mosaicml__composer-534
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Incorrect setting of persistent_workers hparam DataloaderHparams: `persistent_workers: bool = hp.optional("Whether to shutdown workers after the dataset has been consumed once", default=True)` This makes it sound like the default option which is True shuts down the workers after the dataset has been consumed once. But when calling torch Dataloader, the default is False and this option keeps the workers alive. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `composer/datasets/dataloader.py` Content: ``` 1 # Copyright 2021 MosaicML. All Rights Reserved. 2 3 from __future__ import annotations 4 5 import logging 6 import textwrap 7 from dataclasses import dataclass 8 from typing import Any, Callable, Iterator, Optional 9 10 import torch 11 import torch.distributed 12 import torch.utils.data 13 import yahp as hp 14 15 from composer.core.types import Batch, DataLoader, Dataset 16 17 log = logging.getLogger(__name__) 18 19 20 class WrappedDataLoader(DataLoader): 21 22 def __init__(self, dataloader: DataLoader) -> None: 23 if self.is_dataloader_already_wrapped(dataloader): 24 log.debug( 25 textwrap.dedent("""\ 26 The dataloader is already wrapped with %s; it will be wrapped again. 27 If this is unintended behavior, guard the wrapping of the dataloader i.e. with: 28 if not %s.is_dataloader_already_wrapped(dataloader): dataloader = %s(dataloader)"""), 29 type(self).__name__, 30 type(self).__name__, 31 type(self).__name__, 32 ) 33 self.dataset = dataloader.dataset 34 self.batch_size = dataloader.batch_size 35 self.num_workers = dataloader.num_workers 36 self.pin_memory = dataloader.pin_memory 37 self.drop_last = dataloader.drop_last 38 self.timeout = dataloader.timeout 39 self.sampler = dataloader.sampler 40 self.prefetch_factor = dataloader.prefetch_factor 41 self.dataloader = dataloader 42 43 def __len__(self) -> int: 44 return len(self.dataloader) 45 46 def __iter__(self) -> Iterator[Batch]: 47 return iter(self.dataloader) 48 49 def __bool__(self) -> bool: 50 return True 51 52 def __setattr__(self, name: str, value: Any) -> None: 53 if hasattr(self, name) and name in ("dataset", "batch_size", "num_workers", "pin_memory", "drop_last", 54 "timeout", "sampler", "prefetch_factor", "dataloader"): 55 raise RuntimeError(f"Property {name} cannot be set after initialization in a DataLoader") 56 return super().__setattr__(name, value) 57 58 @classmethod 59 def is_dataloader_already_wrapped(cls, dataloader: DataLoader): 60 """Returns whether the ``dataloader`` is wrapped with ``cls``. This helper method checks recursively through all 61 wrappings until the underlying dataloader is reached. 62 63 Args: 64 dataloader (DataLoader): The dataloader to check 65 66 Returns: 67 bool: Whether the ``dataloader`` is wrapped recursively with ``cls``. 68 """ 69 if isinstance(dataloader, cls): 70 return True 71 if not isinstance(dataloader, WrappedDataLoader): 72 return False 73 if not isinstance(dataloader.dataloader, WrappedDataLoader): 74 return False 75 return cls.is_dataloader_already_wrapped(dataloader.dataloader) 76 77 78 def unwrap_data_loader(dataloader: DataLoader) -> DataLoader: 79 """Recursively unwraps a dataloader if it is of type :class:`WrappedDataLoader`. 
80 81 Args: 82 dataloader (DataLoader): The dataloader to unwrap 83 84 Returns: 85 DataLoader: The underlying dataloader 86 """ 87 if isinstance(dataloader, WrappedDataLoader): 88 return unwrap_data_loader(dataloader.dataloader) 89 return dataloader 90 91 92 @dataclass 93 class DataloaderHparams(hp.Hparams): 94 """Hyperparameters to initialize a :class:`~torch.utils.data.Dataloader`. 95 96 Parameters: 97 num_workers (int): Number of CPU workers to use per device to fetch data. 98 prefetch_factor (int): Number of samples loaded in advance by each worker. 99 2 means there will be a total of 2 * num_workers samples prefetched across all workers. 100 persistent_workers (bool): Whether or not to shutdown workers after the dataset has been consumed once. 101 pin_memory (bool): Whether or not to copy Tensors into CUDA pinned memory before returning them. 102 timeout (float): Timeout, in seconds, for collecting a batch from workers. Set to 0 for no timeout. 103 """ 104 105 num_workers: int = hp.optional("Number of CPU workers to use per device to fetch data.", default=8) 106 prefetch_factor: int = hp.optional("Number of samples loaded in advance by each worker", default=2) 107 persistent_workers: bool = hp.optional("Whether to shutdown workers after the dataset has been consumed once", 108 default=True) 109 pin_memory: bool = hp.optional("Whether to copy Tensors into CUDA pinned memory before returning them", 110 default=True) 111 timeout: float = hp.optional("Timeout, in seconds, for collecting a batch from workers. Set to 0 for no timeout", 112 default=0) 113 114 def initialize_object( 115 self, 116 dataset: Dataset, 117 *, 118 batch_size: int, 119 sampler: Optional[torch.utils.data.Sampler[int]], 120 drop_last: bool, 121 collate_fn: Optional[Callable] = None, 122 worker_init_fn: Optional[Callable] = None, 123 ) -> DataLoader: 124 """Create a dataloader. 125 126 Args: 127 dataset (Dataset): The dataset. 128 batch_size (int): The per-device batch size. 129 sampler (torch.utils.data.Sampler[int] or None): The sampler to use for the dataloader. 130 drop_last (bool): Whether to drop the last batch if the number of 131 samples is not evenly divisible by the batch size. 132 collate_fn (callable, optional): Custom collate function. Defaults to None. 133 worker_init_fn (callable, optional): Custom worker init function. Defaults to None. 134 135 Returns: 136 DataLoader: The dataloader. 137 """ 138 139 return torch.utils.data.DataLoader(dataset, 140 batch_size=batch_size, 141 num_workers=self.num_workers, 142 pin_memory=self.pin_memory, 143 drop_last=drop_last, 144 sampler=sampler, 145 collate_fn=collate_fn, 146 worker_init_fn=worker_init_fn, 147 timeout=self.timeout, 148 prefetch_factor=self.prefetch_factor, 149 persistent_workers=self.persistent_workers) 150 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/composer/datasets/dataloader.py b/composer/datasets/dataloader.py --- a/composer/datasets/dataloader.py +++ b/composer/datasets/dataloader.py @@ -94,22 +94,42 @@ """Hyperparameters to initialize a :class:`~torch.utils.data.Dataloader`. Parameters: - num_workers (int): Number of CPU workers to use per device to fetch data. - prefetch_factor (int): Number of samples loaded in advance by each worker. - 2 means there will be a total of 2 * num_workers samples prefetched across all workers. - persistent_workers (bool): Whether or not to shutdown workers after the dataset has been consumed once. - pin_memory (bool): Whether or not to copy Tensors into CUDA pinned memory before returning them. - timeout (float): Timeout, in seconds, for collecting a batch from workers. Set to 0 for no timeout. + num_workers (int, optional): Number of CPU workers to use per device to fetch data. + Set to ``0`` to use the main training thread for dataloading. + While zero workers can be useful for debugging, it should not be used for performance reasons. + (default: ``8``) + prefetch_factor (int, optional): Number of samples loaded in advance by each worker. + For example, 2 means there will be a total of 2 * num_workers samples prefetched across all workers. + If ``num_workers = 0``, then the ``prefetch_factor`` must be left at the default value. + (default: ``2``) + persistent_workers (bool): Whether to reuse dataloader workers across epochs. If ``num_workers`` is 0, + then this field must be ``False``. (default: ``True``) + pin_memory (bool, optional): Whether or not to copy Tensors into CUDA pinned memory before returning them. + If ``num_workers = 0``, then the ``pin_memory`` must be ``False``. (default: ``True``) + timeout (float): Timeout, in seconds, for collecting a batch from workers. Set to ``0`` for no timeout. + (default: ``0``) """ - num_workers: int = hp.optional("Number of CPU workers to use per device to fetch data.", default=8) - prefetch_factor: int = hp.optional("Number of samples loaded in advance by each worker", default=2) - persistent_workers: bool = hp.optional("Whether to shutdown workers after the dataset has been consumed once", + num_workers: int = hp.optional(textwrap.dedent("""\ + Number of CPU workers to use per device to fetch data. + Set to ``0`` to use the main training thread for dataloading. + While zero workers can be useful for debugging, it should not be used for performance reasons."""), + default=8) + prefetch_factor: int = hp.optional(textwrap.dedent("""\ + Number of samples loaded in advance by each worker. + For example, 2 means there will be a total of 2 * num_workers samples prefetched across all workers. + If ``num_workers = 0``, then the ``prefetch_factor`` must be left at the default value."""), + default=2) + persistent_workers: bool = hp.optional(textwrap.dedent("""\ + Whether to reuse dataloader workers across epochs. If ``num_workers`` is 0, + then this field must be ``False``"""), default=True) - pin_memory: bool = hp.optional("Whether to copy Tensors into CUDA pinned memory before returning them", + pin_memory: bool = hp.optional(textwrap.dedent("""\ + Whether or not to copy Tensors into CUDA pinned memory before returning them. + If ``num_workers = 0``, then the ``pin_memory`` must be ``False``."""), default=True) - timeout: float = hp.optional("Timeout, in seconds, for collecting a batch from workers. Set to 0 for no timeout", - default=0) + timeout: float = hp.optional( + "Timeout, in seconds, for collecting a batch from workers. 
Set to ``0`` for no timeout.", default=0) def initialize_object( self,
{"golden_diff": "diff --git a/composer/datasets/dataloader.py b/composer/datasets/dataloader.py\n--- a/composer/datasets/dataloader.py\n+++ b/composer/datasets/dataloader.py\n@@ -94,22 +94,42 @@\n \"\"\"Hyperparameters to initialize a :class:`~torch.utils.data.Dataloader`.\n \n Parameters:\n- num_workers (int): Number of CPU workers to use per device to fetch data.\n- prefetch_factor (int): Number of samples loaded in advance by each worker.\n- 2 means there will be a total of 2 * num_workers samples prefetched across all workers.\n- persistent_workers (bool): Whether or not to shutdown workers after the dataset has been consumed once.\n- pin_memory (bool): Whether or not to copy Tensors into CUDA pinned memory before returning them.\n- timeout (float): Timeout, in seconds, for collecting a batch from workers. Set to 0 for no timeout.\n+ num_workers (int, optional): Number of CPU workers to use per device to fetch data.\n+ Set to ``0`` to use the main training thread for dataloading.\n+ While zero workers can be useful for debugging, it should not be used for performance reasons.\n+ (default: ``8``)\n+ prefetch_factor (int, optional): Number of samples loaded in advance by each worker.\n+ For example, 2 means there will be a total of 2 * num_workers samples prefetched across all workers.\n+ If ``num_workers = 0``, then the ``prefetch_factor`` must be left at the default value.\n+ (default: ``2``)\n+ persistent_workers (bool): Whether to reuse dataloader workers across epochs. If ``num_workers`` is 0,\n+ then this field must be ``False``. (default: ``True``)\n+ pin_memory (bool, optional): Whether or not to copy Tensors into CUDA pinned memory before returning them.\n+ If ``num_workers = 0``, then the ``pin_memory`` must be ``False``. (default: ``True``)\n+ timeout (float): Timeout, in seconds, for collecting a batch from workers. Set to ``0`` for no timeout.\n+ (default: ``0``)\n \"\"\"\n \n- num_workers: int = hp.optional(\"Number of CPU workers to use per device to fetch data.\", default=8)\n- prefetch_factor: int = hp.optional(\"Number of samples loaded in advance by each worker\", default=2)\n- persistent_workers: bool = hp.optional(\"Whether to shutdown workers after the dataset has been consumed once\",\n+ num_workers: int = hp.optional(textwrap.dedent(\"\"\"\\\n+ Number of CPU workers to use per device to fetch data.\n+ Set to ``0`` to use the main training thread for dataloading.\n+ While zero workers can be useful for debugging, it should not be used for performance reasons.\"\"\"),\n+ default=8)\n+ prefetch_factor: int = hp.optional(textwrap.dedent(\"\"\"\\\n+ Number of samples loaded in advance by each worker.\n+ For example, 2 means there will be a total of 2 * num_workers samples prefetched across all workers.\n+ If ``num_workers = 0``, then the ``prefetch_factor`` must be left at the default value.\"\"\"),\n+ default=2)\n+ persistent_workers: bool = hp.optional(textwrap.dedent(\"\"\"\\\n+ Whether to reuse dataloader workers across epochs. If ``num_workers`` is 0,\n+ then this field must be ``False``\"\"\"),\n default=True)\n- pin_memory: bool = hp.optional(\"Whether to copy Tensors into CUDA pinned memory before returning them\",\n+ pin_memory: bool = hp.optional(textwrap.dedent(\"\"\"\\\n+ Whether or not to copy Tensors into CUDA pinned memory before returning them.\n+ If ``num_workers = 0``, then the ``pin_memory`` must be ``False``.\"\"\"),\n default=True)\n- timeout: float = hp.optional(\"Timeout, in seconds, for collecting a batch from workers. 
Set to 0 for no timeout\",\n- default=0)\n+ timeout: float = hp.optional(\n+ \"Timeout, in seconds, for collecting a batch from workers. Set to ``0`` for no timeout.\", default=0)\n \n def initialize_object(\n self,\n", "issue": "Incorrect setting of persistent_workers hparam\nDataloaderHparams:\r\n`persistent_workers: bool = hp.optional(\"Whether to shutdown workers after the dataset has been consumed once\", default=True)`\r\n\r\nThis makes it sound like the default option which is True shuts down the workers after the dataset has been consumed once. But when calling torch Dataloader, the default is False and this option keeps the workers alive.\n", "before_files": [{"content": "# Copyright 2021 MosaicML. All Rights Reserved.\n\nfrom __future__ import annotations\n\nimport logging\nimport textwrap\nfrom dataclasses import dataclass\nfrom typing import Any, Callable, Iterator, Optional\n\nimport torch\nimport torch.distributed\nimport torch.utils.data\nimport yahp as hp\n\nfrom composer.core.types import Batch, DataLoader, Dataset\n\nlog = logging.getLogger(__name__)\n\n\nclass WrappedDataLoader(DataLoader):\n\n def __init__(self, dataloader: DataLoader) -> None:\n if self.is_dataloader_already_wrapped(dataloader):\n log.debug(\n textwrap.dedent(\"\"\"\\\n The dataloader is already wrapped with %s; it will be wrapped again.\n If this is unintended behavior, guard the wrapping of the dataloader i.e. with:\n if not %s.is_dataloader_already_wrapped(dataloader): dataloader = %s(dataloader)\"\"\"),\n type(self).__name__,\n type(self).__name__,\n type(self).__name__,\n )\n self.dataset = dataloader.dataset\n self.batch_size = dataloader.batch_size\n self.num_workers = dataloader.num_workers\n self.pin_memory = dataloader.pin_memory\n self.drop_last = dataloader.drop_last\n self.timeout = dataloader.timeout\n self.sampler = dataloader.sampler\n self.prefetch_factor = dataloader.prefetch_factor\n self.dataloader = dataloader\n\n def __len__(self) -> int:\n return len(self.dataloader)\n\n def __iter__(self) -> Iterator[Batch]:\n return iter(self.dataloader)\n\n def __bool__(self) -> bool:\n return True\n\n def __setattr__(self, name: str, value: Any) -> None:\n if hasattr(self, name) and name in (\"dataset\", \"batch_size\", \"num_workers\", \"pin_memory\", \"drop_last\",\n \"timeout\", \"sampler\", \"prefetch_factor\", \"dataloader\"):\n raise RuntimeError(f\"Property {name} cannot be set after initialization in a DataLoader\")\n return super().__setattr__(name, value)\n\n @classmethod\n def is_dataloader_already_wrapped(cls, dataloader: DataLoader):\n \"\"\"Returns whether the ``dataloader`` is wrapped with ``cls``. 
This helper method checks recursively through all\n wrappings until the underlying dataloader is reached.\n\n Args:\n dataloader (DataLoader): The dataloader to check\n\n Returns:\n bool: Whether the ``dataloader`` is wrapped recursively with ``cls``.\n \"\"\"\n if isinstance(dataloader, cls):\n return True\n if not isinstance(dataloader, WrappedDataLoader):\n return False\n if not isinstance(dataloader.dataloader, WrappedDataLoader):\n return False\n return cls.is_dataloader_already_wrapped(dataloader.dataloader)\n\n\ndef unwrap_data_loader(dataloader: DataLoader) -> DataLoader:\n \"\"\"Recursively unwraps a dataloader if it is of type :class:`WrappedDataLoader`.\n\n Args:\n dataloader (DataLoader): The dataloader to unwrap\n\n Returns:\n DataLoader: The underlying dataloader\n \"\"\"\n if isinstance(dataloader, WrappedDataLoader):\n return unwrap_data_loader(dataloader.dataloader)\n return dataloader\n\n\n@dataclass\nclass DataloaderHparams(hp.Hparams):\n \"\"\"Hyperparameters to initialize a :class:`~torch.utils.data.Dataloader`.\n\n Parameters:\n num_workers (int): Number of CPU workers to use per device to fetch data.\n prefetch_factor (int): Number of samples loaded in advance by each worker.\n 2 means there will be a total of 2 * num_workers samples prefetched across all workers.\n persistent_workers (bool): Whether or not to shutdown workers after the dataset has been consumed once.\n pin_memory (bool): Whether or not to copy Tensors into CUDA pinned memory before returning them.\n timeout (float): Timeout, in seconds, for collecting a batch from workers. Set to 0 for no timeout.\n \"\"\"\n\n num_workers: int = hp.optional(\"Number of CPU workers to use per device to fetch data.\", default=8)\n prefetch_factor: int = hp.optional(\"Number of samples loaded in advance by each worker\", default=2)\n persistent_workers: bool = hp.optional(\"Whether to shutdown workers after the dataset has been consumed once\",\n default=True)\n pin_memory: bool = hp.optional(\"Whether to copy Tensors into CUDA pinned memory before returning them\",\n default=True)\n timeout: float = hp.optional(\"Timeout, in seconds, for collecting a batch from workers. Set to 0 for no timeout\",\n default=0)\n\n def initialize_object(\n self,\n dataset: Dataset,\n *,\n batch_size: int,\n sampler: Optional[torch.utils.data.Sampler[int]],\n drop_last: bool,\n collate_fn: Optional[Callable] = None,\n worker_init_fn: Optional[Callable] = None,\n ) -> DataLoader:\n \"\"\"Create a dataloader.\n\n Args:\n dataset (Dataset): The dataset.\n batch_size (int): The per-device batch size.\n sampler (torch.utils.data.Sampler[int] or None): The sampler to use for the dataloader.\n drop_last (bool): Whether to drop the last batch if the number of\n samples is not evenly divisible by the batch size.\n collate_fn (callable, optional): Custom collate function. Defaults to None.\n worker_init_fn (callable, optional): Custom worker init function. Defaults to None.\n\n Returns:\n DataLoader: The dataloader.\n \"\"\"\n\n return torch.utils.data.DataLoader(dataset,\n batch_size=batch_size,\n num_workers=self.num_workers,\n pin_memory=self.pin_memory,\n drop_last=drop_last,\n sampler=sampler,\n collate_fn=collate_fn,\n worker_init_fn=worker_init_fn,\n timeout=self.timeout,\n prefetch_factor=self.prefetch_factor,\n persistent_workers=self.persistent_workers)\n", "path": "composer/datasets/dataloader.py"}], "after_files": [{"content": "# Copyright 2021 MosaicML. 
All Rights Reserved.\n\nfrom __future__ import annotations\n\nimport logging\nimport textwrap\nfrom dataclasses import dataclass\nfrom typing import Any, Callable, Iterator, Optional\n\nimport torch\nimport torch.distributed\nimport torch.utils.data\nimport yahp as hp\n\nfrom composer.core.types import Batch, DataLoader, Dataset\n\nlog = logging.getLogger(__name__)\n\n\nclass WrappedDataLoader(DataLoader):\n\n def __init__(self, dataloader: DataLoader) -> None:\n if self.is_dataloader_already_wrapped(dataloader):\n log.debug(\n textwrap.dedent(\"\"\"\\\n The dataloader is already wrapped with %s; it will be wrapped again.\n If this is unintended behavior, guard the wrapping of the dataloader i.e. with:\n if not %s.is_dataloader_already_wrapped(dataloader): dataloader = %s(dataloader)\"\"\"),\n type(self).__name__,\n type(self).__name__,\n type(self).__name__,\n )\n self.dataset = dataloader.dataset\n self.batch_size = dataloader.batch_size\n self.num_workers = dataloader.num_workers\n self.pin_memory = dataloader.pin_memory\n self.drop_last = dataloader.drop_last\n self.timeout = dataloader.timeout\n self.sampler = dataloader.sampler\n self.prefetch_factor = dataloader.prefetch_factor\n self.dataloader = dataloader\n\n def __len__(self) -> int:\n return len(self.dataloader)\n\n def __iter__(self) -> Iterator[Batch]:\n return iter(self.dataloader)\n\n def __bool__(self) -> bool:\n return True\n\n def __setattr__(self, name: str, value: Any) -> None:\n if hasattr(self, name) and name in (\"dataset\", \"batch_size\", \"num_workers\", \"pin_memory\", \"drop_last\",\n \"timeout\", \"sampler\", \"prefetch_factor\", \"dataloader\"):\n raise RuntimeError(f\"Property {name} cannot be set after initialization in a DataLoader\")\n return super().__setattr__(name, value)\n\n @classmethod\n def is_dataloader_already_wrapped(cls, dataloader: DataLoader):\n \"\"\"Returns whether the ``dataloader`` is wrapped with ``cls``. 
This helper method checks recursively through all\n wrappings until the underlying dataloader is reached.\n\n Args:\n dataloader (DataLoader): The dataloader to check\n\n Returns:\n bool: Whether the ``dataloader`` is wrapped recursively with ``cls``.\n \"\"\"\n if isinstance(dataloader, cls):\n return True\n if not isinstance(dataloader, WrappedDataLoader):\n return False\n if not isinstance(dataloader.dataloader, WrappedDataLoader):\n return False\n return cls.is_dataloader_already_wrapped(dataloader.dataloader)\n\n\ndef unwrap_data_loader(dataloader: DataLoader) -> DataLoader:\n \"\"\"Recursively unwraps a dataloader if it is of type :class:`WrappedDataLoader`.\n\n Args:\n dataloader (DataLoader): The dataloader to unwrap\n\n Returns:\n DataLoader: The underlying dataloader\n \"\"\"\n if isinstance(dataloader, WrappedDataLoader):\n return unwrap_data_loader(dataloader.dataloader)\n return dataloader\n\n\n@dataclass\nclass DataloaderHparams(hp.Hparams):\n \"\"\"Hyperparameters to initialize a :class:`~torch.utils.data.Dataloader`.\n\n Parameters:\n num_workers (int, optional): Number of CPU workers to use per device to fetch data.\n Set to ``0`` to use the main training thread for dataloading.\n While zero workers can be useful for debugging, it should not be used for performance reasons.\n (default: ``8``)\n prefetch_factor (int, optional): Number of samples loaded in advance by each worker.\n For example, 2 means there will be a total of 2 * num_workers samples prefetched across all workers.\n If ``num_workers = 0``, then the ``prefetch_factor`` must be left at the default value.\n (default: ``2``)\n persistent_workers (bool): Whether to reuse dataloader workers across epochs. If ``num_workers`` is 0,\n then this field must be ``False``. (default: ``True``)\n pin_memory (bool, optional): Whether or not to copy Tensors into CUDA pinned memory before returning them.\n If ``num_workers = 0``, then the ``pin_memory`` must be ``False``. (default: ``True``)\n timeout (float): Timeout, in seconds, for collecting a batch from workers. Set to ``0`` for no timeout.\n (default: ``0``)\n \"\"\"\n\n num_workers: int = hp.optional(textwrap.dedent(\"\"\"\\\n Number of CPU workers to use per device to fetch data.\n Set to ``0`` to use the main training thread for dataloading.\n While zero workers can be useful for debugging, it should not be used for performance reasons.\"\"\"),\n default=8)\n prefetch_factor: int = hp.optional(textwrap.dedent(\"\"\"\\\n Number of samples loaded in advance by each worker.\n For example, 2 means there will be a total of 2 * num_workers samples prefetched across all workers.\n If ``num_workers = 0``, then the ``prefetch_factor`` must be left at the default value.\"\"\"),\n default=2)\n persistent_workers: bool = hp.optional(textwrap.dedent(\"\"\"\\\n Whether to reuse dataloader workers across epochs. If ``num_workers`` is 0,\n then this field must be ``False``\"\"\"),\n default=True)\n pin_memory: bool = hp.optional(textwrap.dedent(\"\"\"\\\n Whether or not to copy Tensors into CUDA pinned memory before returning them.\n If ``num_workers = 0``, then the ``pin_memory`` must be ``False``.\"\"\"),\n default=True)\n timeout: float = hp.optional(\n \"Timeout, in seconds, for collecting a batch from workers. 
Set to ``0`` for no timeout.\", default=0)\n\n def initialize_object(\n self,\n dataset: Dataset,\n *,\n batch_size: int,\n sampler: Optional[torch.utils.data.Sampler[int]],\n drop_last: bool,\n collate_fn: Optional[Callable] = None,\n worker_init_fn: Optional[Callable] = None,\n ) -> DataLoader:\n \"\"\"Create a dataloader.\n\n Args:\n dataset (Dataset): The dataset.\n batch_size (int): The per-device batch size.\n sampler (torch.utils.data.Sampler[int] or None): The sampler to use for the dataloader.\n drop_last (bool): Whether to drop the last batch if the number of\n samples is not evenly divisible by the batch size.\n collate_fn (callable, optional): Custom collate function. Defaults to None.\n worker_init_fn (callable, optional): Custom worker init function. Defaults to None.\n\n Returns:\n DataLoader: The dataloader.\n \"\"\"\n\n return torch.utils.data.DataLoader(dataset,\n batch_size=batch_size,\n num_workers=self.num_workers,\n pin_memory=self.pin_memory,\n drop_last=drop_last,\n sampler=sampler,\n collate_fn=collate_fn,\n worker_init_fn=worker_init_fn,\n timeout=self.timeout,\n prefetch_factor=self.prefetch_factor,\n persistent_workers=self.persistent_workers)\n", "path": "composer/datasets/dataloader.py"}]}
1,975
934
gh_patches_debug_37164
rasdani/github-patches
git_diff
PokemonGoF__PokemonGo-Bot-5695
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Loitering does not seem to work properly <!-- STOP ! ! ! Read the following before creating anything (or you will have your issue/feature request closed without notice) 1. Please only create an ISSUE or a FEATURE REQUEST - don't mix the two together in one item 2. For a Feature Request please only fill out the FEATURE REQUEST section 3. For a Issue please only fill out the ISSUE section 4. Issues are NOT to be used for help/config problems/support - use the relevant slack channels as per the README 5. Provide a good summary in the title, don't just write problem, or awesome idea! 6. Delete all irrelevant sections not related to your issue/feature request (including this top section) ===============ISSUE SECTION=============== Before you create an Issue, please check the following: 1. Have you validated that your config.json is valid JSON? Use http://jsonlint.com/ to check. 2. Have you [searched our issue tracker](https://github.com/PokemonGoF/PokemonGo-Bot/issues?q=is%3Aissue+sort%3Aupdated-desc) to see if the issue already exists? If so, comment on that issue instead rather than creating a new issue. 3. Are you running on the `master` branch? We work on the `dev` branch and then add that functionality to `master` when it is stable. Your issue may be fixed on `dev` and there is no need for this issue, just wait and it will eventually be merged to `master`. 4. All Issue sections MUST be completed to help us determine the actual problem and find its cause --> ### Expected Behavior <!-- Tell us what you expect to happen --> Run the bot, go to the next point as in my path file, then, while loitering, move to forts, spin the pokestop and catch pokemons. ### Actual Behavior <!-- Tell us what is happening --> only while is following a path spin the pokestops, while is loitering it does nothing. ### Your FULL config.json (remove your username, password, gmapkey and any other private info) <!-- Provide your FULL config file, feel free to use services such as pastebin.com to reduce clutter --> http://pastebin.com/HDWecKDg ### Output when issue occurred <!-- Provide a reasonable sample from your output log (not just the error message), feel free to use services such as pastebin.com to reduce clutter --> http://pastebin.com/2fY2XXBC in the line 40 and 426 start to loiter. 
### Steps to Reproduce <!-- Tell us the steps you have taken to reproduce the issue --> ./run.sh ### Other Information OS: Ubuntu 16.04 <!-- Tell us what Operating system you're using --> Branch: master <!-- dev or master --> Git Commit: a8ee31256d412413b107cce81b62059634e8c802 <!-- run 'git log -n 1 --pretty=format:"%H"' --> Python Version: Python 2.7.12 <!-- run 'python -V' and paste it here) --> Any other relevant files/configs (eg: path files) <!-- Anything else which may be of relevance --> path file [ {"location": "40.7814675, -73.9741015, 33", "loiter": 300}, {"location": "40.7795502, -73.9632225, 46", "loiter": 300}, {"location": "40.7739931, -73.9665484, 23", "loiter": 300}, {"location": "40.7741881, -73.9707756, 25", "loiter": 300}, {"location": "40.7757805, -73.9717841, 26", "loiter": 300}, {"location": "40.7772429, -73.9712047, 29", "loiter": 300}, {"location": "40.7678016, -73.9717411, 18", "loiter": 300}, {"location": "40.7663878, -73.9732003, 21", "loiter": 300}, {"location": "40.7647626, -73.9732003, 32", "loiter": 300}, {"location": "40.7653802, -73.9750671, 26", "loiter": 300}, {"location": "40.7665015, -73.9748954, 14", "loiter": 300}, {"location": "40.7762842, -73.9740157, 32", "loiter": 300} ] <!-- ===============END OF ISSUE SECTION=============== --> <!-- Note: Delete these lines and everything BELOW if creating an Issue --> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pokemongo_bot/cell_workers/follow_path.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 from __future__ import absolute_import 4 import gpxpy 5 import gpxpy.gpx 6 import json 7 import time 8 from pokemongo_bot.base_task import BaseTask 9 from pokemongo_bot.cell_workers.utils import distance 10 from pokemongo_bot.cell_workers.utils import format_dist 11 from pokemongo_bot.cell_workers.utils import i2f 12 from pokemongo_bot.human_behaviour import sleep 13 from pokemongo_bot.walkers.walker_factory import walker_factory 14 from pokemongo_bot.worker_result import WorkerResult 15 from pgoapi.utilities import f2i 16 from random import uniform 17 from .utils import getSeconds, format_dist 18 from datetime import datetime as dt, timedelta 19 20 STATUS_MOVING = 0 21 STATUS_LOITERING = 1 22 STATUS_FINISHED = 2 23 24 class FollowPath(BaseTask): 25 SUPPORTED_TASK_API_VERSION = 1 26 27 def initialize(self): 28 self._process_config() 29 self.points = self.load_path() 30 self.status = STATUS_MOVING 31 self.loiter_end_time = 0 32 self.distance_unit = self.bot.config.distance_unit 33 self.append_unit = False 34 35 if self.path_start_mode == 'closest': 36 self.ptr = self.find_closest_point_idx(self.points) 37 38 else: 39 self.ptr = 0 40 41 def _process_config(self): 42 self.path_file = self.config.get("path_file", None) 43 self.path_mode = self.config.get("path_mode", "linear") 44 self.path_start_mode = self.config.get("path_start_mode", "first") 45 self.number_lap_max = self.config.get("number_lap", -1) # if < 0, then the number is inf. 46 self.timer_restart_min = getSeconds(self.config.get("timer_restart_min", "00:20:00")) 47 self.timer_restart_max = getSeconds(self.config.get("timer_restart_max", "02:00:00")) 48 self.walker = self.config.get('walker', 'StepWalker') 49 50 if self.timer_restart_min > self.timer_restart_max: 51 raise ValueError('path timer_restart_min is bigger than path timer_restart_max') #TODO there must be a more elegant way to do it... 
52 53 #var not related to configs 54 self.number_lap = 0 55 56 def load_path(self): 57 if self.path_file is None: 58 raise RuntimeError('You need to specify a path file (json or gpx)') 59 60 if self.path_file.endswith('.json'): 61 return self.load_json() 62 elif self.path_file.endswith('.gpx'): 63 return self.load_gpx() 64 65 def load_json(self): 66 with open(self.path_file) as data_file: 67 points=json.load(data_file) 68 # Replace Verbal Location with lat&lng. 69 for _, point in enumerate(points): 70 point_tuple = self.bot.get_pos_by_name(point['location']) 71 self.emit_event( 72 'location_found', 73 level='debug', 74 formatted="Location found: {location} {position}", 75 data={ 76 'location': point, 77 'position': point_tuple 78 } 79 ) 80 # Keep point['location'] 81 point["lat"] = float(point_tuple[0]) 82 point["lng"] = float(point_tuple[1]) 83 point["alt"] = float(point_tuple[2]) 84 return points 85 86 def load_gpx(self): 87 gpx_file = open(self.path_file, 'r') 88 gpx = gpxpy.parse(gpx_file) 89 90 if len(gpx.tracks) == 0: 91 raise RuntimeError('GPX file does not contain a track') 92 93 points = [] 94 track = gpx.tracks[0] 95 for segment in track.segments: 96 for point in segment.points: 97 points.append({"lat": point.latitude, "lng": point.longitude, 98 "alt": point.elevation, "location": point.name}) 99 100 return points 101 102 def find_closest_point_idx(self, points): 103 return_idx = 0 104 min_distance = float("inf"); 105 106 for index in range(len(points)): 107 point = points[index] 108 lat = point['lat'] 109 lng = point['lng'] 110 111 dist = distance( 112 self.bot.position[0], 113 self.bot.position[1], 114 lat, 115 lng 116 ) 117 118 if dist < min_distance: 119 min_distance = dist 120 return_idx = index 121 122 return return_idx 123 124 def endLaps(self): 125 duration = int(uniform(self.timer_restart_min, self.timer_restart_max)) 126 resume = dt.now() + timedelta(seconds=duration) 127 128 self.emit_event( 129 'path_lap_end', 130 formatted="Great job, lot of calories burned! 
Taking a break now for {duration}, will resume at {resume}.", 131 data={ 132 'duration': str(timedelta(seconds=duration)), 133 'resume': resume.strftime("%H:%M:%S") 134 } 135 ) 136 137 self.number_lap = 0 # at the end of the break, start again 138 sleep(duration) 139 self.bot.login() 140 141 def work(self): 142 # If done or loitering allow the next task to run 143 if self.status == STATUS_FINISHED: 144 return WorkerResult.SUCCESS 145 146 if self.status == STATUS_LOITERING and time.time() < self.loiter_end_time: 147 return WorkerResult.RUNNING 148 149 last_lat, last_lng, last_alt = self.bot.position 150 151 point = self.points[self.ptr] 152 lat = point['lat'] 153 lng = point['lng'] 154 155 if 'alt' in point: 156 alt = float(point['alt']) 157 else: 158 alt = uniform(self.bot.config.alt_min, self.bot.config.alt_max) 159 160 if self.bot.config.walk_max > 0: 161 step_walker = walker_factory(self.walker, 162 self.bot, 163 lat, 164 lng, 165 alt 166 ) 167 168 is_at_destination = False 169 if step_walker.step(): 170 is_at_destination = True 171 172 else: 173 self.bot.api.set_position(lat, lng, alt) 174 175 dist = distance( 176 last_lat, 177 last_lng, 178 lat, 179 lng 180 ) 181 182 self.emit_event( 183 'position_update', 184 formatted="Walking from {last_position} to {current_position}, distance left: ({distance} {distance_unit}) ..", 185 data={ 186 'last_position': (last_lat, last_lng, last_alt), 187 'current_position': (lat, lng, alt), 188 'distance': format_dist(dist,self.distance_unit,self.append_unit), 189 'distance_unit': self.distance_unit 190 } 191 ) 192 193 if (self.bot.config.walk_min > 0 and is_at_destination) or (self.status == STATUS_LOITERING and time.time() >= self.loiter_end_time): 194 if "loiter" in point and self.status != STATUS_LOITERING: 195 self.logger.info("Loitering for {} seconds...".format(point["loiter"])) 196 self.status = STATUS_LOITERING 197 self.loiter_end_time = time.time() + point["loiter"] 198 return WorkerResult.RUNNING 199 if (self.ptr + 1) == len(self.points): 200 if self.path_mode == 'single': 201 self.status = STATUS_FINISHED 202 return WorkerResult.SUCCESS 203 self.ptr = 0 204 if self.path_mode == 'linear': 205 self.points = list(reversed(self.points)) 206 if self.number_lap_max >= 0: 207 self.number_lap+=1 208 self.emit_event( 209 'path_lap_update', 210 formatted="number lap : {number_lap} / {number_lap_max}", 211 data={ 212 'number_lap': str(self.number_lap), 213 'number_lap_max': str(self.number_lap_max) 214 } 215 ) 216 if self.number_lap >= self.number_lap_max: 217 self.endLaps() 218 else: 219 self.ptr += 1 220 221 self.status = STATUS_MOVING 222 return WorkerResult.RUNNING 223 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pokemongo_bot/cell_workers/follow_path.py b/pokemongo_bot/cell_workers/follow_path.py --- a/pokemongo_bot/cell_workers/follow_path.py +++ b/pokemongo_bot/cell_workers/follow_path.py @@ -18,7 +18,7 @@ from datetime import datetime as dt, timedelta STATUS_MOVING = 0 -STATUS_LOITERING = 1 +STATUS_WANDERING = 1 STATUS_FINISHED = 2 class FollowPath(BaseTask): @@ -28,7 +28,7 @@ self._process_config() self.points = self.load_path() self.status = STATUS_MOVING - self.loiter_end_time = 0 + self.wander_end_time = 0 self.distance_unit = self.bot.config.distance_unit self.append_unit = False @@ -139,12 +139,12 @@ self.bot.login() def work(self): - # If done or loitering allow the next task to run + # If done or wandering allow the next task to run if self.status == STATUS_FINISHED: return WorkerResult.SUCCESS - if self.status == STATUS_LOITERING and time.time() < self.loiter_end_time: - return WorkerResult.RUNNING + if self.status == STATUS_WANDERING and time.time() < self.wander_end_time: + return WorkerResult.SUCCESS last_lat, last_lng, last_alt = self.bot.position @@ -190,12 +190,14 @@ } ) - if (self.bot.config.walk_min > 0 and is_at_destination) or (self.status == STATUS_LOITERING and time.time() >= self.loiter_end_time): - if "loiter" in point and self.status != STATUS_LOITERING: - self.logger.info("Loitering for {} seconds...".format(point["loiter"])) - self.status = STATUS_LOITERING - self.loiter_end_time = time.time() + point["loiter"] - return WorkerResult.RUNNING + if (self.bot.config.walk_min > 0 and is_at_destination) or (self.status == STATUS_WANDERING and time.time() >= self.wander_end_time): + if "loiter" in point: + self.logger.warning("'loiter' is obsolete, please change to 'wander' in {}".format(self.path_file)) + if "wander" in point and self.status != STATUS_WANDERING: + self.logger.info("Wandering for {} seconds...".format(point["wander"])) + self.status = STATUS_WANDERING + self.wander_end_time = time.time() + point["wander"] + return WorkerResult.SUCCESS if (self.ptr + 1) == len(self.points): if self.path_mode == 'single': self.status = STATUS_FINISHED
{"golden_diff": "diff --git a/pokemongo_bot/cell_workers/follow_path.py b/pokemongo_bot/cell_workers/follow_path.py\n--- a/pokemongo_bot/cell_workers/follow_path.py\n+++ b/pokemongo_bot/cell_workers/follow_path.py\n@@ -18,7 +18,7 @@\n from datetime import datetime as dt, timedelta\n \n STATUS_MOVING = 0\n-STATUS_LOITERING = 1\n+STATUS_WANDERING = 1\n STATUS_FINISHED = 2\n \n class FollowPath(BaseTask):\n@@ -28,7 +28,7 @@\n self._process_config()\n self.points = self.load_path()\n self.status = STATUS_MOVING\n- self.loiter_end_time = 0\n+ self.wander_end_time = 0\n self.distance_unit = self.bot.config.distance_unit\n self.append_unit = False\n \n@@ -139,12 +139,12 @@\n self.bot.login()\n \n def work(self):\n- # If done or loitering allow the next task to run\n+ # If done or wandering allow the next task to run\n if self.status == STATUS_FINISHED:\n return WorkerResult.SUCCESS\n \n- if self.status == STATUS_LOITERING and time.time() < self.loiter_end_time:\n- return WorkerResult.RUNNING\n+ if self.status == STATUS_WANDERING and time.time() < self.wander_end_time:\n+ return WorkerResult.SUCCESS\n \n last_lat, last_lng, last_alt = self.bot.position\n \n@@ -190,12 +190,14 @@\n }\n )\n \n- if (self.bot.config.walk_min > 0 and is_at_destination) or (self.status == STATUS_LOITERING and time.time() >= self.loiter_end_time):\n- if \"loiter\" in point and self.status != STATUS_LOITERING:\n- self.logger.info(\"Loitering for {} seconds...\".format(point[\"loiter\"]))\n- self.status = STATUS_LOITERING\n- self.loiter_end_time = time.time() + point[\"loiter\"]\n- return WorkerResult.RUNNING\n+ if (self.bot.config.walk_min > 0 and is_at_destination) or (self.status == STATUS_WANDERING and time.time() >= self.wander_end_time):\n+ if \"loiter\" in point:\n+ self.logger.warning(\"'loiter' is obsolete, please change to 'wander' in {}\".format(self.path_file))\n+ if \"wander\" in point and self.status != STATUS_WANDERING:\n+ self.logger.info(\"Wandering for {} seconds...\".format(point[\"wander\"]))\n+ self.status = STATUS_WANDERING\n+ self.wander_end_time = time.time() + point[\"wander\"]\n+ return WorkerResult.SUCCESS\n if (self.ptr + 1) == len(self.points):\n if self.path_mode == 'single':\n self.status = STATUS_FINISHED\n", "issue": "Loitering does not seem to work properly\n<!--\n\nSTOP ! ! !\n\nRead the following before creating anything (or you will have your issue/feature request closed without notice)\n\n1. Please only create an ISSUE or a FEATURE REQUEST - don't mix the two together in one item\n2. For a Feature Request please only fill out the FEATURE REQUEST section\n3. For a Issue please only fill out the ISSUE section\n4. Issues are NOT to be used for help/config problems/support - use the relevant slack channels as per the README\n5. Provide a good summary in the title, don't just write problem, or awesome idea!\n6. Delete all irrelevant sections not related to your issue/feature request (including this top section)\n\n===============ISSUE SECTION===============\n\nBefore you create an Issue, please check the following:\n\n1. Have you validated that your config.json is valid JSON? Use http://jsonlint.com/ to check.\n2. Have you [searched our issue tracker](https://github.com/PokemonGoF/PokemonGo-Bot/issues?q=is%3Aissue+sort%3Aupdated-desc) to see if the issue already exists? If so, comment on that issue instead rather than creating a new issue.\n3. Are you running on the `master` branch? We work on the `dev` branch and then add that functionality to `master` when it is stable. 
Your issue may be fixed on `dev` and there is no need for this issue, just wait and it will eventually be merged to `master`.\n4. All Issue sections MUST be completed to help us determine the actual problem and find its cause\n-->\n### Expected Behavior\n\n<!-- Tell us what you expect to happen -->\n\nRun the bot, go to the next point as in my path file, then, while loitering, move to forts, spin the pokestop and catch pokemons.\n### Actual Behavior\n\n<!-- Tell us what is happening -->\n\nonly while is following a path spin the pokestops, while is loitering it does nothing.\n### Your FULL config.json (remove your username, password, gmapkey and any other private info)\n\n<!-- Provide your FULL config file, feel free to use services such as pastebin.com to reduce clutter -->\n\nhttp://pastebin.com/HDWecKDg\n### Output when issue occurred\n\n<!-- Provide a reasonable sample from your output log (not just the error message), feel free to use services such as pastebin.com to reduce clutter -->\n\nhttp://pastebin.com/2fY2XXBC\nin the line 40 and 426 start to loiter.\n### Steps to Reproduce\n\n<!-- Tell us the steps you have taken to reproduce the issue -->\n\n./run.sh\n### Other Information\n\nOS: Ubuntu 16.04\n\n<!-- Tell us what Operating system you're using --> \n\nBranch: master\n\n<!-- dev or master --> \n\nGit Commit: a8ee31256d412413b107cce81b62059634e8c802\n\n<!-- run 'git log -n 1 --pretty=format:\"%H\"' --> \n\nPython Version: Python 2.7.12\n\n<!-- run 'python -V' and paste it here) --> \n\nAny other relevant files/configs (eg: path files) \n\n<!-- Anything else which may be of relevance -->\n\npath file\n[\n {\"location\": \"40.7814675, -73.9741015, 33\", \"loiter\": 300},\n {\"location\": \"40.7795502, -73.9632225, 46\", \"loiter\": 300},\n {\"location\": \"40.7739931, -73.9665484, 23\", \"loiter\": 300},\n {\"location\": \"40.7741881, -73.9707756, 25\", \"loiter\": 300},\n {\"location\": \"40.7757805, -73.9717841, 26\", \"loiter\": 300},\n {\"location\": \"40.7772429, -73.9712047, 29\", \"loiter\": 300},\n {\"location\": \"40.7678016, -73.9717411, 18\", \"loiter\": 300},\n {\"location\": \"40.7663878, -73.9732003, 21\", \"loiter\": 300},\n {\"location\": \"40.7647626, -73.9732003, 32\", \"loiter\": 300},\n {\"location\": \"40.7653802, -73.9750671, 26\", \"loiter\": 300},\n {\"location\": \"40.7665015, -73.9748954, 14\", \"loiter\": 300},\n {\"location\": \"40.7762842, -73.9740157, 32\", \"loiter\": 300}\n]\n\n<!-- ===============END OF ISSUE SECTION=============== -->\n\n<!-- Note: Delete these lines and everything BELOW if creating an Issue -->\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nimport gpxpy\nimport gpxpy.gpx\nimport json\nimport time\nfrom pokemongo_bot.base_task import BaseTask\nfrom pokemongo_bot.cell_workers.utils import distance\nfrom pokemongo_bot.cell_workers.utils import format_dist\nfrom pokemongo_bot.cell_workers.utils import i2f\nfrom pokemongo_bot.human_behaviour import sleep\nfrom pokemongo_bot.walkers.walker_factory import walker_factory\nfrom pokemongo_bot.worker_result import WorkerResult\nfrom pgoapi.utilities import f2i\nfrom random import uniform\nfrom .utils import getSeconds, format_dist\nfrom datetime import datetime as dt, timedelta\n\nSTATUS_MOVING = 0\nSTATUS_LOITERING = 1\nSTATUS_FINISHED = 2\n \nclass FollowPath(BaseTask):\n SUPPORTED_TASK_API_VERSION = 1\n\n def initialize(self):\n self._process_config()\n self.points = self.load_path()\n self.status = STATUS_MOVING\n 
self.loiter_end_time = 0\n self.distance_unit = self.bot.config.distance_unit\n self.append_unit = False\n\n if self.path_start_mode == 'closest':\n self.ptr = self.find_closest_point_idx(self.points)\n\n else:\n self.ptr = 0\n\n def _process_config(self):\n self.path_file = self.config.get(\"path_file\", None)\n self.path_mode = self.config.get(\"path_mode\", \"linear\")\n self.path_start_mode = self.config.get(\"path_start_mode\", \"first\")\n self.number_lap_max = self.config.get(\"number_lap\", -1) # if < 0, then the number is inf.\n self.timer_restart_min = getSeconds(self.config.get(\"timer_restart_min\", \"00:20:00\"))\n self.timer_restart_max = getSeconds(self.config.get(\"timer_restart_max\", \"02:00:00\"))\n self.walker = self.config.get('walker', 'StepWalker')\n\n if self.timer_restart_min > self.timer_restart_max:\n raise ValueError('path timer_restart_min is bigger than path timer_restart_max') #TODO there must be a more elegant way to do it...\n \n #var not related to configs\n self.number_lap = 0\n \n def load_path(self):\n if self.path_file is None:\n raise RuntimeError('You need to specify a path file (json or gpx)')\n\n if self.path_file.endswith('.json'):\n return self.load_json()\n elif self.path_file.endswith('.gpx'):\n return self.load_gpx()\n\n def load_json(self):\n with open(self.path_file) as data_file:\n points=json.load(data_file)\n # Replace Verbal Location with lat&lng.\n for _, point in enumerate(points):\n point_tuple = self.bot.get_pos_by_name(point['location'])\n self.emit_event(\n 'location_found',\n level='debug',\n formatted=\"Location found: {location} {position}\",\n data={\n 'location': point,\n 'position': point_tuple\n }\n )\n # Keep point['location']\n point[\"lat\"] = float(point_tuple[0])\n point[\"lng\"] = float(point_tuple[1])\n point[\"alt\"] = float(point_tuple[2])\n return points\n\n def load_gpx(self):\n gpx_file = open(self.path_file, 'r')\n gpx = gpxpy.parse(gpx_file)\n\n if len(gpx.tracks) == 0:\n raise RuntimeError('GPX file does not contain a track')\n\n points = []\n track = gpx.tracks[0]\n for segment in track.segments:\n for point in segment.points:\n points.append({\"lat\": point.latitude, \"lng\": point.longitude,\n \"alt\": point.elevation, \"location\": point.name})\n\n return points\n\n def find_closest_point_idx(self, points):\n return_idx = 0\n min_distance = float(\"inf\");\n \n for index in range(len(points)):\n point = points[index]\n lat = point['lat']\n lng = point['lng']\n\n dist = distance(\n self.bot.position[0],\n self.bot.position[1],\n lat,\n lng\n )\n\n if dist < min_distance:\n min_distance = dist\n return_idx = index\n\n return return_idx\n\n def endLaps(self):\n duration = int(uniform(self.timer_restart_min, self.timer_restart_max))\n resume = dt.now() + timedelta(seconds=duration)\n \n self.emit_event(\n 'path_lap_end',\n formatted=\"Great job, lot of calories burned! 
Taking a break now for {duration}, will resume at {resume}.\",\n data={\n 'duration': str(timedelta(seconds=duration)),\n 'resume': resume.strftime(\"%H:%M:%S\")\n }\n )\n \n self.number_lap = 0 # at the end of the break, start again\n sleep(duration)\n self.bot.login()\n\n def work(self):\n # If done or loitering allow the next task to run\n if self.status == STATUS_FINISHED:\n return WorkerResult.SUCCESS\n\n if self.status == STATUS_LOITERING and time.time() < self.loiter_end_time:\n return WorkerResult.RUNNING\n\n last_lat, last_lng, last_alt = self.bot.position\n\n point = self.points[self.ptr]\n lat = point['lat']\n lng = point['lng']\n\n if 'alt' in point:\n alt = float(point['alt'])\n else:\n alt = uniform(self.bot.config.alt_min, self.bot.config.alt_max)\n\n if self.bot.config.walk_max > 0:\n step_walker = walker_factory(self.walker,\n self.bot,\n lat,\n lng,\n alt\n )\n\n is_at_destination = False\n if step_walker.step():\n is_at_destination = True\n\n else:\n self.bot.api.set_position(lat, lng, alt)\n\n dist = distance(\n last_lat,\n last_lng,\n lat,\n lng\n )\n\n self.emit_event(\n 'position_update',\n formatted=\"Walking from {last_position} to {current_position}, distance left: ({distance} {distance_unit}) ..\",\n data={\n 'last_position': (last_lat, last_lng, last_alt),\n 'current_position': (lat, lng, alt),\n 'distance': format_dist(dist,self.distance_unit,self.append_unit),\n 'distance_unit': self.distance_unit\n }\n )\n \n if (self.bot.config.walk_min > 0 and is_at_destination) or (self.status == STATUS_LOITERING and time.time() >= self.loiter_end_time):\n if \"loiter\" in point and self.status != STATUS_LOITERING:\n self.logger.info(\"Loitering for {} seconds...\".format(point[\"loiter\"]))\n self.status = STATUS_LOITERING\n self.loiter_end_time = time.time() + point[\"loiter\"]\n return WorkerResult.RUNNING\n if (self.ptr + 1) == len(self.points):\n if self.path_mode == 'single':\n self.status = STATUS_FINISHED\n return WorkerResult.SUCCESS\n self.ptr = 0\n if self.path_mode == 'linear':\n self.points = list(reversed(self.points))\n if self.number_lap_max >= 0:\n self.number_lap+=1\n self.emit_event(\n 'path_lap_update',\n formatted=\"number lap : {number_lap} / {number_lap_max}\",\n data={\n 'number_lap': str(self.number_lap),\n 'number_lap_max': str(self.number_lap_max)\n }\n )\n if self.number_lap >= self.number_lap_max:\n self.endLaps()\n else:\n self.ptr += 1\n \n self.status = STATUS_MOVING\n return WorkerResult.RUNNING\n", "path": "pokemongo_bot/cell_workers/follow_path.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nimport gpxpy\nimport gpxpy.gpx\nimport json\nimport time\nfrom pokemongo_bot.base_task import BaseTask\nfrom pokemongo_bot.cell_workers.utils import distance\nfrom pokemongo_bot.cell_workers.utils import format_dist\nfrom pokemongo_bot.cell_workers.utils import i2f\nfrom pokemongo_bot.human_behaviour import sleep\nfrom pokemongo_bot.walkers.walker_factory import walker_factory\nfrom pokemongo_bot.worker_result import WorkerResult\nfrom pgoapi.utilities import f2i\nfrom random import uniform\nfrom .utils import getSeconds, format_dist\nfrom datetime import datetime as dt, timedelta\n\nSTATUS_MOVING = 0\nSTATUS_WANDERING = 1\nSTATUS_FINISHED = 2\n \nclass FollowPath(BaseTask):\n SUPPORTED_TASK_API_VERSION = 1\n\n def initialize(self):\n self._process_config()\n self.points = self.load_path()\n self.status = STATUS_MOVING\n self.wander_end_time = 0\n self.distance_unit = 
self.bot.config.distance_unit\n self.append_unit = False\n\n if self.path_start_mode == 'closest':\n self.ptr = self.find_closest_point_idx(self.points)\n\n else:\n self.ptr = 0\n\n def _process_config(self):\n self.path_file = self.config.get(\"path_file\", None)\n self.path_mode = self.config.get(\"path_mode\", \"linear\")\n self.path_start_mode = self.config.get(\"path_start_mode\", \"first\")\n self.number_lap_max = self.config.get(\"number_lap\", -1) # if < 0, then the number is inf.\n self.timer_restart_min = getSeconds(self.config.get(\"timer_restart_min\", \"00:20:00\"))\n self.timer_restart_max = getSeconds(self.config.get(\"timer_restart_max\", \"02:00:00\"))\n self.walker = self.config.get('walker', 'StepWalker')\n\n if self.timer_restart_min > self.timer_restart_max:\n raise ValueError('path timer_restart_min is bigger than path timer_restart_max') #TODO there must be a more elegant way to do it...\n \n #var not related to configs\n self.number_lap = 0\n \n def load_path(self):\n if self.path_file is None:\n raise RuntimeError('You need to specify a path file (json or gpx)')\n\n if self.path_file.endswith('.json'):\n return self.load_json()\n elif self.path_file.endswith('.gpx'):\n return self.load_gpx()\n\n def load_json(self):\n with open(self.path_file) as data_file:\n points=json.load(data_file)\n # Replace Verbal Location with lat&lng.\n for _, point in enumerate(points):\n point_tuple = self.bot.get_pos_by_name(point['location'])\n self.emit_event(\n 'location_found',\n level='debug',\n formatted=\"Location found: {location} {position}\",\n data={\n 'location': point,\n 'position': point_tuple\n }\n )\n # Keep point['location']\n point[\"lat\"] = float(point_tuple[0])\n point[\"lng\"] = float(point_tuple[1])\n point[\"alt\"] = float(point_tuple[2])\n return points\n\n def load_gpx(self):\n gpx_file = open(self.path_file, 'r')\n gpx = gpxpy.parse(gpx_file)\n\n if len(gpx.tracks) == 0:\n raise RuntimeError('GPX file does not contain a track')\n\n points = []\n track = gpx.tracks[0]\n for segment in track.segments:\n for point in segment.points:\n points.append({\"lat\": point.latitude, \"lng\": point.longitude,\n \"alt\": point.elevation, \"location\": point.name})\n\n return points\n\n def find_closest_point_idx(self, points):\n return_idx = 0\n min_distance = float(\"inf\");\n \n for index in range(len(points)):\n point = points[index]\n lat = point['lat']\n lng = point['lng']\n\n dist = distance(\n self.bot.position[0],\n self.bot.position[1],\n lat,\n lng\n )\n\n if dist < min_distance:\n min_distance = dist\n return_idx = index\n\n return return_idx\n\n def endLaps(self):\n duration = int(uniform(self.timer_restart_min, self.timer_restart_max))\n resume = dt.now() + timedelta(seconds=duration)\n \n self.emit_event(\n 'path_lap_end',\n formatted=\"Great job, lot of calories burned! 
Taking a break now for {duration}, will resume at {resume}.\",\n data={\n 'duration': str(timedelta(seconds=duration)),\n 'resume': resume.strftime(\"%H:%M:%S\")\n }\n )\n \n self.number_lap = 0 # at the end of the break, start again\n sleep(duration)\n self.bot.login()\n\n def work(self):\n # If done or wandering allow the next task to run\n if self.status == STATUS_FINISHED:\n return WorkerResult.SUCCESS\n\n if self.status == STATUS_WANDERING and time.time() < self.wander_end_time:\n return WorkerResult.SUCCESS\n\n last_lat, last_lng, last_alt = self.bot.position\n\n point = self.points[self.ptr]\n lat = point['lat']\n lng = point['lng']\n\n if 'alt' in point:\n alt = float(point['alt'])\n else:\n alt = uniform(self.bot.config.alt_min, self.bot.config.alt_max)\n\n if self.bot.config.walk_max > 0:\n step_walker = walker_factory(self.walker,\n self.bot,\n lat,\n lng,\n alt\n )\n\n is_at_destination = False\n if step_walker.step():\n is_at_destination = True\n\n else:\n self.bot.api.set_position(lat, lng, alt)\n\n dist = distance(\n last_lat,\n last_lng,\n lat,\n lng\n )\n\n self.emit_event(\n 'position_update',\n formatted=\"Walking from {last_position} to {current_position}, distance left: ({distance} {distance_unit}) ..\",\n data={\n 'last_position': (last_lat, last_lng, last_alt),\n 'current_position': (lat, lng, alt),\n 'distance': format_dist(dist,self.distance_unit,self.append_unit),\n 'distance_unit': self.distance_unit\n }\n )\n \n if (self.bot.config.walk_min > 0 and is_at_destination) or (self.status == STATUS_WANDERING and time.time() >= self.wander_end_time):\n if \"loiter\" in point:\n self.logger.warning(\"'loiter' is obsolete, please change to 'wander' in {}\".format(self.path_file))\n if \"wander\" in point and self.status != STATUS_WANDERING:\n self.logger.info(\"Wandering for {} seconds...\".format(point[\"wander\"]))\n self.status = STATUS_WANDERING\n self.wander_end_time = time.time() + point[\"wander\"]\n return WorkerResult.SUCCESS\n if (self.ptr + 1) == len(self.points):\n if self.path_mode == 'single':\n self.status = STATUS_FINISHED\n return WorkerResult.SUCCESS\n self.ptr = 0\n if self.path_mode == 'linear':\n self.points = list(reversed(self.points))\n if self.number_lap_max >= 0:\n self.number_lap+=1\n self.emit_event(\n 'path_lap_update',\n formatted=\"number lap : {number_lap} / {number_lap_max}\",\n data={\n 'number_lap': str(self.number_lap),\n 'number_lap_max': str(self.number_lap_max)\n }\n )\n if self.number_lap >= self.number_lap_max:\n self.endLaps()\n else:\n self.ptr += 1\n \n self.status = STATUS_MOVING\n return WorkerResult.RUNNING\n", "path": "pokemongo_bot/cell_workers/follow_path.py"}]}
3,751
642
gh_patches_debug_43705
rasdani/github-patches
git_diff
iterative__dvc-1399
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Handle KeyboardInterrupt gracefully Currently, when you send a `SIGINT` to a running instance of DVC, it doesn't terminate properly. ```bash dvc run 'sleep 999s' # Ctrl+C # or # pkill -INT dvc ``` Error: ``` Traceback (most recent call last): File "/home/mroutis/src/iterative/dvc/.venv/bin/dvc", line 11, in <module> load_entry_point('dvc', 'console_scripts', 'dvc')() File "/home/mroutis/src/iterative/dvc/dvc/main.py", line 22, in main ret = cmd.run_cmd() File "/home/mroutis/src/iterative/dvc/dvc/command/base.py", line 41, in run_cmd return self.run() File "/home/mroutis/src/iterative/dvc/dvc/command/run.py", line 34, in run no_exec=self.args.no_exec) File "/home/mroutis/src/iterative/dvc/dvc/project.py", line 355, in run stage.run() File "/home/mroutis/src/iterative/dvc/dvc/stage.py", line 488, in run self._run() File "/home/mroutis/src/iterative/dvc/dvc/stage.py", line 460, in _run p.communicate() File "/usr/lib/python3.7/subprocess.py", line 925, in communicate self.wait() File "/usr/lib/python3.7/subprocess.py", line 984, in wait return self._wait(timeout=timeout) File "/usr/lib/python3.7/subprocess.py", line 1618, in _wait (pid, sts) = self._try_wait(0) File "/usr/lib/python3.7/subprocess.py", line 1576, in _try_wait (pid, sts) = os.waitpid(self.pid, wait_flags) KeyboardInterrupt ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `dvc/logger.py` Content: ``` 1 import sys 2 import logging 3 import colorama 4 import traceback 5 import re 6 7 8 colorama.init() 9 10 11 def visual_width(line): 12 """ Get the the number of columns required to display a string """ 13 14 return len(re.sub(colorama.ansitowin32.AnsiToWin32.ANSI_CSI_RE, '', line)) 15 16 17 def visual_center(line, width): 18 """ Center align string according to it's visual width """ 19 20 spaces = max(width - visual_width(line), 0) 21 left_padding = int(spaces / 2) 22 right_padding = (spaces - left_padding) 23 24 return (left_padding * ' ') + line + (right_padding * ' ') 25 26 27 class Logger(object): 28 FMT = '%(message)s' 29 DEFAULT_LEVEL = logging.INFO 30 31 LEVEL_MAP = { 32 'debug': logging.DEBUG, 33 'info': logging.INFO, 34 'warn': logging.WARNING, 35 'error': logging.ERROR 36 } 37 38 COLOR_MAP = { 39 'green': colorama.Fore.GREEN, 40 'yellow': colorama.Fore.YELLOW, 41 'blue': colorama.Fore.BLUE, 42 'red': colorama.Fore.RED, 43 } 44 45 LEVEL_COLOR_MAP = { 46 'debug': 'blue', 47 'warn': 'yellow', 48 'error': 'red', 49 } 50 51 def __init__(self, loglevel=None): 52 if loglevel: 53 Logger.set_level(loglevel) 54 55 @staticmethod 56 def init(): 57 58 class LogLevelFilter(logging.Filter): 59 def filter(self, record): 60 return record.levelno <= logging.WARNING 61 62 sh_out = logging.StreamHandler(sys.stdout) 63 sh_out.setFormatter(logging.Formatter(Logger.FMT)) 64 sh_out.setLevel(logging.DEBUG) 65 sh_out.addFilter(LogLevelFilter()) 66 67 sh_err = logging.StreamHandler(sys.stderr) 68 sh_err.setFormatter(logging.Formatter(Logger.FMT)) 69 sh_err.setLevel(logging.ERROR) 70 71 Logger.logger().addHandler(sh_out) 72 Logger.logger().addHandler(sh_err) 73 Logger.set_level() 74 75 @staticmethod 76 def logger(): 77 return logging.getLogger('dvc') 78 79 @staticmethod 80 def set_level(level=None): 81 if not level: 82 lvl = Logger.DEFAULT_LEVEL 83 else: 84 lvl = Logger.LEVEL_MAP.get(level.lower(), Logger.DEFAULT_LEVEL) 85 
Logger.logger().setLevel(lvl) 86 87 @staticmethod 88 def be_quiet(): 89 Logger.logger().setLevel(logging.CRITICAL) 90 91 @staticmethod 92 def be_verbose(): 93 Logger.logger().setLevel(logging.DEBUG) 94 95 @staticmethod 96 def colorize(msg, color): 97 header = '' 98 footer = '' 99 100 if sys.stdout.isatty(): # pragma: no cover 101 header = Logger.COLOR_MAP.get(color.lower(), '') 102 footer = colorama.Style.RESET_ALL 103 104 return u'{}{}{}'.format(header, msg, footer) 105 106 @staticmethod 107 def parse_exc(exc, tb=None): 108 str_tb = tb if tb else None 109 str_exc = ': {}'.format(str(exc)) if exc else "" 110 111 if exc and hasattr(exc, 'cause') and exc.cause: 112 cause_tb = exc.cause_tb if hasattr(exc, 'cause_tb') else None 113 cause_str_exc, cause_str_tb = Logger.parse_exc(exc.cause, cause_tb) 114 115 str_tb = cause_str_tb 116 str_exc = '{}{}'.format(str_exc, cause_str_exc) 117 118 return (str_exc, str_tb) 119 120 @staticmethod 121 def _prefix(msg, typ): 122 color = Logger.LEVEL_COLOR_MAP.get(typ.lower(), '') 123 return Logger.colorize('{}: '.format(msg), color) 124 125 @staticmethod 126 def error_prefix(): 127 return Logger._prefix('Error', 'error') 128 129 @staticmethod 130 def warning_prefix(): 131 return Logger._prefix('Warning', 'warn') 132 133 @staticmethod 134 def debug_prefix(): 135 return Logger._prefix('Debug', 'debug') 136 137 @staticmethod 138 def _with_progress(func, msg): 139 from dvc.progress import progress 140 with progress: 141 func(msg) 142 143 @staticmethod 144 def _error_exc(exc): 145 if exc is None: 146 return 147 148 if Logger.logger().getEffectiveLevel() != logging.DEBUG: 149 return 150 151 prefix = Logger.error_prefix() 152 str_exc, str_tb = Logger.parse_exc(exc) 153 str_tb = str_tb if str_tb else traceback.format_exc() 154 Logger._with_progress(Logger.logger().error, prefix + str_tb) 155 156 @staticmethod 157 def _with_exc(func, msg, suffix="", exc=None): 158 Logger._error_exc(exc) 159 msg = msg + Logger.parse_exc(exc)[0] + suffix 160 Logger._with_progress(func, msg) 161 162 @staticmethod 163 def error(msg, exc=None): 164 chat = "\n\nHaving any troubles? Hit us up at dvc.org/support, " \ 165 "we are always happy to help!" 
166 Logger._with_exc(Logger.logger().error, 167 Logger.error_prefix() + msg, 168 suffix=chat, 169 exc=exc) 170 171 @classmethod 172 def warn(cls, msg, exc=None): 173 cls._with_exc(cls.logger().warning, 174 cls.warning_prefix() + msg, 175 exc=exc) 176 177 @classmethod 178 def debug(cls, msg, exc=None): 179 cls._with_exc(cls.logger().debug, 180 cls.debug_prefix() + msg, 181 exc=exc) 182 183 @staticmethod 184 def info(msg): 185 Logger._with_progress(Logger.logger().info, msg) 186 187 @staticmethod 188 def is_quiet(): 189 return Logger.logger().level == logging.CRITICAL 190 191 @classmethod 192 def box(cls, msg, border_color=''): 193 if cls.is_quiet(): 194 return 195 196 lines = msg.split('\n') 197 max_width = max(visual_width(line) for line in lines) 198 199 padding_horizontal = 5 200 padding_vertical = 1 201 202 box_size_horizontal = (max_width + (padding_horizontal * 2)) 203 204 chars = { 205 'corner': '+', 206 'horizontal': '-', 207 'vertical': '|', 208 'empty': ' ', 209 } 210 211 margin = "{corner}{line}{corner}\n".format( 212 corner=chars['corner'], 213 line=chars['horizontal'] * box_size_horizontal, 214 ) 215 216 padding_lines = [ 217 "{border}{space}{border}\n".format( 218 border=cls.colorize(chars['vertical'], border_color), 219 space=chars['empty'] * box_size_horizontal, 220 ) * padding_vertical 221 ] 222 223 content_lines = [ 224 "{border}{space}{content}{space}{border}\n".format( 225 border=cls.colorize(chars['vertical'], border_color), 226 space=chars['empty'] * padding_horizontal, 227 content=visual_center(line, max_width), 228 ) for line in lines 229 ] 230 231 box = "{margin}{padding}{content}{padding}{margin}".format( 232 margin=cls.colorize(margin, border_color), 233 padding=''.join(padding_lines), 234 content=''.join(content_lines), 235 ) 236 237 print(box) 238 ``` Path: `dvc/main.py` Content: ``` 1 from dvc.logger import Logger 2 from dvc.cli import parse_args 3 from dvc.command.base import CmdBase 4 from dvc.analytics import Analytics 5 from dvc.exceptions import NotDvcProjectError, DvcParserError 6 7 8 def main(argv=None): 9 Logger.init() 10 11 args = None 12 cmd = None 13 try: 14 args = parse_args(argv) 15 16 # Init loglevel early in case we'll run 17 # into errors before setting it properly 18 CmdBase._set_loglevel(args) 19 20 cmd = args.func(args) 21 22 ret = cmd.run_cmd() 23 except NotDvcProjectError as ex: 24 Logger.error(str(ex)) 25 ret = 253 26 except DvcParserError: 27 ret = 254 28 except Exception as ex: 29 Logger.error('Unexpected error', ex) 30 ret = 255 31 32 Analytics().send_cmd(cmd, args, ret) 33 34 return ret 35 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/dvc/logger.py b/dvc/logger.py --- a/dvc/logger.py +++ b/dvc/logger.py @@ -106,21 +106,26 @@ @staticmethod def parse_exc(exc, tb=None): str_tb = tb if tb else None - str_exc = ': {}'.format(str(exc)) if exc else "" + str_exc = str(exc) if exc else "" + l_str_exc = [] + + if len(str_exc) != 0: + l_str_exc.append(str_exc) if exc and hasattr(exc, 'cause') and exc.cause: cause_tb = exc.cause_tb if hasattr(exc, 'cause_tb') else None - cause_str_exc, cause_str_tb = Logger.parse_exc(exc.cause, cause_tb) + l_cause_str_exc, cause_str_tb = Logger.parse_exc(exc.cause, + cause_tb) str_tb = cause_str_tb - str_exc = '{}{}'.format(str_exc, cause_str_exc) + l_str_exc += l_cause_str_exc - return (str_exc, str_tb) + return (l_str_exc, str_tb) @staticmethod def _prefix(msg, typ): color = Logger.LEVEL_COLOR_MAP.get(typ.lower(), '') - return Logger.colorize('{}: '.format(msg), color) + return Logger.colorize('{}'.format(msg), color) @staticmethod def error_prefix(): @@ -141,43 +146,42 @@ func(msg) @staticmethod - def _error_exc(exc): - if exc is None: - return + def _with_exc(func, prefix, msg, suffix="", exc=None): + l_str_exc, str_tb = Logger.parse_exc(exc) - if Logger.logger().getEffectiveLevel() != logging.DEBUG: - return + if exc is not None and Logger.is_verbose(): + str_tb = str_tb if str_tb else traceback.format_exc() + Logger._with_progress(Logger.logger().error, str_tb) - prefix = Logger.error_prefix() - str_exc, str_tb = Logger.parse_exc(exc) - str_tb = str_tb if str_tb else traceback.format_exc() - Logger._with_progress(Logger.logger().error, prefix + str_tb) + l_msg = [prefix] + if msg is not None and len(msg) != 0: + l_msg.append(msg) + l_msg += l_str_exc - @staticmethod - def _with_exc(func, msg, suffix="", exc=None): - Logger._error_exc(exc) - msg = msg + Logger.parse_exc(exc)[0] + suffix - Logger._with_progress(func, msg) + Logger._with_progress(func, ': '.join(l_msg) + suffix) @staticmethod def error(msg, exc=None): chat = "\n\nHaving any troubles? Hit us up at dvc.org/support, " \ "we are always happy to help!" Logger._with_exc(Logger.logger().error, - Logger.error_prefix() + msg, + Logger.error_prefix(), + msg, suffix=chat, exc=exc) @classmethod def warn(cls, msg, exc=None): cls._with_exc(cls.logger().warning, - cls.warning_prefix() + msg, + cls.warning_prefix(), + msg, exc=exc) @classmethod def debug(cls, msg, exc=None): cls._with_exc(cls.logger().debug, - cls.debug_prefix() + msg, + cls.debug_prefix(), + msg, exc=exc) @staticmethod @@ -188,6 +192,10 @@ def is_quiet(): return Logger.logger().level == logging.CRITICAL + @staticmethod + def is_verbose(): + return Logger.logger().getEffectiveLevel() == logging.DEBUG + @classmethod def box(cls, msg, border_color=''): if cls.is_quiet(): diff --git a/dvc/main.py b/dvc/main.py --- a/dvc/main.py +++ b/dvc/main.py @@ -20,8 +20,11 @@ cmd = args.func(args) ret = cmd.run_cmd() + except KeyboardInterrupt as ex: + Logger.error("Interrupted by the user", ex) + ret = 252 except NotDvcProjectError as ex: - Logger.error(str(ex)) + Logger.error("", ex) ret = 253 except DvcParserError: ret = 254
{"golden_diff": "diff --git a/dvc/logger.py b/dvc/logger.py\n--- a/dvc/logger.py\n+++ b/dvc/logger.py\n@@ -106,21 +106,26 @@\n @staticmethod\n def parse_exc(exc, tb=None):\n str_tb = tb if tb else None\n- str_exc = ': {}'.format(str(exc)) if exc else \"\"\n+ str_exc = str(exc) if exc else \"\"\n+ l_str_exc = []\n+\n+ if len(str_exc) != 0:\n+ l_str_exc.append(str_exc)\n \n if exc and hasattr(exc, 'cause') and exc.cause:\n cause_tb = exc.cause_tb if hasattr(exc, 'cause_tb') else None\n- cause_str_exc, cause_str_tb = Logger.parse_exc(exc.cause, cause_tb)\n+ l_cause_str_exc, cause_str_tb = Logger.parse_exc(exc.cause,\n+ cause_tb)\n \n str_tb = cause_str_tb\n- str_exc = '{}{}'.format(str_exc, cause_str_exc)\n+ l_str_exc += l_cause_str_exc\n \n- return (str_exc, str_tb)\n+ return (l_str_exc, str_tb)\n \n @staticmethod\n def _prefix(msg, typ):\n color = Logger.LEVEL_COLOR_MAP.get(typ.lower(), '')\n- return Logger.colorize('{}: '.format(msg), color)\n+ return Logger.colorize('{}'.format(msg), color)\n \n @staticmethod\n def error_prefix():\n@@ -141,43 +146,42 @@\n func(msg)\n \n @staticmethod\n- def _error_exc(exc):\n- if exc is None:\n- return\n+ def _with_exc(func, prefix, msg, suffix=\"\", exc=None):\n+ l_str_exc, str_tb = Logger.parse_exc(exc)\n \n- if Logger.logger().getEffectiveLevel() != logging.DEBUG:\n- return\n+ if exc is not None and Logger.is_verbose():\n+ str_tb = str_tb if str_tb else traceback.format_exc()\n+ Logger._with_progress(Logger.logger().error, str_tb)\n \n- prefix = Logger.error_prefix()\n- str_exc, str_tb = Logger.parse_exc(exc)\n- str_tb = str_tb if str_tb else traceback.format_exc()\n- Logger._with_progress(Logger.logger().error, prefix + str_tb)\n+ l_msg = [prefix]\n+ if msg is not None and len(msg) != 0:\n+ l_msg.append(msg)\n+ l_msg += l_str_exc\n \n- @staticmethod\n- def _with_exc(func, msg, suffix=\"\", exc=None):\n- Logger._error_exc(exc)\n- msg = msg + Logger.parse_exc(exc)[0] + suffix\n- Logger._with_progress(func, msg)\n+ Logger._with_progress(func, ': '.join(l_msg) + suffix)\n \n @staticmethod\n def error(msg, exc=None):\n chat = \"\\n\\nHaving any troubles? 
Hit us up at dvc.org/support, \" \\\n \"we are always happy to help!\"\n Logger._with_exc(Logger.logger().error,\n- Logger.error_prefix() + msg,\n+ Logger.error_prefix(),\n+ msg,\n suffix=chat,\n exc=exc)\n \n @classmethod\n def warn(cls, msg, exc=None):\n cls._with_exc(cls.logger().warning,\n- cls.warning_prefix() + msg,\n+ cls.warning_prefix(),\n+ msg,\n exc=exc)\n \n @classmethod\n def debug(cls, msg, exc=None):\n cls._with_exc(cls.logger().debug,\n- cls.debug_prefix() + msg,\n+ cls.debug_prefix(),\n+ msg,\n exc=exc)\n \n @staticmethod\n@@ -188,6 +192,10 @@\n def is_quiet():\n return Logger.logger().level == logging.CRITICAL\n \n+ @staticmethod\n+ def is_verbose():\n+ return Logger.logger().getEffectiveLevel() == logging.DEBUG\n+\n @classmethod\n def box(cls, msg, border_color=''):\n if cls.is_quiet():\ndiff --git a/dvc/main.py b/dvc/main.py\n--- a/dvc/main.py\n+++ b/dvc/main.py\n@@ -20,8 +20,11 @@\n cmd = args.func(args)\n \n ret = cmd.run_cmd()\n+ except KeyboardInterrupt as ex:\n+ Logger.error(\"Interrupted by the user\", ex)\n+ ret = 252\n except NotDvcProjectError as ex:\n- Logger.error(str(ex))\n+ Logger.error(\"\", ex)\n ret = 253\n except DvcParserError:\n ret = 254\n", "issue": "Handle KeyboardInterrupt gracefully\nCurrently, when you send a `SIGINT` to a running instance of DVC, it doesn't terminate properly.\r\n\r\n```bash\r\ndvc run 'sleep 999s'\r\n# Ctrl+C\r\n# or\r\n# pkill -INT dvc\r\n```\r\n\r\nError:\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/mroutis/src/iterative/dvc/.venv/bin/dvc\", line 11, in <module>\r\n load_entry_point('dvc', 'console_scripts', 'dvc')()\r\n File \"/home/mroutis/src/iterative/dvc/dvc/main.py\", line 22, in main\r\n ret = cmd.run_cmd()\r\n File \"/home/mroutis/src/iterative/dvc/dvc/command/base.py\", line 41, in run_cmd\r\n return self.run()\r\n File \"/home/mroutis/src/iterative/dvc/dvc/command/run.py\", line 34, in run\r\n no_exec=self.args.no_exec)\r\n File \"/home/mroutis/src/iterative/dvc/dvc/project.py\", line 355, in run\r\n stage.run()\r\n File \"/home/mroutis/src/iterative/dvc/dvc/stage.py\", line 488, in run\r\n self._run()\r\n File \"/home/mroutis/src/iterative/dvc/dvc/stage.py\", line 460, in _run\r\n p.communicate()\r\n File \"/usr/lib/python3.7/subprocess.py\", line 925, in communicate\r\n self.wait()\r\n File \"/usr/lib/python3.7/subprocess.py\", line 984, in wait\r\n return self._wait(timeout=timeout)\r\n File \"/usr/lib/python3.7/subprocess.py\", line 1618, in _wait\r\n (pid, sts) = self._try_wait(0)\r\n File \"/usr/lib/python3.7/subprocess.py\", line 1576, in _try_wait\r\n (pid, sts) = os.waitpid(self.pid, wait_flags)\r\nKeyboardInterrupt\r\n```\r\n\r\n\n", "before_files": [{"content": "import sys\nimport logging\nimport colorama\nimport traceback\nimport re\n\n\ncolorama.init()\n\n\ndef visual_width(line):\n \"\"\" Get the the number of columns required to display a string \"\"\"\n\n return len(re.sub(colorama.ansitowin32.AnsiToWin32.ANSI_CSI_RE, '', line))\n\n\ndef visual_center(line, width):\n \"\"\" Center align string according to it's visual width \"\"\"\n\n spaces = max(width - visual_width(line), 0)\n left_padding = int(spaces / 2)\n right_padding = (spaces - left_padding)\n\n return (left_padding * ' ') + line + (right_padding * ' ')\n\n\nclass Logger(object):\n FMT = '%(message)s'\n DEFAULT_LEVEL = logging.INFO\n\n LEVEL_MAP = {\n 'debug': logging.DEBUG,\n 'info': logging.INFO,\n 'warn': logging.WARNING,\n 'error': logging.ERROR\n }\n\n COLOR_MAP = {\n 'green': colorama.Fore.GREEN,\n 'yellow': 
colorama.Fore.YELLOW,\n 'blue': colorama.Fore.BLUE,\n 'red': colorama.Fore.RED,\n }\n\n LEVEL_COLOR_MAP = {\n 'debug': 'blue',\n 'warn': 'yellow',\n 'error': 'red',\n }\n\n def __init__(self, loglevel=None):\n if loglevel:\n Logger.set_level(loglevel)\n\n @staticmethod\n def init():\n\n class LogLevelFilter(logging.Filter):\n def filter(self, record):\n return record.levelno <= logging.WARNING\n\n sh_out = logging.StreamHandler(sys.stdout)\n sh_out.setFormatter(logging.Formatter(Logger.FMT))\n sh_out.setLevel(logging.DEBUG)\n sh_out.addFilter(LogLevelFilter())\n\n sh_err = logging.StreamHandler(sys.stderr)\n sh_err.setFormatter(logging.Formatter(Logger.FMT))\n sh_err.setLevel(logging.ERROR)\n\n Logger.logger().addHandler(sh_out)\n Logger.logger().addHandler(sh_err)\n Logger.set_level()\n\n @staticmethod\n def logger():\n return logging.getLogger('dvc')\n\n @staticmethod\n def set_level(level=None):\n if not level:\n lvl = Logger.DEFAULT_LEVEL\n else:\n lvl = Logger.LEVEL_MAP.get(level.lower(), Logger.DEFAULT_LEVEL)\n Logger.logger().setLevel(lvl)\n\n @staticmethod\n def be_quiet():\n Logger.logger().setLevel(logging.CRITICAL)\n\n @staticmethod\n def be_verbose():\n Logger.logger().setLevel(logging.DEBUG)\n\n @staticmethod\n def colorize(msg, color):\n header = ''\n footer = ''\n\n if sys.stdout.isatty(): # pragma: no cover\n header = Logger.COLOR_MAP.get(color.lower(), '')\n footer = colorama.Style.RESET_ALL\n\n return u'{}{}{}'.format(header, msg, footer)\n\n @staticmethod\n def parse_exc(exc, tb=None):\n str_tb = tb if tb else None\n str_exc = ': {}'.format(str(exc)) if exc else \"\"\n\n if exc and hasattr(exc, 'cause') and exc.cause:\n cause_tb = exc.cause_tb if hasattr(exc, 'cause_tb') else None\n cause_str_exc, cause_str_tb = Logger.parse_exc(exc.cause, cause_tb)\n\n str_tb = cause_str_tb\n str_exc = '{}{}'.format(str_exc, cause_str_exc)\n\n return (str_exc, str_tb)\n\n @staticmethod\n def _prefix(msg, typ):\n color = Logger.LEVEL_COLOR_MAP.get(typ.lower(), '')\n return Logger.colorize('{}: '.format(msg), color)\n\n @staticmethod\n def error_prefix():\n return Logger._prefix('Error', 'error')\n\n @staticmethod\n def warning_prefix():\n return Logger._prefix('Warning', 'warn')\n\n @staticmethod\n def debug_prefix():\n return Logger._prefix('Debug', 'debug')\n\n @staticmethod\n def _with_progress(func, msg):\n from dvc.progress import progress\n with progress:\n func(msg)\n\n @staticmethod\n def _error_exc(exc):\n if exc is None:\n return\n\n if Logger.logger().getEffectiveLevel() != logging.DEBUG:\n return\n\n prefix = Logger.error_prefix()\n str_exc, str_tb = Logger.parse_exc(exc)\n str_tb = str_tb if str_tb else traceback.format_exc()\n Logger._with_progress(Logger.logger().error, prefix + str_tb)\n\n @staticmethod\n def _with_exc(func, msg, suffix=\"\", exc=None):\n Logger._error_exc(exc)\n msg = msg + Logger.parse_exc(exc)[0] + suffix\n Logger._with_progress(func, msg)\n\n @staticmethod\n def error(msg, exc=None):\n chat = \"\\n\\nHaving any troubles? 
Hit us up at dvc.org/support, \" \\\n \"we are always happy to help!\"\n Logger._with_exc(Logger.logger().error,\n Logger.error_prefix() + msg,\n suffix=chat,\n exc=exc)\n\n @classmethod\n def warn(cls, msg, exc=None):\n cls._with_exc(cls.logger().warning,\n cls.warning_prefix() + msg,\n exc=exc)\n\n @classmethod\n def debug(cls, msg, exc=None):\n cls._with_exc(cls.logger().debug,\n cls.debug_prefix() + msg,\n exc=exc)\n\n @staticmethod\n def info(msg):\n Logger._with_progress(Logger.logger().info, msg)\n\n @staticmethod\n def is_quiet():\n return Logger.logger().level == logging.CRITICAL\n\n @classmethod\n def box(cls, msg, border_color=''):\n if cls.is_quiet():\n return\n\n lines = msg.split('\\n')\n max_width = max(visual_width(line) for line in lines)\n\n padding_horizontal = 5\n padding_vertical = 1\n\n box_size_horizontal = (max_width + (padding_horizontal * 2))\n\n chars = {\n 'corner': '+',\n 'horizontal': '-',\n 'vertical': '|',\n 'empty': ' ',\n }\n\n margin = \"{corner}{line}{corner}\\n\".format(\n corner=chars['corner'],\n line=chars['horizontal'] * box_size_horizontal,\n )\n\n padding_lines = [\n \"{border}{space}{border}\\n\".format(\n border=cls.colorize(chars['vertical'], border_color),\n space=chars['empty'] * box_size_horizontal,\n ) * padding_vertical\n ]\n\n content_lines = [\n \"{border}{space}{content}{space}{border}\\n\".format(\n border=cls.colorize(chars['vertical'], border_color),\n space=chars['empty'] * padding_horizontal,\n content=visual_center(line, max_width),\n ) for line in lines\n ]\n\n box = \"{margin}{padding}{content}{padding}{margin}\".format(\n margin=cls.colorize(margin, border_color),\n padding=''.join(padding_lines),\n content=''.join(content_lines),\n )\n\n print(box)\n", "path": "dvc/logger.py"}, {"content": "from dvc.logger import Logger\nfrom dvc.cli import parse_args\nfrom dvc.command.base import CmdBase\nfrom dvc.analytics import Analytics\nfrom dvc.exceptions import NotDvcProjectError, DvcParserError\n\n\ndef main(argv=None):\n Logger.init()\n\n args = None\n cmd = None\n try:\n args = parse_args(argv)\n\n # Init loglevel early in case we'll run\n # into errors before setting it properly\n CmdBase._set_loglevel(args)\n\n cmd = args.func(args)\n\n ret = cmd.run_cmd()\n except NotDvcProjectError as ex:\n Logger.error(str(ex))\n ret = 253\n except DvcParserError:\n ret = 254\n except Exception as ex:\n Logger.error('Unexpected error', ex)\n ret = 255\n\n Analytics().send_cmd(cmd, args, ret)\n\n return ret\n", "path": "dvc/main.py"}], "after_files": [{"content": "import sys\nimport logging\nimport colorama\nimport traceback\nimport re\n\n\ncolorama.init()\n\n\ndef visual_width(line):\n \"\"\" Get the the number of columns required to display a string \"\"\"\n\n return len(re.sub(colorama.ansitowin32.AnsiToWin32.ANSI_CSI_RE, '', line))\n\n\ndef visual_center(line, width):\n \"\"\" Center align string according to it's visual width \"\"\"\n\n spaces = max(width - visual_width(line), 0)\n left_padding = int(spaces / 2)\n right_padding = (spaces - left_padding)\n\n return (left_padding * ' ') + line + (right_padding * ' ')\n\n\nclass Logger(object):\n FMT = '%(message)s'\n DEFAULT_LEVEL = logging.INFO\n\n LEVEL_MAP = {\n 'debug': logging.DEBUG,\n 'info': logging.INFO,\n 'warn': logging.WARNING,\n 'error': logging.ERROR\n }\n\n COLOR_MAP = {\n 'green': colorama.Fore.GREEN,\n 'yellow': colorama.Fore.YELLOW,\n 'blue': colorama.Fore.BLUE,\n 'red': colorama.Fore.RED,\n }\n\n LEVEL_COLOR_MAP = {\n 'debug': 'blue',\n 'warn': 'yellow',\n 'error': 'red',\n 
}\n\n def __init__(self, loglevel=None):\n if loglevel:\n Logger.set_level(loglevel)\n\n @staticmethod\n def init():\n\n class LogLevelFilter(logging.Filter):\n def filter(self, record):\n return record.levelno <= logging.WARNING\n\n sh_out = logging.StreamHandler(sys.stdout)\n sh_out.setFormatter(logging.Formatter(Logger.FMT))\n sh_out.setLevel(logging.DEBUG)\n sh_out.addFilter(LogLevelFilter())\n\n sh_err = logging.StreamHandler(sys.stderr)\n sh_err.setFormatter(logging.Formatter(Logger.FMT))\n sh_err.setLevel(logging.ERROR)\n\n Logger.logger().addHandler(sh_out)\n Logger.logger().addHandler(sh_err)\n Logger.set_level()\n\n @staticmethod\n def logger():\n return logging.getLogger('dvc')\n\n @staticmethod\n def set_level(level=None):\n if not level:\n lvl = Logger.DEFAULT_LEVEL\n else:\n lvl = Logger.LEVEL_MAP.get(level.lower(), Logger.DEFAULT_LEVEL)\n Logger.logger().setLevel(lvl)\n\n @staticmethod\n def be_quiet():\n Logger.logger().setLevel(logging.CRITICAL)\n\n @staticmethod\n def be_verbose():\n Logger.logger().setLevel(logging.DEBUG)\n\n @staticmethod\n def colorize(msg, color):\n header = ''\n footer = ''\n\n if sys.stdout.isatty(): # pragma: no cover\n header = Logger.COLOR_MAP.get(color.lower(), '')\n footer = colorama.Style.RESET_ALL\n\n return u'{}{}{}'.format(header, msg, footer)\n\n @staticmethod\n def parse_exc(exc, tb=None):\n str_tb = tb if tb else None\n str_exc = str(exc) if exc else \"\"\n l_str_exc = []\n\n if len(str_exc) != 0:\n l_str_exc.append(str_exc)\n\n if exc and hasattr(exc, 'cause') and exc.cause:\n cause_tb = exc.cause_tb if hasattr(exc, 'cause_tb') else None\n l_cause_str_exc, cause_str_tb = Logger.parse_exc(exc.cause,\n cause_tb)\n\n str_tb = cause_str_tb\n l_str_exc += l_cause_str_exc\n\n return (l_str_exc, str_tb)\n\n @staticmethod\n def _prefix(msg, typ):\n color = Logger.LEVEL_COLOR_MAP.get(typ.lower(), '')\n return Logger.colorize('{}'.format(msg), color)\n\n @staticmethod\n def error_prefix():\n return Logger._prefix('Error', 'error')\n\n @staticmethod\n def warning_prefix():\n return Logger._prefix('Warning', 'warn')\n\n @staticmethod\n def debug_prefix():\n return Logger._prefix('Debug', 'debug')\n\n @staticmethod\n def _with_progress(func, msg):\n from dvc.progress import progress\n with progress:\n func(msg)\n\n @staticmethod\n def _with_exc(func, prefix, msg, suffix=\"\", exc=None):\n l_str_exc, str_tb = Logger.parse_exc(exc)\n\n if exc is not None and Logger.is_verbose():\n str_tb = str_tb if str_tb else traceback.format_exc()\n Logger._with_progress(Logger.logger().error, str_tb)\n\n l_msg = [prefix]\n if msg is not None and len(msg) != 0:\n l_msg.append(msg)\n l_msg += l_str_exc\n\n Logger._with_progress(func, ': '.join(l_msg) + suffix)\n\n @staticmethod\n def error(msg, exc=None):\n chat = \"\\n\\nHaving any troubles? 
Hit us up at dvc.org/support, \" \\\n \"we are always happy to help!\"\n Logger._with_exc(Logger.logger().error,\n Logger.error_prefix(),\n msg,\n suffix=chat,\n exc=exc)\n\n @classmethod\n def warn(cls, msg, exc=None):\n cls._with_exc(cls.logger().warning,\n cls.warning_prefix(),\n msg,\n exc=exc)\n\n @classmethod\n def debug(cls, msg, exc=None):\n cls._with_exc(cls.logger().debug,\n cls.debug_prefix(),\n msg,\n exc=exc)\n\n @staticmethod\n def info(msg):\n Logger._with_progress(Logger.logger().info, msg)\n\n @staticmethod\n def is_quiet():\n return Logger.logger().level == logging.CRITICAL\n\n @staticmethod\n def is_verbose():\n return Logger.logger().getEffectiveLevel() == logging.DEBUG\n\n @classmethod\n def box(cls, msg, border_color=''):\n if cls.is_quiet():\n return\n\n lines = msg.split('\\n')\n max_width = max(visual_width(line) for line in lines)\n\n padding_horizontal = 5\n padding_vertical = 1\n\n box_size_horizontal = (max_width + (padding_horizontal * 2))\n\n chars = {\n 'corner': '+',\n 'horizontal': '-',\n 'vertical': '|',\n 'empty': ' ',\n }\n\n margin = \"{corner}{line}{corner}\\n\".format(\n corner=chars['corner'],\n line=chars['horizontal'] * box_size_horizontal,\n )\n\n padding_lines = [\n \"{border}{space}{border}\\n\".format(\n border=cls.colorize(chars['vertical'], border_color),\n space=chars['empty'] * box_size_horizontal,\n ) * padding_vertical\n ]\n\n content_lines = [\n \"{border}{space}{content}{space}{border}\\n\".format(\n border=cls.colorize(chars['vertical'], border_color),\n space=chars['empty'] * padding_horizontal,\n content=visual_center(line, max_width),\n ) for line in lines\n ]\n\n box = \"{margin}{padding}{content}{padding}{margin}\".format(\n margin=cls.colorize(margin, border_color),\n padding=''.join(padding_lines),\n content=''.join(content_lines),\n )\n\n print(box)\n", "path": "dvc/logger.py"}, {"content": "from dvc.logger import Logger\nfrom dvc.cli import parse_args\nfrom dvc.command.base import CmdBase\nfrom dvc.analytics import Analytics\nfrom dvc.exceptions import NotDvcProjectError, DvcParserError\n\n\ndef main(argv=None):\n Logger.init()\n\n args = None\n cmd = None\n try:\n args = parse_args(argv)\n\n # Init loglevel early in case we'll run\n # into errors before setting it properly\n CmdBase._set_loglevel(args)\n\n cmd = args.func(args)\n\n ret = cmd.run_cmd()\n except KeyboardInterrupt as ex:\n Logger.error(\"Interrupted by the user\", ex)\n ret = 252\n except NotDvcProjectError as ex:\n Logger.error(\"\", ex)\n ret = 253\n except DvcParserError:\n ret = 254\n except Exception as ex:\n Logger.error('Unexpected error', ex)\n ret = 255\n\n Analytics().send_cmd(cmd, args, ret)\n\n return ret\n", "path": "dvc/main.py"}]}
3,123
1,015
gh_patches_debug_22103
rasdani/github-patches
git_diff
open-mmlab__mmengine-262
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- 用户设置完了paramwise_cfg,如何知道是否符合预期,这个是否也要提供相应的脚本,用户运行后可以很容易的知道哪些参数被 frozen,不太参数组超参的不同。暂时没有时间开发的话,可以作为未来一个需求吧 用户设置完了,如何知道是否符合预期,这个是否也要提供相应的脚本,用户运行后可以很容易的知道哪些参数被 frozen,不太参数组超参的不同。暂时没有时间开发的话,可以作为未来一个需求吧 _Originally posted by @hhaAndroid in https://github.com/open-mmlab/mmengine/pull/25#discussion_r809786760_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mmengine/optim/optimizer/default_constructor.py` Content: ``` 1 # Copyright (c) OpenMMLab. All rights reserved. 2 import warnings 3 from typing import List, Optional, Union 4 5 import torch 6 import torch.nn as nn 7 from torch.nn import GroupNorm, LayerNorm 8 9 from mmengine.registry import OPTIMIZER_CONSTRUCTORS, OPTIMIZERS 10 from mmengine.utils import is_list_of, mmcv_full_available 11 from mmengine.utils.parrots_wrapper import _BatchNorm, _InstanceNorm 12 13 14 @OPTIMIZER_CONSTRUCTORS.register_module() 15 class DefaultOptimizerConstructor: 16 """Default constructor for optimizers. 17 18 By default each parameter share the same optimizer settings, and we 19 provide an argument ``paramwise_cfg`` to specify parameter-wise settings. 20 It is a dict and may contain the following fields: 21 22 - ``custom_keys`` (dict): Specified parameters-wise settings by keys. If 23 one of the keys in ``custom_keys`` is a substring of the name of one 24 parameter, then the setting of the parameter will be specified by 25 ``custom_keys[key]`` and other setting like ``bias_lr_mult`` etc. will 26 be ignored. It should be noted that the aforementioned ``key`` is the 27 longest key that is a substring of the name of the parameter. If there 28 are multiple matched keys with the same length, then the key with lower 29 alphabet order will be chosen. 30 ``custom_keys[key]`` should be a dict and may contain fields ``lr_mult`` 31 and ``decay_mult``. See Example 2 below. 32 - ``bias_lr_mult`` (float): It will be multiplied to the learning 33 rate for all bias parameters (except for those in normalization 34 layers and offset layers of DCN). 35 - ``bias_decay_mult`` (float): It will be multiplied to the weight 36 decay for all bias parameters (except for those in 37 normalization layers, depthwise conv layers, offset layers of DCN). 38 - ``norm_decay_mult`` (float): It will be multiplied to the weight 39 decay for all weight and bias parameters of normalization 40 layers. 41 - ``dwconv_decay_mult`` (float): It will be multiplied to the weight 42 decay for all weight and bias parameters of depthwise conv 43 layers. 44 - ``dcn_offset_lr_mult`` (float): It will be multiplied to the learning 45 rate for parameters of offset layer in the deformable convs 46 of a model. 47 - ``bypass_duplicate`` (bool): If true, the duplicate parameters 48 would not be added into optimizer. Default: False. 49 50 Note: 51 52 1. If the option ``dcn_offset_lr_mult`` is used, the constructor will 53 override the effect of ``bias_lr_mult`` in the bias of offset layer. 54 So be careful when using both ``bias_lr_mult`` and 55 ``dcn_offset_lr_mult``. If you wish to apply both of them to the offset 56 layer in deformable convs, set ``dcn_offset_lr_mult`` to the original 57 ``dcn_offset_lr_mult`` * ``bias_lr_mult``. 58 59 2. If the option ``dcn_offset_lr_mult`` is used, the constructor will 60 apply it to all the DCN layers in the model. 
So be careful when the 61 model contains multiple DCN layers in places other than backbone. 62 63 Args: 64 optimizer_cfg (dict): The config dict of the optimizer. 65 Positional fields are 66 67 - `type`: class name of the optimizer. 68 69 Optional fields are 70 71 - any arguments of the corresponding optimizer type, e.g., 72 lr, weight_decay, momentum, etc. 73 paramwise_cfg (dict, optional): Parameter-wise options. 74 75 Example 1: 76 >>> model = torch.nn.modules.Conv1d(1, 1, 1) 77 >>> optimizer_cfg = dict(type='SGD', lr=0.01, momentum=0.9, 78 >>> weight_decay=0.0001) 79 >>> paramwise_cfg = dict(norm_decay_mult=0.) 80 >>> optim_builder = DefaultOptimizerConstructor( 81 >>> optimizer_cfg, paramwise_cfg) 82 >>> optimizer = optim_builder(model) 83 84 Example 2: 85 >>> # assume model have attribute model.backbone and model.cls_head 86 >>> optimizer_cfg = dict(type='SGD', lr=0.01, weight_decay=0.95) 87 >>> paramwise_cfg = dict(custom_keys={ 88 '.backbone': dict(lr_mult=0.1, decay_mult=0.9)}) 89 >>> optim_builder = DefaultOptimizerConstructor( 90 >>> optimizer_cfg, paramwise_cfg) 91 >>> optimizer = optim_builder(model) 92 >>> # Then the `lr` and `weight_decay` for model.backbone is 93 >>> # (0.01 * 0.1, 0.95 * 0.9). `lr` and `weight_decay` for 94 >>> # model.cls_head is (0.01, 0.95). 95 """ 96 97 def __init__(self, 98 optimizer_cfg: dict, 99 paramwise_cfg: Optional[dict] = None): 100 if not isinstance(optimizer_cfg, dict): 101 raise TypeError('optimizer_cfg should be a dict', 102 f'but got {type(optimizer_cfg)}') 103 self.optimizer_cfg = optimizer_cfg 104 self.paramwise_cfg = {} if paramwise_cfg is None else paramwise_cfg 105 self.base_lr = optimizer_cfg.get('lr', None) 106 self.base_wd = optimizer_cfg.get('weight_decay', None) 107 self._validate_cfg() 108 109 def _validate_cfg(self) -> None: 110 """verify the correctness of the config.""" 111 if not isinstance(self.paramwise_cfg, dict): 112 raise TypeError('paramwise_cfg should be None or a dict, ' 113 f'but got {type(self.paramwise_cfg)}') 114 115 if 'custom_keys' in self.paramwise_cfg: 116 if not isinstance(self.paramwise_cfg['custom_keys'], dict): 117 raise TypeError( 118 'If specified, custom_keys must be a dict, ' 119 f'but got {type(self.paramwise_cfg["custom_keys"])}') 120 if self.base_wd is None: 121 for key in self.paramwise_cfg['custom_keys']: 122 if 'decay_mult' in self.paramwise_cfg['custom_keys'][key]: 123 raise ValueError('base_wd should not be None') 124 125 # get base lr and weight decay 126 # weight_decay must be explicitly specified if mult is specified 127 if ('bias_decay_mult' in self.paramwise_cfg 128 or 'norm_decay_mult' in self.paramwise_cfg 129 or 'dwconv_decay_mult' in self.paramwise_cfg): 130 if self.base_wd is None: 131 raise ValueError('base_wd should not be None') 132 133 def _is_in(self, param_group: dict, param_group_list: list) -> bool: 134 """check whether the `param_group` is in the`param_group_list`""" 135 assert is_list_of(param_group_list, dict) 136 param = set(param_group['params']) 137 param_set = set() 138 for group in param_group_list: 139 param_set.update(set(group['params'])) 140 141 return not param.isdisjoint(param_set) 142 143 def add_params(self, 144 params: List[dict], 145 module: nn.Module, 146 prefix: str = '', 147 is_dcn_module: Optional[Union[int, float]] = None) -> None: 148 """Add all parameters of module to the params list. 149 150 The parameters of the given module will be added to the list of param 151 groups, with specific rules defined by paramwise_cfg. 
152 153 Args: 154 params (list[dict]): A list of param groups, it will be modified 155 in place. 156 module (nn.Module): The module to be added. 157 prefix (str): The prefix of the module 158 is_dcn_module (int|float|None): If the current module is a 159 submodule of DCN, `is_dcn_module` will be passed to 160 control conv_offset layer's learning rate. Defaults to None. 161 """ 162 # get param-wise options 163 custom_keys = self.paramwise_cfg.get('custom_keys', {}) 164 # first sort with alphabet order and then sort with reversed len of str 165 sorted_keys = sorted(sorted(custom_keys.keys()), key=len, reverse=True) 166 167 bias_lr_mult = self.paramwise_cfg.get('bias_lr_mult', 1.) 168 bias_decay_mult = self.paramwise_cfg.get('bias_decay_mult', 1.) 169 norm_decay_mult = self.paramwise_cfg.get('norm_decay_mult', 1.) 170 dwconv_decay_mult = self.paramwise_cfg.get('dwconv_decay_mult', 1.) 171 bypass_duplicate = self.paramwise_cfg.get('bypass_duplicate', False) 172 dcn_offset_lr_mult = self.paramwise_cfg.get('dcn_offset_lr_mult', 1.) 173 174 # special rules for norm layers and depth-wise conv layers 175 is_norm = isinstance(module, 176 (_BatchNorm, _InstanceNorm, GroupNorm, LayerNorm)) 177 is_dwconv = ( 178 isinstance(module, torch.nn.Conv2d) 179 and module.in_channels == module.groups) 180 181 for name, param in module.named_parameters(recurse=False): 182 param_group = {'params': [param]} 183 if not param.requires_grad: 184 params.append(param_group) 185 continue 186 if bypass_duplicate and self._is_in(param_group, params): 187 warnings.warn(f'{prefix} is duplicate. It is skipped since ' 188 f'bypass_duplicate={bypass_duplicate}') 189 continue 190 # if the parameter match one of the custom keys, ignore other rules 191 is_custom = False 192 for key in sorted_keys: 193 if key in f'{prefix}.{name}': 194 is_custom = True 195 lr_mult = custom_keys[key].get('lr_mult', 1.) 196 param_group['lr'] = self.base_lr * lr_mult 197 if self.base_wd is not None: 198 decay_mult = custom_keys[key].get('decay_mult', 1.) 
199 param_group['weight_decay'] = self.base_wd * decay_mult 200 break 201 202 if not is_custom: 203 # bias_lr_mult affects all bias parameters 204 # except for norm.bias dcn.conv_offset.bias 205 if name == 'bias' and not (is_norm or is_dcn_module): 206 param_group['lr'] = self.base_lr * bias_lr_mult 207 208 if (prefix.find('conv_offset') != -1 and is_dcn_module 209 and isinstance(module, torch.nn.Conv2d)): 210 # deal with both dcn_offset's bias & weight 211 param_group['lr'] = self.base_lr * dcn_offset_lr_mult 212 213 # apply weight decay policies 214 if self.base_wd is not None: 215 # norm decay 216 if is_norm: 217 param_group[ 218 'weight_decay'] = self.base_wd * norm_decay_mult 219 # depth-wise conv 220 elif is_dwconv: 221 param_group[ 222 'weight_decay'] = self.base_wd * dwconv_decay_mult 223 # bias lr and decay 224 elif name == 'bias' and not is_dcn_module: 225 # TODO: current bias_decay_mult will have affect on DCN 226 param_group[ 227 'weight_decay'] = self.base_wd * bias_decay_mult 228 params.append(param_group) 229 230 if mmcv_full_available(): 231 from mmcv.ops import DeformConv2d, ModulatedDeformConv2d 232 is_dcn_module = isinstance(module, 233 (DeformConv2d, ModulatedDeformConv2d)) 234 else: 235 is_dcn_module = False 236 for child_name, child_mod in module.named_children(): 237 child_prefix = f'{prefix}.{child_name}' if prefix else child_name 238 self.add_params( 239 params, 240 child_mod, 241 prefix=child_prefix, 242 is_dcn_module=is_dcn_module) 243 244 def __call__(self, model: nn.Module) -> torch.optim.Optimizer: 245 if hasattr(model, 'module'): 246 model = model.module 247 248 optimizer_cfg = self.optimizer_cfg.copy() 249 # if no paramwise option is specified, just use the global setting 250 if not self.paramwise_cfg: 251 optimizer_cfg['params'] = model.parameters() 252 return OPTIMIZERS.build(optimizer_cfg) 253 254 # set param-wise lr and weight decay recursively 255 params: List = [] 256 self.add_params(params, model) 257 optimizer_cfg['params'] = params 258 259 return OPTIMIZERS.build(optimizer_cfg) 260 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mmengine/optim/optimizer/default_constructor.py b/mmengine/optim/optimizer/default_constructor.py --- a/mmengine/optim/optimizer/default_constructor.py +++ b/mmengine/optim/optimizer/default_constructor.py @@ -5,6 +5,7 @@ import torch import torch.nn as nn from torch.nn import GroupNorm, LayerNorm +from mmengine.mmengine.logging.logger import print_log from mmengine.registry import OPTIMIZER_CONSTRUCTORS, OPTIMIZERS from mmengine.utils import is_list_of, mmcv_full_available @@ -226,6 +227,13 @@ param_group[ 'weight_decay'] = self.base_wd * bias_decay_mult params.append(param_group) + for key, value in param_group.items(): + if key == 'params': + continue + full_name = f'{prefix}.{name}' if prefix else name + print_log( + f'paramwise_options -- {full_name}:{key}={value}', + logger='current') if mmcv_full_available(): from mmcv.ops import DeformConv2d, ModulatedDeformConv2d
{"golden_diff": "diff --git a/mmengine/optim/optimizer/default_constructor.py b/mmengine/optim/optimizer/default_constructor.py\n--- a/mmengine/optim/optimizer/default_constructor.py\n+++ b/mmengine/optim/optimizer/default_constructor.py\n@@ -5,6 +5,7 @@\n import torch\n import torch.nn as nn\n from torch.nn import GroupNorm, LayerNorm\n+from mmengine.mmengine.logging.logger import print_log\n \n from mmengine.registry import OPTIMIZER_CONSTRUCTORS, OPTIMIZERS\n from mmengine.utils import is_list_of, mmcv_full_available\n@@ -226,6 +227,13 @@\n param_group[\n 'weight_decay'] = self.base_wd * bias_decay_mult\n params.append(param_group)\n+ for key, value in param_group.items():\n+ if key == 'params':\n+ continue\n+ full_name = f'{prefix}.{name}' if prefix else name\n+ print_log(\n+ f'paramwise_options -- {full_name}:{key}={value}',\n+ logger='current')\n \n if mmcv_full_available():\n from mmcv.ops import DeformConv2d, ModulatedDeformConv2d\n", "issue": "\u7528\u6237\u8bbe\u7f6e\u5b8c\u4e86paramwise_cfg\uff0c\u5982\u4f55\u77e5\u9053\u662f\u5426\u7b26\u5408\u9884\u671f\uff0c\u8fd9\u4e2a\u662f\u5426\u4e5f\u8981\u63d0\u4f9b\u76f8\u5e94\u7684\u811a\u672c\uff0c\u7528\u6237\u8fd0\u884c\u540e\u53ef\u4ee5\u5f88\u5bb9\u6613\u7684\u77e5\u9053\u54ea\u4e9b\u53c2\u6570\u88ab frozen\uff0c\u4e0d\u592a\u53c2\u6570\u7ec4\u8d85\u53c2\u7684\u4e0d\u540c\u3002\u6682\u65f6\u6ca1\u6709\u65f6\u95f4\u5f00\u53d1\u7684\u8bdd\uff0c\u53ef\u4ee5\u4f5c\u4e3a\u672a\u6765\u4e00\u4e2a\u9700\u6c42\u5427\n\u7528\u6237\u8bbe\u7f6e\u5b8c\u4e86\uff0c\u5982\u4f55\u77e5\u9053\u662f\u5426\u7b26\u5408\u9884\u671f\uff0c\u8fd9\u4e2a\u662f\u5426\u4e5f\u8981\u63d0\u4f9b\u76f8\u5e94\u7684\u811a\u672c\uff0c\u7528\u6237\u8fd0\u884c\u540e\u53ef\u4ee5\u5f88\u5bb9\u6613\u7684\u77e5\u9053\u54ea\u4e9b\u53c2\u6570\u88ab frozen\uff0c\u4e0d\u592a\u53c2\u6570\u7ec4\u8d85\u53c2\u7684\u4e0d\u540c\u3002\u6682\u65f6\u6ca1\u6709\u65f6\u95f4\u5f00\u53d1\u7684\u8bdd\uff0c\u53ef\u4ee5\u4f5c\u4e3a\u672a\u6765\u4e00\u4e2a\u9700\u6c42\u5427\r\n\r\n_Originally posted by @hhaAndroid in https://github.com/open-mmlab/mmengine/pull/25#discussion_r809786760_\n", "before_files": [{"content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\nfrom typing import List, Optional, Union\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import GroupNorm, LayerNorm\n\nfrom mmengine.registry import OPTIMIZER_CONSTRUCTORS, OPTIMIZERS\nfrom mmengine.utils import is_list_of, mmcv_full_available\nfrom mmengine.utils.parrots_wrapper import _BatchNorm, _InstanceNorm\n\n\n@OPTIMIZER_CONSTRUCTORS.register_module()\nclass DefaultOptimizerConstructor:\n \"\"\"Default constructor for optimizers.\n\n By default each parameter share the same optimizer settings, and we\n provide an argument ``paramwise_cfg`` to specify parameter-wise settings.\n It is a dict and may contain the following fields:\n\n - ``custom_keys`` (dict): Specified parameters-wise settings by keys. If\n one of the keys in ``custom_keys`` is a substring of the name of one\n parameter, then the setting of the parameter will be specified by\n ``custom_keys[key]`` and other setting like ``bias_lr_mult`` etc. will\n be ignored. It should be noted that the aforementioned ``key`` is the\n longest key that is a substring of the name of the parameter. If there\n are multiple matched keys with the same length, then the key with lower\n alphabet order will be chosen.\n ``custom_keys[key]`` should be a dict and may contain fields ``lr_mult``\n and ``decay_mult``. 
See Example 2 below.\n - ``bias_lr_mult`` (float): It will be multiplied to the learning\n rate for all bias parameters (except for those in normalization\n layers and offset layers of DCN).\n - ``bias_decay_mult`` (float): It will be multiplied to the weight\n decay for all bias parameters (except for those in\n normalization layers, depthwise conv layers, offset layers of DCN).\n - ``norm_decay_mult`` (float): It will be multiplied to the weight\n decay for all weight and bias parameters of normalization\n layers.\n - ``dwconv_decay_mult`` (float): It will be multiplied to the weight\n decay for all weight and bias parameters of depthwise conv\n layers.\n - ``dcn_offset_lr_mult`` (float): It will be multiplied to the learning\n rate for parameters of offset layer in the deformable convs\n of a model.\n - ``bypass_duplicate`` (bool): If true, the duplicate parameters\n would not be added into optimizer. Default: False.\n\n Note:\n\n 1. If the option ``dcn_offset_lr_mult`` is used, the constructor will\n override the effect of ``bias_lr_mult`` in the bias of offset layer.\n So be careful when using both ``bias_lr_mult`` and\n ``dcn_offset_lr_mult``. If you wish to apply both of them to the offset\n layer in deformable convs, set ``dcn_offset_lr_mult`` to the original\n ``dcn_offset_lr_mult`` * ``bias_lr_mult``.\n\n 2. If the option ``dcn_offset_lr_mult`` is used, the constructor will\n apply it to all the DCN layers in the model. So be careful when the\n model contains multiple DCN layers in places other than backbone.\n\n Args:\n optimizer_cfg (dict): The config dict of the optimizer.\n Positional fields are\n\n - `type`: class name of the optimizer.\n\n Optional fields are\n\n - any arguments of the corresponding optimizer type, e.g.,\n lr, weight_decay, momentum, etc.\n paramwise_cfg (dict, optional): Parameter-wise options.\n\n Example 1:\n >>> model = torch.nn.modules.Conv1d(1, 1, 1)\n >>> optimizer_cfg = dict(type='SGD', lr=0.01, momentum=0.9,\n >>> weight_decay=0.0001)\n >>> paramwise_cfg = dict(norm_decay_mult=0.)\n >>> optim_builder = DefaultOptimizerConstructor(\n >>> optimizer_cfg, paramwise_cfg)\n >>> optimizer = optim_builder(model)\n\n Example 2:\n >>> # assume model have attribute model.backbone and model.cls_head\n >>> optimizer_cfg = dict(type='SGD', lr=0.01, weight_decay=0.95)\n >>> paramwise_cfg = dict(custom_keys={\n '.backbone': dict(lr_mult=0.1, decay_mult=0.9)})\n >>> optim_builder = DefaultOptimizerConstructor(\n >>> optimizer_cfg, paramwise_cfg)\n >>> optimizer = optim_builder(model)\n >>> # Then the `lr` and `weight_decay` for model.backbone is\n >>> # (0.01 * 0.1, 0.95 * 0.9). 
`lr` and `weight_decay` for\n >>> # model.cls_head is (0.01, 0.95).\n \"\"\"\n\n def __init__(self,\n optimizer_cfg: dict,\n paramwise_cfg: Optional[dict] = None):\n if not isinstance(optimizer_cfg, dict):\n raise TypeError('optimizer_cfg should be a dict',\n f'but got {type(optimizer_cfg)}')\n self.optimizer_cfg = optimizer_cfg\n self.paramwise_cfg = {} if paramwise_cfg is None else paramwise_cfg\n self.base_lr = optimizer_cfg.get('lr', None)\n self.base_wd = optimizer_cfg.get('weight_decay', None)\n self._validate_cfg()\n\n def _validate_cfg(self) -> None:\n \"\"\"verify the correctness of the config.\"\"\"\n if not isinstance(self.paramwise_cfg, dict):\n raise TypeError('paramwise_cfg should be None or a dict, '\n f'but got {type(self.paramwise_cfg)}')\n\n if 'custom_keys' in self.paramwise_cfg:\n if not isinstance(self.paramwise_cfg['custom_keys'], dict):\n raise TypeError(\n 'If specified, custom_keys must be a dict, '\n f'but got {type(self.paramwise_cfg[\"custom_keys\"])}')\n if self.base_wd is None:\n for key in self.paramwise_cfg['custom_keys']:\n if 'decay_mult' in self.paramwise_cfg['custom_keys'][key]:\n raise ValueError('base_wd should not be None')\n\n # get base lr and weight decay\n # weight_decay must be explicitly specified if mult is specified\n if ('bias_decay_mult' in self.paramwise_cfg\n or 'norm_decay_mult' in self.paramwise_cfg\n or 'dwconv_decay_mult' in self.paramwise_cfg):\n if self.base_wd is None:\n raise ValueError('base_wd should not be None')\n\n def _is_in(self, param_group: dict, param_group_list: list) -> bool:\n \"\"\"check whether the `param_group` is in the`param_group_list`\"\"\"\n assert is_list_of(param_group_list, dict)\n param = set(param_group['params'])\n param_set = set()\n for group in param_group_list:\n param_set.update(set(group['params']))\n\n return not param.isdisjoint(param_set)\n\n def add_params(self,\n params: List[dict],\n module: nn.Module,\n prefix: str = '',\n is_dcn_module: Optional[Union[int, float]] = None) -> None:\n \"\"\"Add all parameters of module to the params list.\n\n The parameters of the given module will be added to the list of param\n groups, with specific rules defined by paramwise_cfg.\n\n Args:\n params (list[dict]): A list of param groups, it will be modified\n in place.\n module (nn.Module): The module to be added.\n prefix (str): The prefix of the module\n is_dcn_module (int|float|None): If the current module is a\n submodule of DCN, `is_dcn_module` will be passed to\n control conv_offset layer's learning rate. 
Defaults to None.\n \"\"\"\n # get param-wise options\n custom_keys = self.paramwise_cfg.get('custom_keys', {})\n # first sort with alphabet order and then sort with reversed len of str\n sorted_keys = sorted(sorted(custom_keys.keys()), key=len, reverse=True)\n\n bias_lr_mult = self.paramwise_cfg.get('bias_lr_mult', 1.)\n bias_decay_mult = self.paramwise_cfg.get('bias_decay_mult', 1.)\n norm_decay_mult = self.paramwise_cfg.get('norm_decay_mult', 1.)\n dwconv_decay_mult = self.paramwise_cfg.get('dwconv_decay_mult', 1.)\n bypass_duplicate = self.paramwise_cfg.get('bypass_duplicate', False)\n dcn_offset_lr_mult = self.paramwise_cfg.get('dcn_offset_lr_mult', 1.)\n\n # special rules for norm layers and depth-wise conv layers\n is_norm = isinstance(module,\n (_BatchNorm, _InstanceNorm, GroupNorm, LayerNorm))\n is_dwconv = (\n isinstance(module, torch.nn.Conv2d)\n and module.in_channels == module.groups)\n\n for name, param in module.named_parameters(recurse=False):\n param_group = {'params': [param]}\n if not param.requires_grad:\n params.append(param_group)\n continue\n if bypass_duplicate and self._is_in(param_group, params):\n warnings.warn(f'{prefix} is duplicate. It is skipped since '\n f'bypass_duplicate={bypass_duplicate}')\n continue\n # if the parameter match one of the custom keys, ignore other rules\n is_custom = False\n for key in sorted_keys:\n if key in f'{prefix}.{name}':\n is_custom = True\n lr_mult = custom_keys[key].get('lr_mult', 1.)\n param_group['lr'] = self.base_lr * lr_mult\n if self.base_wd is not None:\n decay_mult = custom_keys[key].get('decay_mult', 1.)\n param_group['weight_decay'] = self.base_wd * decay_mult\n break\n\n if not is_custom:\n # bias_lr_mult affects all bias parameters\n # except for norm.bias dcn.conv_offset.bias\n if name == 'bias' and not (is_norm or is_dcn_module):\n param_group['lr'] = self.base_lr * bias_lr_mult\n\n if (prefix.find('conv_offset') != -1 and is_dcn_module\n and isinstance(module, torch.nn.Conv2d)):\n # deal with both dcn_offset's bias & weight\n param_group['lr'] = self.base_lr * dcn_offset_lr_mult\n\n # apply weight decay policies\n if self.base_wd is not None:\n # norm decay\n if is_norm:\n param_group[\n 'weight_decay'] = self.base_wd * norm_decay_mult\n # depth-wise conv\n elif is_dwconv:\n param_group[\n 'weight_decay'] = self.base_wd * dwconv_decay_mult\n # bias lr and decay\n elif name == 'bias' and not is_dcn_module:\n # TODO: current bias_decay_mult will have affect on DCN\n param_group[\n 'weight_decay'] = self.base_wd * bias_decay_mult\n params.append(param_group)\n\n if mmcv_full_available():\n from mmcv.ops import DeformConv2d, ModulatedDeformConv2d\n is_dcn_module = isinstance(module,\n (DeformConv2d, ModulatedDeformConv2d))\n else:\n is_dcn_module = False\n for child_name, child_mod in module.named_children():\n child_prefix = f'{prefix}.{child_name}' if prefix else child_name\n self.add_params(\n params,\n child_mod,\n prefix=child_prefix,\n is_dcn_module=is_dcn_module)\n\n def __call__(self, model: nn.Module) -> torch.optim.Optimizer:\n if hasattr(model, 'module'):\n model = model.module\n\n optimizer_cfg = self.optimizer_cfg.copy()\n # if no paramwise option is specified, just use the global setting\n if not self.paramwise_cfg:\n optimizer_cfg['params'] = model.parameters()\n return OPTIMIZERS.build(optimizer_cfg)\n\n # set param-wise lr and weight decay recursively\n params: List = []\n self.add_params(params, model)\n optimizer_cfg['params'] = params\n\n return OPTIMIZERS.build(optimizer_cfg)\n", "path": 
"mmengine/optim/optimizer/default_constructor.py"}], "after_files": [{"content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\nfrom typing import List, Optional, Union\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import GroupNorm, LayerNorm\nfrom mmengine.mmengine.logging.logger import print_log\n\nfrom mmengine.registry import OPTIMIZER_CONSTRUCTORS, OPTIMIZERS\nfrom mmengine.utils import is_list_of, mmcv_full_available\nfrom mmengine.utils.parrots_wrapper import _BatchNorm, _InstanceNorm\n\n\n@OPTIMIZER_CONSTRUCTORS.register_module()\nclass DefaultOptimizerConstructor:\n \"\"\"Default constructor for optimizers.\n\n By default each parameter share the same optimizer settings, and we\n provide an argument ``paramwise_cfg`` to specify parameter-wise settings.\n It is a dict and may contain the following fields:\n\n - ``custom_keys`` (dict): Specified parameters-wise settings by keys. If\n one of the keys in ``custom_keys`` is a substring of the name of one\n parameter, then the setting of the parameter will be specified by\n ``custom_keys[key]`` and other setting like ``bias_lr_mult`` etc. will\n be ignored. It should be noted that the aforementioned ``key`` is the\n longest key that is a substring of the name of the parameter. If there\n are multiple matched keys with the same length, then the key with lower\n alphabet order will be chosen.\n ``custom_keys[key]`` should be a dict and may contain fields ``lr_mult``\n and ``decay_mult``. See Example 2 below.\n - ``bias_lr_mult`` (float): It will be multiplied to the learning\n rate for all bias parameters (except for those in normalization\n layers and offset layers of DCN).\n - ``bias_decay_mult`` (float): It will be multiplied to the weight\n decay for all bias parameters (except for those in\n normalization layers, depthwise conv layers, offset layers of DCN).\n - ``norm_decay_mult`` (float): It will be multiplied to the weight\n decay for all weight and bias parameters of normalization\n layers.\n - ``dwconv_decay_mult`` (float): It will be multiplied to the weight\n decay for all weight and bias parameters of depthwise conv\n layers.\n - ``dcn_offset_lr_mult`` (float): It will be multiplied to the learning\n rate for parameters of offset layer in the deformable convs\n of a model.\n - ``bypass_duplicate`` (bool): If true, the duplicate parameters\n would not be added into optimizer. Default: False.\n\n Note:\n\n 1. If the option ``dcn_offset_lr_mult`` is used, the constructor will\n override the effect of ``bias_lr_mult`` in the bias of offset layer.\n So be careful when using both ``bias_lr_mult`` and\n ``dcn_offset_lr_mult``. If you wish to apply both of them to the offset\n layer in deformable convs, set ``dcn_offset_lr_mult`` to the original\n ``dcn_offset_lr_mult`` * ``bias_lr_mult``.\n\n 2. If the option ``dcn_offset_lr_mult`` is used, the constructor will\n apply it to all the DCN layers in the model. 
So be careful when the\n model contains multiple DCN layers in places other than backbone.\n\n Args:\n optimizer_cfg (dict): The config dict of the optimizer.\n Positional fields are\n\n - `type`: class name of the optimizer.\n\n Optional fields are\n\n - any arguments of the corresponding optimizer type, e.g.,\n lr, weight_decay, momentum, etc.\n paramwise_cfg (dict, optional): Parameter-wise options.\n\n Example 1:\n >>> model = torch.nn.modules.Conv1d(1, 1, 1)\n >>> optimizer_cfg = dict(type='SGD', lr=0.01, momentum=0.9,\n >>> weight_decay=0.0001)\n >>> paramwise_cfg = dict(norm_decay_mult=0.)\n >>> optim_builder = DefaultOptimizerConstructor(\n >>> optimizer_cfg, paramwise_cfg)\n >>> optimizer = optim_builder(model)\n\n Example 2:\n >>> # assume model have attribute model.backbone and model.cls_head\n >>> optimizer_cfg = dict(type='SGD', lr=0.01, weight_decay=0.95)\n >>> paramwise_cfg = dict(custom_keys={\n '.backbone': dict(lr_mult=0.1, decay_mult=0.9)})\n >>> optim_builder = DefaultOptimizerConstructor(\n >>> optimizer_cfg, paramwise_cfg)\n >>> optimizer = optim_builder(model)\n >>> # Then the `lr` and `weight_decay` for model.backbone is\n >>> # (0.01 * 0.1, 0.95 * 0.9). `lr` and `weight_decay` for\n >>> # model.cls_head is (0.01, 0.95).\n \"\"\"\n\n def __init__(self,\n optimizer_cfg: dict,\n paramwise_cfg: Optional[dict] = None):\n if not isinstance(optimizer_cfg, dict):\n raise TypeError('optimizer_cfg should be a dict',\n f'but got {type(optimizer_cfg)}')\n self.optimizer_cfg = optimizer_cfg\n self.paramwise_cfg = {} if paramwise_cfg is None else paramwise_cfg\n self.base_lr = optimizer_cfg.get('lr', None)\n self.base_wd = optimizer_cfg.get('weight_decay', None)\n self._validate_cfg()\n\n def _validate_cfg(self) -> None:\n \"\"\"verify the correctness of the config.\"\"\"\n if not isinstance(self.paramwise_cfg, dict):\n raise TypeError('paramwise_cfg should be None or a dict, '\n f'but got {type(self.paramwise_cfg)}')\n\n if 'custom_keys' in self.paramwise_cfg:\n if not isinstance(self.paramwise_cfg['custom_keys'], dict):\n raise TypeError(\n 'If specified, custom_keys must be a dict, '\n f'but got {type(self.paramwise_cfg[\"custom_keys\"])}')\n if self.base_wd is None:\n for key in self.paramwise_cfg['custom_keys']:\n if 'decay_mult' in self.paramwise_cfg['custom_keys'][key]:\n raise ValueError('base_wd should not be None')\n\n # get base lr and weight decay\n # weight_decay must be explicitly specified if mult is specified\n if ('bias_decay_mult' in self.paramwise_cfg\n or 'norm_decay_mult' in self.paramwise_cfg\n or 'dwconv_decay_mult' in self.paramwise_cfg):\n if self.base_wd is None:\n raise ValueError('base_wd should not be None')\n\n def _is_in(self, param_group: dict, param_group_list: list) -> bool:\n \"\"\"check whether the `param_group` is in the`param_group_list`\"\"\"\n assert is_list_of(param_group_list, dict)\n param = set(param_group['params'])\n param_set = set()\n for group in param_group_list:\n param_set.update(set(group['params']))\n\n return not param.isdisjoint(param_set)\n\n def add_params(self,\n params: List[dict],\n module: nn.Module,\n prefix: str = '',\n is_dcn_module: Optional[Union[int, float]] = None) -> None:\n \"\"\"Add all parameters of module to the params list.\n\n The parameters of the given module will be added to the list of param\n groups, with specific rules defined by paramwise_cfg.\n\n Args:\n params (list[dict]): A list of param groups, it will be modified\n in place.\n module (nn.Module): The module to be added.\n prefix (str): The 
prefix of the module\n is_dcn_module (int|float|None): If the current module is a\n submodule of DCN, `is_dcn_module` will be passed to\n control conv_offset layer's learning rate. Defaults to None.\n \"\"\"\n # get param-wise options\n custom_keys = self.paramwise_cfg.get('custom_keys', {})\n # first sort with alphabet order and then sort with reversed len of str\n sorted_keys = sorted(sorted(custom_keys.keys()), key=len, reverse=True)\n\n bias_lr_mult = self.paramwise_cfg.get('bias_lr_mult', 1.)\n bias_decay_mult = self.paramwise_cfg.get('bias_decay_mult', 1.)\n norm_decay_mult = self.paramwise_cfg.get('norm_decay_mult', 1.)\n dwconv_decay_mult = self.paramwise_cfg.get('dwconv_decay_mult', 1.)\n bypass_duplicate = self.paramwise_cfg.get('bypass_duplicate', False)\n dcn_offset_lr_mult = self.paramwise_cfg.get('dcn_offset_lr_mult', 1.)\n\n # special rules for norm layers and depth-wise conv layers\n is_norm = isinstance(module,\n (_BatchNorm, _InstanceNorm, GroupNorm, LayerNorm))\n is_dwconv = (\n isinstance(module, torch.nn.Conv2d)\n and module.in_channels == module.groups)\n\n for name, param in module.named_parameters(recurse=False):\n param_group = {'params': [param]}\n if not param.requires_grad:\n params.append(param_group)\n continue\n if bypass_duplicate and self._is_in(param_group, params):\n warnings.warn(f'{prefix} is duplicate. It is skipped since '\n f'bypass_duplicate={bypass_duplicate}')\n continue\n # if the parameter match one of the custom keys, ignore other rules\n is_custom = False\n for key in sorted_keys:\n if key in f'{prefix}.{name}':\n is_custom = True\n lr_mult = custom_keys[key].get('lr_mult', 1.)\n param_group['lr'] = self.base_lr * lr_mult\n if self.base_wd is not None:\n decay_mult = custom_keys[key].get('decay_mult', 1.)\n param_group['weight_decay'] = self.base_wd * decay_mult\n break\n\n if not is_custom:\n # bias_lr_mult affects all bias parameters\n # except for norm.bias dcn.conv_offset.bias\n if name == 'bias' and not (is_norm or is_dcn_module):\n param_group['lr'] = self.base_lr * bias_lr_mult\n\n if (prefix.find('conv_offset') != -1 and is_dcn_module\n and isinstance(module, torch.nn.Conv2d)):\n # deal with both dcn_offset's bias & weight\n param_group['lr'] = self.base_lr * dcn_offset_lr_mult\n\n # apply weight decay policies\n if self.base_wd is not None:\n # norm decay\n if is_norm:\n param_group[\n 'weight_decay'] = self.base_wd * norm_decay_mult\n # depth-wise conv\n elif is_dwconv:\n param_group[\n 'weight_decay'] = self.base_wd * dwconv_decay_mult\n # bias lr and decay\n elif name == 'bias' and not is_dcn_module:\n # TODO: current bias_decay_mult will have affect on DCN\n param_group[\n 'weight_decay'] = self.base_wd * bias_decay_mult\n params.append(param_group)\n for key, value in param_group.items():\n if key == 'params':\n continue\n full_name = f'{prefix}.{name}' if prefix else name\n print_log(\n f'paramwise_options -- {full_name}:{key}={value}',\n logger='current')\n\n if mmcv_full_available():\n from mmcv.ops import DeformConv2d, ModulatedDeformConv2d\n is_dcn_module = isinstance(module,\n (DeformConv2d, ModulatedDeformConv2d))\n else:\n is_dcn_module = False\n for child_name, child_mod in module.named_children():\n child_prefix = f'{prefix}.{child_name}' if prefix else child_name\n self.add_params(\n params,\n child_mod,\n prefix=child_prefix,\n is_dcn_module=is_dcn_module)\n\n def __call__(self, model: nn.Module) -> torch.optim.Optimizer:\n if hasattr(model, 'module'):\n model = model.module\n\n optimizer_cfg = 
self.optimizer_cfg.copy()\n # if no paramwise option is specified, just use the global setting\n if not self.paramwise_cfg:\n optimizer_cfg['params'] = model.parameters()\n return OPTIMIZERS.build(optimizer_cfg)\n\n # set param-wise lr and weight decay recursively\n params: List = []\n self.add_params(params, model)\n optimizer_cfg['params'] = params\n\n return OPTIMIZERS.build(optimizer_cfg)\n", "path": "mmengine/optim/optimizer/default_constructor.py"}]}
3,743
255
gh_patches_debug_63334
rasdani/github-patches
git_diff
sanic-org__sanic-1527
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Publish 19.3 release to PyPI Thank you for the release 3 days ago! https://github.com/huge-success/sanic/releases/tag/19.3 It's missing from PyPI at the moment: https://pypi.org/project/sanic/#history Please publish it at your convenience 🙇 Keep up the awesome work ❤️ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sanic/__init__.py` Content: ``` 1 from sanic.app import Sanic 2 from sanic.blueprints import Blueprint 3 4 5 __version__ = "18.12.0" 6 7 __all__ = ["Sanic", "Blueprint"] 8 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sanic/__init__.py b/sanic/__init__.py --- a/sanic/__init__.py +++ b/sanic/__init__.py @@ -2,6 +2,6 @@ from sanic.blueprints import Blueprint -__version__ = "18.12.0" +__version__ = "19.03.0" __all__ = ["Sanic", "Blueprint"]
{"golden_diff": "diff --git a/sanic/__init__.py b/sanic/__init__.py\n--- a/sanic/__init__.py\n+++ b/sanic/__init__.py\n@@ -2,6 +2,6 @@\n from sanic.blueprints import Blueprint\n \n \n-__version__ = \"18.12.0\"\n+__version__ = \"19.03.0\"\n \n __all__ = [\"Sanic\", \"Blueprint\"]\n", "issue": "Publish 19.3 release to PyPI\nThank you for the release 3 days ago!\r\n\r\nhttps://github.com/huge-success/sanic/releases/tag/19.3\r\n\r\nIt's missing from PyPI at the moment:\r\n\r\nhttps://pypi.org/project/sanic/#history\r\n\r\nPlease publish it at your convenience \ud83d\ude47 \r\n\r\nKeep up the awesome work \u2764\ufe0f \n", "before_files": [{"content": "from sanic.app import Sanic\nfrom sanic.blueprints import Blueprint\n\n\n__version__ = \"18.12.0\"\n\n__all__ = [\"Sanic\", \"Blueprint\"]\n", "path": "sanic/__init__.py"}], "after_files": [{"content": "from sanic.app import Sanic\nfrom sanic.blueprints import Blueprint\n\n\n__version__ = \"19.03.0\"\n\n__all__ = [\"Sanic\", \"Blueprint\"]\n", "path": "sanic/__init__.py"}]}
389
95
gh_patches_debug_42094
rasdani/github-patches
git_diff
crytic__slither-494
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `slither-flat` doesn't flatten dYdX solo contracts correctly Hi There, Thanks for building `slither-flat`! It seems to be a lot more forgivign than `soljitsu` or `truffle-flatten`. I'm struggling, however, with using it to flatten dYdX contracts. **How to reproduce** ```bash #!/usr/bin/bash # Dependency: npm, nvm # Better not to run this as a script # Get the repository echo "solo" > .gitignore git clone https://github.com/dydxprotocol/solo.git cd solo # Install javascript dependencies npm i keccakjs --save npm i solium@^1.0.0 --save npm i acorn --save npm i # Testing that everything installed correctly (make sure Docker is enabled at this point) nvm use 10.16.2 # Need this version to compile contracts npm test # Install and run slither-flat cd .. pipenv install slither-analyzer pipenv run slither-flat solo ``` **Issues** I noticed two issues with these contracts: 1) Duplicate contracts In particular, `ReentrancyGuard` appeared many times in different contract files (and multiple times per file) 2) Missing characters In particular, `ReentrancyGuard` appeared as `ontract ReentrancyGuard` (missing a `c`) 3) Some contracts don't exist in their own file anymore Contract `external/OnlySolo.sol` doesn't have a corresponding `OnlySolo.sol` file in the output, instead it's embedded inline with other contracts in multiple places. [perhaps this is intentional] **Potential problems** With `truffle-flatten` it flagged a circular dependency in the contracts, so perhaps this is related to the issue here. `slither-flat` doesn't flatten dYdX solo contracts correctly Hi There, Thanks for building `slither-flat`! It seems to be a lot more forgivign than `soljitsu` or `truffle-flatten`. I'm struggling, however, with using it to flatten dYdX contracts. **How to reproduce** ```bash #!/usr/bin/bash # Dependency: npm, nvm # Better not to run this as a script # Get the repository echo "solo" > .gitignore git clone https://github.com/dydxprotocol/solo.git cd solo # Install javascript dependencies npm i keccakjs --save npm i solium@^1.0.0 --save npm i acorn --save npm i # Testing that everything installed correctly (make sure Docker is enabled at this point) nvm use 10.16.2 # Need this version to compile contracts npm test # Install and run slither-flat cd .. pipenv install slither-analyzer pipenv run slither-flat solo ``` **Issues** I noticed two issues with these contracts: 1) Duplicate contracts In particular, `ReentrancyGuard` appeared many times in different contract files (and multiple times per file) 2) Missing characters In particular, `ReentrancyGuard` appeared as `ontract ReentrancyGuard` (missing a `c`) 3) Some contracts don't exist in their own file anymore Contract `external/OnlySolo.sol` doesn't have a corresponding `OnlySolo.sol` file in the output, instead it's embedded inline with other contracts in multiple places. [perhaps this is intentional] **Potential problems** With `truffle-flatten` it flagged a circular dependency in the contracts, so perhaps this is related to the issue here. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `slither/tools/flattening/flattening.py` Content: ``` 1 from pathlib import Path 2 import re 3 import logging 4 from collections import namedtuple 5 6 from slither.core.declarations import SolidityFunction 7 from slither.exceptions import SlitherException 8 from slither.core.solidity_types.user_defined_type import UserDefinedType 9 from slither.core.declarations.structure import Structure 10 from slither.core.declarations.enum import Enum 11 from slither.core.declarations.contract import Contract 12 from slither.slithir.operations import NewContract, TypeConversion, SolidityCall 13 14 logger = logging.getLogger("Slither-flattening") 15 16 # index: where to start 17 # patch_type: 18 # - public_to_external: public to external (external-to-public) 19 # - calldata_to_memory: calldata to memory (external-to-public) 20 # - line_removal: remove the line (remove-assert) 21 Patch = namedtuple('PatchExternal', ['index', 'patch_type']) 22 23 class Flattening: 24 DEFAULT_EXPORT_PATH = Path('crytic-export/flattening') 25 26 def __init__(self, slither, external_to_public=False, remove_assert=False, private_to_internal=False): 27 self._source_codes = {} 28 self._slither = slither 29 self._external_to_public = external_to_public 30 self._remove_assert = remove_assert 31 self._use_abi_encoder_v2 = False 32 self._private_to_internal = private_to_internal 33 34 self._check_abi_encoder_v2() 35 36 for contract in slither.contracts: 37 self._get_source_code(contract) 38 39 def _check_abi_encoder_v2(self): 40 for p in self._slither.pragma_directives: 41 if 'ABIEncoderV2' in str(p.directive): 42 self._use_abi_encoder_v2 = True 43 return 44 45 def _get_source_code(self, contract): 46 src_mapping = contract.source_mapping 47 content = self._slither.source_code[src_mapping['filename_absolute']] 48 start = src_mapping['start'] 49 end = src_mapping['start'] + src_mapping['length'] 50 51 to_patch = [] 52 # interface must use external 53 if self._external_to_public and contract.contract_kind != "interface": 54 for f in contract.functions_declared: 55 # fallback must be external 56 if f.is_fallback or f.is_constructor_variables: 57 continue 58 if f.visibility == 'external': 59 attributes_start = (f.parameters_src.source_mapping['start'] + 60 f.parameters_src.source_mapping['length']) 61 attributes_end = f.returns_src.source_mapping['start'] 62 attributes = content[attributes_start:attributes_end] 63 regex = re.search(r'((\sexternal)\s+)|(\sexternal)$|(\)external)$', attributes) 64 if regex: 65 to_patch.append(Patch(attributes_start + regex.span()[0] + 1, 'public_to_external')) 66 else: 67 raise SlitherException(f'External keyword not found {f.name} {attributes}') 68 69 for var in f.parameters: 70 if var.location == "calldata": 71 calldata_start = var.source_mapping['start'] 72 calldata_end = calldata_start + var.source_mapping['length'] 73 calldata_idx = content[calldata_start:calldata_end].find(' calldata ') 74 to_patch.append(Patch(calldata_start + calldata_idx + 1, 'calldata_to_memory')) 75 76 if self._private_to_internal: 77 for variable in contract.state_variables_declared: 78 if variable.visibility == 'private': 79 print(variable.source_mapping) 80 attributes_start = variable.source_mapping['start'] 81 attributes_end = attributes_start + variable.source_mapping['length'] 82 attributes = content[attributes_start:attributes_end] 83 print(attributes) 84 regex = re.search(r' private ', attributes) 85 if regex: 86 to_patch.append(Patch(attributes_start + regex.span()[0] + 1, 
'private_to_internal')) 87 else: 88 raise SlitherException(f'private keyword not found {v.name} {attributes}') 89 90 if self._remove_assert: 91 for function in contract.functions_and_modifiers_declared: 92 for node in function.nodes: 93 for ir in node.irs: 94 if isinstance(ir, SolidityCall) and ir.function == SolidityFunction('assert(bool)'): 95 to_patch.append(Patch(node.source_mapping['start'], 'line_removal')) 96 logger.info(f'Code commented: {node.expression} ({node.source_mapping_str})') 97 98 to_patch.sort(key=lambda x: x.index, reverse=True) 99 100 content = content[start:end] 101 for patch in to_patch: 102 patch_type = patch.patch_type 103 index = patch.index 104 index = index - start 105 if patch_type == 'public_to_external': 106 content = content[:index] + 'public' + content[index + len('external'):] 107 if patch_type == 'private_to_internal': 108 content = content[:index] + 'internal' + content[index + len('private'):] 109 elif patch_type == 'calldata_to_memory': 110 content = content[:index] + 'memory' + content[index + len('calldata'):] 111 else: 112 assert patch_type == 'line_removal' 113 content = content[:index] + ' // ' + content[index:] 114 115 self._source_codes[contract] = content 116 117 def _export_from_type(self, t, contract, exported, list_contract): 118 if isinstance(t, UserDefinedType): 119 if isinstance(t.type, (Enum, Structure)): 120 if t.type.contract != contract and not t.type.contract in exported: 121 self._export_contract(t.type.contract, exported, list_contract) 122 else: 123 assert isinstance(t.type, Contract) 124 if t.type != contract and not t.type in exported: 125 self._export_contract(t.type, exported, list_contract) 126 127 def _export_contract(self, contract, exported, list_contract): 128 if contract.name in exported: 129 return 130 exported.add(contract.name) 131 for inherited in contract.inheritance: 132 self._export_contract(inherited, exported, list_contract) 133 134 # Find all the external contracts called 135 externals = contract.all_library_calls + contract.all_high_level_calls 136 # externals is a list of (contract, function) 137 # We also filter call to itself to avoid infilite loop 138 externals = list(set([e[0] for e in externals if e[0] != contract])) 139 140 for inherited in externals: 141 self._export_contract(inherited, exported, list_contract) 142 143 # Find all the external contracts use as a base type 144 local_vars = [] 145 for f in contract.functions_declared: 146 local_vars += f.variables 147 148 for v in contract.variables + local_vars: 149 self._export_from_type(v.type, contract, exported, list_contract) 150 151 # Find all convert and "new" operation that can lead to use an external contract 152 for f in contract.functions_declared: 153 for ir in f.slithir_operations: 154 if isinstance(ir, NewContract): 155 if ir.contract_created != contract and not ir.contract_created in exported: 156 self._export_contract(ir.contract_created, exported, list_contract) 157 if isinstance(ir, TypeConversion): 158 self._export_from_type(ir.type, contract, exported, list_contract) 159 list_contract.append(self._source_codes[contract]) 160 161 def _export(self, contract, ret): 162 self._export_contract(contract, set(), ret) 163 path = Path(self.DEFAULT_EXPORT_PATH, f'{contract.name}.sol') 164 logger.info(f'Export {path}') 165 with open(path, 'w') as f: 166 if self._slither.solc_version: 167 f.write(f'pragma solidity {self._slither.solc_version};\n') 168 if self._use_abi_encoder_v2: 169 f.write('pragma experimental ABIEncoderV2;\n') 170 
f.write('\n'.join(ret)) 171 f.write('\n') 172 173 def export(self, target=None): 174 175 if not self.DEFAULT_EXPORT_PATH.exists(): 176 self.DEFAULT_EXPORT_PATH.mkdir(parents=True) 177 178 if target is None: 179 for contract in self._slither.contracts_derived: 180 ret = [] 181 self._export(contract, ret) 182 else: 183 contract = self._slither.get_contract_from_name(target) 184 if contract is None: 185 logger.error(f'{target} not found') 186 else: 187 ret = [] 188 self._export(contract, ret) 189 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/slither/tools/flattening/flattening.py b/slither/tools/flattening/flattening.py --- a/slither/tools/flattening/flattening.py +++ b/slither/tools/flattening/flattening.py @@ -4,6 +4,7 @@ from collections import namedtuple from slither.core.declarations import SolidityFunction +from slither.core.solidity_types import MappingType, ArrayType from slither.exceptions import SlitherException from slither.core.solidity_types.user_defined_type import UserDefinedType from slither.core.declarations.structure import Structure @@ -20,6 +21,7 @@ # - line_removal: remove the line (remove-assert) Patch = namedtuple('PatchExternal', ['index', 'patch_type']) + class Flattening: DEFAULT_EXPORT_PATH = Path('crytic-export/flattening') @@ -44,7 +46,7 @@ def _get_source_code(self, contract): src_mapping = contract.source_mapping - content = self._slither.source_code[src_mapping['filename_absolute']] + content = self._slither.source_code[src_mapping['filename_absolute']].encode('utf8') start = src_mapping['start'] end = src_mapping['start'] + src_mapping['length'] @@ -112,17 +114,22 @@ assert patch_type == 'line_removal' content = content[:index] + ' // ' + content[index:] - self._source_codes[contract] = content + self._source_codes[contract] = content.decode('utf8') def _export_from_type(self, t, contract, exported, list_contract): if isinstance(t, UserDefinedType): if isinstance(t.type, (Enum, Structure)): - if t.type.contract != contract and not t.type.contract in exported: + if t.type.contract != contract and t.type.contract not in exported: self._export_contract(t.type.contract, exported, list_contract) else: assert isinstance(t.type, Contract) - if t.type != contract and not t.type in exported: + if t.type != contract and t.type not in exported: self._export_contract(t.type, exported, list_contract) + elif isinstance(t, MappingType): + self._export_from_type(t.type_from, contract, exported, list_contract) + self._export_from_type(t.type_to, contract, exported, list_contract) + elif isinstance(t, ArrayType): + self._export_from_type(t.type, contract, exported, list_contract) def _export_contract(self, contract, exported, list_contract): if contract.name in exported: @@ -148,6 +155,10 @@ for v in contract.variables + local_vars: self._export_from_type(v.type, contract, exported, list_contract) + for s in contract.structures: + for elem in s.elems.values(): + self._export_from_type(elem.type, contract, exported, list_contract) + # Find all convert and "new" operation that can lead to use an external contract for f in contract.functions_declared: for ir in f.slithir_operations:
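A note on the first hunk above: solc reports source mappings as byte offsets, but the original code sliced a Python `str` (indexed by code points), so any multi-byte character earlier in a file shifts every later slice and clips tokens — hence symptoms like `ontract ReentrancyGuard`. That is presumably why the patch encodes to UTF-8 before slicing and decodes afterwards. A minimal sketch of the mechanism; the snippet and offsets are illustrative, not taken from the dYdX sources:

```python
# Illustrative sketch: why byte offsets must be applied to bytes, not str.
source = "// © dYdX\ncontract ReentrancyGuard { }\n"

# Pretend the compiler reported this source mapping for the contract,
# measured in BYTES from the start of the file.
start_bytes = source.encode("utf8").find(b"contract")          # 11
length_bytes = len(b"contract ReentrancyGuard { }")             # 28

# Wrong: indexing the unicode str with byte offsets. The two-byte "©"
# shifts everything by one character, so the leading 'c' is lost.
print(source[start_bytes:start_bytes + length_bytes])
# -> 'ontract ReentrancyGuard { }\n'

# What the patch does instead: slice the UTF-8 bytes, decode afterwards.
raw = source.encode("utf8")
print(raw[start_bytes:start_bytes + length_bytes].decode("utf8"))
# -> 'contract ReentrancyGuard { }'
```

The remaining hunks walk into mapping/array element types and struct members, so contracts referenced only through those types are exported through the same de-duplicating `exported` set instead of being missed or repeated.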
{"golden_diff": "diff --git a/slither/tools/flattening/flattening.py b/slither/tools/flattening/flattening.py\n--- a/slither/tools/flattening/flattening.py\n+++ b/slither/tools/flattening/flattening.py\n@@ -4,6 +4,7 @@\n from collections import namedtuple\n \n from slither.core.declarations import SolidityFunction\n+from slither.core.solidity_types import MappingType, ArrayType\n from slither.exceptions import SlitherException\n from slither.core.solidity_types.user_defined_type import UserDefinedType\n from slither.core.declarations.structure import Structure\n@@ -20,6 +21,7 @@\n # - line_removal: remove the line (remove-assert)\n Patch = namedtuple('PatchExternal', ['index', 'patch_type'])\n \n+\n class Flattening:\n DEFAULT_EXPORT_PATH = Path('crytic-export/flattening')\n \n@@ -44,7 +46,7 @@\n \n def _get_source_code(self, contract):\n src_mapping = contract.source_mapping\n- content = self._slither.source_code[src_mapping['filename_absolute']]\n+ content = self._slither.source_code[src_mapping['filename_absolute']].encode('utf8')\n start = src_mapping['start']\n end = src_mapping['start'] + src_mapping['length']\n \n@@ -112,17 +114,22 @@\n assert patch_type == 'line_removal'\n content = content[:index] + ' // ' + content[index:]\n \n- self._source_codes[contract] = content\n+ self._source_codes[contract] = content.decode('utf8')\n \n def _export_from_type(self, t, contract, exported, list_contract):\n if isinstance(t, UserDefinedType):\n if isinstance(t.type, (Enum, Structure)):\n- if t.type.contract != contract and not t.type.contract in exported:\n+ if t.type.contract != contract and t.type.contract not in exported:\n self._export_contract(t.type.contract, exported, list_contract)\n else:\n assert isinstance(t.type, Contract)\n- if t.type != contract and not t.type in exported:\n+ if t.type != contract and t.type not in exported:\n self._export_contract(t.type, exported, list_contract)\n+ elif isinstance(t, MappingType):\n+ self._export_from_type(t.type_from, contract, exported, list_contract)\n+ self._export_from_type(t.type_to, contract, exported, list_contract)\n+ elif isinstance(t, ArrayType):\n+ self._export_from_type(t.type, contract, exported, list_contract)\n \n def _export_contract(self, contract, exported, list_contract):\n if contract.name in exported:\n@@ -148,6 +155,10 @@\n for v in contract.variables + local_vars:\n self._export_from_type(v.type, contract, exported, list_contract)\n \n+ for s in contract.structures:\n+ for elem in s.elems.values():\n+ self._export_from_type(elem.type, contract, exported, list_contract)\n+\n # Find all convert and \"new\" operation that can lead to use an external contract\n for f in contract.functions_declared:\n for ir in f.slithir_operations:\n", "issue": "`slither-flat` doesn't flatten dYdX solo contracts correctly\nHi There,\r\n\r\nThanks for building `slither-flat`! It seems to be a lot more forgivign than `soljitsu` or `truffle-flatten`. 
I'm struggling, however, with using it to flatten dYdX contracts.\r\n\r\n**How to reproduce**\r\n\r\n```bash\r\n#!/usr/bin/bash\r\n\r\n# Dependency: npm, nvm\r\n# Better not to run this as a script\r\n\r\n# Get the repository\r\necho \"solo\" > .gitignore\r\ngit clone https://github.com/dydxprotocol/solo.git\r\ncd solo\r\n\r\n# Install javascript dependencies\r\nnpm i keccakjs --save\r\nnpm i solium@^1.0.0 --save\r\nnpm i acorn --save\r\nnpm i\r\n\r\n# Testing that everything installed correctly (make sure Docker is enabled at this point)\r\nnvm use 10.16.2 # Need this version to compile contracts\r\nnpm test\r\n\r\n# Install and run slither-flat\r\ncd ..\r\npipenv install slither-analyzer\r\npipenv run slither-flat solo\r\n```\r\n\r\n**Issues**\r\n\r\nI noticed two issues with these contracts:\r\n\r\n1) Duplicate contracts\r\n\r\nIn particular, `ReentrancyGuard` appeared many times in different contract files (and multiple times per file)\r\n\r\n2) Missing characters\r\n\r\nIn particular, `ReentrancyGuard` appeared as `ontract ReentrancyGuard` (missing a `c`)\r\n\r\n3) Some contracts don't exist in their own file anymore\r\n\r\nContract `external/OnlySolo.sol` doesn't have a corresponding `OnlySolo.sol` file in the output, instead it's embedded inline with other contracts in multiple places. [perhaps this is intentional]\r\n\r\n**Potential problems**\r\n\r\nWith `truffle-flatten` it flagged a circular dependency in the contracts, so perhaps this is related to the issue here.\n`slither-flat` doesn't flatten dYdX solo contracts correctly\nHi There,\r\n\r\nThanks for building `slither-flat`! It seems to be a lot more forgivign than `soljitsu` or `truffle-flatten`. I'm struggling, however, with using it to flatten dYdX contracts.\r\n\r\n**How to reproduce**\r\n\r\n```bash\r\n#!/usr/bin/bash\r\n\r\n# Dependency: npm, nvm\r\n# Better not to run this as a script\r\n\r\n# Get the repository\r\necho \"solo\" > .gitignore\r\ngit clone https://github.com/dydxprotocol/solo.git\r\ncd solo\r\n\r\n# Install javascript dependencies\r\nnpm i keccakjs --save\r\nnpm i solium@^1.0.0 --save\r\nnpm i acorn --save\r\nnpm i\r\n\r\n# Testing that everything installed correctly (make sure Docker is enabled at this point)\r\nnvm use 10.16.2 # Need this version to compile contracts\r\nnpm test\r\n\r\n# Install and run slither-flat\r\ncd ..\r\npipenv install slither-analyzer\r\npipenv run slither-flat solo\r\n```\r\n\r\n**Issues**\r\n\r\nI noticed two issues with these contracts:\r\n\r\n1) Duplicate contracts\r\n\r\nIn particular, `ReentrancyGuard` appeared many times in different contract files (and multiple times per file)\r\n\r\n2) Missing characters\r\n\r\nIn particular, `ReentrancyGuard` appeared as `ontract ReentrancyGuard` (missing a `c`)\r\n\r\n3) Some contracts don't exist in their own file anymore\r\n\r\nContract `external/OnlySolo.sol` doesn't have a corresponding `OnlySolo.sol` file in the output, instead it's embedded inline with other contracts in multiple places. 
[perhaps this is intentional]\r\n\r\n**Potential problems**\r\n\r\nWith `truffle-flatten` it flagged a circular dependency in the contracts, so perhaps this is related to the issue here.\n", "before_files": [{"content": "from pathlib import Path\nimport re\nimport logging\nfrom collections import namedtuple\n\nfrom slither.core.declarations import SolidityFunction\nfrom slither.exceptions import SlitherException\nfrom slither.core.solidity_types.user_defined_type import UserDefinedType\nfrom slither.core.declarations.structure import Structure\nfrom slither.core.declarations.enum import Enum\nfrom slither.core.declarations.contract import Contract\nfrom slither.slithir.operations import NewContract, TypeConversion, SolidityCall\n\nlogger = logging.getLogger(\"Slither-flattening\")\n\n# index: where to start\n# patch_type:\n# - public_to_external: public to external (external-to-public)\n# - calldata_to_memory: calldata to memory (external-to-public)\n# - line_removal: remove the line (remove-assert)\nPatch = namedtuple('PatchExternal', ['index', 'patch_type'])\n\nclass Flattening:\n DEFAULT_EXPORT_PATH = Path('crytic-export/flattening')\n\n def __init__(self, slither, external_to_public=False, remove_assert=False, private_to_internal=False):\n self._source_codes = {}\n self._slither = slither\n self._external_to_public = external_to_public\n self._remove_assert = remove_assert\n self._use_abi_encoder_v2 = False\n self._private_to_internal = private_to_internal\n\n self._check_abi_encoder_v2()\n\n for contract in slither.contracts:\n self._get_source_code(contract)\n\n def _check_abi_encoder_v2(self):\n for p in self._slither.pragma_directives:\n if 'ABIEncoderV2' in str(p.directive):\n self._use_abi_encoder_v2 = True\n return\n\n def _get_source_code(self, contract):\n src_mapping = contract.source_mapping\n content = self._slither.source_code[src_mapping['filename_absolute']]\n start = src_mapping['start']\n end = src_mapping['start'] + src_mapping['length']\n\n to_patch = []\n # interface must use external\n if self._external_to_public and contract.contract_kind != \"interface\":\n for f in contract.functions_declared:\n # fallback must be external\n if f.is_fallback or f.is_constructor_variables:\n continue\n if f.visibility == 'external':\n attributes_start = (f.parameters_src.source_mapping['start'] +\n f.parameters_src.source_mapping['length'])\n attributes_end = f.returns_src.source_mapping['start']\n attributes = content[attributes_start:attributes_end]\n regex = re.search(r'((\\sexternal)\\s+)|(\\sexternal)$|(\\)external)$', attributes)\n if regex:\n to_patch.append(Patch(attributes_start + regex.span()[0] + 1, 'public_to_external'))\n else:\n raise SlitherException(f'External keyword not found {f.name} {attributes}')\n\n for var in f.parameters:\n if var.location == \"calldata\":\n calldata_start = var.source_mapping['start']\n calldata_end = calldata_start + var.source_mapping['length']\n calldata_idx = content[calldata_start:calldata_end].find(' calldata ')\n to_patch.append(Patch(calldata_start + calldata_idx + 1, 'calldata_to_memory'))\n\n if self._private_to_internal:\n for variable in contract.state_variables_declared:\n if variable.visibility == 'private':\n print(variable.source_mapping)\n attributes_start = variable.source_mapping['start']\n attributes_end = attributes_start + variable.source_mapping['length']\n attributes = content[attributes_start:attributes_end]\n print(attributes)\n regex = re.search(r' private ', attributes)\n if regex:\n 
to_patch.append(Patch(attributes_start + regex.span()[0] + 1, 'private_to_internal'))\n else:\n raise SlitherException(f'private keyword not found {v.name} {attributes}')\n\n if self._remove_assert:\n for function in contract.functions_and_modifiers_declared:\n for node in function.nodes:\n for ir in node.irs:\n if isinstance(ir, SolidityCall) and ir.function == SolidityFunction('assert(bool)'):\n to_patch.append(Patch(node.source_mapping['start'], 'line_removal'))\n logger.info(f'Code commented: {node.expression} ({node.source_mapping_str})')\n\n to_patch.sort(key=lambda x: x.index, reverse=True)\n\n content = content[start:end]\n for patch in to_patch:\n patch_type = patch.patch_type\n index = patch.index\n index = index - start\n if patch_type == 'public_to_external':\n content = content[:index] + 'public' + content[index + len('external'):]\n if patch_type == 'private_to_internal':\n content = content[:index] + 'internal' + content[index + len('private'):]\n elif patch_type == 'calldata_to_memory':\n content = content[:index] + 'memory' + content[index + len('calldata'):]\n else:\n assert patch_type == 'line_removal'\n content = content[:index] + ' // ' + content[index:]\n\n self._source_codes[contract] = content\n\n def _export_from_type(self, t, contract, exported, list_contract):\n if isinstance(t, UserDefinedType):\n if isinstance(t.type, (Enum, Structure)):\n if t.type.contract != contract and not t.type.contract in exported:\n self._export_contract(t.type.contract, exported, list_contract)\n else:\n assert isinstance(t.type, Contract)\n if t.type != contract and not t.type in exported:\n self._export_contract(t.type, exported, list_contract)\n\n def _export_contract(self, contract, exported, list_contract):\n if contract.name in exported:\n return\n exported.add(contract.name)\n for inherited in contract.inheritance:\n self._export_contract(inherited, exported, list_contract)\n\n # Find all the external contracts called\n externals = contract.all_library_calls + contract.all_high_level_calls\n # externals is a list of (contract, function)\n # We also filter call to itself to avoid infilite loop\n externals = list(set([e[0] for e in externals if e[0] != contract]))\n\n for inherited in externals:\n self._export_contract(inherited, exported, list_contract)\n\n # Find all the external contracts use as a base type\n local_vars = []\n for f in contract.functions_declared:\n local_vars += f.variables\n\n for v in contract.variables + local_vars:\n self._export_from_type(v.type, contract, exported, list_contract)\n\n # Find all convert and \"new\" operation that can lead to use an external contract\n for f in contract.functions_declared:\n for ir in f.slithir_operations:\n if isinstance(ir, NewContract):\n if ir.contract_created != contract and not ir.contract_created in exported:\n self._export_contract(ir.contract_created, exported, list_contract)\n if isinstance(ir, TypeConversion):\n self._export_from_type(ir.type, contract, exported, list_contract)\n list_contract.append(self._source_codes[contract])\n\n def _export(self, contract, ret):\n self._export_contract(contract, set(), ret)\n path = Path(self.DEFAULT_EXPORT_PATH, f'{contract.name}.sol')\n logger.info(f'Export {path}')\n with open(path, 'w') as f:\n if self._slither.solc_version:\n f.write(f'pragma solidity {self._slither.solc_version};\\n')\n if self._use_abi_encoder_v2:\n f.write('pragma experimental ABIEncoderV2;\\n')\n f.write('\\n'.join(ret))\n f.write('\\n')\n\n def export(self, target=None):\n\n if not 
self.DEFAULT_EXPORT_PATH.exists():\n self.DEFAULT_EXPORT_PATH.mkdir(parents=True)\n\n if target is None:\n for contract in self._slither.contracts_derived:\n ret = []\n self._export(contract, ret)\n else:\n contract = self._slither.get_contract_from_name(target)\n if contract is None:\n logger.error(f'{target} not found')\n else:\n ret = []\n self._export(contract, ret)\n", "path": "slither/tools/flattening/flattening.py"}], "after_files": [{"content": "from pathlib import Path\nimport re\nimport logging\nfrom collections import namedtuple\n\nfrom slither.core.declarations import SolidityFunction\nfrom slither.core.solidity_types import MappingType, ArrayType\nfrom slither.exceptions import SlitherException\nfrom slither.core.solidity_types.user_defined_type import UserDefinedType\nfrom slither.core.declarations.structure import Structure\nfrom slither.core.declarations.enum import Enum\nfrom slither.core.declarations.contract import Contract\nfrom slither.slithir.operations import NewContract, TypeConversion, SolidityCall\n\nlogger = logging.getLogger(\"Slither-flattening\")\n\n# index: where to start\n# patch_type:\n# - public_to_external: public to external (external-to-public)\n# - calldata_to_memory: calldata to memory (external-to-public)\n# - line_removal: remove the line (remove-assert)\nPatch = namedtuple('PatchExternal', ['index', 'patch_type'])\n\n\nclass Flattening:\n DEFAULT_EXPORT_PATH = Path('crytic-export/flattening')\n\n def __init__(self, slither, external_to_public=False, remove_assert=False, private_to_internal=False):\n self._source_codes = {}\n self._slither = slither\n self._external_to_public = external_to_public\n self._remove_assert = remove_assert\n self._use_abi_encoder_v2 = False\n self._private_to_internal = private_to_internal\n\n self._check_abi_encoder_v2()\n\n for contract in slither.contracts:\n self._get_source_code(contract)\n\n def _check_abi_encoder_v2(self):\n for p in self._slither.pragma_directives:\n if 'ABIEncoderV2' in str(p.directive):\n self._use_abi_encoder_v2 = True\n return\n\n def _get_source_code(self, contract):\n src_mapping = contract.source_mapping\n content = self._slither.source_code[src_mapping['filename_absolute']].encode('utf8')\n start = src_mapping['start']\n end = src_mapping['start'] + src_mapping['length']\n\n to_patch = []\n # interface must use external\n if self._external_to_public and contract.contract_kind != \"interface\":\n for f in contract.functions_declared:\n # fallback must be external\n if f.is_fallback or f.is_constructor_variables:\n continue\n if f.visibility == 'external':\n attributes_start = (f.parameters_src.source_mapping['start'] +\n f.parameters_src.source_mapping['length'])\n attributes_end = f.returns_src.source_mapping['start']\n attributes = content[attributes_start:attributes_end]\n regex = re.search(r'((\\sexternal)\\s+)|(\\sexternal)$|(\\)external)$', attributes)\n if regex:\n to_patch.append(Patch(attributes_start + regex.span()[0] + 1, 'public_to_external'))\n else:\n raise SlitherException(f'External keyword not found {f.name} {attributes}')\n\n for var in f.parameters:\n if var.location == \"calldata\":\n calldata_start = var.source_mapping['start']\n calldata_end = calldata_start + var.source_mapping['length']\n calldata_idx = content[calldata_start:calldata_end].find(' calldata ')\n to_patch.append(Patch(calldata_start + calldata_idx + 1, 'calldata_to_memory'))\n\n if self._private_to_internal:\n for variable in contract.state_variables_declared:\n if variable.visibility == 'private':\n 
print(variable.source_mapping)\n attributes_start = variable.source_mapping['start']\n attributes_end = attributes_start + variable.source_mapping['length']\n attributes = content[attributes_start:attributes_end]\n print(attributes)\n regex = re.search(r' private ', attributes)\n if regex:\n to_patch.append(Patch(attributes_start + regex.span()[0] + 1, 'private_to_internal'))\n else:\n raise SlitherException(f'private keyword not found {v.name} {attributes}')\n\n if self._remove_assert:\n for function in contract.functions_and_modifiers_declared:\n for node in function.nodes:\n for ir in node.irs:\n if isinstance(ir, SolidityCall) and ir.function == SolidityFunction('assert(bool)'):\n to_patch.append(Patch(node.source_mapping['start'], 'line_removal'))\n logger.info(f'Code commented: {node.expression} ({node.source_mapping_str})')\n\n to_patch.sort(key=lambda x: x.index, reverse=True)\n\n content = content[start:end]\n for patch in to_patch:\n patch_type = patch.patch_type\n index = patch.index\n index = index - start\n if patch_type == 'public_to_external':\n content = content[:index] + 'public' + content[index + len('external'):]\n if patch_type == 'private_to_internal':\n content = content[:index] + 'internal' + content[index + len('private'):]\n elif patch_type == 'calldata_to_memory':\n content = content[:index] + 'memory' + content[index + len('calldata'):]\n else:\n assert patch_type == 'line_removal'\n content = content[:index] + ' // ' + content[index:]\n\n self._source_codes[contract] = content.decode('utf8')\n\n def _export_from_type(self, t, contract, exported, list_contract):\n if isinstance(t, UserDefinedType):\n if isinstance(t.type, (Enum, Structure)):\n if t.type.contract != contract and t.type.contract not in exported:\n self._export_contract(t.type.contract, exported, list_contract)\n else:\n assert isinstance(t.type, Contract)\n if t.type != contract and t.type not in exported:\n self._export_contract(t.type, exported, list_contract)\n elif isinstance(t, MappingType):\n self._export_from_type(t.type_from, contract, exported, list_contract)\n self._export_from_type(t.type_to, contract, exported, list_contract)\n elif isinstance(t, ArrayType):\n self._export_from_type(t.type, contract, exported, list_contract)\n\n def _export_contract(self, contract, exported, list_contract):\n if contract.name in exported:\n return\n exported.add(contract.name)\n for inherited in contract.inheritance:\n self._export_contract(inherited, exported, list_contract)\n\n # Find all the external contracts called\n externals = contract.all_library_calls + contract.all_high_level_calls\n # externals is a list of (contract, function)\n # We also filter call to itself to avoid infilite loop\n externals = list(set([e[0] for e in externals if e[0] != contract]))\n\n for inherited in externals:\n self._export_contract(inherited, exported, list_contract)\n\n # Find all the external contracts use as a base type\n local_vars = []\n for f in contract.functions_declared:\n local_vars += f.variables\n\n for v in contract.variables + local_vars:\n self._export_from_type(v.type, contract, exported, list_contract)\n\n for s in contract.structures:\n for elem in s.elems.values():\n self._export_from_type(elem.type, contract, exported, list_contract)\n\n # Find all convert and \"new\" operation that can lead to use an external contract\n for f in contract.functions_declared:\n for ir in f.slithir_operations:\n if isinstance(ir, NewContract):\n if ir.contract_created != contract and not ir.contract_created in 
exported:\n self._export_contract(ir.contract_created, exported, list_contract)\n if isinstance(ir, TypeConversion):\n self._export_from_type(ir.type, contract, exported, list_contract)\n list_contract.append(self._source_codes[contract])\n\n def _export(self, contract, ret):\n self._export_contract(contract, set(), ret)\n path = Path(self.DEFAULT_EXPORT_PATH, f'{contract.name}.sol')\n logger.info(f'Export {path}')\n with open(path, 'w') as f:\n if self._slither.solc_version:\n f.write(f'pragma solidity {self._slither.solc_version};\\n')\n if self._use_abi_encoder_v2:\n f.write('pragma experimental ABIEncoderV2;\\n')\n f.write('\\n'.join(ret))\n f.write('\\n')\n\n def export(self, target=None):\n\n if not self.DEFAULT_EXPORT_PATH.exists():\n self.DEFAULT_EXPORT_PATH.mkdir(parents=True)\n\n if target is None:\n for contract in self._slither.contracts_derived:\n ret = []\n self._export(contract, ret)\n else:\n contract = self._slither.get_contract_from_name(target)\n if contract is None:\n logger.error(f'{target} not found')\n else:\n ret = []\n self._export(contract, ret)\n", "path": "slither/tools/flattening/flattening.py"}]}
3,284
701
gh_patches_debug_2102
rasdani/github-patches
git_diff
rucio__rucio-1372
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Fix activity in BB8 Motivation ---------- BB8 uses activity `Data Rebalancing` but the activity defined in ATLAS schema is `Data rebalancing`. We should use the same activity everywhere, and it should be consistent with the share defined in FTS --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lib/rucio/vcsversion.py` Content: ``` 1 2 ''' 3 This file is automatically generated; Do not edit it. :) 4 ''' 5 VERSION_INFO = { 6 'final': True, 7 'version': '1.17.4', 8 'branch_nick': 'patch-0-1_17_4_client_release_prep', 9 'revision_id': 'ba996ce9bf8366cd7d8d1fb60a7f1daf8d4f517e', 10 'revno': 6827 11 } 12 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lib/rucio/vcsversion.py b/lib/rucio/vcsversion.py --- a/lib/rucio/vcsversion.py +++ b/lib/rucio/vcsversion.py @@ -4,8 +4,8 @@ ''' VERSION_INFO = { 'final': True, - 'version': '1.17.4', - 'branch_nick': 'patch-0-1_17_4_client_release_prep', - 'revision_id': 'ba996ce9bf8366cd7d8d1fb60a7f1daf8d4f517e', - 'revno': 6827 + 'version': '1.17.5', + 'branch_nick': 'patch-0-1_17_5_preparation', + 'revision_id': '537e1e47eb627741394b6bb9bc21d0f046296275', + 'revno': 6837 }
{"golden_diff": "diff --git a/lib/rucio/vcsversion.py b/lib/rucio/vcsversion.py\n--- a/lib/rucio/vcsversion.py\n+++ b/lib/rucio/vcsversion.py\n@@ -4,8 +4,8 @@\n '''\n VERSION_INFO = {\n 'final': True,\n- 'version': '1.17.4',\n- 'branch_nick': 'patch-0-1_17_4_client_release_prep',\n- 'revision_id': 'ba996ce9bf8366cd7d8d1fb60a7f1daf8d4f517e',\n- 'revno': 6827\n+ 'version': '1.17.5',\n+ 'branch_nick': 'patch-0-1_17_5_preparation',\n+ 'revision_id': '537e1e47eb627741394b6bb9bc21d0f046296275',\n+ 'revno': 6837\n }\n", "issue": "Fix activity in BB8\nMotivation\r\n----------\r\n\r\nBB8 uses activity `Data Rebalancing` but the activity defined in ATLAS schema is `Data rebalancing`. We should use the same activity everywhere, and it should be consistent with the share defined in FTS\n", "before_files": [{"content": "\n'''\nThis file is automatically generated; Do not edit it. :)\n'''\nVERSION_INFO = {\n 'final': True,\n 'version': '1.17.4',\n 'branch_nick': 'patch-0-1_17_4_client_release_prep',\n 'revision_id': 'ba996ce9bf8366cd7d8d1fb60a7f1daf8d4f517e',\n 'revno': 6827\n}\n", "path": "lib/rucio/vcsversion.py"}], "after_files": [{"content": "\n'''\nThis file is automatically generated; Do not edit it. :)\n'''\nVERSION_INFO = {\n 'final': True,\n 'version': '1.17.5',\n 'branch_nick': 'patch-0-1_17_5_preparation',\n 'revision_id': '537e1e47eb627741394b6bb9bc21d0f046296275',\n 'revno': 6837\n}\n", "path": "lib/rucio/vcsversion.py"}]}
443
243
gh_patches_debug_19673
rasdani/github-patches
git_diff
pypa__setuptools-3023
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- AssertionError no _distutils when running get-pip with Setuptools and USE_DISTUTILS=local Somehow related: Since `setuptools` v60 at least on Debian running `get-pip.py` over an existing `pip` + `setuptools` fails, when the Debian `python3-distutils` package is installed, e.g. as dependency of the Python development headers package: ``` /usr/local/lib/python3.9/dist-packages/_distutils_hack/__init__.py:24: UserWarning: Distutils was imported before Setuptools, but importing Setuptools also replaces the `distutils` module in `sys.modules`. This may lead to undesirable behaviors or errors. To avoid these issues, avoid using distutils directly, ensure that setuptools is installed in the traditional way (e.g. not an editable install), and/or make sure that setuptools is always imported before distutils. warnings.warn( /usr/local/lib/python3.9/dist-packages/_distutils_hack/__init__.py:36: UserWarning: Setuptools is replacing distutils. warnings.warn("Setuptools is replacing distutils.") Traceback (most recent call last): File "/tmp/DietPi-Software/./get-pip.py", line 27081, in <module> main() File "/tmp/DietPi-Software/./get-pip.py", line 139, in main bootstrap(tmpdir=tmpdir) File "/tmp/DietPi-Software/./get-pip.py", line 120, in bootstrap args = determine_pip_install_arguments() File "/tmp/DietPi-Software/./get-pip.py", line 65, in determine_pip_install_arguments import setuptools # noqa File "/usr/local/lib/python3.9/dist-packages/setuptools/__init__.py", line 8, in <module> import _distutils_hack.override # noqa: F401 File "/usr/local/lib/python3.9/dist-packages/_distutils_hack/override.py", line 1, in <module> __import__('_distutils_hack').do_override() File "/usr/local/lib/python3.9/dist-packages/_distutils_hack/__init__.py", line 73, in do_override ensure_local_distutils() File "/usr/local/lib/python3.9/dist-packages/_distutils_hack/__init__.py", line 61, in ensure_local_distutils assert '_distutils' in core.__file__, core.__file__ AssertionError: /usr/lib/python3.9/distutils/core.py ``` This `_distutils_hack` comes from `get-pip.py`. Looks like removing any use and detangling `distutils` from `pip` in favour of `setuptools` would solve both issues. _Originally posted by @MichaIng in https://github.com/pypa/pip/issues/10742#issuecomment-1003741034_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `_distutils_hack/__init__.py` Content: ``` 1 # don't import any costly modules 2 import sys 3 import os 4 5 6 is_pypy = '__pypy__' in sys.builtin_module_names 7 8 9 def warn_distutils_present(): 10 if 'distutils' not in sys.modules: 11 return 12 if is_pypy and sys.version_info < (3, 7): 13 # PyPy for 3.6 unconditionally imports distutils, so bypass the warning 14 # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250 15 return 16 import warnings 17 warnings.warn( 18 "Distutils was imported before Setuptools, but importing Setuptools " 19 "also replaces the `distutils` module in `sys.modules`. This may lead " 20 "to undesirable behaviors or errors. To avoid these issues, avoid " 21 "using distutils directly, ensure that setuptools is installed in the " 22 "traditional way (e.g. 
not an editable install), and/or make sure " 23 "that setuptools is always imported before distutils.") 24 25 26 def clear_distutils(): 27 if 'distutils' not in sys.modules: 28 return 29 import warnings 30 warnings.warn("Setuptools is replacing distutils.") 31 mods = [ 32 name for name in sys.modules 33 if name == "distutils" or name.startswith("distutils.") 34 ] 35 for name in mods: 36 del sys.modules[name] 37 38 39 def enabled(): 40 """ 41 Allow selection of distutils by environment variable. 42 """ 43 which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local') 44 return which == 'local' 45 46 47 def ensure_local_distutils(): 48 import importlib 49 clear_distutils() 50 51 # With the DistutilsMetaFinder in place, 52 # perform an import to cause distutils to be 53 # loaded from setuptools._distutils. Ref #2906. 54 with shim(): 55 importlib.import_module('distutils') 56 57 # check that submodules load as expected 58 core = importlib.import_module('distutils.core') 59 assert '_distutils' in core.__file__, core.__file__ 60 61 62 def do_override(): 63 """ 64 Ensure that the local copy of distutils is preferred over stdlib. 65 66 See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401 67 for more motivation. 68 """ 69 if enabled(): 70 warn_distutils_present() 71 ensure_local_distutils() 72 73 74 class _TrivialRe: 75 def __init__(self, *patterns): 76 self._patterns = patterns 77 78 def match(self, string): 79 return all(pat in string for pat in self._patterns) 80 81 82 class DistutilsMetaFinder: 83 def find_spec(self, fullname, path, target=None): 84 if path is not None: 85 return 86 87 method_name = 'spec_for_{fullname}'.format(**locals()) 88 method = getattr(self, method_name, lambda: None) 89 return method() 90 91 def spec_for_distutils(self): 92 import importlib 93 import importlib.abc 94 import importlib.util 95 import warnings 96 97 # warnings.filterwarnings() imports the re module 98 warnings._add_filter( 99 'ignore', 100 _TrivialRe("distutils", "deprecated"), 101 DeprecationWarning, 102 None, 103 0, 104 append=True 105 ) 106 107 try: 108 mod = importlib.import_module('setuptools._distutils') 109 except Exception: 110 # There are a couple of cases where setuptools._distutils 111 # may not be present: 112 # - An older Setuptools without a local distutils is 113 # taking precedence. Ref #2957. 114 # - Path manipulation during sitecustomize removes 115 # setuptools from the path but only after the hook 116 # has been loaded. Ref #2980. 117 # In either case, fall back to stdlib behavior. 118 return 119 120 class DistutilsLoader(importlib.abc.Loader): 121 122 def create_module(self, spec): 123 return mod 124 125 def exec_module(self, module): 126 pass 127 128 return importlib.util.spec_from_loader( 129 'distutils', DistutilsLoader(), origin=mod.__file__ 130 ) 131 132 def spec_for_pip(self): 133 """ 134 Ensure stdlib distutils when running under pip. 135 See pypa/pip#8761 for rationale. 136 """ 137 if self.pip_imported_during_build(): 138 return 139 if self.is_get_pip(): 140 return 141 clear_distutils() 142 self.spec_for_distutils = lambda: None 143 144 @classmethod 145 def pip_imported_during_build(cls): 146 """ 147 Detect if pip is being imported in a build script. Ref #2355. 148 """ 149 import traceback 150 return any( 151 cls.frame_file_is_setup(frame) 152 for frame, line in traceback.walk_stack(None) 153 ) 154 155 @classmethod 156 def is_get_pip(cls): 157 """ 158 Detect if get-pip is being invoked. Ref #2993. 
159 """ 160 try: 161 import __main__ 162 return os.path.basename(__main__.__file__) == 'get-pip.py' 163 except AttributeError: 164 pass 165 166 @staticmethod 167 def frame_file_is_setup(frame): 168 """ 169 Return True if the indicated frame suggests a setup.py file. 170 """ 171 # some frames may not have __file__ (#2940) 172 return frame.f_globals.get('__file__', '').endswith('setup.py') 173 174 175 DISTUTILS_FINDER = DistutilsMetaFinder() 176 177 178 def add_shim(): 179 DISTUTILS_FINDER in sys.meta_path or insert_shim() 180 181 182 class shim: 183 def __enter__(self): 184 insert_shim() 185 186 def __exit__(self, exc, value, tb): 187 remove_shim() 188 189 190 def insert_shim(): 191 sys.meta_path.insert(0, DISTUTILS_FINDER) 192 193 194 def remove_shim(): 195 try: 196 sys.meta_path.remove(DISTUTILS_FINDER) 197 except ValueError: 198 pass 199 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/_distutils_hack/__init__.py b/_distutils_hack/__init__.py --- a/_distutils_hack/__init__.py +++ b/_distutils_hack/__init__.py @@ -136,11 +136,36 @@ """ if self.pip_imported_during_build(): return - if self.is_get_pip(): - return clear_distutils() self.spec_for_distutils = lambda: None + def spec_for_setuptools(self): + """ + get-pip imports setuptools solely for the purpose of + determining if it's installed. In this case, provide + a stubbed spec to represent setuptools being present + without invoking any behavior. + + Workaround for pypa/get-pip#137. + """ + if not self.is_get_pip(): + return + + import importlib + + class StubbedLoader(importlib.abc.Loader): + + def create_module(self, spec): + import types + return types.ModuleType('setuptools') + + def exec_module(self, module): + pass + + return importlib.util.spec_from_loader( + 'setuptools', StubbedLoader(), + ) + @classmethod def pip_imported_during_build(cls): """
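For context on `spec_for_setuptools` above: get-pip imports `setuptools` only to determine whether it is installed, so the hook answers that probe with an empty stub module instead of letting the distutils swap (and the failing `assert '_distutils' in core.__file__`) run. A self-contained sketch of the same importlib mechanism, using a hypothetical module name rather than `setuptools` itself:

```python
# Illustrative sketch: a meta path finder that answers an import with a stub.
import importlib.abc
import importlib.util
import sys
import types


class StubFinder(importlib.abc.MetaPathFinder):
    def find_spec(self, fullname, path, target=None):
        if fullname != "fake_setuptools":  # hypothetical name for the demo
            return None

        class StubbedLoader(importlib.abc.Loader):
            def create_module(self, spec):
                # Hand back an empty module object; no real code is executed.
                return types.ModuleType(spec.name)

            def exec_module(self, module):
                pass  # nothing to execute; the module only needs to exist

        return importlib.util.spec_from_loader(fullname, StubbedLoader())


sys.meta_path.insert(0, StubFinder())
import fake_setuptools  # succeeds without any real package on disk
print(fake_setuptools)
```

In the actual hook the stub is only served when `is_get_pip()` detects get-pip as the entry script, so ordinary imports of setuptools still load the real package.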
{"golden_diff": "diff --git a/_distutils_hack/__init__.py b/_distutils_hack/__init__.py\n--- a/_distutils_hack/__init__.py\n+++ b/_distutils_hack/__init__.py\n@@ -136,11 +136,36 @@\n \"\"\"\n if self.pip_imported_during_build():\n return\n- if self.is_get_pip():\n- return\n clear_distutils()\n self.spec_for_distutils = lambda: None\n \n+ def spec_for_setuptools(self):\n+ \"\"\"\n+ get-pip imports setuptools solely for the purpose of\n+ determining if it's installed. In this case, provide\n+ a stubbed spec to represent setuptools being present\n+ without invoking any behavior.\n+\n+ Workaround for pypa/get-pip#137.\n+ \"\"\"\n+ if not self.is_get_pip():\n+ return\n+\n+ import importlib\n+\n+ class StubbedLoader(importlib.abc.Loader):\n+\n+ def create_module(self, spec):\n+ import types\n+ return types.ModuleType('setuptools')\n+\n+ def exec_module(self, module):\n+ pass\n+\n+ return importlib.util.spec_from_loader(\n+ 'setuptools', StubbedLoader(),\n+ )\n+\n @classmethod\n def pip_imported_during_build(cls):\n \"\"\"\n", "issue": "AssertionError no _distutils when running get-pip with Setuptools and USE_DISTUTILS=local\nSomehow related: Since `setuptools` v60 at least on Debian running `get-pip.py` over an existing `pip` + `setuptools` fails, when the Debian `python3-distutils` package is installed, e.g. as dependency of the Python development headers package:\r\n```\r\n/usr/local/lib/python3.9/dist-packages/_distutils_hack/__init__.py:24: UserWarning: Distutils was imported before Setuptools, but importing Setuptools also replaces the `distutils` module in `sys.modules`. This may lead to undesirable behaviors or errors. To avoid these issues, avoid using distutils directly, ensure that setuptools is installed in the traditional way (e.g. not an editable install), and/or make sure that setuptools is always imported before distutils.\r\n warnings.warn(\r\n/usr/local/lib/python3.9/dist-packages/_distutils_hack/__init__.py:36: UserWarning: Setuptools is replacing distutils.\r\n warnings.warn(\"Setuptools is replacing distutils.\")\r\nTraceback (most recent call last):\r\n File \"/tmp/DietPi-Software/./get-pip.py\", line 27081, in <module>\r\n main()\r\n File \"/tmp/DietPi-Software/./get-pip.py\", line 139, in main\r\n bootstrap(tmpdir=tmpdir)\r\n File \"/tmp/DietPi-Software/./get-pip.py\", line 120, in bootstrap\r\n args = determine_pip_install_arguments()\r\n File \"/tmp/DietPi-Software/./get-pip.py\", line 65, in determine_pip_install_arguments\r\n import setuptools # noqa\r\n File \"/usr/local/lib/python3.9/dist-packages/setuptools/__init__.py\", line 8, in <module>\r\n import _distutils_hack.override # noqa: F401\r\n File \"/usr/local/lib/python3.9/dist-packages/_distutils_hack/override.py\", line 1, in <module>\r\n __import__('_distutils_hack').do_override()\r\n File \"/usr/local/lib/python3.9/dist-packages/_distutils_hack/__init__.py\", line 73, in do_override\r\n ensure_local_distutils()\r\n File \"/usr/local/lib/python3.9/dist-packages/_distutils_hack/__init__.py\", line 61, in ensure_local_distutils\r\n assert '_distutils' in core.__file__, core.__file__\r\nAssertionError: /usr/lib/python3.9/distutils/core.py\r\n```\r\nThis `_distutils_hack` comes from `get-pip.py`. 
Looks like removing any use and detangling `distutils` from `pip` in favour of `setuptools` would solve both issues.\r\n\r\n_Originally posted by @MichaIng in https://github.com/pypa/pip/issues/10742#issuecomment-1003741034_\n", "before_files": [{"content": "# don't import any costly modules\nimport sys\nimport os\n\n\nis_pypy = '__pypy__' in sys.builtin_module_names\n\n\ndef warn_distutils_present():\n if 'distutils' not in sys.modules:\n return\n if is_pypy and sys.version_info < (3, 7):\n # PyPy for 3.6 unconditionally imports distutils, so bypass the warning\n # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250\n return\n import warnings\n warnings.warn(\n \"Distutils was imported before Setuptools, but importing Setuptools \"\n \"also replaces the `distutils` module in `sys.modules`. This may lead \"\n \"to undesirable behaviors or errors. To avoid these issues, avoid \"\n \"using distutils directly, ensure that setuptools is installed in the \"\n \"traditional way (e.g. not an editable install), and/or make sure \"\n \"that setuptools is always imported before distutils.\")\n\n\ndef clear_distutils():\n if 'distutils' not in sys.modules:\n return\n import warnings\n warnings.warn(\"Setuptools is replacing distutils.\")\n mods = [\n name for name in sys.modules\n if name == \"distutils\" or name.startswith(\"distutils.\")\n ]\n for name in mods:\n del sys.modules[name]\n\n\ndef enabled():\n \"\"\"\n Allow selection of distutils by environment variable.\n \"\"\"\n which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local')\n return which == 'local'\n\n\ndef ensure_local_distutils():\n import importlib\n clear_distutils()\n\n # With the DistutilsMetaFinder in place,\n # perform an import to cause distutils to be\n # loaded from setuptools._distutils. Ref #2906.\n with shim():\n importlib.import_module('distutils')\n\n # check that submodules load as expected\n core = importlib.import_module('distutils.core')\n assert '_distutils' in core.__file__, core.__file__\n\n\ndef do_override():\n \"\"\"\n Ensure that the local copy of distutils is preferred over stdlib.\n\n See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401\n for more motivation.\n \"\"\"\n if enabled():\n warn_distutils_present()\n ensure_local_distutils()\n\n\nclass _TrivialRe:\n def __init__(self, *patterns):\n self._patterns = patterns\n\n def match(self, string):\n return all(pat in string for pat in self._patterns)\n\n\nclass DistutilsMetaFinder:\n def find_spec(self, fullname, path, target=None):\n if path is not None:\n return\n\n method_name = 'spec_for_{fullname}'.format(**locals())\n method = getattr(self, method_name, lambda: None)\n return method()\n\n def spec_for_distutils(self):\n import importlib\n import importlib.abc\n import importlib.util\n import warnings\n\n # warnings.filterwarnings() imports the re module\n warnings._add_filter(\n 'ignore',\n _TrivialRe(\"distutils\", \"deprecated\"),\n DeprecationWarning,\n None,\n 0,\n append=True\n )\n\n try:\n mod = importlib.import_module('setuptools._distutils')\n except Exception:\n # There are a couple of cases where setuptools._distutils\n # may not be present:\n # - An older Setuptools without a local distutils is\n # taking precedence. Ref #2957.\n # - Path manipulation during sitecustomize removes\n # setuptools from the path but only after the hook\n # has been loaded. 
Ref #2980.\n # In either case, fall back to stdlib behavior.\n return\n\n class DistutilsLoader(importlib.abc.Loader):\n\n def create_module(self, spec):\n return mod\n\n def exec_module(self, module):\n pass\n\n return importlib.util.spec_from_loader(\n 'distutils', DistutilsLoader(), origin=mod.__file__\n )\n\n def spec_for_pip(self):\n \"\"\"\n Ensure stdlib distutils when running under pip.\n See pypa/pip#8761 for rationale.\n \"\"\"\n if self.pip_imported_during_build():\n return\n if self.is_get_pip():\n return\n clear_distutils()\n self.spec_for_distutils = lambda: None\n\n @classmethod\n def pip_imported_during_build(cls):\n \"\"\"\n Detect if pip is being imported in a build script. Ref #2355.\n \"\"\"\n import traceback\n return any(\n cls.frame_file_is_setup(frame)\n for frame, line in traceback.walk_stack(None)\n )\n\n @classmethod\n def is_get_pip(cls):\n \"\"\"\n Detect if get-pip is being invoked. Ref #2993.\n \"\"\"\n try:\n import __main__\n return os.path.basename(__main__.__file__) == 'get-pip.py'\n except AttributeError:\n pass\n\n @staticmethod\n def frame_file_is_setup(frame):\n \"\"\"\n Return True if the indicated frame suggests a setup.py file.\n \"\"\"\n # some frames may not have __file__ (#2940)\n return frame.f_globals.get('__file__', '').endswith('setup.py')\n\n\nDISTUTILS_FINDER = DistutilsMetaFinder()\n\n\ndef add_shim():\n DISTUTILS_FINDER in sys.meta_path or insert_shim()\n\n\nclass shim:\n def __enter__(self):\n insert_shim()\n\n def __exit__(self, exc, value, tb):\n remove_shim()\n\n\ndef insert_shim():\n sys.meta_path.insert(0, DISTUTILS_FINDER)\n\n\ndef remove_shim():\n try:\n sys.meta_path.remove(DISTUTILS_FINDER)\n except ValueError:\n pass\n", "path": "_distutils_hack/__init__.py"}], "after_files": [{"content": "# don't import any costly modules\nimport sys\nimport os\n\n\nis_pypy = '__pypy__' in sys.builtin_module_names\n\n\ndef warn_distutils_present():\n if 'distutils' not in sys.modules:\n return\n if is_pypy and sys.version_info < (3, 7):\n # PyPy for 3.6 unconditionally imports distutils, so bypass the warning\n # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250\n return\n import warnings\n warnings.warn(\n \"Distutils was imported before Setuptools, but importing Setuptools \"\n \"also replaces the `distutils` module in `sys.modules`. This may lead \"\n \"to undesirable behaviors or errors. To avoid these issues, avoid \"\n \"using distutils directly, ensure that setuptools is installed in the \"\n \"traditional way (e.g. not an editable install), and/or make sure \"\n \"that setuptools is always imported before distutils.\")\n\n\ndef clear_distutils():\n if 'distutils' not in sys.modules:\n return\n import warnings\n warnings.warn(\"Setuptools is replacing distutils.\")\n mods = [\n name for name in sys.modules\n if name == \"distutils\" or name.startswith(\"distutils.\")\n ]\n for name in mods:\n del sys.modules[name]\n\n\ndef enabled():\n \"\"\"\n Allow selection of distutils by environment variable.\n \"\"\"\n which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local')\n return which == 'local'\n\n\ndef ensure_local_distutils():\n import importlib\n clear_distutils()\n\n # With the DistutilsMetaFinder in place,\n # perform an import to cause distutils to be\n # loaded from setuptools._distutils. 
Ref #2906.\n with shim():\n importlib.import_module('distutils')\n\n # check that submodules load as expected\n core = importlib.import_module('distutils.core')\n assert '_distutils' in core.__file__, core.__file__\n\n\ndef do_override():\n \"\"\"\n Ensure that the local copy of distutils is preferred over stdlib.\n\n See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401\n for more motivation.\n \"\"\"\n if enabled():\n warn_distutils_present()\n ensure_local_distutils()\n\n\nclass _TrivialRe:\n def __init__(self, *patterns):\n self._patterns = patterns\n\n def match(self, string):\n return all(pat in string for pat in self._patterns)\n\n\nclass DistutilsMetaFinder:\n def find_spec(self, fullname, path, target=None):\n if path is not None:\n return\n\n method_name = 'spec_for_{fullname}'.format(**locals())\n method = getattr(self, method_name, lambda: None)\n return method()\n\n def spec_for_distutils(self):\n import importlib\n import importlib.abc\n import importlib.util\n import warnings\n\n # warnings.filterwarnings() imports the re module\n warnings._add_filter(\n 'ignore',\n _TrivialRe(\"distutils\", \"deprecated\"),\n DeprecationWarning,\n None,\n 0,\n append=True\n )\n\n try:\n mod = importlib.import_module('setuptools._distutils')\n except Exception:\n # There are a couple of cases where setuptools._distutils\n # may not be present:\n # - An older Setuptools without a local distutils is\n # taking precedence. Ref #2957.\n # - Path manipulation during sitecustomize removes\n # setuptools from the path but only after the hook\n # has been loaded. Ref #2980.\n # In either case, fall back to stdlib behavior.\n return\n\n class DistutilsLoader(importlib.abc.Loader):\n\n def create_module(self, spec):\n return mod\n\n def exec_module(self, module):\n pass\n\n return importlib.util.spec_from_loader(\n 'distutils', DistutilsLoader(), origin=mod.__file__\n )\n\n def spec_for_pip(self):\n \"\"\"\n Ensure stdlib distutils when running under pip.\n See pypa/pip#8761 for rationale.\n \"\"\"\n if self.pip_imported_during_build():\n return\n clear_distutils()\n self.spec_for_distutils = lambda: None\n\n def spec_for_setuptools(self):\n \"\"\"\n get-pip imports setuptools solely for the purpose of\n determining if it's installed. In this case, provide\n a stubbed spec to represent setuptools being present\n without invoking any behavior.\n\n Workaround for pypa/get-pip#137.\n \"\"\"\n if not self.is_get_pip():\n return\n\n import importlib\n\n class StubbedLoader(importlib.abc.Loader):\n\n def create_module(self, spec):\n import types\n return types.ModuleType('setuptools')\n\n def exec_module(self, module):\n pass\n\n return importlib.util.spec_from_loader(\n 'setuptools', StubbedLoader(),\n )\n\n @classmethod\n def pip_imported_during_build(cls):\n \"\"\"\n Detect if pip is being imported in a build script. Ref #2355.\n \"\"\"\n import traceback\n return any(\n cls.frame_file_is_setup(frame)\n for frame, line in traceback.walk_stack(None)\n )\n\n @classmethod\n def is_get_pip(cls):\n \"\"\"\n Detect if get-pip is being invoked. 
Ref #2993.\n \"\"\"\n try:\n import __main__\n return os.path.basename(__main__.__file__) == 'get-pip.py'\n except AttributeError:\n pass\n\n @staticmethod\n def frame_file_is_setup(frame):\n \"\"\"\n Return True if the indicated frame suggests a setup.py file.\n \"\"\"\n # some frames may not have __file__ (#2940)\n return frame.f_globals.get('__file__', '').endswith('setup.py')\n\n\nDISTUTILS_FINDER = DistutilsMetaFinder()\n\n\ndef add_shim():\n DISTUTILS_FINDER in sys.meta_path or insert_shim()\n\n\nclass shim:\n def __enter__(self):\n insert_shim()\n\n def __exit__(self, exc, value, tb):\n remove_shim()\n\n\ndef insert_shim():\n sys.meta_path.insert(0, DISTUTILS_FINDER)\n\n\ndef remove_shim():\n try:\n sys.meta_path.remove(DISTUTILS_FINDER)\n except ValueError:\n pass\n", "path": "_distutils_hack/__init__.py"}]}
2,741
301
gh_patches_debug_17555
rasdani/github-patches
git_diff
hpcaitech__ColossalAI-5324
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [tensor] fix some unittests [tensor] fix some unittests [tensor] fix some unittests --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `extensions/base_extension.py` Content: ``` 1 import hashlib 2 import os 3 from abc import ABC, abstractmethod 4 from typing import Union 5 6 __all__ = ["_Extension"] 7 8 9 class _Extension(ABC): 10 def __init__(self, name: str, support_aot: bool, support_jit: bool, priority: int = 1): 11 self._name = name 12 self._support_aot = support_aot 13 self._support_jit = support_jit 14 self.priority = priority 15 16 @property 17 def name(self): 18 return self._name 19 20 @property 21 def support_aot(self): 22 return self._support_aot 23 24 @property 25 def support_jit(self): 26 return self._support_jit 27 28 @staticmethod 29 def get_jit_extension_folder_path(): 30 """ 31 Kernels which are compiled during runtime will be stored in the same cache folder for reuse. 32 The folder is in the path ~/.cache/colossalai/torch_extensions/<cache-folder>. 33 The name of the <cache-folder> follows a common format: 34 torch<torch_version_major>.<torch_version_minor>_<device_name><device_version>-<hash> 35 36 The <hash> suffix is the hash value of the path of the `colossalai` file. 37 """ 38 import torch 39 40 import colossalai 41 from colossalai.accelerator import get_accelerator 42 43 # get torch version 44 torch_version_major = torch.__version__.split(".")[0] 45 torch_version_minor = torch.__version__.split(".")[1] 46 47 # get device version 48 device_name = get_accelerator().name 49 device_version = get_accelerator().get_version() 50 51 # use colossalai's file path as hash 52 hash_suffix = hashlib.sha256(colossalai.__file__.encode()).hexdigest() 53 54 # concat 55 home_directory = os.path.expanduser("~") 56 extension_directory = f".cache/colossalai/torch_extensions/torch{torch_version_major}.{torch_version_minor}_{device_name}-{device_version}-{hash_suffix}" 57 cache_directory = os.path.join(home_directory, extension_directory) 58 return cache_directory 59 60 @abstractmethod 61 def is_hardware_available(self) -> bool: 62 """ 63 Check if the hardware required by the kernel is available. 64 """ 65 66 @abstractmethod 67 def assert_hardware_compatible(self) -> bool: 68 """ 69 Check if the hardware required by the kernel is compatible. 70 """ 71 72 @abstractmethod 73 def build_aot(self) -> Union["CppExtension", "CUDAExtension"]: 74 pass 75 76 @abstractmethod 77 def build_jit(self) -> None: 78 pass 79 80 @abstractmethod 81 def load(self): 82 pass 83 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/extensions/base_extension.py b/extensions/base_extension.py --- a/extensions/base_extension.py +++ b/extensions/base_extension.py @@ -1,7 +1,7 @@ import hashlib import os from abc import ABC, abstractmethod -from typing import Union +from typing import Callable, Union __all__ = ["_Extension"] @@ -64,7 +64,7 @@ """ @abstractmethod - def assert_hardware_compatible(self) -> bool: + def assert_hardware_compatible(self) -> None: """ Check if the hardware required by the kernel is compatible. """ @@ -74,9 +74,9 @@ pass @abstractmethod - def build_jit(self) -> None: + def build_jit(self) -> Callable: pass @abstractmethod - def load(self): + def load(self) -> Callable: pass
{"golden_diff": "diff --git a/extensions/base_extension.py b/extensions/base_extension.py\n--- a/extensions/base_extension.py\n+++ b/extensions/base_extension.py\n@@ -1,7 +1,7 @@\n import hashlib\n import os\n from abc import ABC, abstractmethod\n-from typing import Union\n+from typing import Callable, Union\n \n __all__ = [\"_Extension\"]\n \n@@ -64,7 +64,7 @@\n \"\"\"\n \n @abstractmethod\n- def assert_hardware_compatible(self) -> bool:\n+ def assert_hardware_compatible(self) -> None:\n \"\"\"\n Check if the hardware required by the kernel is compatible.\n \"\"\"\n@@ -74,9 +74,9 @@\n pass\n \n @abstractmethod\n- def build_jit(self) -> None:\n+ def build_jit(self) -> Callable:\n pass\n \n @abstractmethod\n- def load(self):\n+ def load(self) -> Callable:\n pass\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "import hashlib\nimport os\nfrom abc import ABC, abstractmethod\nfrom typing import Union\n\n__all__ = [\"_Extension\"]\n\n\nclass _Extension(ABC):\n def __init__(self, name: str, support_aot: bool, support_jit: bool, priority: int = 1):\n self._name = name\n self._support_aot = support_aot\n self._support_jit = support_jit\n self.priority = priority\n\n @property\n def name(self):\n return self._name\n\n @property\n def support_aot(self):\n return self._support_aot\n\n @property\n def support_jit(self):\n return self._support_jit\n\n @staticmethod\n def get_jit_extension_folder_path():\n \"\"\"\n Kernels which are compiled during runtime will be stored in the same cache folder for reuse.\n The folder is in the path ~/.cache/colossalai/torch_extensions/<cache-folder>.\n The name of the <cache-folder> follows a common format:\n torch<torch_version_major>.<torch_version_minor>_<device_name><device_version>-<hash>\n\n The <hash> suffix is the hash value of the path of the `colossalai` file.\n \"\"\"\n import torch\n\n import colossalai\n from colossalai.accelerator import get_accelerator\n\n # get torch version\n torch_version_major = torch.__version__.split(\".\")[0]\n torch_version_minor = torch.__version__.split(\".\")[1]\n\n # get device version\n device_name = get_accelerator().name\n device_version = get_accelerator().get_version()\n\n # use colossalai's file path as hash\n hash_suffix = hashlib.sha256(colossalai.__file__.encode()).hexdigest()\n\n # concat\n home_directory = os.path.expanduser(\"~\")\n extension_directory = f\".cache/colossalai/torch_extensions/torch{torch_version_major}.{torch_version_minor}_{device_name}-{device_version}-{hash_suffix}\"\n cache_directory = os.path.join(home_directory, extension_directory)\n return cache_directory\n\n @abstractmethod\n def is_hardware_available(self) -> bool:\n \"\"\"\n Check if the hardware required by the kernel is available.\n \"\"\"\n\n @abstractmethod\n def assert_hardware_compatible(self) -> bool:\n \"\"\"\n Check if the hardware required by the kernel is compatible.\n \"\"\"\n\n @abstractmethod\n def build_aot(self) -> Union[\"CppExtension\", \"CUDAExtension\"]:\n pass\n\n @abstractmethod\n def build_jit(self) -> None:\n pass\n\n @abstractmethod\n def load(self):\n pass\n", "path": "extensions/base_extension.py"}], "after_files": [{"content": "import hashlib\nimport os\nfrom abc import ABC, abstractmethod\nfrom typing import Callable, Union\n\n__all__ = [\"_Extension\"]\n\n\nclass _Extension(ABC):\n def __init__(self, name: str, support_aot: bool, support_jit: bool, priority: int = 1):\n self._name = name\n self._support_aot = 
support_aot\n self._support_jit = support_jit\n self.priority = priority\n\n @property\n def name(self):\n return self._name\n\n @property\n def support_aot(self):\n return self._support_aot\n\n @property\n def support_jit(self):\n return self._support_jit\n\n @staticmethod\n def get_jit_extension_folder_path():\n \"\"\"\n Kernels which are compiled during runtime will be stored in the same cache folder for reuse.\n The folder is in the path ~/.cache/colossalai/torch_extensions/<cache-folder>.\n The name of the <cache-folder> follows a common format:\n torch<torch_version_major>.<torch_version_minor>_<device_name><device_version>-<hash>\n\n The <hash> suffix is the hash value of the path of the `colossalai` file.\n \"\"\"\n import torch\n\n import colossalai\n from colossalai.accelerator import get_accelerator\n\n # get torch version\n torch_version_major = torch.__version__.split(\".\")[0]\n torch_version_minor = torch.__version__.split(\".\")[1]\n\n # get device version\n device_name = get_accelerator().name\n device_version = get_accelerator().get_version()\n\n # use colossalai's file path as hash\n hash_suffix = hashlib.sha256(colossalai.__file__.encode()).hexdigest()\n\n # concat\n home_directory = os.path.expanduser(\"~\")\n extension_directory = f\".cache/colossalai/torch_extensions/torch{torch_version_major}.{torch_version_minor}_{device_name}-{device_version}-{hash_suffix}\"\n cache_directory = os.path.join(home_directory, extension_directory)\n return cache_directory\n\n @abstractmethod\n def is_hardware_available(self) -> bool:\n \"\"\"\n Check if the hardware required by the kernel is available.\n \"\"\"\n\n @abstractmethod\n def assert_hardware_compatible(self) -> None:\n \"\"\"\n Check if the hardware required by the kernel is compatible.\n \"\"\"\n\n @abstractmethod\n def build_aot(self) -> Union[\"CppExtension\", \"CUDAExtension\"]:\n pass\n\n @abstractmethod\n def build_jit(self) -> Callable:\n pass\n\n @abstractmethod\n def load(self) -> Callable:\n pass\n", "path": "extensions/base_extension.py"}]}
1,015
201
gh_patches_debug_407
rasdani/github-patches
git_diff
wemake-services__wemake-python-styleguide-200
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Feature: allow magic numbers in async functions constructors We check that some magic numbers can be used in function constructors like so: ```python def some_function(price, delta=0.1): return price * delta ``` But, we only allow regular functions, not `async` ones: https://github.com/wemake-services/wemake-python-styleguide/blob/master/wemake_python_styleguide/visitors/ast/numbers.py#L19-L21 What we need to do is: 1. Add `ast.AsyncFunctionDef` to the allowed list 2. Write a unit test for it: https://github.com/wemake-services/wemake-python-styleguide/blob/master/tests/test_visitors/test_ast/test_general/test_magic_numbers.py --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `wemake_python_styleguide/visitors/ast/numbers.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 import ast 4 from typing import Optional 5 6 from wemake_python_styleguide.constants import MAGIC_NUMBERS_WHITELIST 7 from wemake_python_styleguide.violations.best_practices import ( 8 MagicNumberViolation, 9 ) 10 from wemake_python_styleguide.visitors.base import BaseNodeVisitor 11 12 13 class MagicNumberVisitor(BaseNodeVisitor): 14 """Checks magic numbers used in the code.""" 15 16 _ALLOWED_PARENTS = ( 17 ast.Assign, 18 19 # Constructor usages: 20 ast.FunctionDef, 21 ast.arguments, 22 23 # Primitives: 24 ast.List, 25 ast.Dict, 26 ast.Set, 27 ast.Tuple, 28 ) 29 30 # TODO: make consistent naming rules for class attributes: 31 _PROXY_PARENTS = ( 32 ast.UnaryOp, 33 ) 34 35 def _get_real_parent(self, node: Optional[ast.AST]) -> Optional[ast.AST]: 36 """ 37 Returns real number's parent. 38 39 What can go wrong? 40 41 1. Number can be negative: ``x = -1``, 42 so ``1`` has ``UnaryOp`` as parent, but should return ``Assign`` 43 44 """ 45 parent = getattr(node, 'parent', None) 46 if isinstance(parent, self._PROXY_PARENTS): 47 return self._get_real_parent(parent) 48 return parent 49 50 def _check_is_magic(self, node: ast.Num) -> None: 51 parent = self._get_real_parent(node) 52 if isinstance(parent, self._ALLOWED_PARENTS): 53 return 54 55 if node.n in MAGIC_NUMBERS_WHITELIST: 56 return 57 58 if isinstance(node.n, int) and node.n <= 10: 59 return 60 61 self.add_violation(MagicNumberViolation(node, text=str(node.n))) 62 63 def visit_Num(self, node: ast.Num) -> None: 64 """ 65 Checks numbers not to be magic constants inside the code. 66 67 Raises: 68 MagicNumberViolation 69 70 """ 71 self._check_is_magic(node) 72 self.generic_visit(node) 73 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/wemake_python_styleguide/visitors/ast/numbers.py b/wemake_python_styleguide/visitors/ast/numbers.py --- a/wemake_python_styleguide/visitors/ast/numbers.py +++ b/wemake_python_styleguide/visitors/ast/numbers.py @@ -18,6 +18,7 @@ # Constructor usages: ast.FunctionDef, + ast.AsyncFunctionDef, ast.arguments, # Primitives:
{"golden_diff": "diff --git a/wemake_python_styleguide/visitors/ast/numbers.py b/wemake_python_styleguide/visitors/ast/numbers.py\n--- a/wemake_python_styleguide/visitors/ast/numbers.py\n+++ b/wemake_python_styleguide/visitors/ast/numbers.py\n@@ -18,6 +18,7 @@\n \n # Constructor usages:\n ast.FunctionDef,\n+ ast.AsyncFunctionDef,\n ast.arguments,\n \n # Primitives:\n", "issue": "Feature: allow magic numbers in async functions constructors\nWe check that some magic numbers can be used in function constructors like so:\r\n\r\n```python\r\ndef some_function(price, delta=0.1):\r\n return price * delta\r\n```\r\n\r\nBut, we only allow regular functions, not `async` ones: https://github.com/wemake-services/wemake-python-styleguide/blob/master/wemake_python_styleguide/visitors/ast/numbers.py#L19-L21\r\n\r\nWhat we need to do is:\r\n1. Add `ast.AsyncFunctionDef` to the allowed list\r\n2. Write a unit test for it: https://github.com/wemake-services/wemake-python-styleguide/blob/master/tests/test_visitors/test_ast/test_general/test_magic_numbers.py\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport ast\nfrom typing import Optional\n\nfrom wemake_python_styleguide.constants import MAGIC_NUMBERS_WHITELIST\nfrom wemake_python_styleguide.violations.best_practices import (\n MagicNumberViolation,\n)\nfrom wemake_python_styleguide.visitors.base import BaseNodeVisitor\n\n\nclass MagicNumberVisitor(BaseNodeVisitor):\n \"\"\"Checks magic numbers used in the code.\"\"\"\n\n _ALLOWED_PARENTS = (\n ast.Assign,\n\n # Constructor usages:\n ast.FunctionDef,\n ast.arguments,\n\n # Primitives:\n ast.List,\n ast.Dict,\n ast.Set,\n ast.Tuple,\n )\n\n # TODO: make consistent naming rules for class attributes:\n _PROXY_PARENTS = (\n ast.UnaryOp,\n )\n\n def _get_real_parent(self, node: Optional[ast.AST]) -> Optional[ast.AST]:\n \"\"\"\n Returns real number's parent.\n\n What can go wrong?\n\n 1. 
Number can be negative: ``x = -1``,\n so ``1`` has ``UnaryOp`` as parent, but should return ``Assign``\n\n \"\"\"\n parent = getattr(node, 'parent', None)\n if isinstance(parent, self._PROXY_PARENTS):\n return self._get_real_parent(parent)\n return parent\n\n def _check_is_magic(self, node: ast.Num) -> None:\n parent = self._get_real_parent(node)\n if isinstance(parent, self._ALLOWED_PARENTS):\n return\n\n if node.n in MAGIC_NUMBERS_WHITELIST:\n return\n\n if isinstance(node.n, int) and node.n <= 10:\n return\n\n self.add_violation(MagicNumberViolation(node, text=str(node.n)))\n\n def visit_Num(self, node: ast.Num) -> None:\n \"\"\"\n Checks numbers not to be magic constants inside the code.\n\n Raises:\n MagicNumberViolation\n\n \"\"\"\n self._check_is_magic(node)\n self.generic_visit(node)\n", "path": "wemake_python_styleguide/visitors/ast/numbers.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport ast\nfrom typing import Optional\n\nfrom wemake_python_styleguide.constants import MAGIC_NUMBERS_WHITELIST\nfrom wemake_python_styleguide.violations.best_practices import (\n MagicNumberViolation,\n)\nfrom wemake_python_styleguide.visitors.base import BaseNodeVisitor\n\n\nclass MagicNumberVisitor(BaseNodeVisitor):\n \"\"\"Checks magic numbers used in the code.\"\"\"\n\n _ALLOWED_PARENTS = (\n ast.Assign,\n\n # Constructor usages:\n ast.FunctionDef,\n ast.AsyncFunctionDef,\n ast.arguments,\n\n # Primitives:\n ast.List,\n ast.Dict,\n ast.Set,\n ast.Tuple,\n )\n\n _PROXY_PARENTS = (\n ast.UnaryOp,\n )\n\n def _get_real_parent(self, node: Optional[ast.AST]) -> Optional[ast.AST]:\n \"\"\"\n Returns real number's parent.\n\n What can go wrong?\n\n 1. Number can be negative: ``x = -1``,\n so ``1`` has ``UnaryOp`` as parent, but should return ``Assign``\n\n \"\"\"\n parent = getattr(node, 'parent', None)\n if isinstance(parent, self._PROXY_PARENTS):\n return self._get_real_parent(parent)\n return parent\n\n def _check_is_magic(self, node: ast.Num) -> None:\n parent = self._get_real_parent(node)\n if isinstance(parent, self._ALLOWED_PARENTS):\n return\n\n if node.n in MAGIC_NUMBERS_WHITELIST:\n return\n\n if isinstance(node.n, int) and node.n <= 10:\n return\n\n self.add_violation(MagicNumberViolation(node, text=str(node.n)))\n\n def visit_Num(self, node: ast.Num) -> None:\n \"\"\"\n Checks numbers not to be magic constants inside the code.\n\n Raises:\n MagicNumberViolation\n\n \"\"\"\n self._check_is_magic(node)\n self.generic_visit(node)\n", "path": "wemake_python_styleguide/visitors/ast/numbers.py"}]}
1,004
109
gh_patches_debug_21827
rasdani/github-patches
git_diff
iterative__dvc-151
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Unconfigured git error Git repository might be created but git might be not configured. The results of this failure: 1. .gitignore file was not created. 2. change was not committed ``` c:\src\tmp>dvc init Directories data/, .cache/ and .state/ were created Error. Git command error (git commit -m DVC init. data dir data, cache dir .cache, state dir .state): *** Please tell me who you are. Run git config --global user.email "[email protected]" git config --global user.name "Your Name" to set your account's default identity. Omit --global to set the identity only in this repository. fatal: empty ident name (for <(NULL)>) not allowed ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `dvc/git_wrapper.py` Content: ``` 1 import os 2 3 from dvc.logger import Logger 4 from dvc.config import Config 5 from dvc.executor import Executor, ExecutorError 6 from dvc.system import System 7 8 9 class GitWrapperI(object): 10 COMMIT_LEN = 7 11 12 def __init__(self, git_dir=None, commit=None): 13 self._git_dir = git_dir 14 self._commit = commit 15 16 @property 17 def git_dir(self): 18 return self._git_dir 19 20 @property 21 def lock_file(self): 22 return os.path.join(self.git_dir_abs, '.' + Config.CONFIG + '.lock') 23 24 @property 25 def git_dir_abs(self): 26 return System.realpath(self.git_dir) 27 28 @property 29 def curr_dir_abs(self): 30 return os.path.abspath(os.curdir) 31 32 @property 33 def curr_commit(self): 34 return self._commit 35 36 def is_ready_to_go(self): 37 return True 38 39 @staticmethod 40 def git_file_statuses(): 41 Logger.debug('[dvc-git] Getting file statuses. Command: git status --porcelain') 42 code, out, err = Executor.exec_cmd(['git', 'status', '--porcelain']) 43 if code != 0: 44 raise ExecutorError('[dvc-git] File status command error - {}'.format(err)) 45 Logger.debug('[dvc-git] Getting file statuses. Success.') 46 47 return GitWrapper.parse_porcelain_files(out) 48 49 @staticmethod 50 def git_path_to_system_path(path): 51 if os.name == 'nt': 52 return path.replace('/', '\\') 53 return path 54 55 @staticmethod 56 def parse_porcelain_files(out): 57 result = [] 58 if len(out) > 0: 59 lines = out.split('\n') 60 for line in lines: 61 status = line[:2] 62 file = GitWrapperI.git_path_to_system_path(line[3:]) 63 result.append((status, file)) 64 return result 65 66 def abs_paths_to_dvc(self, files): 67 result = [] 68 for file in files: 69 result.append(os.path.relpath(os.path.abspath(file), self.git_dir_abs)) 70 71 return result 72 73 def commit_all_changes_and_log_status(self, message): 74 pass 75 76 77 class GitWrapper(GitWrapperI): 78 def __init__(self): 79 super(GitWrapper, self).__init__() 80 81 def is_ready_to_go(self): 82 statuses = self.git_file_statuses() 83 if len(statuses) > 0: 84 Logger.error('[dvc-git] Commit all changed files before running reproducible command. Changed files:') 85 for status, file in statuses: 86 Logger.error("{} {}".format(status, file)) 87 return False 88 89 return True 90 91 @property 92 def curr_dir_dvc(self): 93 return os.path.relpath(os.path.abspath(os.curdir), self.git_dir_abs) 94 95 @property 96 def git_dir(self): 97 if self._git_dir: 98 return self._git_dir 99 100 try: 101 Logger.debug('[dvc-git] Getting git directory. 
Command: git rev-parse --show-toplevel') 102 code, out, err = Executor.exec_cmd(['git', 'rev-parse', '--show-toplevel']) 103 104 if code != 0: 105 raise ExecutorError('[dvc-git] Git directory command error - {}'.format(err)) 106 Logger.debug('[dvc-git] Getting git directory. Success.') 107 108 self._git_dir = out 109 return self._git_dir 110 except ExecutorError: 111 raise 112 except Exception as e: 113 raise ExecutorError('Unable to run git command: {}'.format(e)) 114 pass 115 116 @property 117 def curr_commit(self): 118 Logger.debug('[dvc-git] Getting current git commit. Command: git rev-parse --short HEAD') 119 120 code, out, err = Executor.exec_cmd(['git', 'rev-parse', '--short', 'HEAD']) 121 if code != 0: 122 raise ExecutorError('[dvc-git] Commit command error - {}'.format(err)) 123 Logger.debug('[dvc-git] Getting current git commit. Success.') 124 return out 125 126 @staticmethod 127 def commit_all_changes(message): 128 Logger.debug('[dvc-git] Commit all changes. Commands: {} && {} && {}'.format( 129 'git add --all', 'git status --porcelain', 'git commit -m')) 130 131 Executor.exec_cmd_only_success(['git', 'add', '--all']) 132 out_status = Executor.exec_cmd_only_success(['git', 'status', '--porcelain']) 133 Executor.exec_cmd_only_success(['git', 'commit', '-m', message]) 134 Logger.debug('[dvc-git] Commit all changes. Success.') 135 136 return GitWrapper.parse_porcelain_files(out_status) 137 138 def commit_all_changes_and_log_status(self, message): 139 statuses = self.commit_all_changes(message) 140 Logger.debug('[dvc-git] A new commit {} was made in the current branch. Added files:'.format( 141 self.curr_commit)) 142 for status, file in statuses: 143 Logger.debug('[dvc-git]\t{} {}'.format(status, file)) 144 pass 145 146 @staticmethod 147 def abs_paths_to_relative(files): 148 cur_dir = System.realpath(os.curdir) 149 150 result = [] 151 for file in files: 152 result.append(os.path.relpath(System.realpath(file), cur_dir)) 153 154 return result 155 156 def dvc_paths_to_abs(self, files): 157 results = [] 158 159 for file in files: 160 results.append(os.path.join(self.git_dir_abs, file)) 161 162 return results 163 164 def were_files_changed(self, code_dependencies, path_factory, changed_files): 165 code_files, code_dirs = self.separate_dependency_files_and_dirs(code_dependencies) 166 code_files_set = set([path_factory.path(x).dvc for x in code_files]) 167 for changed_file in changed_files: 168 if changed_file in code_files_set: 169 return True 170 171 for dir in code_dirs: 172 if changed_file.startswith(dir): 173 return True 174 175 return False 176 177 @staticmethod 178 def get_changed_files(target_commit): 179 Logger.debug('[dvc-git] Identify changes. Command: git diff --name-only HEAD {}'.format( 180 target_commit)) 181 182 changed_files_str = Executor.exec_cmd_only_success(['git', 'diff', '--name-only', 'HEAD', target_commit]) 183 changed_files = changed_files_str.strip('"').split('\n') 184 185 Logger.debug('[dvc-git] Identify changes. Success. 
Changed files: {}'.format( 186 ', '.join(changed_files))) 187 return changed_files 188 189 @staticmethod 190 def get_target_commit(file): 191 try: 192 commit = Executor.exec_cmd_only_success(['git', 'log', '-1', '--pretty=format:"%h"', file]) 193 return commit.strip('"') 194 except ExecutorError: 195 return None 196 197 def separate_dependency_files_and_dirs(self, code_dependencies): 198 code_files = [] 199 code_dirs = [] 200 201 code_dependencies_abs = self.dvc_paths_to_abs(code_dependencies) 202 for code in code_dependencies_abs: 203 if os.path.isdir(code): 204 code_dirs.append(code) 205 else: 206 code_files.append(code) 207 208 return code_files, code_dirs 209 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/dvc/git_wrapper.py b/dvc/git_wrapper.py --- a/dvc/git_wrapper.py +++ b/dvc/git_wrapper.py @@ -46,6 +46,15 @@ return GitWrapper.parse_porcelain_files(out) + @staticmethod + def git_config_get(name): + code, out, err = Executor.exec_cmd(['git', 'config', '--get', name]) + Logger.debug('[dvc-git] "git config --get {}": code({}), out({}), err({})'.format( + name, code, out, err)) + if code != 0: + return None + return out + @staticmethod def git_path_to_system_path(path): if os.name == 'nt': @@ -86,6 +95,17 @@ Logger.error("{} {}".format(status, file)) return False + # Sanity check to make sure we will be able to commit + name = self.git_config_get('user.name') + if name == None: + Logger.error('[dvc-git] Please setup user.name in git config') + return False + + email = self.git_config_get('user.email') + if email == None: + Logger.error('[dvc-git] Please setup user.email in git config') + return False + return True @property
{"golden_diff": "diff --git a/dvc/git_wrapper.py b/dvc/git_wrapper.py\n--- a/dvc/git_wrapper.py\n+++ b/dvc/git_wrapper.py\n@@ -46,6 +46,15 @@\n \n return GitWrapper.parse_porcelain_files(out)\n \n+ @staticmethod\n+ def git_config_get(name):\n+ code, out, err = Executor.exec_cmd(['git', 'config', '--get', name])\n+ Logger.debug('[dvc-git] \"git config --get {}\": code({}), out({}), err({})'.format(\n+ name, code, out, err))\n+ if code != 0:\n+ return None\n+ return out\n+\n @staticmethod\n def git_path_to_system_path(path):\n if os.name == 'nt':\n@@ -86,6 +95,17 @@\n Logger.error(\"{} {}\".format(status, file))\n return False\n \n+ # Sanity check to make sure we will be able to commit\n+ name = self.git_config_get('user.name')\n+ if name == None:\n+ Logger.error('[dvc-git] Please setup user.name in git config')\n+ return False\n+\n+ email = self.git_config_get('user.email')\n+ if email == None:\n+ Logger.error('[dvc-git] Please setup user.email in git config')\n+ return False\n+\n return True\n \n @property\n", "issue": "Unconfigured git error\nGit repository might be created but git might be not configured.\r\nThe results of this failure:\r\n1. .gitignore file was not created.\r\n2. change was not committed\r\n\r\n```\r\nc:\\src\\tmp>dvc init\r\nDirectories data/, .cache/ and .state/ were created\r\nError. Git command error (git commit -m DVC init. data dir data, cache dir .cache, state dir .state):\r\n*** Please tell me who you are.\r\n\r\nRun\r\n\r\n git config --global user.email \"[email protected]\"\r\n git config --global user.name \"Your Name\"\r\n\r\nto set your account's default identity.\r\nOmit --global to set the identity only in this repository.\r\n\r\nfatal: empty ident name (for <(NULL)>) not allowed\r\n```\n", "before_files": [{"content": "import os\n\nfrom dvc.logger import Logger\nfrom dvc.config import Config\nfrom dvc.executor import Executor, ExecutorError\nfrom dvc.system import System\n\n\nclass GitWrapperI(object):\n COMMIT_LEN = 7\n\n def __init__(self, git_dir=None, commit=None):\n self._git_dir = git_dir\n self._commit = commit\n\n @property\n def git_dir(self):\n return self._git_dir\n\n @property\n def lock_file(self):\n return os.path.join(self.git_dir_abs, '.' + Config.CONFIG + '.lock')\n\n @property\n def git_dir_abs(self):\n return System.realpath(self.git_dir)\n\n @property\n def curr_dir_abs(self):\n return os.path.abspath(os.curdir)\n\n @property\n def curr_commit(self):\n return self._commit\n\n def is_ready_to_go(self):\n return True\n\n @staticmethod\n def git_file_statuses():\n Logger.debug('[dvc-git] Getting file statuses. Command: git status --porcelain')\n code, out, err = Executor.exec_cmd(['git', 'status', '--porcelain'])\n if code != 0:\n raise ExecutorError('[dvc-git] File status command error - {}'.format(err))\n Logger.debug('[dvc-git] Getting file statuses. 
Success.')\n\n return GitWrapper.parse_porcelain_files(out)\n\n @staticmethod\n def git_path_to_system_path(path):\n if os.name == 'nt':\n return path.replace('/', '\\\\')\n return path\n\n @staticmethod\n def parse_porcelain_files(out):\n result = []\n if len(out) > 0:\n lines = out.split('\\n')\n for line in lines:\n status = line[:2]\n file = GitWrapperI.git_path_to_system_path(line[3:])\n result.append((status, file))\n return result\n\n def abs_paths_to_dvc(self, files):\n result = []\n for file in files:\n result.append(os.path.relpath(os.path.abspath(file), self.git_dir_abs))\n\n return result\n\n def commit_all_changes_and_log_status(self, message):\n pass\n\n\nclass GitWrapper(GitWrapperI):\n def __init__(self):\n super(GitWrapper, self).__init__()\n\n def is_ready_to_go(self):\n statuses = self.git_file_statuses()\n if len(statuses) > 0:\n Logger.error('[dvc-git] Commit all changed files before running reproducible command. Changed files:')\n for status, file in statuses:\n Logger.error(\"{} {}\".format(status, file))\n return False\n\n return True\n\n @property\n def curr_dir_dvc(self):\n return os.path.relpath(os.path.abspath(os.curdir), self.git_dir_abs)\n\n @property\n def git_dir(self):\n if self._git_dir:\n return self._git_dir\n\n try:\n Logger.debug('[dvc-git] Getting git directory. Command: git rev-parse --show-toplevel')\n code, out, err = Executor.exec_cmd(['git', 'rev-parse', '--show-toplevel'])\n\n if code != 0:\n raise ExecutorError('[dvc-git] Git directory command error - {}'.format(err))\n Logger.debug('[dvc-git] Getting git directory. Success.')\n\n self._git_dir = out\n return self._git_dir\n except ExecutorError:\n raise\n except Exception as e:\n raise ExecutorError('Unable to run git command: {}'.format(e))\n pass\n\n @property\n def curr_commit(self):\n Logger.debug('[dvc-git] Getting current git commit. Command: git rev-parse --short HEAD')\n\n code, out, err = Executor.exec_cmd(['git', 'rev-parse', '--short', 'HEAD'])\n if code != 0:\n raise ExecutorError('[dvc-git] Commit command error - {}'.format(err))\n Logger.debug('[dvc-git] Getting current git commit. Success.')\n return out\n\n @staticmethod\n def commit_all_changes(message):\n Logger.debug('[dvc-git] Commit all changes. Commands: {} && {} && {}'.format(\n 'git add --all', 'git status --porcelain', 'git commit -m'))\n\n Executor.exec_cmd_only_success(['git', 'add', '--all'])\n out_status = Executor.exec_cmd_only_success(['git', 'status', '--porcelain'])\n Executor.exec_cmd_only_success(['git', 'commit', '-m', message])\n Logger.debug('[dvc-git] Commit all changes. Success.')\n\n return GitWrapper.parse_porcelain_files(out_status)\n\n def commit_all_changes_and_log_status(self, message):\n statuses = self.commit_all_changes(message)\n Logger.debug('[dvc-git] A new commit {} was made in the current branch. 
Added files:'.format(\n self.curr_commit))\n for status, file in statuses:\n Logger.debug('[dvc-git]\\t{} {}'.format(status, file))\n pass\n\n @staticmethod\n def abs_paths_to_relative(files):\n cur_dir = System.realpath(os.curdir)\n\n result = []\n for file in files:\n result.append(os.path.relpath(System.realpath(file), cur_dir))\n\n return result\n\n def dvc_paths_to_abs(self, files):\n results = []\n\n for file in files:\n results.append(os.path.join(self.git_dir_abs, file))\n\n return results\n\n def were_files_changed(self, code_dependencies, path_factory, changed_files):\n code_files, code_dirs = self.separate_dependency_files_and_dirs(code_dependencies)\n code_files_set = set([path_factory.path(x).dvc for x in code_files])\n for changed_file in changed_files:\n if changed_file in code_files_set:\n return True\n\n for dir in code_dirs:\n if changed_file.startswith(dir):\n return True\n\n return False\n\n @staticmethod\n def get_changed_files(target_commit):\n Logger.debug('[dvc-git] Identify changes. Command: git diff --name-only HEAD {}'.format(\n target_commit))\n\n changed_files_str = Executor.exec_cmd_only_success(['git', 'diff', '--name-only', 'HEAD', target_commit])\n changed_files = changed_files_str.strip('\"').split('\\n')\n\n Logger.debug('[dvc-git] Identify changes. Success. Changed files: {}'.format(\n ', '.join(changed_files)))\n return changed_files\n\n @staticmethod\n def get_target_commit(file):\n try:\n commit = Executor.exec_cmd_only_success(['git', 'log', '-1', '--pretty=format:\"%h\"', file])\n return commit.strip('\"')\n except ExecutorError:\n return None\n\n def separate_dependency_files_and_dirs(self, code_dependencies):\n code_files = []\n code_dirs = []\n\n code_dependencies_abs = self.dvc_paths_to_abs(code_dependencies)\n for code in code_dependencies_abs:\n if os.path.isdir(code):\n code_dirs.append(code)\n else:\n code_files.append(code)\n\n return code_files, code_dirs\n", "path": "dvc/git_wrapper.py"}], "after_files": [{"content": "import os\n\nfrom dvc.logger import Logger\nfrom dvc.config import Config\nfrom dvc.executor import Executor, ExecutorError\nfrom dvc.system import System\n\n\nclass GitWrapperI(object):\n COMMIT_LEN = 7\n\n def __init__(self, git_dir=None, commit=None):\n self._git_dir = git_dir\n self._commit = commit\n\n @property\n def git_dir(self):\n return self._git_dir\n\n @property\n def lock_file(self):\n return os.path.join(self.git_dir_abs, '.' + Config.CONFIG + '.lock')\n\n @property\n def git_dir_abs(self):\n return System.realpath(self.git_dir)\n\n @property\n def curr_dir_abs(self):\n return os.path.abspath(os.curdir)\n\n @property\n def curr_commit(self):\n return self._commit\n\n def is_ready_to_go(self):\n return True\n\n @staticmethod\n def git_file_statuses():\n Logger.debug('[dvc-git] Getting file statuses. Command: git status --porcelain')\n code, out, err = Executor.exec_cmd(['git', 'status', '--porcelain'])\n if code != 0:\n raise ExecutorError('[dvc-git] File status command error - {}'.format(err))\n Logger.debug('[dvc-git] Getting file statuses. 
Success.')\n\n return GitWrapper.parse_porcelain_files(out)\n\n @staticmethod\n def git_config_get(name):\n code, out, err = Executor.exec_cmd(['git', 'config', '--get', name])\n Logger.debug('[dvc-git] \"git config --get {}\": code({}), out({}), err({})'.format(\n name, code, out, err))\n if code != 0:\n return None\n return out\n\n @staticmethod\n def git_path_to_system_path(path):\n if os.name == 'nt':\n return path.replace('/', '\\\\')\n return path\n\n @staticmethod\n def parse_porcelain_files(out):\n result = []\n if len(out) > 0:\n lines = out.split('\\n')\n for line in lines:\n status = line[:2]\n file = GitWrapperI.git_path_to_system_path(line[3:])\n result.append((status, file))\n return result\n\n def abs_paths_to_dvc(self, files):\n result = []\n for file in files:\n result.append(os.path.relpath(os.path.abspath(file), self.git_dir_abs))\n\n return result\n\n def commit_all_changes_and_log_status(self, message):\n pass\n\n\nclass GitWrapper(GitWrapperI):\n def __init__(self):\n super(GitWrapper, self).__init__()\n\n def is_ready_to_go(self):\n statuses = self.git_file_statuses()\n if len(statuses) > 0:\n Logger.error('[dvc-git] Commit all changed files before running reproducible command. Changed files:')\n for status, file in statuses:\n Logger.error(\"{} {}\".format(status, file))\n return False\n\n # Sanity check to make sure we will be able to commit\n name = self.git_config_get('user.name')\n if name == None:\n Logger.error('[dvc-git] Please setup user.name in git config')\n return False\n\n email = self.git_config_get('user.email')\n if email == None:\n Logger.error('[dvc-git] Please setup user.email in git config')\n return False\n\n return True\n\n @property\n def curr_dir_dvc(self):\n return os.path.relpath(os.path.abspath(os.curdir), self.git_dir_abs)\n\n @property\n def git_dir(self):\n if self._git_dir:\n return self._git_dir\n\n try:\n Logger.debug('[dvc-git] Getting git directory. Command: git rev-parse --show-toplevel')\n code, out, err = Executor.exec_cmd(['git', 'rev-parse', '--show-toplevel'])\n\n if code != 0:\n raise ExecutorError('[dvc-git] Git directory command error - {}'.format(err))\n Logger.debug('[dvc-git] Getting git directory. Success.')\n\n self._git_dir = out\n return self._git_dir\n except ExecutorError:\n raise\n except Exception as e:\n raise ExecutorError('Unable to run git command: {}'.format(e))\n pass\n\n @property\n def curr_commit(self):\n Logger.debug('[dvc-git] Getting current git commit. Command: git rev-parse --short HEAD')\n\n code, out, err = Executor.exec_cmd(['git', 'rev-parse', '--short', 'HEAD'])\n if code != 0:\n raise ExecutorError('[dvc-git] Commit command error - {}'.format(err))\n Logger.debug('[dvc-git] Getting current git commit. Success.')\n return out\n\n @staticmethod\n def commit_all_changes(message):\n Logger.debug('[dvc-git] Commit all changes. Commands: {} && {} && {}'.format(\n 'git add --all', 'git status --porcelain', 'git commit -m'))\n\n Executor.exec_cmd_only_success(['git', 'add', '--all'])\n out_status = Executor.exec_cmd_only_success(['git', 'status', '--porcelain'])\n Executor.exec_cmd_only_success(['git', 'commit', '-m', message])\n Logger.debug('[dvc-git] Commit all changes. Success.')\n\n return GitWrapper.parse_porcelain_files(out_status)\n\n def commit_all_changes_and_log_status(self, message):\n statuses = self.commit_all_changes(message)\n Logger.debug('[dvc-git] A new commit {} was made in the current branch. 
Added files:'.format(\n self.curr_commit))\n for status, file in statuses:\n Logger.debug('[dvc-git]\\t{} {}'.format(status, file))\n pass\n\n @staticmethod\n def abs_paths_to_relative(files):\n cur_dir = System.realpath(os.curdir)\n\n result = []\n for file in files:\n result.append(os.path.relpath(System.realpath(file), cur_dir))\n\n return result\n\n def dvc_paths_to_abs(self, files):\n results = []\n\n for file in files:\n results.append(os.path.join(self.git_dir_abs, file))\n\n return results\n\n def were_files_changed(self, code_dependencies, path_factory, changed_files):\n code_files, code_dirs = self.separate_dependency_files_and_dirs(code_dependencies)\n code_files_set = set([path_factory.path(x).dvc for x in code_files])\n for changed_file in changed_files:\n if changed_file in code_files_set:\n return True\n\n for dir in code_dirs:\n if changed_file.startswith(dir):\n return True\n\n return False\n\n @staticmethod\n def get_changed_files(target_commit):\n Logger.debug('[dvc-git] Identify changes. Command: git diff --name-only HEAD {}'.format(\n target_commit))\n\n changed_files_str = Executor.exec_cmd_only_success(['git', 'diff', '--name-only', 'HEAD', target_commit])\n changed_files = changed_files_str.strip('\"').split('\\n')\n\n Logger.debug('[dvc-git] Identify changes. Success. Changed files: {}'.format(\n ', '.join(changed_files)))\n return changed_files\n\n @staticmethod\n def get_target_commit(file):\n try:\n commit = Executor.exec_cmd_only_success(['git', 'log', '-1', '--pretty=format:\"%h\"', file])\n return commit.strip('\"')\n except ExecutorError:\n return None\n\n def separate_dependency_files_and_dirs(self, code_dependencies):\n code_files = []\n code_dirs = []\n\n code_dependencies_abs = self.dvc_paths_to_abs(code_dependencies)\n for code in code_dependencies_abs:\n if os.path.isdir(code):\n code_dirs.append(code)\n else:\n code_files.append(code)\n\n return code_files, code_dirs\n", "path": "dvc/git_wrapper.py"}]}
2,451
304
gh_patches_debug_19879
rasdani/github-patches
git_diff
Lightning-AI__torchmetrics-2573
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Documentation of ERGAS ## 📚 Documentation It seems to me that the documentation of ERGAS are not consistent with the code. In the documentation the ratio h/l is multiplied with the rest, in the code it is a divisor. I think the confusion originates from the paper that is marked as the source. In the paper the ratio h/l < 1. The way the documentation states it, h/l should be > 1. Page 161 seems to give some insights on that. The bug regarding these incosistencies was fixed in april, but now the documentation is not so clear and not consistent with the literature. Thanks! --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/torchmetrics/image/ergas.py` Content: ``` 1 # Copyright The Lightning team. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from typing import Any, List, Optional, Sequence, Union 16 17 from torch import Tensor 18 from typing_extensions import Literal 19 20 from torchmetrics.functional.image.ergas import _ergas_compute, _ergas_update 21 from torchmetrics.metric import Metric 22 from torchmetrics.utilities import rank_zero_warn 23 from torchmetrics.utilities.data import dim_zero_cat 24 from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE 25 from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE 26 27 if not _MATPLOTLIB_AVAILABLE: 28 __doctest_skip__ = ["ErrorRelativeGlobalDimensionlessSynthesis.plot"] 29 30 31 class ErrorRelativeGlobalDimensionlessSynthesis(Metric): 32 r"""Calculate the `Error relative global dimensionless synthesis`_ (ERGAS) metric. 33 34 This metric is used to calculate the accuracy of Pan sharpened image considering normalized average error of each 35 band of the result image. It is defined as: 36 37 .. math:: 38 ERGAS = 100 \cdot \frac{h}{l} \cdot \sqrt{\frac{1}{N} \sum_{k=1}^{N} \frac{RMSE(B_k)^2}{\mu_k^2}} 39 40 where :math:`h` and :math:`l` denote the spatial resolution (pixel size) of the high and low resolution images, 41 often shorted to the ratio between them :math:`r=h/l`. :math:`N` is the number of spectral bands, :math:`RMSE(B_k)` 42 is the root mean square error of the k-th band between low and high resolution images, and :math:`\\mu_k` is the 43 mean value of the k-th band of the reference image. 44 45 As input to ``forward`` and ``update`` the metric accepts the following input 46 47 - ``preds`` (:class:`~torch.Tensor`): Predictions from model 48 - ``target`` (:class:`~torch.Tensor`): Ground truth values 49 50 As output of `forward` and `compute` the metric returns the following output 51 52 - ``ergas`` (:class:`~torch.Tensor`): if ``reduction!='none'`` returns float scalar tensor with average ERGAS 53 value over sample else returns tensor of shape ``(N,)`` with ERGAS values per sample 54 55 Args: 56 ratio: ratio of high resolution to low resolution. 
57 reduction: a method to reduce metric score over labels. 58 59 - ``'elementwise_mean'``: takes the mean (default) 60 - ``'sum'``: takes the sum 61 - ``'none'`` or ``None``: no reduction will be applied 62 63 kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. 64 65 Example: 66 >>> import torch 67 >>> from torchmetrics.image import ErrorRelativeGlobalDimensionlessSynthesis 68 >>> preds = torch.rand([16, 1, 16, 16], generator=torch.manual_seed(42)) 69 >>> target = preds * 0.75 70 >>> ergas = ErrorRelativeGlobalDimensionlessSynthesis() 71 >>> torch.round(ergas(preds, target)) 72 tensor(10.) 73 74 """ 75 76 higher_is_better: bool = False 77 is_differentiable: bool = True 78 full_state_update: bool = False 79 plot_lower_bound: float = 0.0 80 81 preds: List[Tensor] 82 target: List[Tensor] 83 84 def __init__( 85 self, 86 ratio: float = 4, 87 reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean", 88 **kwargs: Any, 89 ) -> None: 90 super().__init__(**kwargs) 91 rank_zero_warn( 92 "Metric `UniversalImageQualityIndex` will save all targets and" 93 " predictions in buffer. For large datasets this may lead" 94 " to large memory footprint." 95 ) 96 97 self.add_state("preds", default=[], dist_reduce_fx="cat") 98 self.add_state("target", default=[], dist_reduce_fx="cat") 99 self.ratio = ratio 100 self.reduction = reduction 101 102 def update(self, preds: Tensor, target: Tensor) -> None: 103 """Update state with predictions and targets.""" 104 preds, target = _ergas_update(preds, target) 105 self.preds.append(preds) 106 self.target.append(target) 107 108 def compute(self) -> Tensor: 109 """Compute explained variance over state.""" 110 preds = dim_zero_cat(self.preds) 111 target = dim_zero_cat(self.target) 112 return _ergas_compute(preds, target, self.ratio, self.reduction) 113 114 def plot( 115 self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None 116 ) -> _PLOT_OUT_TYPE: 117 """Plot a single or multiple values from the metric. 118 119 Args: 120 val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results. 121 If no value is provided, will automatically call `metric.compute` and plot that result. 122 ax: An matplotlib axis object. If provided will add plot to that axis 123 124 Returns: 125 Figure and Axes object 126 127 Raises: 128 ModuleNotFoundError: 129 If `matplotlib` is not installed 130 131 .. plot:: 132 :scale: 75 133 134 >>> # Example plotting a single value 135 >>> import torch 136 >>> from torchmetrics.image import ErrorRelativeGlobalDimensionlessSynthesis 137 >>> preds = torch.rand([16, 1, 16, 16], generator=torch.manual_seed(42)) 138 >>> target = preds * 0.75 139 >>> metric = ErrorRelativeGlobalDimensionlessSynthesis() 140 >>> metric.update(preds, target) 141 >>> fig_, ax_ = metric.plot() 142 143 .. plot:: 144 :scale: 75 145 146 >>> # Example plotting multiple values 147 >>> import torch 148 >>> from torchmetrics.image import ErrorRelativeGlobalDimensionlessSynthesis 149 >>> preds = torch.rand([16, 1, 16, 16], generator=torch.manual_seed(42)) 150 >>> target = preds * 0.75 151 >>> metric = ErrorRelativeGlobalDimensionlessSynthesis() 152 >>> values = [ ] 153 >>> for _ in range(10): 154 ... 
values.append(metric(preds, target)) 155 >>> fig_, ax_ = metric.plot(values) 156 157 """ 158 return self._plot(val, ax) 159 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/torchmetrics/image/ergas.py b/src/torchmetrics/image/ergas.py --- a/src/torchmetrics/image/ergas.py +++ b/src/torchmetrics/image/ergas.py @@ -35,12 +35,11 @@ band of the result image. It is defined as: .. math:: - ERGAS = 100 \cdot \frac{h}{l} \cdot \sqrt{\frac{1}{N} \sum_{k=1}^{N} \frac{RMSE(B_k)^2}{\mu_k^2}} + ERGAS = \frac{100}{r} \cdot \sqrt{\frac{1}{N} \sum_{k=1}^{N} \frac{RMSE(B_k)^2}{\mu_k^2}} - where :math:`h` and :math:`l` denote the spatial resolution (pixel size) of the high and low resolution images, - often shorted to the ratio between them :math:`r=h/l`. :math:`N` is the number of spectral bands, :math:`RMSE(B_k)` - is the root mean square error of the k-th band between low and high resolution images, and :math:`\\mu_k` is the - mean value of the k-th band of the reference image. + where :math:`r=h/l` denote the ratio in spatial resolution (pixel size) between the high and low resolution images. + :math:`N` is the number of spectral bands, :math:`RMSE(B_k)` is the root mean square error of the k-th band between + low and high resolution images, and :math:`\\mu_k` is the mean value of the k-th band of the reference image. As input to ``forward`` and ``update`` the metric accepts the following input
{"golden_diff": "diff --git a/src/torchmetrics/image/ergas.py b/src/torchmetrics/image/ergas.py\n--- a/src/torchmetrics/image/ergas.py\n+++ b/src/torchmetrics/image/ergas.py\n@@ -35,12 +35,11 @@\n band of the result image. It is defined as:\n \n .. math::\n- ERGAS = 100 \\cdot \\frac{h}{l} \\cdot \\sqrt{\\frac{1}{N} \\sum_{k=1}^{N} \\frac{RMSE(B_k)^2}{\\mu_k^2}}\n+ ERGAS = \\frac{100}{r} \\cdot \\sqrt{\\frac{1}{N} \\sum_{k=1}^{N} \\frac{RMSE(B_k)^2}{\\mu_k^2}}\n \n- where :math:`h` and :math:`l` denote the spatial resolution (pixel size) of the high and low resolution images,\n- often shorted to the ratio between them :math:`r=h/l`. :math:`N` is the number of spectral bands, :math:`RMSE(B_k)`\n- is the root mean square error of the k-th band between low and high resolution images, and :math:`\\\\mu_k` is the\n- mean value of the k-th band of the reference image.\n+ where :math:`r=h/l` denote the ratio in spatial resolution (pixel size) between the high and low resolution images.\n+ :math:`N` is the number of spectral bands, :math:`RMSE(B_k)` is the root mean square error of the k-th band between\n+ low and high resolution images, and :math:`\\\\mu_k` is the mean value of the k-th band of the reference image.\n \n As input to ``forward`` and ``update`` the metric accepts the following input\n", "issue": "Documentation of ERGAS\n## \ud83d\udcda Documentation\r\n\r\nIt seems to me that the documentation of ERGAS are not consistent with the code. In the documentation the ratio h/l is multiplied with the rest, in the code it is a divisor. I think the confusion originates from the paper that is marked as the source. In the paper the ratio h/l < 1. The way the documentation states it, h/l should be > 1. Page 161 seems to give some insights on that.\r\nThe bug regarding these incosistencies was fixed in april, but now the documentation is not so clear and not consistent with the literature. \r\n\r\nThanks!\r\n\n", "before_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Any, List, Optional, Sequence, Union\n\nfrom torch import Tensor\nfrom typing_extensions import Literal\n\nfrom torchmetrics.functional.image.ergas import _ergas_compute, _ergas_update\nfrom torchmetrics.metric import Metric\nfrom torchmetrics.utilities import rank_zero_warn\nfrom torchmetrics.utilities.data import dim_zero_cat\nfrom torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE\nfrom torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE\n\nif not _MATPLOTLIB_AVAILABLE:\n __doctest_skip__ = [\"ErrorRelativeGlobalDimensionlessSynthesis.plot\"]\n\n\nclass ErrorRelativeGlobalDimensionlessSynthesis(Metric):\n r\"\"\"Calculate the `Error relative global dimensionless synthesis`_ (ERGAS) metric.\n\n This metric is used to calculate the accuracy of Pan sharpened image considering normalized average error of each\n band of the result image. It is defined as:\n\n .. 
math::\n ERGAS = 100 \\cdot \\frac{h}{l} \\cdot \\sqrt{\\frac{1}{N} \\sum_{k=1}^{N} \\frac{RMSE(B_k)^2}{\\mu_k^2}}\n\n where :math:`h` and :math:`l` denote the spatial resolution (pixel size) of the high and low resolution images,\n often shorted to the ratio between them :math:`r=h/l`. :math:`N` is the number of spectral bands, :math:`RMSE(B_k)`\n is the root mean square error of the k-th band between low and high resolution images, and :math:`\\\\mu_k` is the\n mean value of the k-th band of the reference image.\n\n As input to ``forward`` and ``update`` the metric accepts the following input\n\n - ``preds`` (:class:`~torch.Tensor`): Predictions from model\n - ``target`` (:class:`~torch.Tensor`): Ground truth values\n\n As output of `forward` and `compute` the metric returns the following output\n\n - ``ergas`` (:class:`~torch.Tensor`): if ``reduction!='none'`` returns float scalar tensor with average ERGAS\n value over sample else returns tensor of shape ``(N,)`` with ERGAS values per sample\n\n Args:\n ratio: ratio of high resolution to low resolution.\n reduction: a method to reduce metric score over labels.\n\n - ``'elementwise_mean'``: takes the mean (default)\n - ``'sum'``: takes the sum\n - ``'none'`` or ``None``: no reduction will be applied\n\n kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.\n\n Example:\n >>> import torch\n >>> from torchmetrics.image import ErrorRelativeGlobalDimensionlessSynthesis\n >>> preds = torch.rand([16, 1, 16, 16], generator=torch.manual_seed(42))\n >>> target = preds * 0.75\n >>> ergas = ErrorRelativeGlobalDimensionlessSynthesis()\n >>> torch.round(ergas(preds, target))\n tensor(10.)\n\n \"\"\"\n\n higher_is_better: bool = False\n is_differentiable: bool = True\n full_state_update: bool = False\n plot_lower_bound: float = 0.0\n\n preds: List[Tensor]\n target: List[Tensor]\n\n def __init__(\n self,\n ratio: float = 4,\n reduction: Literal[\"elementwise_mean\", \"sum\", \"none\", None] = \"elementwise_mean\",\n **kwargs: Any,\n ) -> None:\n super().__init__(**kwargs)\n rank_zero_warn(\n \"Metric `UniversalImageQualityIndex` will save all targets and\"\n \" predictions in buffer. For large datasets this may lead\"\n \" to large memory footprint.\"\n )\n\n self.add_state(\"preds\", default=[], dist_reduce_fx=\"cat\")\n self.add_state(\"target\", default=[], dist_reduce_fx=\"cat\")\n self.ratio = ratio\n self.reduction = reduction\n\n def update(self, preds: Tensor, target: Tensor) -> None:\n \"\"\"Update state with predictions and targets.\"\"\"\n preds, target = _ergas_update(preds, target)\n self.preds.append(preds)\n self.target.append(target)\n\n def compute(self) -> Tensor:\n \"\"\"Compute explained variance over state.\"\"\"\n preds = dim_zero_cat(self.preds)\n target = dim_zero_cat(self.target)\n return _ergas_compute(preds, target, self.ratio, self.reduction)\n\n def plot(\n self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None\n ) -> _PLOT_OUT_TYPE:\n \"\"\"Plot a single or multiple values from the metric.\n\n Args:\n val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.\n If no value is provided, will automatically call `metric.compute` and plot that result.\n ax: An matplotlib axis object. If provided will add plot to that axis\n\n Returns:\n Figure and Axes object\n\n Raises:\n ModuleNotFoundError:\n If `matplotlib` is not installed\n\n .. 
plot::\n :scale: 75\n\n >>> # Example plotting a single value\n >>> import torch\n >>> from torchmetrics.image import ErrorRelativeGlobalDimensionlessSynthesis\n >>> preds = torch.rand([16, 1, 16, 16], generator=torch.manual_seed(42))\n >>> target = preds * 0.75\n >>> metric = ErrorRelativeGlobalDimensionlessSynthesis()\n >>> metric.update(preds, target)\n >>> fig_, ax_ = metric.plot()\n\n .. plot::\n :scale: 75\n\n >>> # Example plotting multiple values\n >>> import torch\n >>> from torchmetrics.image import ErrorRelativeGlobalDimensionlessSynthesis\n >>> preds = torch.rand([16, 1, 16, 16], generator=torch.manual_seed(42))\n >>> target = preds * 0.75\n >>> metric = ErrorRelativeGlobalDimensionlessSynthesis()\n >>> values = [ ]\n >>> for _ in range(10):\n ... values.append(metric(preds, target))\n >>> fig_, ax_ = metric.plot(values)\n\n \"\"\"\n return self._plot(val, ax)\n", "path": "src/torchmetrics/image/ergas.py"}], "after_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Any, List, Optional, Sequence, Union\n\nfrom torch import Tensor\nfrom typing_extensions import Literal\n\nfrom torchmetrics.functional.image.ergas import _ergas_compute, _ergas_update\nfrom torchmetrics.metric import Metric\nfrom torchmetrics.utilities import rank_zero_warn\nfrom torchmetrics.utilities.data import dim_zero_cat\nfrom torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE\nfrom torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE\n\nif not _MATPLOTLIB_AVAILABLE:\n __doctest_skip__ = [\"ErrorRelativeGlobalDimensionlessSynthesis.plot\"]\n\n\nclass ErrorRelativeGlobalDimensionlessSynthesis(Metric):\n r\"\"\"Calculate the `Error relative global dimensionless synthesis`_ (ERGAS) metric.\n\n This metric is used to calculate the accuracy of Pan sharpened image considering normalized average error of each\n band of the result image. It is defined as:\n\n .. 
math::\n ERGAS = \\frac{100}{r} \\cdot \\sqrt{\\frac{1}{N} \\sum_{k=1}^{N} \\frac{RMSE(B_k)^2}{\\mu_k^2}}\n\n where :math:`r=h/l` denote the ratio in spatial resolution (pixel size) between the high and low resolution images.\n :math:`N` is the number of spectral bands, :math:`RMSE(B_k)` is the root mean square error of the k-th band between\n low and high resolution images, and :math:`\\\\mu_k` is the mean value of the k-th band of the reference image.\n\n As input to ``forward`` and ``update`` the metric accepts the following input\n\n - ``preds`` (:class:`~torch.Tensor`): Predictions from model\n - ``target`` (:class:`~torch.Tensor`): Ground truth values\n\n As output of `forward` and `compute` the metric returns the following output\n\n - ``ergas`` (:class:`~torch.Tensor`): if ``reduction!='none'`` returns float scalar tensor with average ERGAS\n value over sample else returns tensor of shape ``(N,)`` with ERGAS values per sample\n\n Args:\n ratio: ratio of high resolution to low resolution.\n reduction: a method to reduce metric score over labels.\n\n - ``'elementwise_mean'``: takes the mean (default)\n - ``'sum'``: takes the sum\n - ``'none'`` or ``None``: no reduction will be applied\n\n kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.\n\n Example:\n >>> import torch\n >>> from torchmetrics.image import ErrorRelativeGlobalDimensionlessSynthesis\n >>> preds = torch.rand([16, 1, 16, 16], generator=torch.manual_seed(42))\n >>> target = preds * 0.75\n >>> ergas = ErrorRelativeGlobalDimensionlessSynthesis()\n >>> torch.round(ergas(preds, target))\n tensor(10.)\n\n \"\"\"\n\n higher_is_better: bool = False\n is_differentiable: bool = True\n full_state_update: bool = False\n plot_lower_bound: float = 0.0\n\n preds: List[Tensor]\n target: List[Tensor]\n\n def __init__(\n self,\n ratio: float = 4,\n reduction: Literal[\"elementwise_mean\", \"sum\", \"none\", None] = \"elementwise_mean\",\n **kwargs: Any,\n ) -> None:\n super().__init__(**kwargs)\n rank_zero_warn(\n \"Metric `UniversalImageQualityIndex` will save all targets and\"\n \" predictions in buffer. For large datasets this may lead\"\n \" to large memory footprint.\"\n )\n\n self.add_state(\"preds\", default=[], dist_reduce_fx=\"cat\")\n self.add_state(\"target\", default=[], dist_reduce_fx=\"cat\")\n self.ratio = ratio\n self.reduction = reduction\n\n def update(self, preds: Tensor, target: Tensor) -> None:\n \"\"\"Update state with predictions and targets.\"\"\"\n preds, target = _ergas_update(preds, target)\n self.preds.append(preds)\n self.target.append(target)\n\n def compute(self) -> Tensor:\n \"\"\"Compute explained variance over state.\"\"\"\n preds = dim_zero_cat(self.preds)\n target = dim_zero_cat(self.target)\n return _ergas_compute(preds, target, self.ratio, self.reduction)\n\n def plot(\n self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None\n ) -> _PLOT_OUT_TYPE:\n \"\"\"Plot a single or multiple values from the metric.\n\n Args:\n val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.\n If no value is provided, will automatically call `metric.compute` and plot that result.\n ax: An matplotlib axis object. If provided will add plot to that axis\n\n Returns:\n Figure and Axes object\n\n Raises:\n ModuleNotFoundError:\n If `matplotlib` is not installed\n\n .. 
plot::\n :scale: 75\n\n >>> # Example plotting a single value\n >>> import torch\n >>> from torchmetrics.image import ErrorRelativeGlobalDimensionlessSynthesis\n >>> preds = torch.rand([16, 1, 16, 16], generator=torch.manual_seed(42))\n >>> target = preds * 0.75\n >>> metric = ErrorRelativeGlobalDimensionlessSynthesis()\n >>> metric.update(preds, target)\n >>> fig_, ax_ = metric.plot()\n\n .. plot::\n :scale: 75\n\n >>> # Example plotting multiple values\n >>> import torch\n >>> from torchmetrics.image import ErrorRelativeGlobalDimensionlessSynthesis\n >>> preds = torch.rand([16, 1, 16, 16], generator=torch.manual_seed(42))\n >>> target = preds * 0.75\n >>> metric = ErrorRelativeGlobalDimensionlessSynthesis()\n >>> values = [ ]\n >>> for _ in range(10):\n ... values.append(metric(preds, target))\n >>> fig_, ax_ = metric.plot(values)\n\n \"\"\"\n return self._plot(val, ax)\n", "path": "src/torchmetrics/image/ergas.py"}]}
2,302
412
gh_patches_debug_21157
rasdani/github-patches
git_diff
ipython__ipython-11382
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- async-detection: nonlocal does not raise syntaxerror See https://github.com/ipython/ipython/issues/11303#issuecomment-421297197 ``` In [1]: x = 1 ...: def f(): ...: nonlocal x ...: x = 10000 ``` Should raise but does not. It's minor, but may be good to fix as behavior is likely undefined. async-detection: nonlocal does not raise syntaxerror See https://github.com/ipython/ipython/issues/11303#issuecomment-421297197 ``` In [1]: x = 1 ...: def f(): ...: nonlocal x ...: x = 10000 ``` Should raise but does not. It's minor, but may be good to fix as behavior is likely undefined. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `IPython/core/async_helpers.py` Content: ``` 1 """ 2 Async helper function that are invalid syntax on Python 3.5 and below. 3 4 This code is best effort, and may have edge cases not behaving as expected. In 5 particular it contain a number of heuristics to detect whether code is 6 effectively async and need to run in an event loop or not. 7 8 Some constructs (like top-level `return`, or `yield`) are taken care of 9 explicitly to actually raise a SyntaxError and stay as close as possible to 10 Python semantics. 11 """ 12 13 14 import ast 15 import sys 16 from textwrap import dedent, indent 17 18 19 class _AsyncIORunner: 20 21 def __call__(self, coro): 22 """ 23 Handler for asyncio autoawait 24 """ 25 import asyncio 26 27 return asyncio.get_event_loop().run_until_complete(coro) 28 29 def __str__(self): 30 return 'asyncio' 31 32 _asyncio_runner = _AsyncIORunner() 33 34 35 def _curio_runner(coroutine): 36 """ 37 handler for curio autoawait 38 """ 39 import curio 40 41 return curio.run(coroutine) 42 43 44 def _trio_runner(async_fn): 45 import trio 46 47 async def loc(coro): 48 """ 49 We need the dummy no-op async def to protect from 50 trio's internal. See https://github.com/python-trio/trio/issues/89 51 """ 52 return await coro 53 54 return trio.run(loc, async_fn) 55 56 57 def _pseudo_sync_runner(coro): 58 """ 59 A runner that does not really allow async execution, and just advance the coroutine. 60 61 See discussion in https://github.com/python-trio/trio/issues/608, 62 63 Credit to Nathaniel Smith 64 65 """ 66 try: 67 coro.send(None) 68 except StopIteration as exc: 69 return exc.value 70 else: 71 # TODO: do not raise but return an execution result with the right info. 72 raise RuntimeError( 73 "{coro_name!r} needs a real async loop".format(coro_name=coro.__name__) 74 ) 75 76 77 def _asyncify(code: str) -> str: 78 """wrap code in async def definition. 79 80 And setup a bit of context to run it later. 81 """ 82 res = dedent( 83 """ 84 async def __wrapper__(): 85 try: 86 {usercode} 87 finally: 88 locals() 89 """ 90 ).format(usercode=indent(code, " " * 8)) 91 return res 92 93 94 class _AsyncSyntaxErrorVisitor(ast.NodeVisitor): 95 """ 96 Find syntax errors that would be an error in an async repl, but because 97 the implementation involves wrapping the repl in an async function, it 98 is erroneously allowed (e.g. 
yield or return at the top level) 99 """ 100 101 def generic_visit(self, node): 102 func_types = (ast.FunctionDef, ast.AsyncFunctionDef) 103 invalid_types = (ast.Return, ast.Yield, ast.YieldFrom) 104 105 if isinstance(node, func_types): 106 return # Don't recurse into functions 107 elif isinstance(node, invalid_types): 108 raise SyntaxError() 109 else: 110 super().generic_visit(node) 111 112 113 def _async_parse_cell(cell: str) -> ast.AST: 114 """ 115 This is a compatibility shim for pre-3.7 when async outside of a function 116 is a syntax error at the parse stage. 117 118 It will return an abstract syntax tree parsed as if async and await outside 119 of a function were not a syntax error. 120 """ 121 if sys.version_info < (3, 7): 122 # Prior to 3.7 you need to asyncify before parse 123 wrapped_parse_tree = ast.parse(_asyncify(cell)) 124 return wrapped_parse_tree.body[0].body[0] 125 else: 126 return ast.parse(cell) 127 128 129 def _should_be_async(cell: str) -> bool: 130 """Detect if a block of code need to be wrapped in an `async def` 131 132 Attempt to parse the block of code, it it compile we're fine. 133 Otherwise we wrap if and try to compile. 134 135 If it works, assume it should be async. Otherwise Return False. 136 137 Not handled yet: If the block of code has a return statement as the top 138 level, it will be seen as async. This is a know limitation. 139 """ 140 141 try: 142 # we can't limit ourself to ast.parse, as it __accepts__ to parse on 143 # 3.7+, but just does not _compile_ 144 compile(cell, "<>", "exec") 145 return False 146 except SyntaxError: 147 try: 148 parse_tree = _async_parse_cell(cell) 149 150 # Raise a SyntaxError if there are top-level return or yields 151 v = _AsyncSyntaxErrorVisitor() 152 v.visit(parse_tree) 153 154 except SyntaxError: 155 return False 156 return True 157 return False 158 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/IPython/core/async_helpers.py b/IPython/core/async_helpers.py --- a/IPython/core/async_helpers.py +++ b/IPython/core/async_helpers.py @@ -97,14 +97,22 @@ the implementation involves wrapping the repl in an async function, it is erroneously allowed (e.g. yield or return at the top level) """ + def __init__(self): + self.depth = 0 + super().__init__() def generic_visit(self, node): func_types = (ast.FunctionDef, ast.AsyncFunctionDef) - invalid_types = (ast.Return, ast.Yield, ast.YieldFrom) - - if isinstance(node, func_types): - return # Don't recurse into functions - elif isinstance(node, invalid_types): + invalid_types_by_depth = { + 0: (ast.Return, ast.Yield, ast.YieldFrom), + 1: (ast.Nonlocal,) + } + + should_traverse = self.depth < max(invalid_types_by_depth.keys()) + if isinstance(node, func_types) and should_traverse: + self.depth += 1 + super().generic_visit(node) + elif isinstance(node, invalid_types_by_depth[self.depth]): raise SyntaxError() else: super().generic_visit(node)
{"golden_diff": "diff --git a/IPython/core/async_helpers.py b/IPython/core/async_helpers.py\n--- a/IPython/core/async_helpers.py\n+++ b/IPython/core/async_helpers.py\n@@ -97,14 +97,22 @@\n the implementation involves wrapping the repl in an async function, it\n is erroneously allowed (e.g. yield or return at the top level)\n \"\"\"\n+ def __init__(self):\n+ self.depth = 0\n+ super().__init__()\n \n def generic_visit(self, node):\n func_types = (ast.FunctionDef, ast.AsyncFunctionDef)\n- invalid_types = (ast.Return, ast.Yield, ast.YieldFrom)\n-\n- if isinstance(node, func_types):\n- return # Don't recurse into functions\n- elif isinstance(node, invalid_types):\n+ invalid_types_by_depth = {\n+ 0: (ast.Return, ast.Yield, ast.YieldFrom),\n+ 1: (ast.Nonlocal,)\n+ }\n+\n+ should_traverse = self.depth < max(invalid_types_by_depth.keys())\n+ if isinstance(node, func_types) and should_traverse:\n+ self.depth += 1\n+ super().generic_visit(node)\n+ elif isinstance(node, invalid_types_by_depth[self.depth]):\n raise SyntaxError()\n else:\n super().generic_visit(node)\n", "issue": "async-detection: nonlocal does not raise syntaxerror\nSee https://github.com/ipython/ipython/issues/11303#issuecomment-421297197\r\n\r\n```\r\nIn [1]: x = 1\r\n ...: def f():\r\n ...: nonlocal x\r\n ...: x = 10000\r\n```\r\nShould raise but does not.\r\n\r\nIt's minor, but may be good to fix as behavior is likely undefined.\nasync-detection: nonlocal does not raise syntaxerror\nSee https://github.com/ipython/ipython/issues/11303#issuecomment-421297197\r\n\r\n```\r\nIn [1]: x = 1\r\n ...: def f():\r\n ...: nonlocal x\r\n ...: x = 10000\r\n```\r\nShould raise but does not.\r\n\r\nIt's minor, but may be good to fix as behavior is likely undefined.\n", "before_files": [{"content": "\"\"\"\nAsync helper function that are invalid syntax on Python 3.5 and below.\n\nThis code is best effort, and may have edge cases not behaving as expected. In\nparticular it contain a number of heuristics to detect whether code is\neffectively async and need to run in an event loop or not.\n\nSome constructs (like top-level `return`, or `yield`) are taken care of\nexplicitly to actually raise a SyntaxError and stay as close as possible to\nPython semantics.\n\"\"\"\n\n\nimport ast\nimport sys\nfrom textwrap import dedent, indent\n\n\nclass _AsyncIORunner:\n\n def __call__(self, coro):\n \"\"\"\n Handler for asyncio autoawait\n \"\"\"\n import asyncio\n\n return asyncio.get_event_loop().run_until_complete(coro)\n\n def __str__(self):\n return 'asyncio'\n\n_asyncio_runner = _AsyncIORunner()\n\n\ndef _curio_runner(coroutine):\n \"\"\"\n handler for curio autoawait\n \"\"\"\n import curio\n\n return curio.run(coroutine)\n\n\ndef _trio_runner(async_fn):\n import trio\n\n async def loc(coro):\n \"\"\"\n We need the dummy no-op async def to protect from\n trio's internal. 
See https://github.com/python-trio/trio/issues/89\n \"\"\"\n return await coro\n\n return trio.run(loc, async_fn)\n\n\ndef _pseudo_sync_runner(coro):\n \"\"\"\n A runner that does not really allow async execution, and just advance the coroutine.\n\n See discussion in https://github.com/python-trio/trio/issues/608,\n\n Credit to Nathaniel Smith\n\n \"\"\"\n try:\n coro.send(None)\n except StopIteration as exc:\n return exc.value\n else:\n # TODO: do not raise but return an execution result with the right info.\n raise RuntimeError(\n \"{coro_name!r} needs a real async loop\".format(coro_name=coro.__name__)\n )\n\n\ndef _asyncify(code: str) -> str:\n \"\"\"wrap code in async def definition.\n\n And setup a bit of context to run it later.\n \"\"\"\n res = dedent(\n \"\"\"\n async def __wrapper__():\n try:\n {usercode}\n finally:\n locals()\n \"\"\"\n ).format(usercode=indent(code, \" \" * 8))\n return res\n\n\nclass _AsyncSyntaxErrorVisitor(ast.NodeVisitor):\n \"\"\"\n Find syntax errors that would be an error in an async repl, but because\n the implementation involves wrapping the repl in an async function, it\n is erroneously allowed (e.g. yield or return at the top level)\n \"\"\"\n\n def generic_visit(self, node):\n func_types = (ast.FunctionDef, ast.AsyncFunctionDef)\n invalid_types = (ast.Return, ast.Yield, ast.YieldFrom)\n\n if isinstance(node, func_types):\n return # Don't recurse into functions\n elif isinstance(node, invalid_types):\n raise SyntaxError()\n else:\n super().generic_visit(node)\n\n\ndef _async_parse_cell(cell: str) -> ast.AST:\n \"\"\"\n This is a compatibility shim for pre-3.7 when async outside of a function\n is a syntax error at the parse stage.\n\n It will return an abstract syntax tree parsed as if async and await outside\n of a function were not a syntax error.\n \"\"\"\n if sys.version_info < (3, 7):\n # Prior to 3.7 you need to asyncify before parse\n wrapped_parse_tree = ast.parse(_asyncify(cell))\n return wrapped_parse_tree.body[0].body[0]\n else:\n return ast.parse(cell)\n\n\ndef _should_be_async(cell: str) -> bool:\n \"\"\"Detect if a block of code need to be wrapped in an `async def`\n\n Attempt to parse the block of code, it it compile we're fine.\n Otherwise we wrap if and try to compile.\n\n If it works, assume it should be async. Otherwise Return False.\n\n Not handled yet: If the block of code has a return statement as the top\n level, it will be seen as async. This is a know limitation.\n \"\"\"\n\n try:\n # we can't limit ourself to ast.parse, as it __accepts__ to parse on\n # 3.7+, but just does not _compile_\n compile(cell, \"<>\", \"exec\")\n return False\n except SyntaxError:\n try:\n parse_tree = _async_parse_cell(cell)\n\n # Raise a SyntaxError if there are top-level return or yields\n v = _AsyncSyntaxErrorVisitor()\n v.visit(parse_tree)\n\n except SyntaxError:\n return False\n return True\n return False\n", "path": "IPython/core/async_helpers.py"}], "after_files": [{"content": "\"\"\"\nAsync helper function that are invalid syntax on Python 3.5 and below.\n\nThis code is best effort, and may have edge cases not behaving as expected. 
In\nparticular it contain a number of heuristics to detect whether code is\neffectively async and need to run in an event loop or not.\n\nSome constructs (like top-level `return`, or `yield`) are taken care of\nexplicitly to actually raise a SyntaxError and stay as close as possible to\nPython semantics.\n\"\"\"\n\n\nimport ast\nimport sys\nfrom textwrap import dedent, indent\n\n\nclass _AsyncIORunner:\n\n def __call__(self, coro):\n \"\"\"\n Handler for asyncio autoawait\n \"\"\"\n import asyncio\n\n return asyncio.get_event_loop().run_until_complete(coro)\n\n def __str__(self):\n return 'asyncio'\n\n_asyncio_runner = _AsyncIORunner()\n\n\ndef _curio_runner(coroutine):\n \"\"\"\n handler for curio autoawait\n \"\"\"\n import curio\n\n return curio.run(coroutine)\n\n\ndef _trio_runner(async_fn):\n import trio\n\n async def loc(coro):\n \"\"\"\n We need the dummy no-op async def to protect from\n trio's internal. See https://github.com/python-trio/trio/issues/89\n \"\"\"\n return await coro\n\n return trio.run(loc, async_fn)\n\n\ndef _pseudo_sync_runner(coro):\n \"\"\"\n A runner that does not really allow async execution, and just advance the coroutine.\n\n See discussion in https://github.com/python-trio/trio/issues/608,\n\n Credit to Nathaniel Smith\n\n \"\"\"\n try:\n coro.send(None)\n except StopIteration as exc:\n return exc.value\n else:\n # TODO: do not raise but return an execution result with the right info.\n raise RuntimeError(\n \"{coro_name!r} needs a real async loop\".format(coro_name=coro.__name__)\n )\n\n\ndef _asyncify(code: str) -> str:\n \"\"\"wrap code in async def definition.\n\n And setup a bit of context to run it later.\n \"\"\"\n res = dedent(\n \"\"\"\n async def __wrapper__():\n try:\n {usercode}\n finally:\n locals()\n \"\"\"\n ).format(usercode=indent(code, \" \" * 8))\n return res\n\n\nclass _AsyncSyntaxErrorVisitor(ast.NodeVisitor):\n \"\"\"\n Find syntax errors that would be an error in an async repl, but because\n the implementation involves wrapping the repl in an async function, it\n is erroneously allowed (e.g. yield or return at the top level)\n \"\"\"\n def __init__(self):\n self.depth = 0\n super().__init__()\n\n def generic_visit(self, node):\n func_types = (ast.FunctionDef, ast.AsyncFunctionDef)\n invalid_types_by_depth = {\n 0: (ast.Return, ast.Yield, ast.YieldFrom),\n 1: (ast.Nonlocal,)\n }\n\n should_traverse = self.depth < max(invalid_types_by_depth.keys())\n if isinstance(node, func_types) and should_traverse:\n self.depth += 1\n super().generic_visit(node)\n elif isinstance(node, invalid_types_by_depth[self.depth]):\n raise SyntaxError()\n else:\n super().generic_visit(node)\n\n\ndef _async_parse_cell(cell: str) -> ast.AST:\n \"\"\"\n This is a compatibility shim for pre-3.7 when async outside of a function\n is a syntax error at the parse stage.\n\n It will return an abstract syntax tree parsed as if async and await outside\n of a function were not a syntax error.\n \"\"\"\n if sys.version_info < (3, 7):\n # Prior to 3.7 you need to asyncify before parse\n wrapped_parse_tree = ast.parse(_asyncify(cell))\n return wrapped_parse_tree.body[0].body[0]\n else:\n return ast.parse(cell)\n\n\ndef _should_be_async(cell: str) -> bool:\n \"\"\"Detect if a block of code need to be wrapped in an `async def`\n\n Attempt to parse the block of code, it it compile we're fine.\n Otherwise we wrap if and try to compile.\n\n If it works, assume it should be async. 
Otherwise Return False.\n\n Not handled yet: If the block of code has a return statement as the top\n level, it will be seen as async. This is a know limitation.\n \"\"\"\n\n try:\n # we can't limit ourself to ast.parse, as it __accepts__ to parse on\n # 3.7+, but just does not _compile_\n compile(cell, \"<>\", \"exec\")\n return False\n except SyntaxError:\n try:\n parse_tree = _async_parse_cell(cell)\n\n # Raise a SyntaxError if there are top-level return or yields\n v = _AsyncSyntaxErrorVisitor()\n v.visit(parse_tree)\n\n except SyntaxError:\n return False\n return True\n return False\n", "path": "IPython/core/async_helpers.py"}]}
1,878
295
gh_patches_debug_32876
rasdani/github-patches
git_diff
getsentry__sentry-52083
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- SDK Crash Detection: Set in app Set all in app for all frames to `false` except for the SDK frames after stripping the event data. https://github.com/getsentry/sentry/blob/95086b406dec79e6bcef45f299a3e92f727da2c0/src/sentry/utils/sdk_crashes/sdk_crash_detection.py#L58 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/sentry/utils/sdk_crashes/event_stripper.py` Content: ``` 1 from enum import Enum, auto 2 from typing import Any, Dict, Mapping, Optional, Sequence 3 4 from sentry.db.models import NodeData 5 from sentry.utils.safe import get_path 6 from sentry.utils.sdk_crashes.sdk_crash_detector import SDKCrashDetector 7 8 9 class Allow(Enum): 10 def __init__(self, explanation: str = "") -> None: 11 self.explanation = explanation 12 13 """Keeps the event data if it is of type str, int, float, bool.""" 14 SIMPLE_TYPE = auto() 15 16 """ 17 Doesn't keep the event data no matter the type. This can be used to explicitly 18 specify that data should be removed with an explanation. 19 """ 20 NEVER = auto() 21 22 def with_explanation(self, explanation: str) -> "Allow": 23 self.explanation = explanation 24 return self 25 26 27 EVENT_DATA_ALLOWLIST = { 28 "type": Allow.SIMPLE_TYPE, 29 "datetime": Allow.SIMPLE_TYPE, 30 "timestamp": Allow.SIMPLE_TYPE, 31 "platform": Allow.SIMPLE_TYPE, 32 "sdk": { 33 "name": Allow.SIMPLE_TYPE, 34 "version": Allow.SIMPLE_TYPE, 35 "integrations": Allow.NEVER.with_explanation("Users can add their own integrations."), 36 }, 37 "exception": { 38 "values": { 39 "stacktrace": { 40 "frames": { 41 "filename": Allow.SIMPLE_TYPE, 42 "function": Allow.SIMPLE_TYPE, 43 "raw_function": Allow.SIMPLE_TYPE, 44 "module": Allow.SIMPLE_TYPE, 45 "abs_path": Allow.SIMPLE_TYPE, 46 "in_app": Allow.SIMPLE_TYPE, 47 "instruction_addr": Allow.SIMPLE_TYPE, 48 "addr_mode": Allow.SIMPLE_TYPE, 49 "symbol": Allow.SIMPLE_TYPE, 50 "symbol_addr": Allow.SIMPLE_TYPE, 51 "image_addr": Allow.SIMPLE_TYPE, 52 "package": Allow.SIMPLE_TYPE, 53 "platform": Allow.SIMPLE_TYPE, 54 } 55 }, 56 "value": Allow.NEVER.with_explanation("The exception value could contain PII."), 57 "type": Allow.SIMPLE_TYPE, 58 "mechanism": { 59 "handled": Allow.SIMPLE_TYPE, 60 "type": Allow.SIMPLE_TYPE, 61 "meta": { 62 "signal": { 63 "number": Allow.SIMPLE_TYPE, 64 "code": Allow.SIMPLE_TYPE, 65 "name": Allow.SIMPLE_TYPE, 66 "code_name": Allow.SIMPLE_TYPE, 67 }, 68 "mach_exception": { 69 "exception": Allow.SIMPLE_TYPE, 70 "code": Allow.SIMPLE_TYPE, 71 "subcode": Allow.SIMPLE_TYPE, 72 "name": Allow.SIMPLE_TYPE, 73 }, 74 }, 75 }, 76 } 77 }, 78 "contexts": { 79 "device": { 80 "family": Allow.SIMPLE_TYPE, 81 "model": Allow.SIMPLE_TYPE, 82 "arch": Allow.SIMPLE_TYPE, 83 }, 84 "os": { 85 "name": Allow.SIMPLE_TYPE, 86 "version": Allow.SIMPLE_TYPE, 87 "build": Allow.SIMPLE_TYPE, 88 }, 89 }, 90 } 91 92 93 def strip_event_data( 94 event_data: NodeData, sdk_crash_detector: SDKCrashDetector 95 ) -> Mapping[str, Any]: 96 new_event_data = _strip_event_data_with_allowlist(event_data, EVENT_DATA_ALLOWLIST) 97 98 if (new_event_data is None) or (new_event_data == {}): 99 return {} 100 101 stripped_frames: Sequence[Mapping[str, Any]] = [] 102 frames = get_path(new_event_data, "exception", "values", -1, "stacktrace", "frames") 103 104 if frames is not None: 105 stripped_frames = _strip_frames(frames, sdk_crash_detector) 106 
new_event_data["exception"]["values"][0]["stacktrace"]["frames"] = stripped_frames 107 108 return new_event_data 109 110 111 def _strip_event_data_with_allowlist( 112 data: Mapping[str, Any], allowlist: Optional[Mapping[str, Any]] 113 ) -> Optional[Mapping[str, Any]]: 114 """ 115 Recursively traverses the data and only keeps values based on the allowlist. 116 """ 117 if allowlist is None: 118 return None 119 120 stripped_data: Dict[str, Any] = {} 121 for data_key, data_value in data.items(): 122 allowlist_for_data = allowlist.get(data_key) 123 if allowlist_for_data is None: 124 continue 125 126 if isinstance(allowlist_for_data, Allow): 127 allowed = allowlist_for_data 128 129 if allowed is Allow.SIMPLE_TYPE and isinstance(data_value, (str, int, float, bool)): 130 stripped_data[data_key] = data_value 131 else: 132 continue 133 134 elif isinstance(data_value, Mapping): 135 stripped_data[data_key] = _strip_event_data_with_allowlist( 136 data_value, allowlist_for_data 137 ) 138 elif isinstance(data_value, Sequence): 139 stripped_data[data_key] = [ 140 _strip_event_data_with_allowlist(item, allowlist_for_data) for item in data_value 141 ] 142 143 return stripped_data 144 145 146 def _strip_frames( 147 frames: Sequence[Mapping[str, Any]], sdk_crash_detector: SDKCrashDetector 148 ) -> Sequence[Mapping[str, Any]]: 149 """ 150 Only keep SDK frames or Apple system libraries. 151 We need to adapt this logic once we support other platforms. 152 """ 153 154 def is_system_library(frame: Mapping[str, Any]) -> bool: 155 fields_containing_paths = {"package", "module", "abs_path"} 156 system_library_paths = {"/System/Library/", "/usr/lib/system/"} 157 158 for field in fields_containing_paths: 159 for path in system_library_paths: 160 if frame.get(field, "").startswith(path): 161 return True 162 163 return False 164 165 return [ 166 frame 167 for frame in frames 168 if sdk_crash_detector.is_sdk_frame(frame) or is_system_library(frame) 169 ] 170 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/sentry/utils/sdk_crashes/event_stripper.py b/src/sentry/utils/sdk_crashes/event_stripper.py --- a/src/sentry/utils/sdk_crashes/event_stripper.py +++ b/src/sentry/utils/sdk_crashes/event_stripper.py @@ -1,5 +1,5 @@ from enum import Enum, auto -from typing import Any, Dict, Mapping, Optional, Sequence +from typing import Any, Dict, Mapping, MutableMapping, Optional, Sequence from sentry.db.models import NodeData from sentry.utils.safe import get_path @@ -98,11 +98,11 @@ if (new_event_data is None) or (new_event_data == {}): return {} - stripped_frames: Sequence[Mapping[str, Any]] = [] frames = get_path(new_event_data, "exception", "values", -1, "stacktrace", "frames") if frames is not None: stripped_frames = _strip_frames(frames, sdk_crash_detector) + new_event_data["exception"]["values"][0]["stacktrace"]["frames"] = stripped_frames return new_event_data @@ -144,7 +144,7 @@ def _strip_frames( - frames: Sequence[Mapping[str, Any]], sdk_crash_detector: SDKCrashDetector + frames: Sequence[MutableMapping[str, Any]], sdk_crash_detector: SDKCrashDetector ) -> Sequence[Mapping[str, Any]]: """ Only keep SDK frames or Apple system libraries. @@ -162,8 +162,15 @@ return False + def strip_frame(frame: MutableMapping[str, Any]) -> MutableMapping[str, Any]: + if sdk_crash_detector.is_sdk_frame(frame): + frame["in_app"] = True + else: + frame["in_app"] = False + return frame + return [ - frame + strip_frame(frame) for frame in frames if sdk_crash_detector.is_sdk_frame(frame) or is_system_library(frame) ]
{"golden_diff": "diff --git a/src/sentry/utils/sdk_crashes/event_stripper.py b/src/sentry/utils/sdk_crashes/event_stripper.py\n--- a/src/sentry/utils/sdk_crashes/event_stripper.py\n+++ b/src/sentry/utils/sdk_crashes/event_stripper.py\n@@ -1,5 +1,5 @@\n from enum import Enum, auto\n-from typing import Any, Dict, Mapping, Optional, Sequence\n+from typing import Any, Dict, Mapping, MutableMapping, Optional, Sequence\n \n from sentry.db.models import NodeData\n from sentry.utils.safe import get_path\n@@ -98,11 +98,11 @@\n if (new_event_data is None) or (new_event_data == {}):\n return {}\n \n- stripped_frames: Sequence[Mapping[str, Any]] = []\n frames = get_path(new_event_data, \"exception\", \"values\", -1, \"stacktrace\", \"frames\")\n \n if frames is not None:\n stripped_frames = _strip_frames(frames, sdk_crash_detector)\n+\n new_event_data[\"exception\"][\"values\"][0][\"stacktrace\"][\"frames\"] = stripped_frames\n \n return new_event_data\n@@ -144,7 +144,7 @@\n \n \n def _strip_frames(\n- frames: Sequence[Mapping[str, Any]], sdk_crash_detector: SDKCrashDetector\n+ frames: Sequence[MutableMapping[str, Any]], sdk_crash_detector: SDKCrashDetector\n ) -> Sequence[Mapping[str, Any]]:\n \"\"\"\n Only keep SDK frames or Apple system libraries.\n@@ -162,8 +162,15 @@\n \n return False\n \n+ def strip_frame(frame: MutableMapping[str, Any]) -> MutableMapping[str, Any]:\n+ if sdk_crash_detector.is_sdk_frame(frame):\n+ frame[\"in_app\"] = True\n+ else:\n+ frame[\"in_app\"] = False\n+ return frame\n+\n return [\n- frame\n+ strip_frame(frame)\n for frame in frames\n if sdk_crash_detector.is_sdk_frame(frame) or is_system_library(frame)\n ]\n", "issue": "SDK Crash Detection: Set in app\nSet all in app for all frames to `false` except for the SDK frames after stripping the event data. \r\n\r\nhttps://github.com/getsentry/sentry/blob/95086b406dec79e6bcef45f299a3e92f727da2c0/src/sentry/utils/sdk_crashes/sdk_crash_detection.py#L58\n", "before_files": [{"content": "from enum import Enum, auto\nfrom typing import Any, Dict, Mapping, Optional, Sequence\n\nfrom sentry.db.models import NodeData\nfrom sentry.utils.safe import get_path\nfrom sentry.utils.sdk_crashes.sdk_crash_detector import SDKCrashDetector\n\n\nclass Allow(Enum):\n def __init__(self, explanation: str = \"\") -> None:\n self.explanation = explanation\n\n \"\"\"Keeps the event data if it is of type str, int, float, bool.\"\"\"\n SIMPLE_TYPE = auto()\n\n \"\"\"\n Doesn't keep the event data no matter the type. 
This can be used to explicitly\n specify that data should be removed with an explanation.\n \"\"\"\n NEVER = auto()\n\n def with_explanation(self, explanation: str) -> \"Allow\":\n self.explanation = explanation\n return self\n\n\nEVENT_DATA_ALLOWLIST = {\n \"type\": Allow.SIMPLE_TYPE,\n \"datetime\": Allow.SIMPLE_TYPE,\n \"timestamp\": Allow.SIMPLE_TYPE,\n \"platform\": Allow.SIMPLE_TYPE,\n \"sdk\": {\n \"name\": Allow.SIMPLE_TYPE,\n \"version\": Allow.SIMPLE_TYPE,\n \"integrations\": Allow.NEVER.with_explanation(\"Users can add their own integrations.\"),\n },\n \"exception\": {\n \"values\": {\n \"stacktrace\": {\n \"frames\": {\n \"filename\": Allow.SIMPLE_TYPE,\n \"function\": Allow.SIMPLE_TYPE,\n \"raw_function\": Allow.SIMPLE_TYPE,\n \"module\": Allow.SIMPLE_TYPE,\n \"abs_path\": Allow.SIMPLE_TYPE,\n \"in_app\": Allow.SIMPLE_TYPE,\n \"instruction_addr\": Allow.SIMPLE_TYPE,\n \"addr_mode\": Allow.SIMPLE_TYPE,\n \"symbol\": Allow.SIMPLE_TYPE,\n \"symbol_addr\": Allow.SIMPLE_TYPE,\n \"image_addr\": Allow.SIMPLE_TYPE,\n \"package\": Allow.SIMPLE_TYPE,\n \"platform\": Allow.SIMPLE_TYPE,\n }\n },\n \"value\": Allow.NEVER.with_explanation(\"The exception value could contain PII.\"),\n \"type\": Allow.SIMPLE_TYPE,\n \"mechanism\": {\n \"handled\": Allow.SIMPLE_TYPE,\n \"type\": Allow.SIMPLE_TYPE,\n \"meta\": {\n \"signal\": {\n \"number\": Allow.SIMPLE_TYPE,\n \"code\": Allow.SIMPLE_TYPE,\n \"name\": Allow.SIMPLE_TYPE,\n \"code_name\": Allow.SIMPLE_TYPE,\n },\n \"mach_exception\": {\n \"exception\": Allow.SIMPLE_TYPE,\n \"code\": Allow.SIMPLE_TYPE,\n \"subcode\": Allow.SIMPLE_TYPE,\n \"name\": Allow.SIMPLE_TYPE,\n },\n },\n },\n }\n },\n \"contexts\": {\n \"device\": {\n \"family\": Allow.SIMPLE_TYPE,\n \"model\": Allow.SIMPLE_TYPE,\n \"arch\": Allow.SIMPLE_TYPE,\n },\n \"os\": {\n \"name\": Allow.SIMPLE_TYPE,\n \"version\": Allow.SIMPLE_TYPE,\n \"build\": Allow.SIMPLE_TYPE,\n },\n },\n}\n\n\ndef strip_event_data(\n event_data: NodeData, sdk_crash_detector: SDKCrashDetector\n) -> Mapping[str, Any]:\n new_event_data = _strip_event_data_with_allowlist(event_data, EVENT_DATA_ALLOWLIST)\n\n if (new_event_data is None) or (new_event_data == {}):\n return {}\n\n stripped_frames: Sequence[Mapping[str, Any]] = []\n frames = get_path(new_event_data, \"exception\", \"values\", -1, \"stacktrace\", \"frames\")\n\n if frames is not None:\n stripped_frames = _strip_frames(frames, sdk_crash_detector)\n new_event_data[\"exception\"][\"values\"][0][\"stacktrace\"][\"frames\"] = stripped_frames\n\n return new_event_data\n\n\ndef _strip_event_data_with_allowlist(\n data: Mapping[str, Any], allowlist: Optional[Mapping[str, Any]]\n) -> Optional[Mapping[str, Any]]:\n \"\"\"\n Recursively traverses the data and only keeps values based on the allowlist.\n \"\"\"\n if allowlist is None:\n return None\n\n stripped_data: Dict[str, Any] = {}\n for data_key, data_value in data.items():\n allowlist_for_data = allowlist.get(data_key)\n if allowlist_for_data is None:\n continue\n\n if isinstance(allowlist_for_data, Allow):\n allowed = allowlist_for_data\n\n if allowed is Allow.SIMPLE_TYPE and isinstance(data_value, (str, int, float, bool)):\n stripped_data[data_key] = data_value\n else:\n continue\n\n elif isinstance(data_value, Mapping):\n stripped_data[data_key] = _strip_event_data_with_allowlist(\n data_value, allowlist_for_data\n )\n elif isinstance(data_value, Sequence):\n stripped_data[data_key] = [\n _strip_event_data_with_allowlist(item, allowlist_for_data) for item in data_value\n ]\n\n return 
stripped_data\n\n\ndef _strip_frames(\n frames: Sequence[Mapping[str, Any]], sdk_crash_detector: SDKCrashDetector\n) -> Sequence[Mapping[str, Any]]:\n \"\"\"\n Only keep SDK frames or Apple system libraries.\n We need to adapt this logic once we support other platforms.\n \"\"\"\n\n def is_system_library(frame: Mapping[str, Any]) -> bool:\n fields_containing_paths = {\"package\", \"module\", \"abs_path\"}\n system_library_paths = {\"/System/Library/\", \"/usr/lib/system/\"}\n\n for field in fields_containing_paths:\n for path in system_library_paths:\n if frame.get(field, \"\").startswith(path):\n return True\n\n return False\n\n return [\n frame\n for frame in frames\n if sdk_crash_detector.is_sdk_frame(frame) or is_system_library(frame)\n ]\n", "path": "src/sentry/utils/sdk_crashes/event_stripper.py"}], "after_files": [{"content": "from enum import Enum, auto\nfrom typing import Any, Dict, Mapping, MutableMapping, Optional, Sequence\n\nfrom sentry.db.models import NodeData\nfrom sentry.utils.safe import get_path\nfrom sentry.utils.sdk_crashes.sdk_crash_detector import SDKCrashDetector\n\n\nclass Allow(Enum):\n def __init__(self, explanation: str = \"\") -> None:\n self.explanation = explanation\n\n \"\"\"Keeps the event data if it is of type str, int, float, bool.\"\"\"\n SIMPLE_TYPE = auto()\n\n \"\"\"\n Doesn't keep the event data no matter the type. This can be used to explicitly\n specify that data should be removed with an explanation.\n \"\"\"\n NEVER = auto()\n\n def with_explanation(self, explanation: str) -> \"Allow\":\n self.explanation = explanation\n return self\n\n\nEVENT_DATA_ALLOWLIST = {\n \"type\": Allow.SIMPLE_TYPE,\n \"datetime\": Allow.SIMPLE_TYPE,\n \"timestamp\": Allow.SIMPLE_TYPE,\n \"platform\": Allow.SIMPLE_TYPE,\n \"sdk\": {\n \"name\": Allow.SIMPLE_TYPE,\n \"version\": Allow.SIMPLE_TYPE,\n \"integrations\": Allow.NEVER.with_explanation(\"Users can add their own integrations.\"),\n },\n \"exception\": {\n \"values\": {\n \"stacktrace\": {\n \"frames\": {\n \"filename\": Allow.SIMPLE_TYPE,\n \"function\": Allow.SIMPLE_TYPE,\n \"raw_function\": Allow.SIMPLE_TYPE,\n \"module\": Allow.SIMPLE_TYPE,\n \"abs_path\": Allow.SIMPLE_TYPE,\n \"in_app\": Allow.SIMPLE_TYPE,\n \"instruction_addr\": Allow.SIMPLE_TYPE,\n \"addr_mode\": Allow.SIMPLE_TYPE,\n \"symbol\": Allow.SIMPLE_TYPE,\n \"symbol_addr\": Allow.SIMPLE_TYPE,\n \"image_addr\": Allow.SIMPLE_TYPE,\n \"package\": Allow.SIMPLE_TYPE,\n \"platform\": Allow.SIMPLE_TYPE,\n }\n },\n \"value\": Allow.NEVER.with_explanation(\"The exception value could contain PII.\"),\n \"type\": Allow.SIMPLE_TYPE,\n \"mechanism\": {\n \"handled\": Allow.SIMPLE_TYPE,\n \"type\": Allow.SIMPLE_TYPE,\n \"meta\": {\n \"signal\": {\n \"number\": Allow.SIMPLE_TYPE,\n \"code\": Allow.SIMPLE_TYPE,\n \"name\": Allow.SIMPLE_TYPE,\n \"code_name\": Allow.SIMPLE_TYPE,\n },\n \"mach_exception\": {\n \"exception\": Allow.SIMPLE_TYPE,\n \"code\": Allow.SIMPLE_TYPE,\n \"subcode\": Allow.SIMPLE_TYPE,\n \"name\": Allow.SIMPLE_TYPE,\n },\n },\n },\n }\n },\n \"contexts\": {\n \"device\": {\n \"family\": Allow.SIMPLE_TYPE,\n \"model\": Allow.SIMPLE_TYPE,\n \"arch\": Allow.SIMPLE_TYPE,\n },\n \"os\": {\n \"name\": Allow.SIMPLE_TYPE,\n \"version\": Allow.SIMPLE_TYPE,\n \"build\": Allow.SIMPLE_TYPE,\n },\n },\n}\n\n\ndef strip_event_data(\n event_data: NodeData, sdk_crash_detector: SDKCrashDetector\n) -> Mapping[str, Any]:\n new_event_data = _strip_event_data_with_allowlist(event_data, EVENT_DATA_ALLOWLIST)\n\n if (new_event_data is None) or (new_event_data == 
{}):\n return {}\n\n frames = get_path(new_event_data, \"exception\", \"values\", -1, \"stacktrace\", \"frames\")\n\n if frames is not None:\n stripped_frames = _strip_frames(frames, sdk_crash_detector)\n\n new_event_data[\"exception\"][\"values\"][0][\"stacktrace\"][\"frames\"] = stripped_frames\n\n return new_event_data\n\n\ndef _strip_event_data_with_allowlist(\n data: Mapping[str, Any], allowlist: Optional[Mapping[str, Any]]\n) -> Optional[Mapping[str, Any]]:\n \"\"\"\n Recursively traverses the data and only keeps values based on the allowlist.\n \"\"\"\n if allowlist is None:\n return None\n\n stripped_data: Dict[str, Any] = {}\n for data_key, data_value in data.items():\n allowlist_for_data = allowlist.get(data_key)\n if allowlist_for_data is None:\n continue\n\n if isinstance(allowlist_for_data, Allow):\n allowed = allowlist_for_data\n\n if allowed is Allow.SIMPLE_TYPE and isinstance(data_value, (str, int, float, bool)):\n stripped_data[data_key] = data_value\n else:\n continue\n\n elif isinstance(data_value, Mapping):\n stripped_data[data_key] = _strip_event_data_with_allowlist(\n data_value, allowlist_for_data\n )\n elif isinstance(data_value, Sequence):\n stripped_data[data_key] = [\n _strip_event_data_with_allowlist(item, allowlist_for_data) for item in data_value\n ]\n\n return stripped_data\n\n\ndef _strip_frames(\n frames: Sequence[MutableMapping[str, Any]], sdk_crash_detector: SDKCrashDetector\n) -> Sequence[Mapping[str, Any]]:\n \"\"\"\n Only keep SDK frames or Apple system libraries.\n We need to adapt this logic once we support other platforms.\n \"\"\"\n\n def is_system_library(frame: Mapping[str, Any]) -> bool:\n fields_containing_paths = {\"package\", \"module\", \"abs_path\"}\n system_library_paths = {\"/System/Library/\", \"/usr/lib/system/\"}\n\n for field in fields_containing_paths:\n for path in system_library_paths:\n if frame.get(field, \"\").startswith(path):\n return True\n\n return False\n\n def strip_frame(frame: MutableMapping[str, Any]) -> MutableMapping[str, Any]:\n if sdk_crash_detector.is_sdk_frame(frame):\n frame[\"in_app\"] = True\n else:\n frame[\"in_app\"] = False\n return frame\n\n return [\n strip_frame(frame)\n for frame in frames\n if sdk_crash_detector.is_sdk_frame(frame) or is_system_library(frame)\n ]\n", "path": "src/sentry/utils/sdk_crashes/event_stripper.py"}]}
2,041
448
gh_patches_debug_6916
rasdani/github-patches
git_diff
joke2k__faker-678
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- NameError: name 'args' is not defined I'm getting an error using the *args method. ``` fake.email(*args, **kwargs) Traceback (most recent call last): File "<stdin>", line 1, in <module> NameError: name 'args' is not defined ``` Also can I change the **kwargs with @gmail to get [email protected]? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `faker/providers/internet/__init__.py` Content: ``` 1 # coding=utf-8 2 from __future__ import unicode_literals 3 4 from text_unidecode import unidecode 5 6 from .. import BaseProvider 7 8 from ipaddress import ip_address, ip_network, IPV4LENGTH, IPV6LENGTH 9 10 # from faker.generator import random 11 # from faker.providers.lorem.la import Provider as Lorem 12 from faker.utils.decorators import lowercase, slugify, slugify_unicode 13 14 15 localized = True 16 17 18 class Provider(BaseProvider): 19 safe_email_tlds = ('org', 'com', 'net') 20 free_email_domains = ('gmail.com', 'yahoo.com', 'hotmail.com') 21 tlds = ( 22 'com', 'com', 'com', 'com', 'com', 'com', 'biz', 'info', 'net', 'org' 23 ) 24 25 uri_pages = ( 26 'index', 'home', 'search', 'main', 'post', 'homepage', 'category', 27 'register', 'login', 'faq', 'about', 'terms', 'privacy', 'author' 28 ) 29 uri_paths = ( 30 'app', 'main', 'wp-content', 'search', 'category', 'tag', 'categories', 31 'tags', 'blog', 'posts', 'list', 'explore' 32 ) 33 uri_extensions = ( 34 '.html', '.html', '.html', '.htm', '.htm', '.php', '.php', '.jsp', 35 '.asp' 36 ) 37 38 user_name_formats = ( 39 '{{last_name}}.{{first_name}}', 40 '{{first_name}}.{{last_name}}', 41 '{{first_name}}##', 42 '?{{last_name}}', 43 ) 44 email_formats = ( 45 '{{user_name}}@{{domain_name}}', 46 '{{user_name}}@{{free_email_domain}}', 47 ) 48 url_formats = ( 49 'http://www.{{domain_name}}/', 50 'http://{{domain_name}}/', 51 'https://www.{{domain_name}}/', 52 'https://{{domain_name}}/', 53 ) 54 uri_formats = ( 55 '{{url}}', 56 '{{url}}{{uri_page}}/', 57 '{{url}}{{uri_page}}{{uri_extension}}', 58 '{{url}}{{uri_path}}/{{uri_page}}/', 59 '{{url}}{{uri_path}}/{{uri_page}}{{uri_extension}}', 60 ) 61 image_placeholder_services = ( 62 'https://placeholdit.imgix.net/~text' 63 '?txtsize=55&txt={width}x{height}&w={width}&h={height}', 64 'https://www.lorempixel.com/{width}/{height}', 65 'https://dummyimage.com/{width}x{height}', 66 ) 67 68 replacements = tuple() 69 70 def _to_ascii(self, string): 71 for search, replace in self.replacements: 72 string = string.replace(search, replace) 73 74 string = unidecode(string) 75 return string 76 77 @lowercase 78 def email(self): 79 pattern = self.random_element(self.email_formats) 80 return "".join(self.generator.parse(pattern).split(" ")) 81 82 @lowercase 83 def safe_email(self): 84 return '{}@example.{}'.format( 85 self.user_name(), self.random_element(self.safe_email_tlds) 86 ) 87 88 @lowercase 89 def free_email(self): 90 return self.user_name() + '@' + self.free_email_domain() 91 92 @lowercase 93 def company_email(self): 94 return self.user_name() + '@' + self.domain_name() 95 96 @lowercase 97 def free_email_domain(self): 98 return self.random_element(self.free_email_domains) 99 100 @lowercase 101 def ascii_email(self): 102 pattern = self.random_element(self.email_formats) 103 return self._to_ascii( 104 "".join(self.generator.parse(pattern).split(" ")) 105 ) 106 107 @lowercase 108 def ascii_safe_email(self): 109 
return self._to_ascii( 110 self.user_name() + 111 '@example.' + 112 self.random_element(self.safe_email_tlds) 113 ) 114 115 @lowercase 116 def ascii_free_email(self): 117 return self._to_ascii( 118 self.user_name() + '@' + self.free_email_domain() 119 ) 120 121 @lowercase 122 def ascii_company_email(self): 123 return self._to_ascii( 124 self.user_name() + '@' + self.domain_name() 125 ) 126 127 @slugify_unicode 128 def user_name(self): 129 pattern = self.random_element(self.user_name_formats) 130 username = self._to_ascii( 131 self.bothify(self.generator.parse(pattern)).lower() 132 ) 133 return username 134 135 @lowercase 136 def domain_name(self, levels=1): 137 """ 138 Produce an Internet domain name with the specified number of 139 subdomain levels. 140 141 >>> domain_name() 142 nichols-phillips.com 143 >>> domain_name(2) 144 williamson-hopkins.jackson.com 145 """ 146 if levels < 1: 147 raise ValueError("levels must be greater than or equal to 1") 148 if levels == 1: 149 return self.domain_word() + '.' + self.tld() 150 else: 151 return self.domain_word() + '.' + self.domain_name(levels - 1) 152 153 @lowercase 154 @slugify_unicode 155 def domain_word(self,): 156 company = self.generator.format('company') 157 company_elements = company.split(' ') 158 company = self._to_ascii(company_elements.pop(0)) 159 return company 160 161 def tld(self): 162 return self.random_element(self.tlds) 163 164 def url(self): 165 pattern = self.random_element(self.url_formats) 166 return self.generator.parse(pattern) 167 168 def ipv4(self, network=False): 169 """Produce a random IPv4 address or network with a valid CIDR""" 170 address = str(ip_address(self.generator.random.randint( 171 0, (2 ** IPV4LENGTH) - 1))) 172 if network: 173 address += '/' + str(self.generator.random.randint(0, IPV4LENGTH)) 174 address = str(ip_network(address, strict=False)) 175 return address 176 177 def ipv6(self, network=False): 178 """Produce a random IPv6 address or network with a valid CIDR""" 179 address = str(ip_address(self.generator.random.randint( 180 2 ** IPV4LENGTH, (2 ** IPV6LENGTH) - 1))) 181 if network: 182 address += '/' + str(self.generator.random.randint(0, IPV6LENGTH)) 183 address = str(ip_network(address, strict=False)) 184 return address 185 186 def mac_address(self): 187 mac = [self.generator.random.randint(0x00, 0xff) for i in range(0, 6)] 188 return ":".join(map(lambda x: "%02x" % x, mac)) 189 190 def uri_page(self): 191 return self.random_element(self.uri_pages) 192 193 def uri_path(self, deep=None): 194 deep = deep if deep else self.generator.random.randint(1, 3) 195 return "/".join( 196 [self.random_element(self.uri_paths) for _ in range(0, deep)] 197 ) 198 199 def uri_extension(self): 200 return self.random_element(self.uri_extensions) 201 202 def uri(self): 203 pattern = self.random_element(self.uri_formats) 204 return self.generator.parse(pattern) 205 206 @slugify 207 def slug(self, value=None): 208 """Django algorithm""" 209 if value is None: 210 value = self.generator.text(20) 211 return value 212 213 def image_url(self, width=None, height=None): 214 """ 215 Returns URL to placeholder image 216 Example: http://placehold.it/640x480 217 """ 218 width_ = width or self.random_int(max=1024) 219 height_ = height or self.random_int(max=1024) 220 placeholder_url = self.random_element(self.image_placeholder_services) 221 return placeholder_url.format(width=width_, height=height_) 222 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git 
diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/faker/providers/internet/__init__.py b/faker/providers/internet/__init__.py --- a/faker/providers/internet/__init__.py +++ b/faker/providers/internet/__init__.py @@ -75,9 +75,13 @@ return string @lowercase - def email(self): - pattern = self.random_element(self.email_formats) - return "".join(self.generator.parse(pattern).split(" ")) + def email(self, domain=None): + if domain: + email = '{0}@{1}'.format(self.user_name(), domain) + else: + pattern = self.random_element(self.email_formats) + email = "".join(self.generator.parse(pattern).split(" ")) + return email @lowercase def safe_email(self):
{"golden_diff": "diff --git a/faker/providers/internet/__init__.py b/faker/providers/internet/__init__.py\n--- a/faker/providers/internet/__init__.py\n+++ b/faker/providers/internet/__init__.py\n@@ -75,9 +75,13 @@\n return string\n \n @lowercase\n- def email(self):\n- pattern = self.random_element(self.email_formats)\n- return \"\".join(self.generator.parse(pattern).split(\" \"))\n+ def email(self, domain=None):\n+ if domain:\n+ email = '{0}@{1}'.format(self.user_name(), domain)\n+ else:\n+ pattern = self.random_element(self.email_formats)\n+ email = \"\".join(self.generator.parse(pattern).split(\" \"))\n+ return email\n \n @lowercase\n def safe_email(self):\n", "issue": "NameError: name 'args' is not defined\nI'm getting an error using the *args method.\r\n\r\n```\r\nfake.email(*args, **kwargs)\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\nNameError: name 'args' is not defined\r\n\r\n```\r\nAlso can I change the **kwargs with @gmail to get [email protected]?\r\n\r\n\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import unicode_literals\n\nfrom text_unidecode import unidecode\n\nfrom .. import BaseProvider\n\nfrom ipaddress import ip_address, ip_network, IPV4LENGTH, IPV6LENGTH\n\n# from faker.generator import random\n# from faker.providers.lorem.la import Provider as Lorem\nfrom faker.utils.decorators import lowercase, slugify, slugify_unicode\n\n\nlocalized = True\n\n\nclass Provider(BaseProvider):\n safe_email_tlds = ('org', 'com', 'net')\n free_email_domains = ('gmail.com', 'yahoo.com', 'hotmail.com')\n tlds = (\n 'com', 'com', 'com', 'com', 'com', 'com', 'biz', 'info', 'net', 'org'\n )\n\n uri_pages = (\n 'index', 'home', 'search', 'main', 'post', 'homepage', 'category',\n 'register', 'login', 'faq', 'about', 'terms', 'privacy', 'author'\n )\n uri_paths = (\n 'app', 'main', 'wp-content', 'search', 'category', 'tag', 'categories',\n 'tags', 'blog', 'posts', 'list', 'explore'\n )\n uri_extensions = (\n '.html', '.html', '.html', '.htm', '.htm', '.php', '.php', '.jsp',\n '.asp'\n )\n\n user_name_formats = (\n '{{last_name}}.{{first_name}}',\n '{{first_name}}.{{last_name}}',\n '{{first_name}}##',\n '?{{last_name}}',\n )\n email_formats = (\n '{{user_name}}@{{domain_name}}',\n '{{user_name}}@{{free_email_domain}}',\n )\n url_formats = (\n 'http://www.{{domain_name}}/',\n 'http://{{domain_name}}/',\n 'https://www.{{domain_name}}/',\n 'https://{{domain_name}}/',\n )\n uri_formats = (\n '{{url}}',\n '{{url}}{{uri_page}}/',\n '{{url}}{{uri_page}}{{uri_extension}}',\n '{{url}}{{uri_path}}/{{uri_page}}/',\n '{{url}}{{uri_path}}/{{uri_page}}{{uri_extension}}',\n )\n image_placeholder_services = (\n 'https://placeholdit.imgix.net/~text'\n '?txtsize=55&txt={width}x{height}&w={width}&h={height}',\n 'https://www.lorempixel.com/{width}/{height}',\n 'https://dummyimage.com/{width}x{height}',\n )\n\n replacements = tuple()\n\n def _to_ascii(self, string):\n for search, replace in self.replacements:\n string = string.replace(search, replace)\n\n string = unidecode(string)\n return string\n\n @lowercase\n def email(self):\n pattern = self.random_element(self.email_formats)\n return \"\".join(self.generator.parse(pattern).split(\" \"))\n\n @lowercase\n def safe_email(self):\n return '{}@example.{}'.format(\n self.user_name(), self.random_element(self.safe_email_tlds)\n )\n\n @lowercase\n def free_email(self):\n return self.user_name() + '@' + self.free_email_domain()\n\n @lowercase\n def company_email(self):\n return self.user_name() + '@' + 
self.domain_name()\n\n @lowercase\n def free_email_domain(self):\n return self.random_element(self.free_email_domains)\n\n @lowercase\n def ascii_email(self):\n pattern = self.random_element(self.email_formats)\n return self._to_ascii(\n \"\".join(self.generator.parse(pattern).split(\" \"))\n )\n\n @lowercase\n def ascii_safe_email(self):\n return self._to_ascii(\n self.user_name() +\n '@example.' +\n self.random_element(self.safe_email_tlds)\n )\n\n @lowercase\n def ascii_free_email(self):\n return self._to_ascii(\n self.user_name() + '@' + self.free_email_domain()\n )\n\n @lowercase\n def ascii_company_email(self):\n return self._to_ascii(\n self.user_name() + '@' + self.domain_name()\n )\n\n @slugify_unicode\n def user_name(self):\n pattern = self.random_element(self.user_name_formats)\n username = self._to_ascii(\n self.bothify(self.generator.parse(pattern)).lower()\n )\n return username\n\n @lowercase\n def domain_name(self, levels=1):\n \"\"\"\n Produce an Internet domain name with the specified number of\n subdomain levels.\n\n >>> domain_name()\n nichols-phillips.com\n >>> domain_name(2)\n williamson-hopkins.jackson.com\n \"\"\"\n if levels < 1:\n raise ValueError(\"levels must be greater than or equal to 1\")\n if levels == 1:\n return self.domain_word() + '.' + self.tld()\n else:\n return self.domain_word() + '.' + self.domain_name(levels - 1)\n\n @lowercase\n @slugify_unicode\n def domain_word(self,):\n company = self.generator.format('company')\n company_elements = company.split(' ')\n company = self._to_ascii(company_elements.pop(0))\n return company\n\n def tld(self):\n return self.random_element(self.tlds)\n\n def url(self):\n pattern = self.random_element(self.url_formats)\n return self.generator.parse(pattern)\n\n def ipv4(self, network=False):\n \"\"\"Produce a random IPv4 address or network with a valid CIDR\"\"\"\n address = str(ip_address(self.generator.random.randint(\n 0, (2 ** IPV4LENGTH) - 1)))\n if network:\n address += '/' + str(self.generator.random.randint(0, IPV4LENGTH))\n address = str(ip_network(address, strict=False))\n return address\n\n def ipv6(self, network=False):\n \"\"\"Produce a random IPv6 address or network with a valid CIDR\"\"\"\n address = str(ip_address(self.generator.random.randint(\n 2 ** IPV4LENGTH, (2 ** IPV6LENGTH) - 1)))\n if network:\n address += '/' + str(self.generator.random.randint(0, IPV6LENGTH))\n address = str(ip_network(address, strict=False))\n return address\n\n def mac_address(self):\n mac = [self.generator.random.randint(0x00, 0xff) for i in range(0, 6)]\n return \":\".join(map(lambda x: \"%02x\" % x, mac))\n\n def uri_page(self):\n return self.random_element(self.uri_pages)\n\n def uri_path(self, deep=None):\n deep = deep if deep else self.generator.random.randint(1, 3)\n return \"/\".join(\n [self.random_element(self.uri_paths) for _ in range(0, deep)]\n )\n\n def uri_extension(self):\n return self.random_element(self.uri_extensions)\n\n def uri(self):\n pattern = self.random_element(self.uri_formats)\n return self.generator.parse(pattern)\n\n @slugify\n def slug(self, value=None):\n \"\"\"Django algorithm\"\"\"\n if value is None:\n value = self.generator.text(20)\n return value\n\n def image_url(self, width=None, height=None):\n \"\"\"\n Returns URL to placeholder image\n Example: http://placehold.it/640x480\n \"\"\"\n width_ = width or self.random_int(max=1024)\n height_ = height or self.random_int(max=1024)\n placeholder_url = self.random_element(self.image_placeholder_services)\n return 
placeholder_url.format(width=width_, height=height_)\n", "path": "faker/providers/internet/__init__.py"}], "after_files": [{"content": "# coding=utf-8\nfrom __future__ import unicode_literals\n\nfrom text_unidecode import unidecode\n\nfrom .. import BaseProvider\n\nfrom ipaddress import ip_address, ip_network, IPV4LENGTH, IPV6LENGTH\n\n# from faker.generator import random\n# from faker.providers.lorem.la import Provider as Lorem\nfrom faker.utils.decorators import lowercase, slugify, slugify_unicode\n\n\nlocalized = True\n\n\nclass Provider(BaseProvider):\n safe_email_tlds = ('org', 'com', 'net')\n free_email_domains = ('gmail.com', 'yahoo.com', 'hotmail.com')\n tlds = (\n 'com', 'com', 'com', 'com', 'com', 'com', 'biz', 'info', 'net', 'org'\n )\n\n uri_pages = (\n 'index', 'home', 'search', 'main', 'post', 'homepage', 'category',\n 'register', 'login', 'faq', 'about', 'terms', 'privacy', 'author'\n )\n uri_paths = (\n 'app', 'main', 'wp-content', 'search', 'category', 'tag', 'categories',\n 'tags', 'blog', 'posts', 'list', 'explore'\n )\n uri_extensions = (\n '.html', '.html', '.html', '.htm', '.htm', '.php', '.php', '.jsp',\n '.asp'\n )\n\n user_name_formats = (\n '{{last_name}}.{{first_name}}',\n '{{first_name}}.{{last_name}}',\n '{{first_name}}##',\n '?{{last_name}}',\n )\n email_formats = (\n '{{user_name}}@{{domain_name}}',\n '{{user_name}}@{{free_email_domain}}',\n )\n url_formats = (\n 'http://www.{{domain_name}}/',\n 'http://{{domain_name}}/',\n 'https://www.{{domain_name}}/',\n 'https://{{domain_name}}/',\n )\n uri_formats = (\n '{{url}}',\n '{{url}}{{uri_page}}/',\n '{{url}}{{uri_page}}{{uri_extension}}',\n '{{url}}{{uri_path}}/{{uri_page}}/',\n '{{url}}{{uri_path}}/{{uri_page}}{{uri_extension}}',\n )\n image_placeholder_services = (\n 'https://placeholdit.imgix.net/~text'\n '?txtsize=55&txt={width}x{height}&w={width}&h={height}',\n 'https://www.lorempixel.com/{width}/{height}',\n 'https://dummyimage.com/{width}x{height}',\n )\n\n replacements = tuple()\n\n def _to_ascii(self, string):\n for search, replace in self.replacements:\n string = string.replace(search, replace)\n\n string = unidecode(string)\n return string\n\n @lowercase\n def email(self, domain=None):\n if domain:\n email = '{0}@{1}'.format(self.user_name(), domain)\n else:\n pattern = self.random_element(self.email_formats)\n email = \"\".join(self.generator.parse(pattern).split(\" \"))\n return email\n\n @lowercase\n def safe_email(self):\n return '{}@example.{}'.format(\n self.user_name(), self.random_element(self.safe_email_tlds)\n )\n\n @lowercase\n def free_email(self):\n return self.user_name() + '@' + self.free_email_domain()\n\n @lowercase\n def company_email(self):\n return self.user_name() + '@' + self.domain_name()\n\n @lowercase\n def free_email_domain(self):\n return self.random_element(self.free_email_domains)\n\n @lowercase\n def ascii_email(self):\n pattern = self.random_element(self.email_formats)\n return self._to_ascii(\n \"\".join(self.generator.parse(pattern).split(\" \"))\n )\n\n @lowercase\n def ascii_safe_email(self):\n return self._to_ascii(\n self.user_name() +\n '@example.' 
+\n self.random_element(self.safe_email_tlds)\n )\n\n @lowercase\n def ascii_free_email(self):\n return self._to_ascii(\n self.user_name() + '@' + self.free_email_domain()\n )\n\n @lowercase\n def ascii_company_email(self):\n return self._to_ascii(\n self.user_name() + '@' + self.domain_name()\n )\n\n @slugify_unicode\n def user_name(self):\n pattern = self.random_element(self.user_name_formats)\n username = self._to_ascii(\n self.bothify(self.generator.parse(pattern)).lower()\n )\n return username\n\n @lowercase\n def domain_name(self, levels=1):\n \"\"\"\n Produce an Internet domain name with the specified number of\n subdomain levels.\n\n >>> domain_name()\n nichols-phillips.com\n >>> domain_name(2)\n williamson-hopkins.jackson.com\n \"\"\"\n if levels < 1:\n raise ValueError(\"levels must be greater than or equal to 1\")\n if levels == 1:\n return self.domain_word() + '.' + self.tld()\n else:\n return self.domain_word() + '.' + self.domain_name(levels - 1)\n\n @lowercase\n @slugify_unicode\n def domain_word(self,):\n company = self.generator.format('company')\n company_elements = company.split(' ')\n company = self._to_ascii(company_elements.pop(0))\n return company\n\n def tld(self):\n return self.random_element(self.tlds)\n\n def url(self):\n pattern = self.random_element(self.url_formats)\n return self.generator.parse(pattern)\n\n def ipv4(self, network=False):\n \"\"\"Produce a random IPv4 address or network with a valid CIDR\"\"\"\n address = str(ip_address(self.generator.random.randint(\n 0, (2 ** IPV4LENGTH) - 1)))\n if network:\n address += '/' + str(self.generator.random.randint(0, IPV4LENGTH))\n address = str(ip_network(address, strict=False))\n return address\n\n def ipv6(self, network=False):\n \"\"\"Produce a random IPv6 address or network with a valid CIDR\"\"\"\n address = str(ip_address(self.generator.random.randint(\n 2 ** IPV4LENGTH, (2 ** IPV6LENGTH) - 1)))\n if network:\n address += '/' + str(self.generator.random.randint(0, IPV6LENGTH))\n address = str(ip_network(address, strict=False))\n return address\n\n def mac_address(self):\n mac = [self.generator.random.randint(0x00, 0xff) for i in range(0, 6)]\n return \":\".join(map(lambda x: \"%02x\" % x, mac))\n\n def uri_page(self):\n return self.random_element(self.uri_pages)\n\n def uri_path(self, deep=None):\n deep = deep if deep else self.generator.random.randint(1, 3)\n return \"/\".join(\n [self.random_element(self.uri_paths) for _ in range(0, deep)]\n )\n\n def uri_extension(self):\n return self.random_element(self.uri_extensions)\n\n def uri(self):\n pattern = self.random_element(self.uri_formats)\n return self.generator.parse(pattern)\n\n @slugify\n def slug(self, value=None):\n \"\"\"Django algorithm\"\"\"\n if value is None:\n value = self.generator.text(20)\n return value\n\n def image_url(self, width=None, height=None):\n \"\"\"\n Returns URL to placeholder image\n Example: http://placehold.it/640x480\n \"\"\"\n width_ = width or self.random_int(max=1024)\n height_ = height or self.random_int(max=1024)\n placeholder_url = self.random_element(self.image_placeholder_services)\n return placeholder_url.format(width=width_, height=height_)\n", "path": "faker/providers/internet/__init__.py"}]}
2,565
176
gh_patches_debug_16733
rasdani/github-patches
git_diff
mampfes__hacs_waste_collection_schedule-1729
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [Bug]: adur_worthing_gov_uk has changed format of address ### I Have A Problem With: A specific source ### What's Your Problem The Adur and Worthing council used to return my address as “12 Roadname”, so that’s what I had in my source args. But the format has recently changed to “12 ROADNAME”, causing the lookup in adur_worthing_gov_uk.py to fail. As the council is just as likely to change it back at some point can I suggest that the lookup is made case independent? ### Source (if relevant) adur_worthing_gov_uk ### Logs _No response_ ### Relevant Configuration _No response_ ### Checklist Source Error - [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration) - [X] Checked that the website of your service provider is still working - [X] Tested my attributes on the service provider website (if possible) - [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on "Redownload" and choose master as version) ### Checklist Sensor Error - [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used) ### Required - [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been. - [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `custom_components/waste_collection_schedule/waste_collection_schedule/source/adur_worthing_gov_uk.py` Content: ``` 1 from datetime import datetime 2 3 import bs4 4 import requests 5 from waste_collection_schedule import Collection # type: ignore[attr-defined] 6 7 TITLE = "Adur & Worthing Councils" 8 DESCRIPTION = "Source for adur-worthing.gov.uk services for Adur & Worthing, UK." 
9 URL = "https://adur-worthing.gov.uk" 10 TEST_CASES = { 11 "Test_001": {"postcode": "BN15 9UX", "address": "1 Western Road North"}, 12 "Test_002": {"postcode": "BN43 5WE", "address": "6 Hebe Road"}, 13 } 14 HEADERS = { 15 "user-agent": "Mozilla/5.0", 16 } 17 ICON_MAP = { 18 "Recycling": "mdi:recycle", 19 "Refuse": "mdi:trash-can", 20 "Garden": "mdi:leaf", 21 } 22 23 24 class Source: 25 def __init__(self, postcode, address): 26 self._postcode = postcode 27 self._address = address 28 29 def fetch(self): 30 31 if self._postcode is None or self._address is None: 32 raise ValueError("Either postcode or address is None") 33 34 s = requests.Session() 35 36 postcode_search_request = s.get( 37 f"https://www.adur-worthing.gov.uk/bin-day/?brlu-address-postcode={self._postcode}&return-url=/bin-day/&action=search", 38 headers=HEADERS, 39 ) 40 html_addresses = postcode_search_request.content 41 addresses = bs4.BeautifulSoup(html_addresses, "html.parser") 42 addresses_select = addresses.find("select", {"id": "brlu-selected-address"}) 43 44 found_address = None 45 for address in addresses_select.find_all("option"): 46 if self._address in address.get_text(): 47 found_address = address 48 49 if found_address is None: 50 raise ValueError("Address not found") 51 52 collections_request = s.get( 53 f"https://www.adur-worthing.gov.uk/bin-day/?brlu-selected-address={address['value']}&return-url=/bin-day/", 54 headers=HEADERS, 55 ) 56 html_collections = collections_request.content 57 bin_collections = bs4.BeautifulSoup(html_collections, "html.parser") 58 59 bin_days_table = bin_collections.find("table", class_="bin-days") 60 bin_days_table_body = bin_days_table.find("tbody") 61 bin_days_by_type = bin_days_table_body.find_all("tr") 62 63 entries = [] 64 65 for bin_by_type in bin_days_by_type: 66 bin_type = bin_by_type.find("th").text 67 icon = ICON_MAP.get(bin_type) 68 bin_days = bin_by_type.find_all("td")[-1].get_text(separator="\n") 69 for bin_day in bin_days.split("\n"): 70 bin_datetime = datetime.strptime(bin_day, "%A %d %b %Y").date() 71 entries.append(Collection(t=bin_type, date=bin_datetime, icon=icon)) 72 73 return entries 74 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/adur_worthing_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/adur_worthing_gov_uk.py --- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/adur_worthing_gov_uk.py +++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/adur_worthing_gov_uk.py @@ -27,7 +27,6 @@ self._address = address def fetch(self): - if self._postcode is None or self._address is None: raise ValueError("Either postcode or address is None") @@ -43,7 +42,7 @@ found_address = None for address in addresses_select.find_all("option"): - if self._address in address.get_text(): + if self._address.upper() in address.get_text().upper(): found_address = address if found_address is None:
{"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/adur_worthing_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/adur_worthing_gov_uk.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/adur_worthing_gov_uk.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/adur_worthing_gov_uk.py\n@@ -27,7 +27,6 @@\n self._address = address\n \n def fetch(self):\n-\n if self._postcode is None or self._address is None:\n raise ValueError(\"Either postcode or address is None\")\n \n@@ -43,7 +42,7 @@\n \n found_address = None\n for address in addresses_select.find_all(\"option\"):\n- if self._address in address.get_text():\n+ if self._address.upper() in address.get_text().upper():\n found_address = address\n \n if found_address is None:\n", "issue": "[Bug]: adur_worthing_gov_uk has changed format of address\n### I Have A Problem With:\n\nA specific source\n\n### What's Your Problem\n\nThe Adur and Worthing council used to return my address as \u201c12 Roadname\u201d, so that\u2019s what I had in my source args. But the format has recently changed to \u201c12 ROADNAME\u201d, causing the lookup in adur_worthing_gov_uk.py to fail. \r\n\r\nAs the council is just as likely to change it back at some point can I suggest that the lookup is made case independent?\n\n### Source (if relevant)\n\nadur_worthing_gov_uk\n\n### Logs\n\n_No response_\n\n### Relevant Configuration\n\n_No response_\n\n### Checklist Source Error\n\n- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)\n- [X] Checked that the website of your service provider is still working\n- [X] Tested my attributes on the service provider website (if possible)\n- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on \"Redownload\" and choose master as version)\n\n### Checklist Sensor Error\n\n- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)\n\n### Required\n\n- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.\n- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.\n", "before_files": [{"content": "from datetime import datetime\n\nimport bs4\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\nTITLE = \"Adur & Worthing Councils\"\nDESCRIPTION = \"Source for adur-worthing.gov.uk services for Adur & Worthing, UK.\"\nURL = \"https://adur-worthing.gov.uk\"\nTEST_CASES = {\n \"Test_001\": {\"postcode\": \"BN15 9UX\", \"address\": \"1 Western Road North\"},\n \"Test_002\": {\"postcode\": \"BN43 5WE\", \"address\": \"6 Hebe Road\"},\n}\nHEADERS = {\n \"user-agent\": \"Mozilla/5.0\",\n}\nICON_MAP = {\n \"Recycling\": \"mdi:recycle\",\n \"Refuse\": \"mdi:trash-can\",\n \"Garden\": \"mdi:leaf\",\n}\n\n\nclass Source:\n def __init__(self, postcode, address):\n self._postcode = postcode\n self._address = address\n\n def fetch(self):\n\n if self._postcode is None or self._address is None:\n raise ValueError(\"Either postcode or address is None\")\n\n s = requests.Session()\n\n postcode_search_request = s.get(\n 
f\"https://www.adur-worthing.gov.uk/bin-day/?brlu-address-postcode={self._postcode}&return-url=/bin-day/&action=search\",\n headers=HEADERS,\n )\n html_addresses = postcode_search_request.content\n addresses = bs4.BeautifulSoup(html_addresses, \"html.parser\")\n addresses_select = addresses.find(\"select\", {\"id\": \"brlu-selected-address\"})\n\n found_address = None\n for address in addresses_select.find_all(\"option\"):\n if self._address in address.get_text():\n found_address = address\n\n if found_address is None:\n raise ValueError(\"Address not found\")\n\n collections_request = s.get(\n f\"https://www.adur-worthing.gov.uk/bin-day/?brlu-selected-address={address['value']}&return-url=/bin-day/\",\n headers=HEADERS,\n )\n html_collections = collections_request.content\n bin_collections = bs4.BeautifulSoup(html_collections, \"html.parser\")\n\n bin_days_table = bin_collections.find(\"table\", class_=\"bin-days\")\n bin_days_table_body = bin_days_table.find(\"tbody\")\n bin_days_by_type = bin_days_table_body.find_all(\"tr\")\n\n entries = []\n\n for bin_by_type in bin_days_by_type:\n bin_type = bin_by_type.find(\"th\").text\n icon = ICON_MAP.get(bin_type)\n bin_days = bin_by_type.find_all(\"td\")[-1].get_text(separator=\"\\n\")\n for bin_day in bin_days.split(\"\\n\"):\n bin_datetime = datetime.strptime(bin_day, \"%A %d %b %Y\").date()\n entries.append(Collection(t=bin_type, date=bin_datetime, icon=icon))\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/adur_worthing_gov_uk.py"}], "after_files": [{"content": "from datetime import datetime\n\nimport bs4\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\nTITLE = \"Adur & Worthing Councils\"\nDESCRIPTION = \"Source for adur-worthing.gov.uk services for Adur & Worthing, UK.\"\nURL = \"https://adur-worthing.gov.uk\"\nTEST_CASES = {\n \"Test_001\": {\"postcode\": \"BN15 9UX\", \"address\": \"1 Western Road North\"},\n \"Test_002\": {\"postcode\": \"BN43 5WE\", \"address\": \"6 Hebe Road\"},\n}\nHEADERS = {\n \"user-agent\": \"Mozilla/5.0\",\n}\nICON_MAP = {\n \"Recycling\": \"mdi:recycle\",\n \"Refuse\": \"mdi:trash-can\",\n \"Garden\": \"mdi:leaf\",\n}\n\n\nclass Source:\n def __init__(self, postcode, address):\n self._postcode = postcode\n self._address = address\n\n def fetch(self):\n if self._postcode is None or self._address is None:\n raise ValueError(\"Either postcode or address is None\")\n\n s = requests.Session()\n\n postcode_search_request = s.get(\n f\"https://www.adur-worthing.gov.uk/bin-day/?brlu-address-postcode={self._postcode}&return-url=/bin-day/&action=search\",\n headers=HEADERS,\n )\n html_addresses = postcode_search_request.content\n addresses = bs4.BeautifulSoup(html_addresses, \"html.parser\")\n addresses_select = addresses.find(\"select\", {\"id\": \"brlu-selected-address\"})\n\n found_address = None\n for address in addresses_select.find_all(\"option\"):\n if self._address.upper() in address.get_text().upper():\n found_address = address\n\n if found_address is None:\n raise ValueError(\"Address not found\")\n\n collections_request = s.get(\n f\"https://www.adur-worthing.gov.uk/bin-day/?brlu-selected-address={address['value']}&return-url=/bin-day/\",\n headers=HEADERS,\n )\n html_collections = collections_request.content\n bin_collections = bs4.BeautifulSoup(html_collections, \"html.parser\")\n\n bin_days_table = bin_collections.find(\"table\", class_=\"bin-days\")\n bin_days_table_body = 
bin_days_table.find(\"tbody\")\n bin_days_by_type = bin_days_table_body.find_all(\"tr\")\n\n entries = []\n\n for bin_by_type in bin_days_by_type:\n bin_type = bin_by_type.find(\"th\").text\n icon = ICON_MAP.get(bin_type)\n bin_days = bin_by_type.find_all(\"td\")[-1].get_text(separator=\"\\n\")\n for bin_day in bin_days.split(\"\\n\"):\n bin_datetime = datetime.strptime(bin_day, \"%A %d %b %Y\").date()\n entries.append(Collection(t=bin_type, date=bin_datetime, icon=icon))\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/adur_worthing_gov_uk.py"}]}
1,405
216
gh_patches_debug_30705
rasdani/github-patches
git_diff
getredash__redash-3506
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- No invitation link after adding a new user when email server is not configured <!-- We use GitHub only for bug reports 🐛 Anything else should be posted to https://discuss.redash.io 👫 🚨For support, help & questions use https://discuss.redash.io/c/support 💡For feature requests & ideas use https://discuss.redash.io/c/feature-requests **Found a security vulnerability?** Please email [email protected] to report any security vulnerabilities. We will acknowledge receipt of your vulnerability and strive to send you regular updates about our progress. If you're curious about the status of your disclosure please feel free to email us again. If you want to encrypt your disclosure email, you can use this PGP key. --> ### Issue Summary After #3267 and #3229 admins no longer receive a link to invite users when the server doesn't have an email server configured. ### Steps to Reproduce 1. Open `/users/new` in a server without email configured (e.g.: [preview](https://redash-preview.netlify.com/users/new)) 2. Create a new user and check that no link is provided. 3. Open the pending invitation User Profile and notice there's only the "Resend" option (again it leads to nowhere) Any other info e.g. Why do you consider this to be a bug? What did you expect to happen instead? The admin should receive a link to send to the user manually. ### Technical details: * Redash Version: Latest (Redash Preview) * Browser/OS: -- * How did you install Redash: -- --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `redash/handlers/users.py` Content: ``` 1 import re 2 import time 3 from flask import request 4 from flask_restful import abort 5 from flask_login import current_user, login_user 6 from funcy import project 7 from sqlalchemy.exc import IntegrityError 8 from disposable_email_domains import blacklist 9 from funcy import partial 10 11 from redash import models 12 from redash.permissions import require_permission, require_admin_or_owner, is_admin_or_owner, \ 13 require_permission_or_owner, require_admin 14 from redash.handlers.base import BaseResource, require_fields, get_object_or_404, paginate, order_results as _order_results 15 16 from redash.authentication.account import invite_link_for_user, send_invite_email, send_password_reset_email, send_verify_email 17 from redash.settings import parse_boolean 18 19 20 # Ordering map for relationships 21 order_map = { 22 'name': 'name', 23 '-name': '-name', 24 'active_at': 'active_at', 25 '-active_at': '-active_at', 26 'created_at': 'created_at', 27 '-created_at': '-created_at', 28 'groups': 'group_ids', 29 '-groups': '-group_ids', 30 } 31 32 order_results = partial( 33 _order_results, 34 default_order='-created_at', 35 allowed_orders=order_map, 36 ) 37 38 39 def invite_user(org, inviter, user): 40 invite_url = invite_link_for_user(user) 41 send_invite_email(inviter, user, invite_url, org) 42 43 44 class UserListResource(BaseResource): 45 def get_users(self, disabled, pending, search_term): 46 if disabled: 47 users = models.User.all_disabled(self.current_org) 48 else: 49 users = models.User.all(self.current_org) 50 51 if pending is not None: 52 users = models.User.pending(users, pending) 53 54 if search_term: 55 users = models.User.search(users, search_term) 56 self.record_event({ 57 'action': 'search', 58 'object_type': 'user', 59 'term': search_term, 60 'pending': 
pending, 61 }) 62 else: 63 self.record_event({ 64 'action': 'list', 65 'object_type': 'user', 66 'pending': pending, 67 }) 68 69 # order results according to passed order parameter, 70 # special-casing search queries where the database 71 # provides an order by search rank 72 return order_results(users, fallback=bool(search_term)) 73 74 @require_permission('list_users') 75 def get(self): 76 page = request.args.get('page', 1, type=int) 77 page_size = request.args.get('page_size', 25, type=int) 78 79 groups = {group.id: group for group in models.Group.all(self.current_org)} 80 81 def serialize_user(user): 82 d = user.to_dict() 83 user_groups = [] 84 for group_id in set(d['groups']): 85 group = groups.get(group_id) 86 87 if group: 88 user_groups.append({'id': group.id, 'name': group.name}) 89 90 d['groups'] = user_groups 91 92 return d 93 94 search_term = request.args.get('q', '') 95 96 disabled = request.args.get('disabled', 'false') # get enabled users by default 97 disabled = parse_boolean(disabled) 98 99 pending = request.args.get('pending', None) # get both active and pending by default 100 if pending is not None: 101 pending = parse_boolean(pending) 102 103 users = self.get_users(disabled, pending, search_term) 104 105 return paginate(users, page, page_size, serialize_user) 106 107 @require_admin 108 def post(self): 109 req = request.get_json(force=True) 110 require_fields(req, ('name', 'email')) 111 112 if '@' not in req['email']: 113 abort(400, message='Bad email address.') 114 name, domain = req['email'].split('@', 1) 115 116 if domain.lower() in blacklist or domain.lower() == 'qq.com': 117 abort(400, message='Bad email address.') 118 119 user = models.User(org=self.current_org, 120 name=req['name'], 121 email=req['email'], 122 is_invitation_pending=True, 123 group_ids=[self.current_org.default_group.id]) 124 125 try: 126 models.db.session.add(user) 127 models.db.session.commit() 128 except IntegrityError as e: 129 if "email" in e.message: 130 abort(400, message='Email already taken.') 131 abort(500) 132 133 self.record_event({ 134 'action': 'create', 135 'object_id': user.id, 136 'object_type': 'user' 137 }) 138 139 should_send_invitation = 'no_invite' not in request.args 140 if should_send_invitation: 141 invite_user(self.current_org, self.current_user, user) 142 143 return user.to_dict() 144 145 146 class UserInviteResource(BaseResource): 147 @require_admin 148 def post(self, user_id): 149 user = models.User.get_by_id_and_org(user_id, self.current_org) 150 invite_url = invite_user(self.current_org, self.current_user, user) 151 152 return user.to_dict() 153 154 155 class UserResetPasswordResource(BaseResource): 156 @require_admin 157 def post(self, user_id): 158 user = models.User.get_by_id_and_org(user_id, self.current_org) 159 if user.is_disabled: 160 abort(404, message='Not found') 161 reset_link = send_password_reset_email(user) 162 163 return { 164 'reset_link': reset_link, 165 } 166 167 168 class UserRegenerateApiKeyResource(BaseResource): 169 def post(self, user_id): 170 user = models.User.get_by_id_and_org(user_id, self.current_org) 171 if user.is_disabled: 172 abort(404, message='Not found') 173 if not is_admin_or_owner(user_id): 174 abort(403) 175 176 user.regenerate_api_key() 177 models.db.session.commit() 178 179 self.record_event({ 180 'action': 'regnerate_api_key', 181 'object_id': user.id, 182 'object_type': 'user' 183 }) 184 185 return user.to_dict(with_api_key=True) 186 187 188 class UserResource(BaseResource): 189 def get(self, user_id): 190 
require_permission_or_owner('list_users', user_id) 191 user = get_object_or_404(models.User.get_by_id_and_org, user_id, self.current_org) 192 193 self.record_event({ 194 'action': 'view', 195 'object_id': user_id, 196 'object_type': 'user', 197 }) 198 199 return user.to_dict(with_api_key=is_admin_or_owner(user_id)) 200 201 def post(self, user_id): 202 require_admin_or_owner(user_id) 203 user = models.User.get_by_id_and_org(user_id, self.current_org) 204 205 req = request.get_json(True) 206 207 params = project(req, ('email', 'name', 'password', 'old_password', 'groups')) 208 209 if 'password' in params and 'old_password' not in params: 210 abort(403, message="Must provide current password to update password.") 211 212 if 'old_password' in params and not user.verify_password(params['old_password']): 213 abort(403, message="Incorrect current password.") 214 215 if 'password' in params: 216 user.hash_password(params.pop('password')) 217 params.pop('old_password') 218 219 if 'groups' in params and not self.current_user.has_permission('admin'): 220 abort(403, message="Must be admin to change groups membership.") 221 222 if 'email' in params: 223 _, domain = params['email'].split('@', 1) 224 225 if domain.lower() in blacklist or domain.lower() == 'qq.com': 226 abort(400, message='Bad email address.') 227 228 email_changed = 'email' in params and params['email'] != user.email 229 if email_changed: 230 user.is_email_verified = False 231 232 try: 233 self.update_model(user, params) 234 models.db.session.commit() 235 236 if email_changed: 237 send_verify_email(user, self.current_org) 238 239 # The user has updated their email or password. This should invalidate all _other_ sessions, 240 # forcing them to log in again. Since we don't want to force _this_ session to have to go 241 # through login again, we call `login_user` in order to update the session with the new identity details. 242 if current_user.id == user.id: 243 login_user(user, remember=True) 244 except IntegrityError as e: 245 if "email" in e.message: 246 message = "Email already taken." 247 else: 248 message = "Error updating record" 249 250 abort(400, message=message) 251 252 self.record_event({ 253 'action': 'edit', 254 'object_id': user.id, 255 'object_type': 'user', 256 'updated_fields': params.keys() 257 }) 258 259 return user.to_dict(with_api_key=is_admin_or_owner(user_id)) 260 261 @require_admin 262 def delete(self, user_id): 263 user = models.User.get_by_id_and_org(user_id, self.current_org) 264 # admin cannot delete self; current user is an admin (`@require_admin`) 265 # so just check user id 266 if user.id == current_user.id: 267 abort(403, message="You cannot delete your own account. " 268 "Please ask another admin to do this for you.") 269 elif not user.is_invitation_pending: 270 abort(403, message="You cannot delete activated users. " 271 "Please disable the user instead.") 272 models.db.session.delete(user) 273 models.db.session.commit() 274 275 return user.to_dict(with_api_key=is_admin_or_owner(user_id)) 276 277 278 class UserDisableResource(BaseResource): 279 @require_admin 280 def post(self, user_id): 281 user = models.User.get_by_id_and_org(user_id, self.current_org) 282 # admin cannot disable self; current user is an admin (`@require_admin`) 283 # so just check user id 284 if user.id == current_user.id: 285 abort(403, message="You cannot disable your own account. 
" 286 "Please ask another admin to do this for you.") 287 user.disable() 288 models.db.session.commit() 289 290 return user.to_dict(with_api_key=is_admin_or_owner(user_id)) 291 292 @require_admin 293 def delete(self, user_id): 294 user = models.User.get_by_id_and_org(user_id, self.current_org) 295 user.enable() 296 models.db.session.commit() 297 298 return user.to_dict(with_api_key=is_admin_or_owner(user_id)) 299 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/redash/handlers/users.py b/redash/handlers/users.py --- a/redash/handlers/users.py +++ b/redash/handlers/users.py @@ -15,6 +15,7 @@ from redash.authentication.account import invite_link_for_user, send_invite_email, send_password_reset_email, send_verify_email from redash.settings import parse_boolean +from redash import settings # Ordering map for relationships @@ -36,9 +37,17 @@ ) -def invite_user(org, inviter, user): +def invite_user(org, inviter, user, send_email=True): + email_configured = settings.MAIL_DEFAULT_SENDER is not None + d = user.to_dict() + invite_url = invite_link_for_user(user) - send_invite_email(inviter, user, invite_url, org) + if email_configured and send_email: + send_invite_email(inviter, user, invite_url, org) + else: + d['invite_link'] = invite_url + + return d class UserListResource(BaseResource): @@ -137,19 +146,14 @@ }) should_send_invitation = 'no_invite' not in request.args - if should_send_invitation: - invite_user(self.current_org, self.current_user, user) - - return user.to_dict() + return invite_user(self.current_org, self.current_user, user, send_email=should_send_invitation) class UserInviteResource(BaseResource): @require_admin def post(self, user_id): user = models.User.get_by_id_and_org(user_id, self.current_org) - invite_url = invite_user(self.current_org, self.current_user, user) - - return user.to_dict() + return invite_user(self.current_org, self.current_user, user) class UserResetPasswordResource(BaseResource):
{"golden_diff": "diff --git a/redash/handlers/users.py b/redash/handlers/users.py\n--- a/redash/handlers/users.py\n+++ b/redash/handlers/users.py\n@@ -15,6 +15,7 @@\n \n from redash.authentication.account import invite_link_for_user, send_invite_email, send_password_reset_email, send_verify_email\n from redash.settings import parse_boolean\n+from redash import settings\n \n \n # Ordering map for relationships\n@@ -36,9 +37,17 @@\n )\n \n \n-def invite_user(org, inviter, user):\n+def invite_user(org, inviter, user, send_email=True):\n+ email_configured = settings.MAIL_DEFAULT_SENDER is not None\n+ d = user.to_dict()\n+\n invite_url = invite_link_for_user(user)\n- send_invite_email(inviter, user, invite_url, org)\n+ if email_configured and send_email:\n+ send_invite_email(inviter, user, invite_url, org)\n+ else:\n+ d['invite_link'] = invite_url\n+\n+ return d\n \n \n class UserListResource(BaseResource):\n@@ -137,19 +146,14 @@\n })\n \n should_send_invitation = 'no_invite' not in request.args\n- if should_send_invitation:\n- invite_user(self.current_org, self.current_user, user)\n-\n- return user.to_dict()\n+ return invite_user(self.current_org, self.current_user, user, send_email=should_send_invitation)\n \n \n class UserInviteResource(BaseResource):\n @require_admin\n def post(self, user_id):\n user = models.User.get_by_id_and_org(user_id, self.current_org)\n- invite_url = invite_user(self.current_org, self.current_user, user)\n-\n- return user.to_dict()\n+ return invite_user(self.current_org, self.current_user, user)\n \n \n class UserResetPasswordResource(BaseResource):\n", "issue": "No invitation link after adding a new user when email server is not configured\n<!--\r\n\r\nWe use GitHub only for bug reports \ud83d\udc1b\r\n\r\nAnything else should be posted to https://discuss.redash.io \ud83d\udc6b\r\n\r\n\ud83d\udea8For support, help & questions use https://discuss.redash.io/c/support\r\n\ud83d\udca1For feature requests & ideas use https://discuss.redash.io/c/feature-requests\r\n\r\n**Found a security vulnerability?** Please email [email protected] to report any security vulnerabilities. We will acknowledge receipt of your vulnerability and strive to send you regular updates about our progress. If you're curious about the status of your disclosure please feel free to email us again. If you want to encrypt your disclosure email, you can use this PGP key.\r\n\r\n-->\r\n\r\n### Issue Summary\r\n\r\nAfter #3267 and #3229 admins no longer receive a link to invite users when the server doesn't have an email server configured.\r\n\r\n### Steps to Reproduce\r\n\r\n1. Open `/users/new` in a server without email configured (e.g.: [preview](https://redash-preview.netlify.com/users/new))\r\n2. Create a new user and check that no link is provided.\r\n3. Open the pending invitation User Profile and notice there's only the \"Resend\" option (again it leads to nowhere)\r\n\r\nAny other info e.g. Why do you consider this to be a bug? 
What did you expect to happen instead?\r\nThe admin should receive a link to send to the user manually.\r\n\r\n### Technical details:\r\n\r\n* Redash Version: Latest (Redash Preview)\r\n* Browser/OS: --\r\n* How did you install Redash: --\r\n\n", "before_files": [{"content": "import re\nimport time\nfrom flask import request\nfrom flask_restful import abort\nfrom flask_login import current_user, login_user\nfrom funcy import project\nfrom sqlalchemy.exc import IntegrityError\nfrom disposable_email_domains import blacklist\nfrom funcy import partial\n\nfrom redash import models\nfrom redash.permissions import require_permission, require_admin_or_owner, is_admin_or_owner, \\\n require_permission_or_owner, require_admin\nfrom redash.handlers.base import BaseResource, require_fields, get_object_or_404, paginate, order_results as _order_results\n\nfrom redash.authentication.account import invite_link_for_user, send_invite_email, send_password_reset_email, send_verify_email\nfrom redash.settings import parse_boolean\n\n\n# Ordering map for relationships\norder_map = {\n 'name': 'name',\n '-name': '-name',\n 'active_at': 'active_at',\n '-active_at': '-active_at',\n 'created_at': 'created_at',\n '-created_at': '-created_at',\n 'groups': 'group_ids',\n '-groups': '-group_ids',\n}\n\norder_results = partial(\n _order_results,\n default_order='-created_at',\n allowed_orders=order_map,\n)\n\n\ndef invite_user(org, inviter, user):\n invite_url = invite_link_for_user(user)\n send_invite_email(inviter, user, invite_url, org)\n\n\nclass UserListResource(BaseResource):\n def get_users(self, disabled, pending, search_term):\n if disabled:\n users = models.User.all_disabled(self.current_org)\n else:\n users = models.User.all(self.current_org)\n\n if pending is not None:\n users = models.User.pending(users, pending)\n\n if search_term:\n users = models.User.search(users, search_term)\n self.record_event({\n 'action': 'search',\n 'object_type': 'user',\n 'term': search_term,\n 'pending': pending,\n })\n else:\n self.record_event({\n 'action': 'list',\n 'object_type': 'user',\n 'pending': pending,\n })\n\n # order results according to passed order parameter,\n # special-casing search queries where the database\n # provides an order by search rank\n return order_results(users, fallback=bool(search_term))\n\n @require_permission('list_users')\n def get(self):\n page = request.args.get('page', 1, type=int)\n page_size = request.args.get('page_size', 25, type=int)\n\n groups = {group.id: group for group in models.Group.all(self.current_org)}\n\n def serialize_user(user):\n d = user.to_dict()\n user_groups = []\n for group_id in set(d['groups']):\n group = groups.get(group_id)\n\n if group:\n user_groups.append({'id': group.id, 'name': group.name})\n\n d['groups'] = user_groups\n\n return d\n\n search_term = request.args.get('q', '')\n\n disabled = request.args.get('disabled', 'false') # get enabled users by default\n disabled = parse_boolean(disabled)\n\n pending = request.args.get('pending', None) # get both active and pending by default\n if pending is not None:\n pending = parse_boolean(pending)\n\n users = self.get_users(disabled, pending, search_term)\n\n return paginate(users, page, page_size, serialize_user)\n\n @require_admin\n def post(self):\n req = request.get_json(force=True)\n require_fields(req, ('name', 'email'))\n\n if '@' not in req['email']:\n abort(400, message='Bad email address.')\n name, domain = req['email'].split('@', 1)\n\n if domain.lower() in blacklist or domain.lower() == 'qq.com':\n 
abort(400, message='Bad email address.')\n\n user = models.User(org=self.current_org,\n name=req['name'],\n email=req['email'],\n is_invitation_pending=True,\n group_ids=[self.current_org.default_group.id])\n\n try:\n models.db.session.add(user)\n models.db.session.commit()\n except IntegrityError as e:\n if \"email\" in e.message:\n abort(400, message='Email already taken.')\n abort(500)\n\n self.record_event({\n 'action': 'create',\n 'object_id': user.id,\n 'object_type': 'user'\n })\n\n should_send_invitation = 'no_invite' not in request.args\n if should_send_invitation:\n invite_user(self.current_org, self.current_user, user)\n\n return user.to_dict()\n\n\nclass UserInviteResource(BaseResource):\n @require_admin\n def post(self, user_id):\n user = models.User.get_by_id_and_org(user_id, self.current_org)\n invite_url = invite_user(self.current_org, self.current_user, user)\n\n return user.to_dict()\n\n\nclass UserResetPasswordResource(BaseResource):\n @require_admin\n def post(self, user_id):\n user = models.User.get_by_id_and_org(user_id, self.current_org)\n if user.is_disabled:\n abort(404, message='Not found')\n reset_link = send_password_reset_email(user)\n\n return {\n 'reset_link': reset_link,\n }\n\n\nclass UserRegenerateApiKeyResource(BaseResource):\n def post(self, user_id):\n user = models.User.get_by_id_and_org(user_id, self.current_org)\n if user.is_disabled:\n abort(404, message='Not found')\n if not is_admin_or_owner(user_id):\n abort(403)\n\n user.regenerate_api_key()\n models.db.session.commit()\n\n self.record_event({\n 'action': 'regnerate_api_key',\n 'object_id': user.id,\n 'object_type': 'user'\n })\n\n return user.to_dict(with_api_key=True)\n\n\nclass UserResource(BaseResource):\n def get(self, user_id):\n require_permission_or_owner('list_users', user_id)\n user = get_object_or_404(models.User.get_by_id_and_org, user_id, self.current_org)\n\n self.record_event({\n 'action': 'view',\n 'object_id': user_id,\n 'object_type': 'user',\n })\n\n return user.to_dict(with_api_key=is_admin_or_owner(user_id))\n\n def post(self, user_id):\n require_admin_or_owner(user_id)\n user = models.User.get_by_id_and_org(user_id, self.current_org)\n\n req = request.get_json(True)\n\n params = project(req, ('email', 'name', 'password', 'old_password', 'groups'))\n\n if 'password' in params and 'old_password' not in params:\n abort(403, message=\"Must provide current password to update password.\")\n\n if 'old_password' in params and not user.verify_password(params['old_password']):\n abort(403, message=\"Incorrect current password.\")\n\n if 'password' in params:\n user.hash_password(params.pop('password'))\n params.pop('old_password')\n\n if 'groups' in params and not self.current_user.has_permission('admin'):\n abort(403, message=\"Must be admin to change groups membership.\")\n\n if 'email' in params:\n _, domain = params['email'].split('@', 1)\n\n if domain.lower() in blacklist or domain.lower() == 'qq.com':\n abort(400, message='Bad email address.')\n\n email_changed = 'email' in params and params['email'] != user.email\n if email_changed:\n user.is_email_verified = False\n\n try:\n self.update_model(user, params)\n models.db.session.commit()\n\n if email_changed:\n send_verify_email(user, self.current_org)\n\n # The user has updated their email or password. This should invalidate all _other_ sessions,\n # forcing them to log in again. 
Since we don't want to force _this_ session to have to go\n # through login again, we call `login_user` in order to update the session with the new identity details.\n if current_user.id == user.id:\n login_user(user, remember=True)\n except IntegrityError as e:\n if \"email\" in e.message:\n message = \"Email already taken.\"\n else:\n message = \"Error updating record\"\n\n abort(400, message=message)\n\n self.record_event({\n 'action': 'edit',\n 'object_id': user.id,\n 'object_type': 'user',\n 'updated_fields': params.keys()\n })\n\n return user.to_dict(with_api_key=is_admin_or_owner(user_id))\n\n @require_admin\n def delete(self, user_id):\n user = models.User.get_by_id_and_org(user_id, self.current_org)\n # admin cannot delete self; current user is an admin (`@require_admin`)\n # so just check user id\n if user.id == current_user.id:\n abort(403, message=\"You cannot delete your own account. \"\n \"Please ask another admin to do this for you.\")\n elif not user.is_invitation_pending:\n abort(403, message=\"You cannot delete activated users. \"\n \"Please disable the user instead.\")\n models.db.session.delete(user)\n models.db.session.commit()\n\n return user.to_dict(with_api_key=is_admin_or_owner(user_id))\n\n\nclass UserDisableResource(BaseResource):\n @require_admin\n def post(self, user_id):\n user = models.User.get_by_id_and_org(user_id, self.current_org)\n # admin cannot disable self; current user is an admin (`@require_admin`)\n # so just check user id\n if user.id == current_user.id:\n abort(403, message=\"You cannot disable your own account. \"\n \"Please ask another admin to do this for you.\")\n user.disable()\n models.db.session.commit()\n\n return user.to_dict(with_api_key=is_admin_or_owner(user_id))\n\n @require_admin\n def delete(self, user_id):\n user = models.User.get_by_id_and_org(user_id, self.current_org)\n user.enable()\n models.db.session.commit()\n\n return user.to_dict(with_api_key=is_admin_or_owner(user_id))\n", "path": "redash/handlers/users.py"}], "after_files": [{"content": "import re\nimport time\nfrom flask import request\nfrom flask_restful import abort\nfrom flask_login import current_user, login_user\nfrom funcy import project\nfrom sqlalchemy.exc import IntegrityError\nfrom disposable_email_domains import blacklist\nfrom funcy import partial\n\nfrom redash import models\nfrom redash.permissions import require_permission, require_admin_or_owner, is_admin_or_owner, \\\n require_permission_or_owner, require_admin\nfrom redash.handlers.base import BaseResource, require_fields, get_object_or_404, paginate, order_results as _order_results\n\nfrom redash.authentication.account import invite_link_for_user, send_invite_email, send_password_reset_email, send_verify_email\nfrom redash.settings import parse_boolean\nfrom redash import settings\n\n\n# Ordering map for relationships\norder_map = {\n 'name': 'name',\n '-name': '-name',\n 'active_at': 'active_at',\n '-active_at': '-active_at',\n 'created_at': 'created_at',\n '-created_at': '-created_at',\n 'groups': 'group_ids',\n '-groups': '-group_ids',\n}\n\norder_results = partial(\n _order_results,\n default_order='-created_at',\n allowed_orders=order_map,\n)\n\n\ndef invite_user(org, inviter, user, send_email=True):\n email_configured = settings.MAIL_DEFAULT_SENDER is not None\n d = user.to_dict()\n\n invite_url = invite_link_for_user(user)\n if email_configured and send_email:\n send_invite_email(inviter, user, invite_url, org)\n else:\n d['invite_link'] = invite_url\n\n return d\n\n\nclass 
UserListResource(BaseResource):\n def get_users(self, disabled, pending, search_term):\n if disabled:\n users = models.User.all_disabled(self.current_org)\n else:\n users = models.User.all(self.current_org)\n\n if pending is not None:\n users = models.User.pending(users, pending)\n\n if search_term:\n users = models.User.search(users, search_term)\n self.record_event({\n 'action': 'search',\n 'object_type': 'user',\n 'term': search_term,\n 'pending': pending,\n })\n else:\n self.record_event({\n 'action': 'list',\n 'object_type': 'user',\n 'pending': pending,\n })\n\n # order results according to passed order parameter,\n # special-casing search queries where the database\n # provides an order by search rank\n return order_results(users, fallback=bool(search_term))\n\n @require_permission('list_users')\n def get(self):\n page = request.args.get('page', 1, type=int)\n page_size = request.args.get('page_size', 25, type=int)\n\n groups = {group.id: group for group in models.Group.all(self.current_org)}\n\n def serialize_user(user):\n d = user.to_dict()\n user_groups = []\n for group_id in set(d['groups']):\n group = groups.get(group_id)\n\n if group:\n user_groups.append({'id': group.id, 'name': group.name})\n\n d['groups'] = user_groups\n\n return d\n\n search_term = request.args.get('q', '')\n\n disabled = request.args.get('disabled', 'false') # get enabled users by default\n disabled = parse_boolean(disabled)\n\n pending = request.args.get('pending', None) # get both active and pending by default\n if pending is not None:\n pending = parse_boolean(pending)\n\n users = self.get_users(disabled, pending, search_term)\n\n return paginate(users, page, page_size, serialize_user)\n\n @require_admin\n def post(self):\n req = request.get_json(force=True)\n require_fields(req, ('name', 'email'))\n\n if '@' not in req['email']:\n abort(400, message='Bad email address.')\n name, domain = req['email'].split('@', 1)\n\n if domain.lower() in blacklist or domain.lower() == 'qq.com':\n abort(400, message='Bad email address.')\n\n user = models.User(org=self.current_org,\n name=req['name'],\n email=req['email'],\n is_invitation_pending=True,\n group_ids=[self.current_org.default_group.id])\n\n try:\n models.db.session.add(user)\n models.db.session.commit()\n except IntegrityError as e:\n if \"email\" in e.message:\n abort(400, message='Email already taken.')\n abort(500)\n\n self.record_event({\n 'action': 'create',\n 'object_id': user.id,\n 'object_type': 'user'\n })\n\n should_send_invitation = 'no_invite' not in request.args\n return invite_user(self.current_org, self.current_user, user, send_email=should_send_invitation)\n\n\nclass UserInviteResource(BaseResource):\n @require_admin\n def post(self, user_id):\n user = models.User.get_by_id_and_org(user_id, self.current_org)\n return invite_user(self.current_org, self.current_user, user)\n\n\nclass UserResetPasswordResource(BaseResource):\n @require_admin\n def post(self, user_id):\n user = models.User.get_by_id_and_org(user_id, self.current_org)\n if user.is_disabled:\n abort(404, message='Not found')\n reset_link = send_password_reset_email(user)\n\n return {\n 'reset_link': reset_link,\n }\n\n\nclass UserRegenerateApiKeyResource(BaseResource):\n def post(self, user_id):\n user = models.User.get_by_id_and_org(user_id, self.current_org)\n if user.is_disabled:\n abort(404, message='Not found')\n if not is_admin_or_owner(user_id):\n abort(403)\n\n user.regenerate_api_key()\n models.db.session.commit()\n\n self.record_event({\n 'action': 
'regnerate_api_key',\n 'object_id': user.id,\n 'object_type': 'user'\n })\n\n return user.to_dict(with_api_key=True)\n\n\nclass UserResource(BaseResource):\n def get(self, user_id):\n require_permission_or_owner('list_users', user_id)\n user = get_object_or_404(models.User.get_by_id_and_org, user_id, self.current_org)\n\n self.record_event({\n 'action': 'view',\n 'object_id': user_id,\n 'object_type': 'user',\n })\n\n return user.to_dict(with_api_key=is_admin_or_owner(user_id))\n\n def post(self, user_id):\n require_admin_or_owner(user_id)\n user = models.User.get_by_id_and_org(user_id, self.current_org)\n\n req = request.get_json(True)\n\n params = project(req, ('email', 'name', 'password', 'old_password', 'groups'))\n\n if 'password' in params and 'old_password' not in params:\n abort(403, message=\"Must provide current password to update password.\")\n\n if 'old_password' in params and not user.verify_password(params['old_password']):\n abort(403, message=\"Incorrect current password.\")\n\n if 'password' in params:\n user.hash_password(params.pop('password'))\n params.pop('old_password')\n\n if 'groups' in params and not self.current_user.has_permission('admin'):\n abort(403, message=\"Must be admin to change groups membership.\")\n\n if 'email' in params:\n _, domain = params['email'].split('@', 1)\n\n if domain.lower() in blacklist or domain.lower() == 'qq.com':\n abort(400, message='Bad email address.')\n\n email_changed = 'email' in params and params['email'] != user.email\n if email_changed:\n user.is_email_verified = False\n\n try:\n self.update_model(user, params)\n models.db.session.commit()\n\n if email_changed:\n send_verify_email(user, self.current_org)\n\n # The user has updated their email or password. This should invalidate all _other_ sessions,\n # forcing them to log in again. Since we don't want to force _this_ session to have to go\n # through login again, we call `login_user` in order to update the session with the new identity details.\n if current_user.id == user.id:\n login_user(user, remember=True)\n except IntegrityError as e:\n if \"email\" in e.message:\n message = \"Email already taken.\"\n else:\n message = \"Error updating record\"\n\n abort(400, message=message)\n\n self.record_event({\n 'action': 'edit',\n 'object_id': user.id,\n 'object_type': 'user',\n 'updated_fields': params.keys()\n })\n\n return user.to_dict(with_api_key=is_admin_or_owner(user_id))\n\n @require_admin\n def delete(self, user_id):\n user = models.User.get_by_id_and_org(user_id, self.current_org)\n # admin cannot delete self; current user is an admin (`@require_admin`)\n # so just check user id\n if user.id == current_user.id:\n abort(403, message=\"You cannot delete your own account. \"\n \"Please ask another admin to do this for you.\")\n elif not user.is_invitation_pending:\n abort(403, message=\"You cannot delete activated users. \"\n \"Please disable the user instead.\")\n models.db.session.delete(user)\n models.db.session.commit()\n\n return user.to_dict(with_api_key=is_admin_or_owner(user_id))\n\n\nclass UserDisableResource(BaseResource):\n @require_admin\n def post(self, user_id):\n user = models.User.get_by_id_and_org(user_id, self.current_org)\n # admin cannot disable self; current user is an admin (`@require_admin`)\n # so just check user id\n if user.id == current_user.id:\n abort(403, message=\"You cannot disable your own account. 
\"\n \"Please ask another admin to do this for you.\")\n user.disable()\n models.db.session.commit()\n\n return user.to_dict(with_api_key=is_admin_or_owner(user_id))\n\n @require_admin\n def delete(self, user_id):\n user = models.User.get_by_id_and_org(user_id, self.current_org)\n user.enable()\n models.db.session.commit()\n\n return user.to_dict(with_api_key=is_admin_or_owner(user_id))\n", "path": "redash/handlers/users.py"}]}
3,604
417
gh_patches_debug_44579
rasdani/github-patches
git_diff
great-expectations__great_expectations-858
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- great_expectations.yml has broken links Working off the latest version: ``` → great_expectations --version great_expectations, version 0.8.4.post0 ``` YAML file generated from `great_expectations init` has the following contents: ```yaml # Welcome to Great Expectations! Always know what to expect from your data. # # Here you can define datasources, generators, integrations and more. This file # is intended to be committed to your repo. For help with configuration please: # - Read our docs: https://docs.greatexpectations.io/en/0_8_4_post0/reference/data_context_reference.html#configuration # - Join our slack channel: http://greatexpectations.io/slack # # NOTE: GE uses the names of configured `datasources` and `generators` to manage # how `expectations` and other artifacts are stored in the `expectations/` and # `datasources/` folders. If you need to rename an existing `datasource` or # `generator`, be sure to also update the relevant directory names. config_version: 1 # Datasources tell Great Expectations where your data lives and how to get it. # You can use the CLI command `great_expectations add-datasource` to help you # add a new datasource. Read more at https://docs.greatexpectations.io/en/0_8_4_post0/features/datasource.html datasources: test: class_name: PandasDatasource data_asset_type: class_name: PandasDataset generators: default: class_name: SubdirReaderGenerator base_directory: ../misc reader_options: sep: engine: python ``` All the docs linked throughout the yaml file, i.e. https://docs.greatexpectations.io/en/0_8_4_post0/features/datasource.html, do not exist. Missing link/reference in docs? https://docs.greatexpectations.io/en/latest/reference/data_context_reference.html#validation-operators ![image](https://user-images.githubusercontent.com/11096727/68488005-24e6e500-0212-11ea-8e95-013d74a52096.png) Assuming the underlined part should have a hyperlink? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `great_expectations/data_context/templates.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 from great_expectations import rtd_url_ge_version 3 4 PROJECT_HELP_COMMENT = """ 5 # Welcome to Great Expectations! Always know what to expect from your data. 6 # 7 # Here you can define datasources, generators, integrations and more. This file 8 # is intended to be committed to your repo. For help with configuration please: 9 # - Read our docs: https://docs.greatexpectations.io/en/""" + rtd_url_ge_version + """/reference/data_context_reference.html#configuration 10 # - Join our slack channel: http://greatexpectations.io/slack 11 # 12 # NOTE: GE uses the names of configured `datasources` and `generators` to manage 13 # how `expectations` and other artifacts are stored in the `expectations/` and 14 # `datasources/` folders. If you need to rename an existing `datasource` or 15 # `generator`, be sure to also update the relevant directory names. 16 17 config_version: 1 18 19 # Datasources tell Great Expectations where your data lives and how to get it. 20 # You can use the CLI command `great_expectations add-datasource` to help you 21 # add a new datasource. 
Read more at https://docs.greatexpectations.io/en/""" + rtd_url_ge_version + """/features/datasource.html 22 datasources: {} 23 """ 24 25 CONFIG_VARIABLES_INTRO = """ 26 # This config file supports variable substitution which enables: 1) keeping 27 # secrets out of source control & 2) environment-based configuration changes 28 # such as staging vs prod. 29 # 30 # When GE encounters substitution syntax (like `my_key: ${my_value}` or 31 # `my_key: $my_value`) in the config file it will attempt to replace the value 32 # of `my_key` with the value from an environment variable `my_value` or a 33 # corresponding key read from the file specified using 34 # `config_variables_file_path`. Environment variables take precedence. 35 # 36 # If the substitution value comes from the config variables file, it can be a 37 # simple (non-nested) value or a nested value such as a dictionary. If it comes 38 # from an environment variable, it must be a simple value. Read more at: 39 # https://docs.greatexpectations.io/en/""" + rtd_url_ge_version + """/reference/data_context_reference.html#managing-environment-and-secrets""" 40 41 # This junky fake key keeps ruaml.yml from nuking the important comment block 42 CONFIG_VARIABLES_TEMPLATE = "ge_comment_preservation_key: 1" + CONFIG_VARIABLES_INTRO 43 44 PROJECT_OPTIONAL_CONFIG_COMMENT = CONFIG_VARIABLES_INTRO + """ 45 config_variables_file_path: uncommitted/config_variables.yml 46 47 # The plugins_directory will be added to your python path for custom modules 48 # used to override and extend Great Expectations. 49 plugins_directory: plugins/ 50 51 # Validation Operators are customizable workflows that bundle the validation of 52 # one or more expectation suites and subsequent actions. The example below 53 # stores validations and send a slack notification. To read more about 54 # customizing and extending these, read: https://docs.greatexpectations.io/en/""" + rtd_url_ge_version + """/features/validation_operators_and_actions.html 55 validation_operators: 56 action_list_operator: 57 # To learn how to configure sending Slack notifications during evaluation 58 # (and other customizations), read: https://docs.greatexpectations.io/en/""" + rtd_url_ge_version + """/reference/validation_operators/action_list_validation_operator.html 59 class_name: ActionListValidationOperator 60 action_list: 61 - name: store_validation_result 62 action: 63 class_name: StoreAction 64 - name: store_evaluation_params 65 action: 66 class_name: ExtractAndStoreEvaluationParamsAction 67 - name: update_data_docs 68 action: 69 class_name: UpdateDataDocsAction 70 - name: send_slack_notification_on_validation_result 71 action: 72 class_name: SlackNotificationAction 73 # put the actual webhook URL in the uncommitted/config_variables.yml file 74 slack_webhook: ${validation_notification_slack_webhook} 75 notify_on: all # possible values: "all", "failure", "success" 76 renderer: 77 module_name: great_expectations.render.renderer.slack_renderer 78 class_name: SlackRenderer 79 80 stores: 81 # Stores are configurable places to store things like Expectations, Validations 82 # Data Docs, and more. These are for advanced users only - most users can simply 83 # leave this section alone. 84 # 85 # Three stores are required: expectations, validations, and 86 # evaluation_parameters, and must exist with a valid store entry. Additional 87 # stores can be configured for uses such as data_docs, validation_operators, etc. 
88 expectations_store: 89 class_name: ExpectationsStore 90 store_backend: 91 class_name: FixedLengthTupleFilesystemStoreBackend 92 base_directory: expectations/ 93 94 validations_store: 95 class_name: ValidationsStore 96 store_backend: 97 class_name: FixedLengthTupleFilesystemStoreBackend 98 base_directory: uncommitted/validations/ 99 100 evaluation_parameter_store: 101 # Evaluation Parameters enable dynamic expectations. Read more here: 102 # https://docs.greatexpectations.io/en/""" + rtd_url_ge_version + """/reference/evaluation_parameters.html 103 class_name: InMemoryEvaluationParameterStore 104 105 expectations_store_name: expectations_store 106 validations_store_name: validations_store 107 evaluation_parameter_store_name: evaluation_parameter_store 108 109 data_docs_sites: 110 # Data Docs make it simple to visualize data quality in your project. These 111 # include Expectations, Validations & Profiles. The are built for all 112 # Datasources from JSON artifacts in the local repo including validations & 113 # profiles from the uncommitted directory. Read more at https://docs.greatexpectations.io/en/""" + rtd_url_ge_version + """/features/data_docs.html 114 local_site: 115 class_name: SiteBuilder 116 store_backend: 117 class_name: FixedLengthTupleFilesystemStoreBackend 118 base_directory: uncommitted/data_docs/local_site/ 119 site_index_builder: 120 show_cta_footer: True 121 """ 122 123 PROJECT_TEMPLATE = PROJECT_HELP_COMMENT + PROJECT_OPTIONAL_CONFIG_COMMENT 124 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/great_expectations/data_context/templates.py b/great_expectations/data_context/templates.py --- a/great_expectations/data_context/templates.py +++ b/great_expectations/data_context/templates.py @@ -6,7 +6,7 @@ # # Here you can define datasources, generators, integrations and more. This file # is intended to be committed to your repo. For help with configuration please: -# - Read our docs: https://docs.greatexpectations.io/en/""" + rtd_url_ge_version + """/reference/data_context_reference.html#configuration +# - Read our docs: https://docs.greatexpectations.io/en/latest/reference/data_context_reference.html#configuration # - Join our slack channel: http://greatexpectations.io/slack # # NOTE: GE uses the names of configured `datasources` and `generators` to manage @@ -18,7 +18,7 @@ # Datasources tell Great Expectations where your data lives and how to get it. # You can use the CLI command `great_expectations add-datasource` to help you -# add a new datasource. Read more at https://docs.greatexpectations.io/en/""" + rtd_url_ge_version + """/features/datasource.html +# add a new datasource. Read more at https://docs.greatexpectations.io/en/latest/features/datasource.html datasources: {} """ @@ -36,7 +36,7 @@ # If the substitution value comes from the config variables file, it can be a # simple (non-nested) value or a nested value such as a dictionary. If it comes # from an environment variable, it must be a simple value. Read more at: -# https://docs.greatexpectations.io/en/""" + rtd_url_ge_version + """/reference/data_context_reference.html#managing-environment-and-secrets""" +# https://docs.greatexpectations.io/en/latest/reference/data_context_reference.html#managing-environment-and-secrets""" # This junky fake key keeps ruaml.yml from nuking the important comment block CONFIG_VARIABLES_TEMPLATE = "ge_comment_preservation_key: 1" + CONFIG_VARIABLES_INTRO @@ -51,11 +51,11 @@ # Validation Operators are customizable workflows that bundle the validation of # one or more expectation suites and subsequent actions. The example below # stores validations and send a slack notification. To read more about -# customizing and extending these, read: https://docs.greatexpectations.io/en/""" + rtd_url_ge_version + """/features/validation_operators_and_actions.html +# customizing and extending these, read: https://docs.greatexpectations.io/en/latest/features/validation_operators_and_actions.html validation_operators: action_list_operator: # To learn how to configure sending Slack notifications during evaluation - # (and other customizations), read: https://docs.greatexpectations.io/en/""" + rtd_url_ge_version + """/reference/validation_operators/action_list_validation_operator.html + # (and other customizations), read: https://docs.greatexpectations.io/en/latest/reference/validation_operators/action_list_validation_operator.html class_name: ActionListValidationOperator action_list: - name: store_validation_result @@ -99,7 +99,7 @@ evaluation_parameter_store: # Evaluation Parameters enable dynamic expectations. Read more here: - # https://docs.greatexpectations.io/en/""" + rtd_url_ge_version + """/reference/evaluation_parameters.html + # https://docs.greatexpectations.io/en/latest/reference/evaluation_parameters.html class_name: InMemoryEvaluationParameterStore expectations_store_name: expectations_store @@ -110,7 +110,7 @@ # Data Docs make it simple to visualize data quality in your project. These # include Expectations, Validations & Profiles. 
The are built for all # Datasources from JSON artifacts in the local repo including validations & - # profiles from the uncommitted directory. Read more at https://docs.greatexpectations.io/en/""" + rtd_url_ge_version + """/features/data_docs.html + # profiles from the uncommitted directory. Read more at https://docs.greatexpectations.io/en/latest/features/data_docs.html local_site: class_name: SiteBuilder store_backend:
{"golden_diff": "diff --git a/great_expectations/data_context/templates.py b/great_expectations/data_context/templates.py\n--- a/great_expectations/data_context/templates.py\n+++ b/great_expectations/data_context/templates.py\n@@ -6,7 +6,7 @@\n # \n # Here you can define datasources, generators, integrations and more. This file\n # is intended to be committed to your repo. For help with configuration please:\n-# - Read our docs: https://docs.greatexpectations.io/en/\"\"\" + rtd_url_ge_version + \"\"\"/reference/data_context_reference.html#configuration\n+# - Read our docs: https://docs.greatexpectations.io/en/latest/reference/data_context_reference.html#configuration\n # - Join our slack channel: http://greatexpectations.io/slack\n #\n # NOTE: GE uses the names of configured `datasources` and `generators` to manage\n@@ -18,7 +18,7 @@\n \n # Datasources tell Great Expectations where your data lives and how to get it.\n # You can use the CLI command `great_expectations add-datasource` to help you\n-# add a new datasource. Read more at https://docs.greatexpectations.io/en/\"\"\" + rtd_url_ge_version + \"\"\"/features/datasource.html\n+# add a new datasource. Read more at https://docs.greatexpectations.io/en/latest/features/datasource.html\n datasources: {}\n \"\"\"\n \n@@ -36,7 +36,7 @@\n # If the substitution value comes from the config variables file, it can be a\n # simple (non-nested) value or a nested value such as a dictionary. If it comes\n # from an environment variable, it must be a simple value. Read more at:\n-# https://docs.greatexpectations.io/en/\"\"\" + rtd_url_ge_version + \"\"\"/reference/data_context_reference.html#managing-environment-and-secrets\"\"\"\n+# https://docs.greatexpectations.io/en/latest/reference/data_context_reference.html#managing-environment-and-secrets\"\"\"\n \n # This junky fake key keeps ruaml.yml from nuking the important comment block\n CONFIG_VARIABLES_TEMPLATE = \"ge_comment_preservation_key: 1\" + CONFIG_VARIABLES_INTRO\n@@ -51,11 +51,11 @@\n # Validation Operators are customizable workflows that bundle the validation of\n # one or more expectation suites and subsequent actions. The example below\n # stores validations and send a slack notification. To read more about\n-# customizing and extending these, read: https://docs.greatexpectations.io/en/\"\"\" + rtd_url_ge_version + \"\"\"/features/validation_operators_and_actions.html\n+# customizing and extending these, read: https://docs.greatexpectations.io/en/latest/features/validation_operators_and_actions.html\n validation_operators:\n action_list_operator:\n # To learn how to configure sending Slack notifications during evaluation\n- # (and other customizations), read: https://docs.greatexpectations.io/en/\"\"\" + rtd_url_ge_version + \"\"\"/reference/validation_operators/action_list_validation_operator.html\n+ # (and other customizations), read: https://docs.greatexpectations.io/en/latest/reference/validation_operators/action_list_validation_operator.html\n class_name: ActionListValidationOperator\n action_list:\n - name: store_validation_result\n@@ -99,7 +99,7 @@\n \n evaluation_parameter_store:\n # Evaluation Parameters enable dynamic expectations. 
Read more here:\n- # https://docs.greatexpectations.io/en/\"\"\" + rtd_url_ge_version + \"\"\"/reference/evaluation_parameters.html\n+ # https://docs.greatexpectations.io/en/latest/reference/evaluation_parameters.html\n class_name: InMemoryEvaluationParameterStore\n \n expectations_store_name: expectations_store\n@@ -110,7 +110,7 @@\n # Data Docs make it simple to visualize data quality in your project. These\n # include Expectations, Validations & Profiles. The are built for all\n # Datasources from JSON artifacts in the local repo including validations &\n- # profiles from the uncommitted directory. Read more at https://docs.greatexpectations.io/en/\"\"\" + rtd_url_ge_version + \"\"\"/features/data_docs.html\n+ # profiles from the uncommitted directory. Read more at https://docs.greatexpectations.io/en/latest/features/data_docs.html\n local_site:\n class_name: SiteBuilder\n store_backend:\n", "issue": "great_expectations.yml has broken links\nWorking off the latest version:\r\n\r\n```\r\n\u2192 great_expectations --version\r\n\r\ngreat_expectations, version 0.8.4.post0\r\n```\r\n\r\n\r\nYAML file generated from `great_expectations init` has the following contents:\r\n\r\n```yaml\r\n\r\n# Welcome to Great Expectations! Always know what to expect from your data.\r\n# \r\n# Here you can define datasources, generators, integrations and more. This file\r\n# is intended to be committed to your repo. For help with configuration please:\r\n# - Read our docs: https://docs.greatexpectations.io/en/0_8_4_post0/reference/data_context_reference.html#configuration\r\n# - Join our slack channel: http://greatexpectations.io/slack\r\n#\r\n# NOTE: GE uses the names of configured `datasources` and `generators` to manage\r\n# how `expectations` and other artifacts are stored in the `expectations/` and \r\n# `datasources/` folders. If you need to rename an existing `datasource` or \r\n# `generator`, be sure to also update the relevant directory names.\r\n\r\nconfig_version: 1\r\n\r\n# Datasources tell Great Expectations where your data lives and how to get it.\r\n# You can use the CLI command `great_expectations add-datasource` to help you\r\n# add a new datasource. Read more at https://docs.greatexpectations.io/en/0_8_4_post0/features/datasource.html\r\ndatasources:\r\n test:\r\n class_name: PandasDatasource\r\n data_asset_type:\r\n class_name: PandasDataset\r\n generators:\r\n default:\r\n class_name: SubdirReaderGenerator\r\n base_directory: ../misc\r\n reader_options:\r\n sep:\r\n engine: python\r\n```\r\n\r\n\r\nAll the docs linked throughout the yaml file, i.e. https://docs.greatexpectations.io/en/0_8_4_post0/features/datasource.html, do not exist.\r\n\nMissing link/reference in docs?\nhttps://docs.greatexpectations.io/en/latest/reference/data_context_reference.html#validation-operators\r\n\r\n![image](https://user-images.githubusercontent.com/11096727/68488005-24e6e500-0212-11ea-8e95-013d74a52096.png)\r\n\r\nAssuming the underlined part should have a hyperlink?\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom great_expectations import rtd_url_ge_version\n\nPROJECT_HELP_COMMENT = \"\"\"\n# Welcome to Great Expectations! Always know what to expect from your data.\n# \n# Here you can define datasources, generators, integrations and more. This file\n# is intended to be committed to your repo. 
For help with configuration please:\n# - Read our docs: https://docs.greatexpectations.io/en/\"\"\" + rtd_url_ge_version + \"\"\"/reference/data_context_reference.html#configuration\n# - Join our slack channel: http://greatexpectations.io/slack\n#\n# NOTE: GE uses the names of configured `datasources` and `generators` to manage\n# how `expectations` and other artifacts are stored in the `expectations/` and \n# `datasources/` folders. If you need to rename an existing `datasource` or \n# `generator`, be sure to also update the relevant directory names.\n\nconfig_version: 1\n\n# Datasources tell Great Expectations where your data lives and how to get it.\n# You can use the CLI command `great_expectations add-datasource` to help you\n# add a new datasource. Read more at https://docs.greatexpectations.io/en/\"\"\" + rtd_url_ge_version + \"\"\"/features/datasource.html\ndatasources: {}\n\"\"\"\n\nCONFIG_VARIABLES_INTRO = \"\"\"\n# This config file supports variable substitution which enables: 1) keeping\n# secrets out of source control & 2) environment-based configuration changes\n# such as staging vs prod.\n#\n# When GE encounters substitution syntax (like `my_key: ${my_value}` or \n# `my_key: $my_value`) in the config file it will attempt to replace the value\n# of `my_key` with the value from an environment variable `my_value` or a\n# corresponding key read from the file specified using\n# `config_variables_file_path`. Environment variables take precedence.\n#\n# If the substitution value comes from the config variables file, it can be a\n# simple (non-nested) value or a nested value such as a dictionary. If it comes\n# from an environment variable, it must be a simple value. Read more at:\n# https://docs.greatexpectations.io/en/\"\"\" + rtd_url_ge_version + \"\"\"/reference/data_context_reference.html#managing-environment-and-secrets\"\"\"\n\n# This junky fake key keeps ruaml.yml from nuking the important comment block\nCONFIG_VARIABLES_TEMPLATE = \"ge_comment_preservation_key: 1\" + CONFIG_VARIABLES_INTRO\n\nPROJECT_OPTIONAL_CONFIG_COMMENT = CONFIG_VARIABLES_INTRO + \"\"\"\nconfig_variables_file_path: uncommitted/config_variables.yml\n\n# The plugins_directory will be added to your python path for custom modules\n# used to override and extend Great Expectations.\nplugins_directory: plugins/\n\n# Validation Operators are customizable workflows that bundle the validation of\n# one or more expectation suites and subsequent actions. The example below\n# stores validations and send a slack notification. 
To read more about\n# customizing and extending these, read: https://docs.greatexpectations.io/en/\"\"\" + rtd_url_ge_version + \"\"\"/features/validation_operators_and_actions.html\nvalidation_operators:\n action_list_operator:\n # To learn how to configure sending Slack notifications during evaluation\n # (and other customizations), read: https://docs.greatexpectations.io/en/\"\"\" + rtd_url_ge_version + \"\"\"/reference/validation_operators/action_list_validation_operator.html\n class_name: ActionListValidationOperator\n action_list:\n - name: store_validation_result\n action:\n class_name: StoreAction\n - name: store_evaluation_params\n action:\n class_name: ExtractAndStoreEvaluationParamsAction\n - name: update_data_docs\n action:\n class_name: UpdateDataDocsAction\n - name: send_slack_notification_on_validation_result\n action:\n class_name: SlackNotificationAction\n # put the actual webhook URL in the uncommitted/config_variables.yml file\n slack_webhook: ${validation_notification_slack_webhook}\n notify_on: all # possible values: \"all\", \"failure\", \"success\"\n renderer:\n module_name: great_expectations.render.renderer.slack_renderer\n class_name: SlackRenderer\n \nstores:\n# Stores are configurable places to store things like Expectations, Validations\n# Data Docs, and more. These are for advanced users only - most users can simply\n# leave this section alone.\n# \n# Three stores are required: expectations, validations, and\n# evaluation_parameters, and must exist with a valid store entry. Additional\n# stores can be configured for uses such as data_docs, validation_operators, etc.\n expectations_store:\n class_name: ExpectationsStore\n store_backend:\n class_name: FixedLengthTupleFilesystemStoreBackend\n base_directory: expectations/\n\n validations_store:\n class_name: ValidationsStore\n store_backend:\n class_name: FixedLengthTupleFilesystemStoreBackend\n base_directory: uncommitted/validations/\n\n evaluation_parameter_store:\n # Evaluation Parameters enable dynamic expectations. Read more here:\n # https://docs.greatexpectations.io/en/\"\"\" + rtd_url_ge_version + \"\"\"/reference/evaluation_parameters.html\n class_name: InMemoryEvaluationParameterStore\n\nexpectations_store_name: expectations_store\nvalidations_store_name: validations_store\nevaluation_parameter_store_name: evaluation_parameter_store\n\ndata_docs_sites:\n # Data Docs make it simple to visualize data quality in your project. These\n # include Expectations, Validations & Profiles. The are built for all\n # Datasources from JSON artifacts in the local repo including validations &\n # profiles from the uncommitted directory. Read more at https://docs.greatexpectations.io/en/\"\"\" + rtd_url_ge_version + \"\"\"/features/data_docs.html\n local_site:\n class_name: SiteBuilder\n store_backend:\n class_name: FixedLengthTupleFilesystemStoreBackend\n base_directory: uncommitted/data_docs/local_site/\n site_index_builder:\n show_cta_footer: True\n\"\"\"\n\nPROJECT_TEMPLATE = PROJECT_HELP_COMMENT + PROJECT_OPTIONAL_CONFIG_COMMENT\n", "path": "great_expectations/data_context/templates.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom great_expectations import rtd_url_ge_version\n\nPROJECT_HELP_COMMENT = \"\"\"\n# Welcome to Great Expectations! Always know what to expect from your data.\n# \n# Here you can define datasources, generators, integrations and more. This file\n# is intended to be committed to your repo. 
For help with configuration please:\n# - Read our docs: https://docs.greatexpectations.io/en/latest/reference/data_context_reference.html#configuration\n# - Join our slack channel: http://greatexpectations.io/slack\n#\n# NOTE: GE uses the names of configured `datasources` and `generators` to manage\n# how `expectations` and other artifacts are stored in the `expectations/` and \n# `datasources/` folders. If you need to rename an existing `datasource` or \n# `generator`, be sure to also update the relevant directory names.\n\nconfig_version: 1\n\n# Datasources tell Great Expectations where your data lives and how to get it.\n# You can use the CLI command `great_expectations add-datasource` to help you\n# add a new datasource. Read more at https://docs.greatexpectations.io/en/latest/features/datasource.html\ndatasources: {}\n\"\"\"\n\nCONFIG_VARIABLES_INTRO = \"\"\"\n# This config file supports variable substitution which enables: 1) keeping\n# secrets out of source control & 2) environment-based configuration changes\n# such as staging vs prod.\n#\n# When GE encounters substitution syntax (like `my_key: ${my_value}` or \n# `my_key: $my_value`) in the config file it will attempt to replace the value\n# of `my_key` with the value from an environment variable `my_value` or a\n# corresponding key read from the file specified using\n# `config_variables_file_path`. Environment variables take precedence.\n#\n# If the substitution value comes from the config variables file, it can be a\n# simple (non-nested) value or a nested value such as a dictionary. If it comes\n# from an environment variable, it must be a simple value. Read more at:\n# https://docs.greatexpectations.io/en/latest/reference/data_context_reference.html#managing-environment-and-secrets\"\"\"\n\n# This junky fake key keeps ruaml.yml from nuking the important comment block\nCONFIG_VARIABLES_TEMPLATE = \"ge_comment_preservation_key: 1\" + CONFIG_VARIABLES_INTRO\n\nPROJECT_OPTIONAL_CONFIG_COMMENT = CONFIG_VARIABLES_INTRO + \"\"\"\nconfig_variables_file_path: uncommitted/config_variables.yml\n\n# The plugins_directory will be added to your python path for custom modules\n# used to override and extend Great Expectations.\nplugins_directory: plugins/\n\n# Validation Operators are customizable workflows that bundle the validation of\n# one or more expectation suites and subsequent actions. The example below\n# stores validations and send a slack notification. 
To read more about\n# customizing and extending these, read: https://docs.greatexpectations.io/en/latest/features/validation_operators_and_actions.html\nvalidation_operators:\n action_list_operator:\n # To learn how to configure sending Slack notifications during evaluation\n # (and other customizations), read: https://docs.greatexpectations.io/en/latest/reference/validation_operators/action_list_validation_operator.html\n class_name: ActionListValidationOperator\n action_list:\n - name: store_validation_result\n action:\n class_name: StoreAction\n - name: store_evaluation_params\n action:\n class_name: ExtractAndStoreEvaluationParamsAction\n - name: update_data_docs\n action:\n class_name: UpdateDataDocsAction\n - name: send_slack_notification_on_validation_result\n action:\n class_name: SlackNotificationAction\n # put the actual webhook URL in the uncommitted/config_variables.yml file\n slack_webhook: ${validation_notification_slack_webhook}\n notify_on: all # possible values: \"all\", \"failure\", \"success\"\n renderer:\n module_name: great_expectations.render.renderer.slack_renderer\n class_name: SlackRenderer\n \nstores:\n# Stores are configurable places to store things like Expectations, Validations\n# Data Docs, and more. These are for advanced users only - most users can simply\n# leave this section alone.\n# \n# Three stores are required: expectations, validations, and\n# evaluation_parameters, and must exist with a valid store entry. Additional\n# stores can be configured for uses such as data_docs, validation_operators, etc.\n expectations_store:\n class_name: ExpectationsStore\n store_backend:\n class_name: FixedLengthTupleFilesystemStoreBackend\n base_directory: expectations/\n\n validations_store:\n class_name: ValidationsStore\n store_backend:\n class_name: FixedLengthTupleFilesystemStoreBackend\n base_directory: uncommitted/validations/\n\n evaluation_parameter_store:\n # Evaluation Parameters enable dynamic expectations. Read more here:\n # https://docs.greatexpectations.io/en/latest/reference/evaluation_parameters.html\n class_name: InMemoryEvaluationParameterStore\n\nexpectations_store_name: expectations_store\nvalidations_store_name: validations_store\nevaluation_parameter_store_name: evaluation_parameter_store\n\ndata_docs_sites:\n # Data Docs make it simple to visualize data quality in your project. These\n # include Expectations, Validations & Profiles. The are built for all\n # Datasources from JSON artifacts in the local repo including validations &\n # profiles from the uncommitted directory. Read more at https://docs.greatexpectations.io/en/latest/features/data_docs.html\n local_site:\n class_name: SiteBuilder\n store_backend:\n class_name: FixedLengthTupleFilesystemStoreBackend\n base_directory: uncommitted/data_docs/local_site/\n site_index_builder:\n show_cta_footer: True\n\"\"\"\n\nPROJECT_TEMPLATE = PROJECT_HELP_COMMENT + PROJECT_OPTIONAL_CONFIG_COMMENT\n", "path": "great_expectations/data_context/templates.py"}]}
num_tokens: 2,357
num_tokens_diff: 951
gh_patches_debug_659
rasdani/github-patches
git_diff
pex-tool__pex-2214
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Release 2.1.142 On the docket: + [x] KeyError when locking awscli on Python 3.11 #2211 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pex/version.py` Content: ``` 1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 __version__ = "2.1.141" 5 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pex/version.py b/pex/version.py --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = "2.1.141" +__version__ = "2.1.142"
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.141\"\n+__version__ = \"2.1.142\"\n", "issue": "Release 2.1.142\nOn the docket:\r\n+ [x] KeyError when locking awscli on Python 3.11 #2211\r\n\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.141\"\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.142\"\n", "path": "pex/version.py"}]}
num_tokens: 346
num_tokens_diff: 98
gh_patches_debug_34028
rasdani/github-patches
git_diff
ansible__awx-15011
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Missing tags and skip_tags on awx.awx.workflow_launch module ### Please confirm the following - [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html). - [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates. - [X] I understand that AWX is open source software provided for free and that I might not receive a timely response. - [X] I am **NOT** reporting a (potential) security vulnerability. (These should be emailed to `[email protected]` instead.) ### Bug Summary The UI and API support launching a workflow with `skip_tags` and `job_tags` set. This is not currently possible with the `workflow_launch` module. ### AWX version devel ### Select the relevant components - [ ] UI - [ ] UI (tech preview) - [ ] API - [ ] Docs - [X] Collection - [ ] CLI - [ ] Other ### Installation method N/A ### Modifications no ### Ansible version 2.15.2 ### Operating system _No response_ ### Web browser _No response_ ### Steps to reproduce <img width="1106" alt="Screenshot 2024-03-19 at 11 26 20" src="https://github.com/ansible/awx/assets/9056568/878ee7f0-57d3-4365-9353-46aa6f99d009"> Through the UI you can set job_tags or skip_tags if the relevant `ask_for_` is true. In the collection this is not possible ### Expected results The `skip_tags` and `tags` option should be possible as below ``` - name: Kickoff workflow awx.awx.workflow_launch: name: wf_test wait: false skip_tags: - sometag tags: - mytag ``` ### Actual results The options are not included on the module ### Additional information _No response_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `awx_collection/plugins/modules/workflow_launch.py` Content: ``` 1 #!/usr/bin/python 2 # coding: utf-8 -*- 3 4 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) 5 6 from __future__ import absolute_import, division, print_function 7 8 __metaclass__ = type 9 10 ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} 11 12 DOCUMENTATION = ''' 13 --- 14 module: workflow_launch 15 author: "John Westcott IV (@john-westcott-iv)" 16 short_description: Run a workflow in Automation Platform Controller 17 description: 18 - Launch an Automation Platform Controller workflows. See 19 U(https://www.ansible.com/tower) for an overview. 20 options: 21 name: 22 description: 23 - The name of the workflow template to run. 24 required: True 25 type: str 26 aliases: 27 - workflow_template 28 organization: 29 description: 30 - Organization name, ID, or named URL the workflow job template exists in. 31 - Used to help lookup the object, cannot be modified using this module. 32 - If not provided, will lookup by name only, which does not work with duplicates. 33 type: str 34 inventory: 35 description: 36 - Inventory name, ID, or named URL to use for the job ran with this workflow, only used if prompt for inventory is set. 37 type: str 38 limit: 39 description: 40 - Limit to use for the I(job_template). 41 type: str 42 scm_branch: 43 description: 44 - A specific branch of the SCM project to run the template on. 45 - This is only applicable if your project allows for branch override. 46 type: str 47 extra_vars: 48 description: 49 - Any extra vars required to launch the job. 
50 type: dict 51 wait: 52 description: 53 - Wait for the workflow to complete. 54 default: True 55 type: bool 56 interval: 57 description: 58 - The interval to request an update from the controller. 59 required: False 60 default: 2 61 type: float 62 timeout: 63 description: 64 - If waiting for the workflow to complete this will abort after this 65 amount of seconds 66 type: int 67 extends_documentation_fragment: awx.awx.auth 68 ''' 69 70 RETURN = ''' 71 job_info: 72 description: dictionary containing information about the workflow executed 73 returned: If workflow launched 74 type: dict 75 ''' 76 77 78 EXAMPLES = ''' 79 - name: Launch a workflow with a timeout of 10 seconds 80 workflow_launch: 81 workflow_template: "Test Workflow" 82 timeout: 10 83 84 - name: Launch a Workflow with extra_vars without waiting 85 workflow_launch: 86 workflow_template: "Test workflow" 87 extra_vars: 88 var1: My First Variable 89 var2: My Second Variable 90 wait: False 91 ''' 92 93 from ..module_utils.controller_api import ControllerAPIModule 94 95 96 def main(): 97 # Any additional arguments that are not fields of the item can be added here 98 argument_spec = dict( 99 name=dict(required=True, aliases=['workflow_template']), 100 organization=dict(), 101 inventory=dict(), 102 limit=dict(), 103 scm_branch=dict(), 104 extra_vars=dict(type='dict'), 105 wait=dict(required=False, default=True, type='bool'), 106 interval=dict(required=False, default=2.0, type='float'), 107 timeout=dict(required=False, type='int'), 108 ) 109 110 # Create a module for ourselves 111 module = ControllerAPIModule(argument_spec=argument_spec) 112 113 optional_args = {} 114 # Extract our parameters 115 name = module.params.get('name') 116 organization = module.params.get('organization') 117 inventory = module.params.get('inventory') 118 wait = module.params.get('wait') 119 interval = module.params.get('interval') 120 timeout = module.params.get('timeout') 121 122 for field_name in ( 123 'limit', 124 'extra_vars', 125 'scm_branch', 126 ): 127 field_val = module.params.get(field_name) 128 if field_val is not None: 129 optional_args[field_name] = field_val 130 131 # Create a datastructure to pass into our job launch 132 post_data = {} 133 for arg_name, arg_value in optional_args.items(): 134 if arg_value: 135 post_data[arg_name] = arg_value 136 137 # Attempt to look up the related items the user specified (these will fail the module if not found) 138 if inventory: 139 post_data['inventory'] = module.resolve_name_to_id('inventories', inventory) 140 141 # Attempt to look up job_template based on the provided name 142 lookup_data = {} 143 if organization: 144 lookup_data['organization'] = module.resolve_name_to_id('organizations', organization) 145 workflow_job_template = module.get_one('workflow_job_templates', name_or_id=name, data=lookup_data) 146 147 if workflow_job_template is None: 148 module.fail_json(msg="Unable to find workflow job template") 149 150 # The API will allow you to submit values to a jb launch that are not prompt on launch. 151 # Therefore, we will test to see if anything is set which is not prompt on launch and fail. 
152 check_vars_to_prompts = { 153 'inventory': 'ask_inventory_on_launch', 154 'limit': 'ask_limit_on_launch', 155 'scm_branch': 'ask_scm_branch_on_launch', 156 } 157 158 param_errors = [] 159 for variable_name, prompt in check_vars_to_prompts.items(): 160 if variable_name in post_data and not workflow_job_template[prompt]: 161 param_errors.append("The field {0} was specified but the workflow job template does not allow for it to be overridden".format(variable_name)) 162 # Check if Either ask_variables_on_launch, or survey_enabled is enabled for use of extra vars. 163 if module.params.get('extra_vars') and not (workflow_job_template['ask_variables_on_launch'] or workflow_job_template['survey_enabled']): 164 param_errors.append("The field extra_vars was specified but the workflow job template does not allow for it to be overridden") 165 if len(param_errors) > 0: 166 module.fail_json(msg="Parameters specified which can not be passed into workflow job template, see errors for details", errors=param_errors) 167 168 # Launch the job 169 result = module.post_endpoint(workflow_job_template['related']['launch'], data=post_data) 170 171 if result['status_code'] != 201: 172 module.fail_json(msg="Failed to launch workflow, see response for details", response=result) 173 174 module.json_output['changed'] = True 175 module.json_output['id'] = result['json']['id'] 176 module.json_output['status'] = result['json']['status'] 177 # This is for backwards compatability 178 module.json_output['job_info'] = {'id': result['json']['id']} 179 180 if not wait: 181 module.exit_json(**module.json_output) 182 183 # Invoke wait function 184 module.wait_on_url(url=result['json']['url'], object_name=name, object_type='Workflow Job', timeout=timeout, interval=interval) 185 186 module.exit_json(**module.json_output) 187 188 189 if __name__ == '__main__': 190 main() 191 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/awx_collection/plugins/modules/workflow_launch.py b/awx_collection/plugins/modules/workflow_launch.py --- a/awx_collection/plugins/modules/workflow_launch.py +++ b/awx_collection/plugins/modules/workflow_launch.py @@ -39,6 +39,16 @@ description: - Limit to use for the I(job_template). type: str + tags: + description: + - Specific tags to apply from the I(job_template). + type: list + elements: str + skip_tags: + description: + - Specific tags to skip from the I(job_template). + type: list + elements: str scm_branch: description: - A specific branch of the SCM project to run the template on. @@ -100,6 +110,8 @@ organization=dict(), inventory=dict(), limit=dict(), + tags=dict(type='list', elements='str'), + skip_tags=dict(type='list', elements='str'), scm_branch=dict(), extra_vars=dict(type='dict'), wait=dict(required=False, default=True, type='bool'), @@ -128,6 +140,14 @@ if field_val is not None: optional_args[field_name] = field_val + # Special treatment of tags parameters + job_tags = module.params.get('tags') + if job_tags is not None: + optional_args['job_tags'] = ",".join(job_tags) + skip_tags = module.params.get('skip_tags') + if skip_tags is not None: + optional_args['skip_tags'] = ",".join(skip_tags) + # Create a datastructure to pass into our job launch post_data = {} for arg_name, arg_value in optional_args.items(): @@ -152,6 +172,8 @@ check_vars_to_prompts = { 'inventory': 'ask_inventory_on_launch', 'limit': 'ask_limit_on_launch', + 'job_tags': 'ask_tags_on_launch', + 'skip_tags': 'ask_skip_tags_on_launch', 'scm_branch': 'ask_scm_branch_on_launch', }
{"golden_diff": "diff --git a/awx_collection/plugins/modules/workflow_launch.py b/awx_collection/plugins/modules/workflow_launch.py\n--- a/awx_collection/plugins/modules/workflow_launch.py\n+++ b/awx_collection/plugins/modules/workflow_launch.py\n@@ -39,6 +39,16 @@\n description:\n - Limit to use for the I(job_template).\n type: str\n+ tags:\n+ description:\n+ - Specific tags to apply from the I(job_template).\n+ type: list\n+ elements: str\n+ skip_tags:\n+ description:\n+ - Specific tags to skip from the I(job_template).\n+ type: list\n+ elements: str\n scm_branch:\n description:\n - A specific branch of the SCM project to run the template on.\n@@ -100,6 +110,8 @@\n organization=dict(),\n inventory=dict(),\n limit=dict(),\n+ tags=dict(type='list', elements='str'),\n+ skip_tags=dict(type='list', elements='str'),\n scm_branch=dict(),\n extra_vars=dict(type='dict'),\n wait=dict(required=False, default=True, type='bool'),\n@@ -128,6 +140,14 @@\n if field_val is not None:\n optional_args[field_name] = field_val\n \n+ # Special treatment of tags parameters\n+ job_tags = module.params.get('tags')\n+ if job_tags is not None:\n+ optional_args['job_tags'] = \",\".join(job_tags)\n+ skip_tags = module.params.get('skip_tags')\n+ if skip_tags is not None:\n+ optional_args['skip_tags'] = \",\".join(skip_tags)\n+\n # Create a datastructure to pass into our job launch\n post_data = {}\n for arg_name, arg_value in optional_args.items():\n@@ -152,6 +172,8 @@\n check_vars_to_prompts = {\n 'inventory': 'ask_inventory_on_launch',\n 'limit': 'ask_limit_on_launch',\n+ 'job_tags': 'ask_tags_on_launch',\n+ 'skip_tags': 'ask_skip_tags_on_launch',\n 'scm_branch': 'ask_scm_branch_on_launch',\n }\n", "issue": "Missing tags and skip_tags on awx.awx.workflow_launch module\n### Please confirm the following\n\n- [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).\n- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.\n- [X] I understand that AWX is open source software provided for free and that I might not receive a timely response.\n- [X] I am **NOT** reporting a (potential) security vulnerability. (These should be emailed to `[email protected]` instead.)\n\n### Bug Summary\n\nThe UI and API support launching a workflow with `skip_tags` and `job_tags` set. 
This is not currently possible with the `workflow_launch` module.\n\n### AWX version\n\ndevel\n\n### Select the relevant components\n\n- [ ] UI\n- [ ] UI (tech preview)\n- [ ] API\n- [ ] Docs\n- [X] Collection\n- [ ] CLI\n- [ ] Other\n\n### Installation method\n\nN/A\n\n### Modifications\n\nno\n\n### Ansible version\n\n2.15.2\n\n### Operating system\n\n_No response_\n\n### Web browser\n\n_No response_\n\n### Steps to reproduce\n\n<img width=\"1106\" alt=\"Screenshot 2024-03-19 at 11 26 20\" src=\"https://github.com/ansible/awx/assets/9056568/878ee7f0-57d3-4365-9353-46aa6f99d009\">\r\n\r\nThrough the UI you can set job_tags or skip_tags if the relevant `ask_for_` is true.\r\n\r\nIn the collection this is not possible\n\n### Expected results\n\nThe `skip_tags` and `tags` option should be possible as below\r\n```\r\n- name: Kickoff workflow\r\n awx.awx.workflow_launch:\r\n name: wf_test\r\n wait: false\r\n skip_tags:\r\n - sometag\r\n tags:\r\n - mytag\r\n```\n\n### Actual results\n\nThe options are not included on the module\n\n### Additional information\n\n_No response_\n", "before_files": [{"content": "#!/usr/bin/python\n# coding: utf-8 -*-\n\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n\n__metaclass__ = type\n\nANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'}\n\nDOCUMENTATION = '''\n---\nmodule: workflow_launch\nauthor: \"John Westcott IV (@john-westcott-iv)\"\nshort_description: Run a workflow in Automation Platform Controller\ndescription:\n - Launch an Automation Platform Controller workflows. See\n U(https://www.ansible.com/tower) for an overview.\noptions:\n name:\n description:\n - The name of the workflow template to run.\n required: True\n type: str\n aliases:\n - workflow_template\n organization:\n description:\n - Organization name, ID, or named URL the workflow job template exists in.\n - Used to help lookup the object, cannot be modified using this module.\n - If not provided, will lookup by name only, which does not work with duplicates.\n type: str\n inventory:\n description:\n - Inventory name, ID, or named URL to use for the job ran with this workflow, only used if prompt for inventory is set.\n type: str\n limit:\n description:\n - Limit to use for the I(job_template).\n type: str\n scm_branch:\n description:\n - A specific branch of the SCM project to run the template on.\n - This is only applicable if your project allows for branch override.\n type: str\n extra_vars:\n description:\n - Any extra vars required to launch the job.\n type: dict\n wait:\n description:\n - Wait for the workflow to complete.\n default: True\n type: bool\n interval:\n description:\n - The interval to request an update from the controller.\n required: False\n default: 2\n type: float\n timeout:\n description:\n - If waiting for the workflow to complete this will abort after this\n amount of seconds\n type: int\nextends_documentation_fragment: awx.awx.auth\n'''\n\nRETURN = '''\njob_info:\n description: dictionary containing information about the workflow executed\n returned: If workflow launched\n type: dict\n'''\n\n\nEXAMPLES = '''\n- name: Launch a workflow with a timeout of 10 seconds\n workflow_launch:\n workflow_template: \"Test Workflow\"\n timeout: 10\n\n- name: Launch a Workflow with extra_vars without waiting\n workflow_launch:\n workflow_template: \"Test workflow\"\n extra_vars:\n var1: My First Variable\n var2: My Second 
Variable\n wait: False\n'''\n\nfrom ..module_utils.controller_api import ControllerAPIModule\n\n\ndef main():\n # Any additional arguments that are not fields of the item can be added here\n argument_spec = dict(\n name=dict(required=True, aliases=['workflow_template']),\n organization=dict(),\n inventory=dict(),\n limit=dict(),\n scm_branch=dict(),\n extra_vars=dict(type='dict'),\n wait=dict(required=False, default=True, type='bool'),\n interval=dict(required=False, default=2.0, type='float'),\n timeout=dict(required=False, type='int'),\n )\n\n # Create a module for ourselves\n module = ControllerAPIModule(argument_spec=argument_spec)\n\n optional_args = {}\n # Extract our parameters\n name = module.params.get('name')\n organization = module.params.get('organization')\n inventory = module.params.get('inventory')\n wait = module.params.get('wait')\n interval = module.params.get('interval')\n timeout = module.params.get('timeout')\n\n for field_name in (\n 'limit',\n 'extra_vars',\n 'scm_branch',\n ):\n field_val = module.params.get(field_name)\n if field_val is not None:\n optional_args[field_name] = field_val\n\n # Create a datastructure to pass into our job launch\n post_data = {}\n for arg_name, arg_value in optional_args.items():\n if arg_value:\n post_data[arg_name] = arg_value\n\n # Attempt to look up the related items the user specified (these will fail the module if not found)\n if inventory:\n post_data['inventory'] = module.resolve_name_to_id('inventories', inventory)\n\n # Attempt to look up job_template based on the provided name\n lookup_data = {}\n if organization:\n lookup_data['organization'] = module.resolve_name_to_id('organizations', organization)\n workflow_job_template = module.get_one('workflow_job_templates', name_or_id=name, data=lookup_data)\n\n if workflow_job_template is None:\n module.fail_json(msg=\"Unable to find workflow job template\")\n\n # The API will allow you to submit values to a jb launch that are not prompt on launch.\n # Therefore, we will test to see if anything is set which is not prompt on launch and fail.\n check_vars_to_prompts = {\n 'inventory': 'ask_inventory_on_launch',\n 'limit': 'ask_limit_on_launch',\n 'scm_branch': 'ask_scm_branch_on_launch',\n }\n\n param_errors = []\n for variable_name, prompt in check_vars_to_prompts.items():\n if variable_name in post_data and not workflow_job_template[prompt]:\n param_errors.append(\"The field {0} was specified but the workflow job template does not allow for it to be overridden\".format(variable_name))\n # Check if Either ask_variables_on_launch, or survey_enabled is enabled for use of extra vars.\n if module.params.get('extra_vars') and not (workflow_job_template['ask_variables_on_launch'] or workflow_job_template['survey_enabled']):\n param_errors.append(\"The field extra_vars was specified but the workflow job template does not allow for it to be overridden\")\n if len(param_errors) > 0:\n module.fail_json(msg=\"Parameters specified which can not be passed into workflow job template, see errors for details\", errors=param_errors)\n\n # Launch the job\n result = module.post_endpoint(workflow_job_template['related']['launch'], data=post_data)\n\n if result['status_code'] != 201:\n module.fail_json(msg=\"Failed to launch workflow, see response for details\", response=result)\n\n module.json_output['changed'] = True\n module.json_output['id'] = result['json']['id']\n module.json_output['status'] = result['json']['status']\n # This is for backwards compatability\n module.json_output['job_info'] = 
{'id': result['json']['id']}\n\n if not wait:\n module.exit_json(**module.json_output)\n\n # Invoke wait function\n module.wait_on_url(url=result['json']['url'], object_name=name, object_type='Workflow Job', timeout=timeout, interval=interval)\n\n module.exit_json(**module.json_output)\n\n\nif __name__ == '__main__':\n main()\n", "path": "awx_collection/plugins/modules/workflow_launch.py"}], "after_files": [{"content": "#!/usr/bin/python\n# coding: utf-8 -*-\n\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n\n__metaclass__ = type\n\nANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'}\n\nDOCUMENTATION = '''\n---\nmodule: workflow_launch\nauthor: \"John Westcott IV (@john-westcott-iv)\"\nshort_description: Run a workflow in Automation Platform Controller\ndescription:\n - Launch an Automation Platform Controller workflows. See\n U(https://www.ansible.com/tower) for an overview.\noptions:\n name:\n description:\n - The name of the workflow template to run.\n required: True\n type: str\n aliases:\n - workflow_template\n organization:\n description:\n - Organization name, ID, or named URL the workflow job template exists in.\n - Used to help lookup the object, cannot be modified using this module.\n - If not provided, will lookup by name only, which does not work with duplicates.\n type: str\n inventory:\n description:\n - Inventory name, ID, or named URL to use for the job ran with this workflow, only used if prompt for inventory is set.\n type: str\n limit:\n description:\n - Limit to use for the I(job_template).\n type: str\n tags:\n description:\n - Specific tags to apply from the I(job_template).\n type: list\n elements: str\n skip_tags:\n description:\n - Specific tags to skip from the I(job_template).\n type: list\n elements: str\n scm_branch:\n description:\n - A specific branch of the SCM project to run the template on.\n - This is only applicable if your project allows for branch override.\n type: str\n extra_vars:\n description:\n - Any extra vars required to launch the job.\n type: dict\n wait:\n description:\n - Wait for the workflow to complete.\n default: True\n type: bool\n interval:\n description:\n - The interval to request an update from the controller.\n required: False\n default: 2\n type: float\n timeout:\n description:\n - If waiting for the workflow to complete this will abort after this\n amount of seconds\n type: int\nextends_documentation_fragment: awx.awx.auth\n'''\n\nRETURN = '''\njob_info:\n description: dictionary containing information about the workflow executed\n returned: If workflow launched\n type: dict\n'''\n\n\nEXAMPLES = '''\n- name: Launch a workflow with a timeout of 10 seconds\n workflow_launch:\n workflow_template: \"Test Workflow\"\n timeout: 10\n\n- name: Launch a Workflow with extra_vars without waiting\n workflow_launch:\n workflow_template: \"Test workflow\"\n extra_vars:\n var1: My First Variable\n var2: My Second Variable\n wait: False\n'''\n\nfrom ..module_utils.controller_api import ControllerAPIModule\n\n\ndef main():\n # Any additional arguments that are not fields of the item can be added here\n argument_spec = dict(\n name=dict(required=True, aliases=['workflow_template']),\n organization=dict(),\n inventory=dict(),\n limit=dict(),\n tags=dict(type='list', elements='str'),\n skip_tags=dict(type='list', elements='str'),\n scm_branch=dict(),\n extra_vars=dict(type='dict'),\n 
wait=dict(required=False, default=True, type='bool'),\n interval=dict(required=False, default=2.0, type='float'),\n timeout=dict(required=False, type='int'),\n )\n\n # Create a module for ourselves\n module = ControllerAPIModule(argument_spec=argument_spec)\n\n optional_args = {}\n # Extract our parameters\n name = module.params.get('name')\n organization = module.params.get('organization')\n inventory = module.params.get('inventory')\n wait = module.params.get('wait')\n interval = module.params.get('interval')\n timeout = module.params.get('timeout')\n\n for field_name in (\n 'limit',\n 'extra_vars',\n 'scm_branch',\n ):\n field_val = module.params.get(field_name)\n if field_val is not None:\n optional_args[field_name] = field_val\n\n # Special treatment of tags parameters\n job_tags = module.params.get('tags')\n if job_tags is not None:\n optional_args['job_tags'] = \",\".join(job_tags)\n skip_tags = module.params.get('skip_tags')\n if skip_tags is not None:\n optional_args['skip_tags'] = \",\".join(skip_tags)\n\n # Create a datastructure to pass into our job launch\n post_data = {}\n for arg_name, arg_value in optional_args.items():\n if arg_value:\n post_data[arg_name] = arg_value\n\n # Attempt to look up the related items the user specified (these will fail the module if not found)\n if inventory:\n post_data['inventory'] = module.resolve_name_to_id('inventories', inventory)\n\n # Attempt to look up job_template based on the provided name\n lookup_data = {}\n if organization:\n lookup_data['organization'] = module.resolve_name_to_id('organizations', organization)\n workflow_job_template = module.get_one('workflow_job_templates', name_or_id=name, data=lookup_data)\n\n if workflow_job_template is None:\n module.fail_json(msg=\"Unable to find workflow job template\")\n\n # The API will allow you to submit values to a jb launch that are not prompt on launch.\n # Therefore, we will test to see if anything is set which is not prompt on launch and fail.\n check_vars_to_prompts = {\n 'inventory': 'ask_inventory_on_launch',\n 'limit': 'ask_limit_on_launch',\n 'job_tags': 'ask_tags_on_launch',\n 'skip_tags': 'ask_skip_tags_on_launch',\n 'scm_branch': 'ask_scm_branch_on_launch',\n }\n\n param_errors = []\n for variable_name, prompt in check_vars_to_prompts.items():\n if variable_name in post_data and not workflow_job_template[prompt]:\n param_errors.append(\"The field {0} was specified but the workflow job template does not allow for it to be overridden\".format(variable_name))\n # Check if Either ask_variables_on_launch, or survey_enabled is enabled for use of extra vars.\n if module.params.get('extra_vars') and not (workflow_job_template['ask_variables_on_launch'] or workflow_job_template['survey_enabled']):\n param_errors.append(\"The field extra_vars was specified but the workflow job template does not allow for it to be overridden\")\n if len(param_errors) > 0:\n module.fail_json(msg=\"Parameters specified which can not be passed into workflow job template, see errors for details\", errors=param_errors)\n\n # Launch the job\n result = module.post_endpoint(workflow_job_template['related']['launch'], data=post_data)\n\n if result['status_code'] != 201:\n module.fail_json(msg=\"Failed to launch workflow, see response for details\", response=result)\n\n module.json_output['changed'] = True\n module.json_output['id'] = result['json']['id']\n module.json_output['status'] = result['json']['status']\n # This is for backwards compatability\n module.json_output['job_info'] = {'id': 
result['json']['id']}\n\n if not wait:\n module.exit_json(**module.json_output)\n\n # Invoke wait function\n module.wait_on_url(url=result['json']['url'], object_name=name, object_type='Workflow Job', timeout=timeout, interval=interval)\n\n module.exit_json(**module.json_output)\n\n\nif __name__ == '__main__':\n main()\n", "path": "awx_collection/plugins/modules/workflow_launch.py"}]}
num_tokens: 2,711
num_tokens_diff: 477
gh_patches_debug_22758
rasdani/github-patches
git_diff
ckan__ckan-5647
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [Regression-2.9] Ckan Generate Config **CKAN version** 2.9 **Describe the bug** In ckan 2.9, ckan generate config does not make a config file that can then be immediately edited. **Steps to reproduce** In Ckan <= 2.8.5, ``` "$CKAN_HOME"/bin/paster make-config ckan "$CONFIG" "$CKAN_HOME"/bin/paster --plugin=ckan config-tool "$CONFIG" \ "sqlalchemy.url = ${DATABASE_URL}" \ "solr_url = ${SOLR_URL}" \ "ckan.site_url = ${SITE_SCHEME}${SITE_HOST}" \ "ckan.redis.url = ${REDIS_URL}" \ ... ``` worked, regardless of the database url. In ckan 2.9: ``` "$CKAN_HOME"/bin/ckan generate config "$CONFIG" "$CKAN_HOME"/bin/ckan config-tool "$CONFIG" \ "ckan.storage_path = /var/lib/ckan" \ "sqlalchemy.url = ${CKAN_DB_URL}"\ ``` This fails, because the database is not reachable (and potentially the site url is not defined, IIRC) **Expected behavior** Either: * Ckan generate config should provide the basic parameters required to run config-tool * config-tool should be able to edit an incomplete ckan.ini file, by not attempting to contact the database, redis, or solr servers. ``` ckan generate config $CKAN_INI ckan config-tool $CKAN_INI \ [options] ``` should work. **Additional details** If possible, please provide the full stack trace of the error raised, or add screenshots to help explain your problem. This is my current work-around, which is ugly: ``` if [ ! -e "$CONFIG" ]; then "$CKAN_HOME"/bin/ckan generate config "$CONFIG" # site url can't be set with the config tool perl -pi -e "s|^(ckan.site_url =)$|\1 ${SITE_SCHEME}${SITE_HOST}|;" $CONFIG perl -pi -e "s|^#?(solr_url =).*$|\1 ${SOLR_URL}|;" $CONFIG perl -pi -e "s|^#?(ckan.redis.url =).*|\1 ${REDIS_URL}|;" $CONFIG # perl has issues with @db in the replacement sed -i -e "s|^sqlalchemy.url =.*$|sqlalchemy.url = ${CKAN_DB_URL}|;" $CONFIG fi "$CKAN_HOME"/bin/ckan config-tool "$CONFIG" \ "ckan.storage_path = /var/lib/ckan" \ "sqlalchemy.url = ${CKAN_DB_URL}"\ "ckan.site_url = ${SITE_SCHEME}${SITE_HOST}" \ "solr_url = ${SOLR_URL}" \ ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `ckan/cli/cli.py` Content: ``` 1 # encoding: utf-8 2 3 import logging 4 from collections import defaultdict 5 from pkg_resources import iter_entry_points 6 7 import six 8 import click 9 import sys 10 11 import ckan.plugins as p 12 import ckan.cli as ckan_cli 13 from ckan.config.middleware import make_app 14 from ckan.exceptions import CkanConfigurationException 15 from ckan.cli import ( 16 config_tool, 17 jobs, 18 front_end_build, 19 db, search_index, server, 20 profile, 21 asset, 22 sysadmin, 23 translation, 24 dataset, 25 views, 26 plugin_info, 27 notify, 28 tracking, 29 minify, 30 less, 31 generate, 32 user 33 ) 34 35 from ckan.cli import seed 36 37 log = logging.getLogger(__name__) 38 39 40 class CkanCommand(object): 41 42 def __init__(self, conf=None): 43 # Don't import `load_config` by itself, rather call it using 44 # module so that it can be patched during tests 45 self.config = ckan_cli.load_config(conf) 46 self.app = make_app(self.config) 47 48 49 def _get_commands_from_plugins(plugins): 50 for plugin in plugins: 51 for cmd in plugin.get_commands(): 52 cmd._ckan_meta = { 53 u'name': plugin.name, 54 u'type': u'plugin' 55 } 56 yield cmd 57 58 59 def _get_commands_from_entry_point(entry_point=u'ckan.click_command'): 60 registered_entries = {} 61 for entry in iter_entry_points(entry_point): 62 if entry.name in registered_entries: 63 p.toolkit.error_shout(( 64 u'Attempt to override entry_point `{name}`.\n' 65 u'First encounter:\n\t{first!r}\n' 66 u'Second encounter:\n\t{second!r}\n' 67 u'Either uninstall one of mentioned extensions or update' 68 u' corresponding `setup.py` and re-install the extension.' 69 ).format( 70 name=entry.name, 71 first=registered_entries[entry.name].dist, 72 second=entry.dist)) 73 raise click.Abort() 74 registered_entries[entry.name] = entry 75 76 cmd = entry.load() 77 cmd._ckan_meta = { 78 u'name': entry.name, 79 u'type': u'entry_point' 80 } 81 yield cmd 82 83 84 def _init_ckan_config(ctx, param, value): 85 is_help = u'--help' in sys.argv 86 no_config = len(sys.argv) > 1 and sys.argv[1] in ( 87 u'generate', u'config-tool') 88 89 try: 90 ctx.obj = CkanCommand(value) 91 except CkanConfigurationException as e: 92 # Some commands don't require the config loaded 93 if no_config or is_help: 94 return 95 else: 96 p.toolkit.error_shout(e) 97 raise click.Abort() 98 99 if six.PY2: 100 ctx.meta["flask_app"] = ctx.obj.app.apps["flask_app"]._wsgi_app 101 else: 102 ctx.meta["flask_app"] = ctx.obj.app._wsgi_app 103 104 for cmd in _get_commands_from_entry_point(): 105 ctx.command.add_command(cmd) 106 107 plugins = p.PluginImplementations(p.IClick) 108 for cmd in _get_commands_from_plugins(plugins): 109 ctx.command.add_command(cmd) 110 111 112 click_config_option = click.option( 113 u'-c', 114 u'--config', 115 default=None, 116 metavar=u'CONFIG', 117 help=u'Config file to use (default: development.ini)', 118 is_eager=True, 119 callback=_init_ckan_config 120 ) 121 122 123 class CustomGroup(click.Group): 124 _section_titles = { 125 u'plugin': u'Plugins', 126 u'entry_point': u'Entry points', 127 } 128 129 def format_commands(self, ctx, formatter): 130 # Without any arguments click skips option callbacks. 
131 self.parse_args(ctx, [u'help']) 132 133 commands = [] 134 ext_commands = defaultdict(lambda: defaultdict(list)) 135 136 for subcommand in self.list_commands(ctx): 137 cmd = self.get_command(ctx, subcommand) 138 if cmd is None: 139 continue 140 help = cmd.short_help or u'' 141 142 meta = getattr(cmd, u'_ckan_meta', None) 143 if meta: 144 ext_commands[meta[u'type']][meta[u'name']].append( 145 (subcommand, help)) 146 else: 147 commands.append((subcommand, help)) 148 149 if commands: 150 with formatter.section(u'Commands'): 151 formatter.write_dl(commands) 152 153 for section, group in ext_commands.items(): 154 with formatter.section(self._section_titles.get(section, section)): 155 for _ext, rows in group.items(): 156 formatter.write_dl(rows) 157 158 159 @click.group(cls=CustomGroup) 160 @click.help_option(u'-h', u'--help') 161 @click_config_option 162 def ckan(config, *args, **kwargs): 163 pass 164 165 166 ckan.add_command(jobs.jobs) 167 ckan.add_command(config_tool.config_tool) 168 ckan.add_command(front_end_build.front_end_build) 169 ckan.add_command(server.run) 170 ckan.add_command(profile.profile) 171 ckan.add_command(seed.seed) 172 ckan.add_command(db.db) 173 ckan.add_command(search_index.search_index) 174 ckan.add_command(sysadmin.sysadmin) 175 ckan.add_command(asset.asset) 176 ckan.add_command(translation.translation) 177 ckan.add_command(dataset.dataset) 178 ckan.add_command(views.views) 179 ckan.add_command(plugin_info.plugin_info) 180 ckan.add_command(notify.notify) 181 ckan.add_command(tracking.tracking) 182 ckan.add_command(minify.minify) 183 ckan.add_command(less.less) 184 ckan.add_command(generate.generate) 185 ckan.add_command(user.user) 186 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ckan/cli/cli.py b/ckan/cli/cli.py --- a/ckan/cli/cli.py +++ b/ckan/cli/cli.py @@ -36,6 +36,12 @@ log = logging.getLogger(__name__) +_no_config_commands = [ + [u'config-tool'], + [u'generate', u'config'], + [u'generate', u'extension'], +] + class CkanCommand(object): @@ -83,18 +89,20 @@ def _init_ckan_config(ctx, param, value): is_help = u'--help' in sys.argv - no_config = len(sys.argv) > 1 and sys.argv[1] in ( - u'generate', u'config-tool') + no_config = False + if len(sys.argv) > 1: + for cmd in _no_config_commands: + if sys.argv[1:len(cmd) + 1] == cmd: + no_config = True + break + if no_config or is_help: + return try: ctx.obj = CkanCommand(value) except CkanConfigurationException as e: - # Some commands don't require the config loaded - if no_config or is_help: - return - else: - p.toolkit.error_shout(e) - raise click.Abort() + p.toolkit.error_shout(e) + raise click.Abort() if six.PY2: ctx.meta["flask_app"] = ctx.obj.app.apps["flask_app"]._wsgi_app
{"golden_diff": "diff --git a/ckan/cli/cli.py b/ckan/cli/cli.py\n--- a/ckan/cli/cli.py\n+++ b/ckan/cli/cli.py\n@@ -36,6 +36,12 @@\n \n log = logging.getLogger(__name__)\n \n+_no_config_commands = [\n+ [u'config-tool'],\n+ [u'generate', u'config'],\n+ [u'generate', u'extension'],\n+]\n+\n \n class CkanCommand(object):\n \n@@ -83,18 +89,20 @@\n \n def _init_ckan_config(ctx, param, value):\n is_help = u'--help' in sys.argv\n- no_config = len(sys.argv) > 1 and sys.argv[1] in (\n- u'generate', u'config-tool')\n+ no_config = False\n+ if len(sys.argv) > 1:\n+ for cmd in _no_config_commands:\n+ if sys.argv[1:len(cmd) + 1] == cmd:\n+ no_config = True\n+ break\n+ if no_config or is_help:\n+ return\n \n try:\n ctx.obj = CkanCommand(value)\n except CkanConfigurationException as e:\n- # Some commands don't require the config loaded\n- if no_config or is_help:\n- return\n- else:\n- p.toolkit.error_shout(e)\n- raise click.Abort()\n+ p.toolkit.error_shout(e)\n+ raise click.Abort()\n \n if six.PY2:\n ctx.meta[\"flask_app\"] = ctx.obj.app.apps[\"flask_app\"]._wsgi_app\n", "issue": "[Regression-2.9] Ckan Generate Config \n**CKAN version**\r\n\r\n2.9\r\n\r\n**Describe the bug**\r\n\r\nIn ckan 2.9, ckan generate config does not make a config file that can then be immediately edited. \r\n\r\n**Steps to reproduce**\r\nIn Ckan <= 2.8.5, \r\n```\r\n \"$CKAN_HOME\"/bin/paster make-config ckan \"$CONFIG\"\r\n\r\n \"$CKAN_HOME\"/bin/paster --plugin=ckan config-tool \"$CONFIG\" \\\r\n \"sqlalchemy.url = ${DATABASE_URL}\" \\\r\n \"solr_url = ${SOLR_URL}\" \\\r\n \"ckan.site_url = ${SITE_SCHEME}${SITE_HOST}\" \\\r\n \"ckan.redis.url = ${REDIS_URL}\" \\\r\n ...\r\n```\r\nworked, regardless of the database url. \r\n\r\nIn ckan 2.9:\r\n```\r\n \"$CKAN_HOME\"/bin/ckan generate config \"$CONFIG\"\r\n\r\n \"$CKAN_HOME\"/bin/ckan config-tool \"$CONFIG\" \\\r\n \"ckan.storage_path = /var/lib/ckan\" \\\r\n \"sqlalchemy.url = ${CKAN_DB_URL}\"\\\r\n```\r\nThis fails, because the database is not reachable (and potentially the site url is not defined, IIRC)\r\n\r\n**Expected behavior**\r\n\r\nEither:\r\n* Ckan generate config should provide the basic parameters required to run config-tool\r\n* config-tool should be able to edit an incomplete ckan.ini file, by not attempting to contact the database, redis, or solr servers. \r\n\r\n```\r\nckan generate config $CKAN_INI\r\nckan config-tool $CKAN_INI \\ \r\n [options] \r\n```\r\nshould work. \r\n\r\n**Additional details**\r\nIf possible, please provide the full stack trace of the error raised, or add screenshots to help explain your problem.\r\n\r\nThis is my current work-around, which is ugly:\r\n```\r\n if [ ! 
-e \"$CONFIG\" ]; then\r\n \"$CKAN_HOME\"/bin/ckan generate config \"$CONFIG\"\r\n # site url can't be set with the config tool\r\n perl -pi -e \"s|^(ckan.site_url =)$|\\1 ${SITE_SCHEME}${SITE_HOST}|;\" $CONFIG\r\n perl -pi -e \"s|^#?(solr_url =).*$|\\1 ${SOLR_URL}|;\" $CONFIG\r\n perl -pi -e \"s|^#?(ckan.redis.url =).*|\\1 ${REDIS_URL}|;\" $CONFIG\r\n # perl has issues with @db in the replacement\r\n sed -i -e \"s|^sqlalchemy.url =.*$|sqlalchemy.url = ${CKAN_DB_URL}|;\" $CONFIG\r\n\r\n fi\r\n\r\n\r\n \"$CKAN_HOME\"/bin/ckan config-tool \"$CONFIG\" \\\r\n \"ckan.storage_path = /var/lib/ckan\" \\\r\n \"sqlalchemy.url = ${CKAN_DB_URL}\"\\\r\n \"ckan.site_url = ${SITE_SCHEME}${SITE_HOST}\" \\\r\n \"solr_url = ${SOLR_URL}\" \\\r\n```\r\n\n", "before_files": [{"content": "# encoding: utf-8\n\nimport logging\nfrom collections import defaultdict\nfrom pkg_resources import iter_entry_points\n\nimport six\nimport click\nimport sys\n\nimport ckan.plugins as p\nimport ckan.cli as ckan_cli\nfrom ckan.config.middleware import make_app\nfrom ckan.exceptions import CkanConfigurationException\nfrom ckan.cli import (\n config_tool,\n jobs,\n front_end_build,\n db, search_index, server,\n profile,\n asset,\n sysadmin,\n translation,\n dataset,\n views,\n plugin_info,\n notify,\n tracking,\n minify,\n less,\n generate,\n user\n)\n\nfrom ckan.cli import seed\n\nlog = logging.getLogger(__name__)\n\n\nclass CkanCommand(object):\n\n def __init__(self, conf=None):\n # Don't import `load_config` by itself, rather call it using\n # module so that it can be patched during tests\n self.config = ckan_cli.load_config(conf)\n self.app = make_app(self.config)\n\n\ndef _get_commands_from_plugins(plugins):\n for plugin in plugins:\n for cmd in plugin.get_commands():\n cmd._ckan_meta = {\n u'name': plugin.name,\n u'type': u'plugin'\n }\n yield cmd\n\n\ndef _get_commands_from_entry_point(entry_point=u'ckan.click_command'):\n registered_entries = {}\n for entry in iter_entry_points(entry_point):\n if entry.name in registered_entries:\n p.toolkit.error_shout((\n u'Attempt to override entry_point `{name}`.\\n'\n u'First encounter:\\n\\t{first!r}\\n'\n u'Second encounter:\\n\\t{second!r}\\n'\n u'Either uninstall one of mentioned extensions or update'\n u' corresponding `setup.py` and re-install the extension.'\n ).format(\n name=entry.name,\n first=registered_entries[entry.name].dist,\n second=entry.dist))\n raise click.Abort()\n registered_entries[entry.name] = entry\n\n cmd = entry.load()\n cmd._ckan_meta = {\n u'name': entry.name,\n u'type': u'entry_point'\n }\n yield cmd\n\n\ndef _init_ckan_config(ctx, param, value):\n is_help = u'--help' in sys.argv\n no_config = len(sys.argv) > 1 and sys.argv[1] in (\n u'generate', u'config-tool')\n\n try:\n ctx.obj = CkanCommand(value)\n except CkanConfigurationException as e:\n # Some commands don't require the config loaded\n if no_config or is_help:\n return\n else:\n p.toolkit.error_shout(e)\n raise click.Abort()\n\n if six.PY2:\n ctx.meta[\"flask_app\"] = ctx.obj.app.apps[\"flask_app\"]._wsgi_app\n else:\n ctx.meta[\"flask_app\"] = ctx.obj.app._wsgi_app\n\n for cmd in _get_commands_from_entry_point():\n ctx.command.add_command(cmd)\n\n plugins = p.PluginImplementations(p.IClick)\n for cmd in _get_commands_from_plugins(plugins):\n ctx.command.add_command(cmd)\n\n\nclick_config_option = click.option(\n u'-c',\n u'--config',\n default=None,\n metavar=u'CONFIG',\n help=u'Config file to use (default: development.ini)',\n is_eager=True,\n callback=_init_ckan_config\n)\n\n\nclass 
CustomGroup(click.Group):\n _section_titles = {\n u'plugin': u'Plugins',\n u'entry_point': u'Entry points',\n }\n\n def format_commands(self, ctx, formatter):\n # Without any arguments click skips option callbacks.\n self.parse_args(ctx, [u'help'])\n\n commands = []\n ext_commands = defaultdict(lambda: defaultdict(list))\n\n for subcommand in self.list_commands(ctx):\n cmd = self.get_command(ctx, subcommand)\n if cmd is None:\n continue\n help = cmd.short_help or u''\n\n meta = getattr(cmd, u'_ckan_meta', None)\n if meta:\n ext_commands[meta[u'type']][meta[u'name']].append(\n (subcommand, help))\n else:\n commands.append((subcommand, help))\n\n if commands:\n with formatter.section(u'Commands'):\n formatter.write_dl(commands)\n\n for section, group in ext_commands.items():\n with formatter.section(self._section_titles.get(section, section)):\n for _ext, rows in group.items():\n formatter.write_dl(rows)\n\n\[email protected](cls=CustomGroup)\[email protected]_option(u'-h', u'--help')\n@click_config_option\ndef ckan(config, *args, **kwargs):\n pass\n\n\nckan.add_command(jobs.jobs)\nckan.add_command(config_tool.config_tool)\nckan.add_command(front_end_build.front_end_build)\nckan.add_command(server.run)\nckan.add_command(profile.profile)\nckan.add_command(seed.seed)\nckan.add_command(db.db)\nckan.add_command(search_index.search_index)\nckan.add_command(sysadmin.sysadmin)\nckan.add_command(asset.asset)\nckan.add_command(translation.translation)\nckan.add_command(dataset.dataset)\nckan.add_command(views.views)\nckan.add_command(plugin_info.plugin_info)\nckan.add_command(notify.notify)\nckan.add_command(tracking.tracking)\nckan.add_command(minify.minify)\nckan.add_command(less.less)\nckan.add_command(generate.generate)\nckan.add_command(user.user)\n", "path": "ckan/cli/cli.py"}], "after_files": [{"content": "# encoding: utf-8\n\nimport logging\nfrom collections import defaultdict\nfrom pkg_resources import iter_entry_points\n\nimport six\nimport click\nimport sys\n\nimport ckan.plugins as p\nimport ckan.cli as ckan_cli\nfrom ckan.config.middleware import make_app\nfrom ckan.exceptions import CkanConfigurationException\nfrom ckan.cli import (\n config_tool,\n jobs,\n front_end_build,\n db, search_index, server,\n profile,\n asset,\n sysadmin,\n translation,\n dataset,\n views,\n plugin_info,\n notify,\n tracking,\n minify,\n less,\n generate,\n user\n)\n\nfrom ckan.cli import seed\n\nlog = logging.getLogger(__name__)\n\n_no_config_commands = [\n [u'config-tool'],\n [u'generate', u'config'],\n [u'generate', u'extension'],\n]\n\n\nclass CkanCommand(object):\n\n def __init__(self, conf=None):\n # Don't import `load_config` by itself, rather call it using\n # module so that it can be patched during tests\n self.config = ckan_cli.load_config(conf)\n self.app = make_app(self.config)\n\n\ndef _get_commands_from_plugins(plugins):\n for plugin in plugins:\n for cmd in plugin.get_commands():\n cmd._ckan_meta = {\n u'name': plugin.name,\n u'type': u'plugin'\n }\n yield cmd\n\n\ndef _get_commands_from_entry_point(entry_point=u'ckan.click_command'):\n registered_entries = {}\n for entry in iter_entry_points(entry_point):\n if entry.name in registered_entries:\n p.toolkit.error_shout((\n u'Attempt to override entry_point `{name}`.\\n'\n u'First encounter:\\n\\t{first!r}\\n'\n u'Second encounter:\\n\\t{second!r}\\n'\n u'Either uninstall one of mentioned extensions or update'\n u' corresponding `setup.py` and re-install the extension.'\n ).format(\n name=entry.name,\n first=registered_entries[entry.name].dist,\n 
second=entry.dist))\n raise click.Abort()\n registered_entries[entry.name] = entry\n\n cmd = entry.load()\n cmd._ckan_meta = {\n u'name': entry.name,\n u'type': u'entry_point'\n }\n yield cmd\n\n\ndef _init_ckan_config(ctx, param, value):\n is_help = u'--help' in sys.argv\n no_config = False\n if len(sys.argv) > 1:\n for cmd in _no_config_commands:\n if sys.argv[1:len(cmd) + 1] == cmd:\n no_config = True\n break\n if no_config or is_help:\n return\n\n try:\n ctx.obj = CkanCommand(value)\n except CkanConfigurationException as e:\n p.toolkit.error_shout(e)\n raise click.Abort()\n\n if six.PY2:\n ctx.meta[\"flask_app\"] = ctx.obj.app.apps[\"flask_app\"]._wsgi_app\n else:\n ctx.meta[\"flask_app\"] = ctx.obj.app._wsgi_app\n\n for cmd in _get_commands_from_entry_point():\n ctx.command.add_command(cmd)\n\n plugins = p.PluginImplementations(p.IClick)\n for cmd in _get_commands_from_plugins(plugins):\n ctx.command.add_command(cmd)\n\n\nclick_config_option = click.option(\n u'-c',\n u'--config',\n default=None,\n metavar=u'CONFIG',\n help=u'Config file to use (default: development.ini)',\n is_eager=True,\n callback=_init_ckan_config\n)\n\n\nclass CustomGroup(click.Group):\n _section_titles = {\n u'plugin': u'Plugins',\n u'entry_point': u'Entry points',\n }\n\n def format_commands(self, ctx, formatter):\n # Without any arguments click skips option callbacks.\n self.parse_args(ctx, [u'help'])\n\n commands = []\n ext_commands = defaultdict(lambda: defaultdict(list))\n\n for subcommand in self.list_commands(ctx):\n cmd = self.get_command(ctx, subcommand)\n if cmd is None:\n continue\n help = cmd.short_help or u''\n\n meta = getattr(cmd, u'_ckan_meta', None)\n if meta:\n ext_commands[meta[u'type']][meta[u'name']].append(\n (subcommand, help))\n else:\n commands.append((subcommand, help))\n\n if commands:\n with formatter.section(u'Commands'):\n formatter.write_dl(commands)\n\n for section, group in ext_commands.items():\n with formatter.section(self._section_titles.get(section, section)):\n for _ext, rows in group.items():\n formatter.write_dl(rows)\n\n\[email protected](cls=CustomGroup)\[email protected]_option(u'-h', u'--help')\n@click_config_option\ndef ckan(config, *args, **kwargs):\n pass\n\n\nckan.add_command(jobs.jobs)\nckan.add_command(config_tool.config_tool)\nckan.add_command(front_end_build.front_end_build)\nckan.add_command(server.run)\nckan.add_command(profile.profile)\nckan.add_command(seed.seed)\nckan.add_command(db.db)\nckan.add_command(search_index.search_index)\nckan.add_command(sysadmin.sysadmin)\nckan.add_command(asset.asset)\nckan.add_command(translation.translation)\nckan.add_command(dataset.dataset)\nckan.add_command(views.views)\nckan.add_command(plugin_info.plugin_info)\nckan.add_command(notify.notify)\nckan.add_command(tracking.tracking)\nckan.add_command(minify.minify)\nckan.add_command(less.less)\nckan.add_command(generate.generate)\nckan.add_command(user.user)\n", "path": "ckan/cli/cli.py"}]}
2,550
359
gh_patches_debug_27107
rasdani/github-patches
git_diff
qutebrowser__qutebrowser-3034
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add small delay before updating completion A small delay before updating the completion might help to reduce load when an user is e.g. holding backspace. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `qutebrowser/completion/completer.py` Content: ``` 1 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: 2 3 # Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]> 4 # 5 # This file is part of qutebrowser. 6 # 7 # qutebrowser is free software: you can redistribute it and/or modify 8 # it under the terms of the GNU General Public License as published by 9 # the Free Software Foundation, either version 3 of the License, or 10 # (at your option) any later version. 11 # 12 # qutebrowser is distributed in the hope that it will be useful, 13 # but WITHOUT ANY WARRANTY; without even the implied warranty of 14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 # GNU General Public License for more details. 16 # 17 # You should have received a copy of the GNU General Public License 18 # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. 19 20 """Completer attached to a CompletionView.""" 21 22 import attr 23 from PyQt5.QtCore import pyqtSlot, QObject, QTimer 24 25 from qutebrowser.config import config 26 from qutebrowser.commands import cmdutils, runners 27 from qutebrowser.utils import log, utils, debug 28 from qutebrowser.completion.models import miscmodels 29 30 31 @attr.s 32 class CompletionInfo: 33 34 """Context passed into all completion functions.""" 35 36 config = attr.ib() 37 keyconf = attr.ib() 38 39 40 class Completer(QObject): 41 42 """Completer which manages completions in a CompletionView. 43 44 Attributes: 45 _cmd: The statusbar Command object this completer belongs to. 46 _ignore_change: Whether to ignore the next completion update. 47 _timer: The timer used to trigger the completion update. 48 _last_cursor_pos: The old cursor position so we avoid double completion 49 updates. 50 _last_text: The old command text so we avoid double completion updates. 51 _last_completion_func: The completion function used for the last text. 52 """ 53 54 def __init__(self, cmd, parent=None): 55 super().__init__(parent) 56 self._cmd = cmd 57 self._ignore_change = False 58 self._timer = QTimer() 59 self._timer.setSingleShot(True) 60 self._timer.setInterval(0) 61 self._timer.timeout.connect(self._update_completion) 62 self._last_cursor_pos = None 63 self._last_text = None 64 self._last_completion_func = None 65 self._cmd.update_completion.connect(self.schedule_completion_update) 66 67 def __repr__(self): 68 return utils.get_repr(self) 69 70 def _model(self): 71 """Convenience method to get the current completion model.""" 72 completion = self.parent() 73 return completion.model() 74 75 def _get_new_completion(self, before_cursor, under_cursor): 76 """Get the completion function based on the current command text. 77 78 Args: 79 before_cursor: The command chunks before the cursor. 80 under_cursor: The command chunk under the cursor. 81 82 Return: 83 A completion model. 
84 """ 85 if '--' in before_cursor or under_cursor.startswith('-'): 86 # cursor on a flag or after an explicit split (--) 87 return None 88 log.completion.debug("Before removing flags: {}".format(before_cursor)) 89 before_cursor = [x for x in before_cursor if not x.startswith('-')] 90 log.completion.debug("After removing flags: {}".format(before_cursor)) 91 if not before_cursor: 92 # '|' or 'set|' 93 log.completion.debug('Starting command completion') 94 return miscmodels.command 95 try: 96 cmd = cmdutils.cmd_dict[before_cursor[0]] 97 except KeyError: 98 log.completion.debug("No completion for unknown command: {}" 99 .format(before_cursor[0])) 100 return None 101 argpos = len(before_cursor) - 1 102 try: 103 func = cmd.get_pos_arg_info(argpos).completion 104 except IndexError: 105 log.completion.debug("No completion in position {}".format(argpos)) 106 return None 107 return func 108 109 def _quote(self, s): 110 """Quote s if it needs quoting for the commandline. 111 112 Note we don't use shlex.quote because that quotes a lot of shell 113 metachars we don't need to have quoted. 114 """ 115 if not s: 116 return "''" 117 elif any(c in s for c in ' "\'\t\n\\'): 118 # use single quotes, and put single quotes into double quotes 119 # the string $'b is then quoted as '$'"'"'b' 120 return "'" + s.replace("'", "'\"'\"'") + "'" 121 else: 122 return s 123 124 def _partition(self): 125 """Divide the commandline text into chunks around the cursor position. 126 127 Return: 128 ([parts_before_cursor], 'part_under_cursor', [parts_after_cursor]) 129 """ 130 text = self._cmd.text()[len(self._cmd.prefix()):] 131 if not text or not text.strip(): 132 # Only ":", empty part under the cursor with nothing before/after 133 return [], '', [] 134 parser = runners.CommandParser() 135 result = parser.parse(text, fallback=True, keep=True) 136 # pylint: disable=not-an-iterable 137 parts = [x for x in result.cmdline if x] 138 # pylint: enable=not-an-iterable 139 pos = self._cmd.cursorPosition() - len(self._cmd.prefix()) 140 pos = min(pos, len(text)) # Qt treats 2-byte UTF-16 chars as 2 chars 141 log.completion.debug('partitioning {} around position {}'.format(parts, 142 pos)) 143 for i, part in enumerate(parts): 144 pos -= len(part) 145 if pos <= 0: 146 if part[pos-1:pos+1].isspace(): 147 # cursor is in a space between two existing words 148 parts.insert(i, '') 149 prefix = [x.strip() for x in parts[:i]] 150 center = parts[i].strip() 151 # strip trailing whitepsace included as a separate token 152 postfix = [x.strip() for x in parts[i+1:] if not x.isspace()] 153 log.completion.debug( 154 "partitioned: {} '{}' {}".format(prefix, center, postfix)) 155 return prefix, center, postfix 156 157 # We should always return above 158 assert False, parts 159 160 @pyqtSlot(str) 161 def on_selection_changed(self, text): 162 """Change the completed part if a new item was selected. 163 164 Called from the views selectionChanged method. 165 166 Args: 167 text: Newly selected text. 168 """ 169 if text is None: 170 return 171 before, center, after = self._partition() 172 log.completion.debug("Changing {} to '{}'".format(center, text)) 173 try: 174 maxsplit = cmdutils.cmd_dict[before[0]].maxsplit 175 except (KeyError, IndexError): 176 maxsplit = None 177 if maxsplit is None: 178 text = self._quote(text) 179 model = self._model() 180 if model.count() == 1 and config.val.completion.quick: 181 # If we only have one item, we want to apply it immediately 182 # and go on to the next part. 
183 self._change_completed_part(text, before, after, immediate=True) 184 if maxsplit is not None and maxsplit < len(before): 185 # If we are quick-completing the part after maxsplit, don't 186 # keep offering completions (see issue #1519) 187 self._ignore_change = True 188 else: 189 log.completion.debug("Will ignore next completion update.") 190 self._ignore_change = True 191 self._change_completed_part(text, before, after) 192 193 @pyqtSlot() 194 def schedule_completion_update(self): 195 """Schedule updating/enabling completion. 196 197 For performance reasons we don't want to block here, instead we do this 198 in the background. 199 """ 200 if (self._cmd.cursorPosition() == self._last_cursor_pos and 201 self._cmd.text() == self._last_text): 202 log.completion.debug("Ignoring update because there were no " 203 "changes.") 204 else: 205 log.completion.debug("Scheduling completion update.") 206 self._timer.start() 207 self._last_cursor_pos = self._cmd.cursorPosition() 208 self._last_text = self._cmd.text() 209 210 @pyqtSlot() 211 def _update_completion(self): 212 """Check if completions are available and activate them.""" 213 if self._ignore_change: 214 log.completion.debug("Ignoring completion update because " 215 "ignore_change is True.") 216 self._ignore_change = False 217 return 218 219 completion = self.parent() 220 221 if self._cmd.prefix() != ':': 222 # This is a search or gibberish, so we don't need to complete 223 # anything (yet) 224 # FIXME complete searches 225 # https://github.com/qutebrowser/qutebrowser/issues/32 226 completion.set_model(None) 227 self._last_completion_func = None 228 return 229 230 before_cursor, pattern, after_cursor = self._partition() 231 232 log.completion.debug("Updating completion: {} {} {}".format( 233 before_cursor, pattern, after_cursor)) 234 235 pattern = pattern.strip("'\"") 236 func = self._get_new_completion(before_cursor, pattern) 237 238 if func is None: 239 log.completion.debug('Clearing completion') 240 completion.set_model(None) 241 self._last_completion_func = None 242 return 243 244 if func != self._last_completion_func: 245 self._last_completion_func = func 246 args = (x for x in before_cursor[1:] if not x.startswith('-')) 247 with debug.log_time(log.completion, 248 'Starting {} completion'.format(func.__name__)): 249 info = CompletionInfo(config=config.instance, 250 keyconf=config.key_instance) 251 model = func(*args, info=info) 252 with debug.log_time(log.completion, 'Set completion model'): 253 completion.set_model(model) 254 255 completion.set_pattern(pattern) 256 257 def _change_completed_part(self, newtext, before, after, immediate=False): 258 """Change the part we're currently completing in the commandline. 259 260 Args: 261 text: The text to set (string) for the token under the cursor. 262 before: Commandline tokens before the token under the cursor. 263 after: Commandline tokens after the token under the cursor. 264 immediate: True if the text should be completed immediately 265 including a trailing space and we shouldn't continue 266 completing the current item. 
267 """ 268 text = self._cmd.prefix() + ' '.join(before + [newtext]) 269 pos = len(text) + (1 if immediate else 0) 270 if after: 271 text += ' ' + ' '.join(after) 272 elif immediate: 273 # pad with a space if quick-completing the last entry 274 text += ' ' 275 log.completion.debug("setting text = '{}', pos = {}".format(text, pos)) 276 self._cmd.setText(text) 277 self._cmd.setCursorPosition(pos) 278 self._cmd.setFocus() 279 self._cmd.show_cmd.emit() 280 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/qutebrowser/completion/completer.py b/qutebrowser/completion/completer.py --- a/qutebrowser/completion/completer.py +++ b/qutebrowser/completion/completer.py @@ -196,14 +196,25 @@ For performance reasons we don't want to block here, instead we do this in the background. + + We delay the update only if we've already input some text and ignore + updates if the text is shorter than completion.min_chars (unless we're + hitting backspace in which case updates won't be ignored). """ - if (self._cmd.cursorPosition() == self._last_cursor_pos and + _cmd, _sep, rest = self._cmd.text().partition(' ') + input_length = len(rest) + if (0 < input_length < config.val.completion.min_chars and + self._cmd.cursorPosition() > self._last_cursor_pos): + log.completion.debug("Ignoring update because the length of " + "the text is less than completion.min_chars.") + elif (self._cmd.cursorPosition() == self._last_cursor_pos and self._cmd.text() == self._last_text): log.completion.debug("Ignoring update because there were no " "changes.") else: log.completion.debug("Scheduling completion update.") - self._timer.start() + start_delay = config.val.completion.delay if self._last_text else 0 + self._timer.start(start_delay) self._last_cursor_pos = self._cmd.cursorPosition() self._last_text = self._cmd.text()
{"golden_diff": "diff --git a/qutebrowser/completion/completer.py b/qutebrowser/completion/completer.py\n--- a/qutebrowser/completion/completer.py\n+++ b/qutebrowser/completion/completer.py\n@@ -196,14 +196,25 @@\n \n For performance reasons we don't want to block here, instead we do this\n in the background.\n+\n+ We delay the update only if we've already input some text and ignore\n+ updates if the text is shorter than completion.min_chars (unless we're\n+ hitting backspace in which case updates won't be ignored).\n \"\"\"\n- if (self._cmd.cursorPosition() == self._last_cursor_pos and\n+ _cmd, _sep, rest = self._cmd.text().partition(' ')\n+ input_length = len(rest)\n+ if (0 < input_length < config.val.completion.min_chars and\n+ self._cmd.cursorPosition() > self._last_cursor_pos):\n+ log.completion.debug(\"Ignoring update because the length of \"\n+ \"the text is less than completion.min_chars.\")\n+ elif (self._cmd.cursorPosition() == self._last_cursor_pos and\n self._cmd.text() == self._last_text):\n log.completion.debug(\"Ignoring update because there were no \"\n \"changes.\")\n else:\n log.completion.debug(\"Scheduling completion update.\")\n- self._timer.start()\n+ start_delay = config.val.completion.delay if self._last_text else 0\n+ self._timer.start(start_delay)\n self._last_cursor_pos = self._cmd.cursorPosition()\n self._last_text = self._cmd.text()\n", "issue": "Add small delay before updating completion\nA small delay before updating the completion might help to reduce load when an user is e.g. holding backspace.\n\n", "before_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. 
If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Completer attached to a CompletionView.\"\"\"\n\nimport attr\nfrom PyQt5.QtCore import pyqtSlot, QObject, QTimer\n\nfrom qutebrowser.config import config\nfrom qutebrowser.commands import cmdutils, runners\nfrom qutebrowser.utils import log, utils, debug\nfrom qutebrowser.completion.models import miscmodels\n\n\[email protected]\nclass CompletionInfo:\n\n \"\"\"Context passed into all completion functions.\"\"\"\n\n config = attr.ib()\n keyconf = attr.ib()\n\n\nclass Completer(QObject):\n\n \"\"\"Completer which manages completions in a CompletionView.\n\n Attributes:\n _cmd: The statusbar Command object this completer belongs to.\n _ignore_change: Whether to ignore the next completion update.\n _timer: The timer used to trigger the completion update.\n _last_cursor_pos: The old cursor position so we avoid double completion\n updates.\n _last_text: The old command text so we avoid double completion updates.\n _last_completion_func: The completion function used for the last text.\n \"\"\"\n\n def __init__(self, cmd, parent=None):\n super().__init__(parent)\n self._cmd = cmd\n self._ignore_change = False\n self._timer = QTimer()\n self._timer.setSingleShot(True)\n self._timer.setInterval(0)\n self._timer.timeout.connect(self._update_completion)\n self._last_cursor_pos = None\n self._last_text = None\n self._last_completion_func = None\n self._cmd.update_completion.connect(self.schedule_completion_update)\n\n def __repr__(self):\n return utils.get_repr(self)\n\n def _model(self):\n \"\"\"Convenience method to get the current completion model.\"\"\"\n completion = self.parent()\n return completion.model()\n\n def _get_new_completion(self, before_cursor, under_cursor):\n \"\"\"Get the completion function based on the current command text.\n\n Args:\n before_cursor: The command chunks before the cursor.\n under_cursor: The command chunk under the cursor.\n\n Return:\n A completion model.\n \"\"\"\n if '--' in before_cursor or under_cursor.startswith('-'):\n # cursor on a flag or after an explicit split (--)\n return None\n log.completion.debug(\"Before removing flags: {}\".format(before_cursor))\n before_cursor = [x for x in before_cursor if not x.startswith('-')]\n log.completion.debug(\"After removing flags: {}\".format(before_cursor))\n if not before_cursor:\n # '|' or 'set|'\n log.completion.debug('Starting command completion')\n return miscmodels.command\n try:\n cmd = cmdutils.cmd_dict[before_cursor[0]]\n except KeyError:\n log.completion.debug(\"No completion for unknown command: {}\"\n .format(before_cursor[0]))\n return None\n argpos = len(before_cursor) - 1\n try:\n func = cmd.get_pos_arg_info(argpos).completion\n except IndexError:\n log.completion.debug(\"No completion in position {}\".format(argpos))\n return None\n return func\n\n def _quote(self, s):\n \"\"\"Quote s if it needs quoting for the commandline.\n\n Note we don't use shlex.quote because that quotes a lot of shell\n metachars we don't need to have quoted.\n \"\"\"\n if not s:\n return \"''\"\n elif any(c in s for c in ' \"\\'\\t\\n\\\\'):\n # use single quotes, and put single quotes into double quotes\n # the string $'b is then quoted as '$'\"'\"'b'\n return \"'\" + s.replace(\"'\", \"'\\\"'\\\"'\") + \"'\"\n else:\n return s\n\n def _partition(self):\n \"\"\"Divide the commandline text into chunks around the cursor position.\n\n Return:\n ([parts_before_cursor], 'part_under_cursor', [parts_after_cursor])\n \"\"\"\n text = self._cmd.text()[len(self._cmd.prefix()):]\n 
if not text or not text.strip():\n # Only \":\", empty part under the cursor with nothing before/after\n return [], '', []\n parser = runners.CommandParser()\n result = parser.parse(text, fallback=True, keep=True)\n # pylint: disable=not-an-iterable\n parts = [x for x in result.cmdline if x]\n # pylint: enable=not-an-iterable\n pos = self._cmd.cursorPosition() - len(self._cmd.prefix())\n pos = min(pos, len(text)) # Qt treats 2-byte UTF-16 chars as 2 chars\n log.completion.debug('partitioning {} around position {}'.format(parts,\n pos))\n for i, part in enumerate(parts):\n pos -= len(part)\n if pos <= 0:\n if part[pos-1:pos+1].isspace():\n # cursor is in a space between two existing words\n parts.insert(i, '')\n prefix = [x.strip() for x in parts[:i]]\n center = parts[i].strip()\n # strip trailing whitepsace included as a separate token\n postfix = [x.strip() for x in parts[i+1:] if not x.isspace()]\n log.completion.debug(\n \"partitioned: {} '{}' {}\".format(prefix, center, postfix))\n return prefix, center, postfix\n\n # We should always return above\n assert False, parts\n\n @pyqtSlot(str)\n def on_selection_changed(self, text):\n \"\"\"Change the completed part if a new item was selected.\n\n Called from the views selectionChanged method.\n\n Args:\n text: Newly selected text.\n \"\"\"\n if text is None:\n return\n before, center, after = self._partition()\n log.completion.debug(\"Changing {} to '{}'\".format(center, text))\n try:\n maxsplit = cmdutils.cmd_dict[before[0]].maxsplit\n except (KeyError, IndexError):\n maxsplit = None\n if maxsplit is None:\n text = self._quote(text)\n model = self._model()\n if model.count() == 1 and config.val.completion.quick:\n # If we only have one item, we want to apply it immediately\n # and go on to the next part.\n self._change_completed_part(text, before, after, immediate=True)\n if maxsplit is not None and maxsplit < len(before):\n # If we are quick-completing the part after maxsplit, don't\n # keep offering completions (see issue #1519)\n self._ignore_change = True\n else:\n log.completion.debug(\"Will ignore next completion update.\")\n self._ignore_change = True\n self._change_completed_part(text, before, after)\n\n @pyqtSlot()\n def schedule_completion_update(self):\n \"\"\"Schedule updating/enabling completion.\n\n For performance reasons we don't want to block here, instead we do this\n in the background.\n \"\"\"\n if (self._cmd.cursorPosition() == self._last_cursor_pos and\n self._cmd.text() == self._last_text):\n log.completion.debug(\"Ignoring update because there were no \"\n \"changes.\")\n else:\n log.completion.debug(\"Scheduling completion update.\")\n self._timer.start()\n self._last_cursor_pos = self._cmd.cursorPosition()\n self._last_text = self._cmd.text()\n\n @pyqtSlot()\n def _update_completion(self):\n \"\"\"Check if completions are available and activate them.\"\"\"\n if self._ignore_change:\n log.completion.debug(\"Ignoring completion update because \"\n \"ignore_change is True.\")\n self._ignore_change = False\n return\n\n completion = self.parent()\n\n if self._cmd.prefix() != ':':\n # This is a search or gibberish, so we don't need to complete\n # anything (yet)\n # FIXME complete searches\n # https://github.com/qutebrowser/qutebrowser/issues/32\n completion.set_model(None)\n self._last_completion_func = None\n return\n\n before_cursor, pattern, after_cursor = self._partition()\n\n log.completion.debug(\"Updating completion: {} {} {}\".format(\n before_cursor, pattern, after_cursor))\n\n pattern = 
pattern.strip(\"'\\\"\")\n func = self._get_new_completion(before_cursor, pattern)\n\n if func is None:\n log.completion.debug('Clearing completion')\n completion.set_model(None)\n self._last_completion_func = None\n return\n\n if func != self._last_completion_func:\n self._last_completion_func = func\n args = (x for x in before_cursor[1:] if not x.startswith('-'))\n with debug.log_time(log.completion,\n 'Starting {} completion'.format(func.__name__)):\n info = CompletionInfo(config=config.instance,\n keyconf=config.key_instance)\n model = func(*args, info=info)\n with debug.log_time(log.completion, 'Set completion model'):\n completion.set_model(model)\n\n completion.set_pattern(pattern)\n\n def _change_completed_part(self, newtext, before, after, immediate=False):\n \"\"\"Change the part we're currently completing in the commandline.\n\n Args:\n text: The text to set (string) for the token under the cursor.\n before: Commandline tokens before the token under the cursor.\n after: Commandline tokens after the token under the cursor.\n immediate: True if the text should be completed immediately\n including a trailing space and we shouldn't continue\n completing the current item.\n \"\"\"\n text = self._cmd.prefix() + ' '.join(before + [newtext])\n pos = len(text) + (1 if immediate else 0)\n if after:\n text += ' ' + ' '.join(after)\n elif immediate:\n # pad with a space if quick-completing the last entry\n text += ' '\n log.completion.debug(\"setting text = '{}', pos = {}\".format(text, pos))\n self._cmd.setText(text)\n self._cmd.setCursorPosition(pos)\n self._cmd.setFocus()\n self._cmd.show_cmd.emit()\n", "path": "qutebrowser/completion/completer.py"}], "after_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. 
If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Completer attached to a CompletionView.\"\"\"\n\nimport attr\nfrom PyQt5.QtCore import pyqtSlot, QObject, QTimer\n\nfrom qutebrowser.config import config\nfrom qutebrowser.commands import cmdutils, runners\nfrom qutebrowser.utils import log, utils, debug\nfrom qutebrowser.completion.models import miscmodels\n\n\[email protected]\nclass CompletionInfo:\n\n \"\"\"Context passed into all completion functions.\"\"\"\n\n config = attr.ib()\n keyconf = attr.ib()\n\n\nclass Completer(QObject):\n\n \"\"\"Completer which manages completions in a CompletionView.\n\n Attributes:\n _cmd: The statusbar Command object this completer belongs to.\n _ignore_change: Whether to ignore the next completion update.\n _timer: The timer used to trigger the completion update.\n _last_cursor_pos: The old cursor position so we avoid double completion\n updates.\n _last_text: The old command text so we avoid double completion updates.\n _last_completion_func: The completion function used for the last text.\n \"\"\"\n\n def __init__(self, cmd, parent=None):\n super().__init__(parent)\n self._cmd = cmd\n self._ignore_change = False\n self._timer = QTimer()\n self._timer.setSingleShot(True)\n self._timer.setInterval(0)\n self._timer.timeout.connect(self._update_completion)\n self._last_cursor_pos = None\n self._last_text = None\n self._last_completion_func = None\n self._cmd.update_completion.connect(self.schedule_completion_update)\n\n def __repr__(self):\n return utils.get_repr(self)\n\n def _model(self):\n \"\"\"Convenience method to get the current completion model.\"\"\"\n completion = self.parent()\n return completion.model()\n\n def _get_new_completion(self, before_cursor, under_cursor):\n \"\"\"Get the completion function based on the current command text.\n\n Args:\n before_cursor: The command chunks before the cursor.\n under_cursor: The command chunk under the cursor.\n\n Return:\n A completion model.\n \"\"\"\n if '--' in before_cursor or under_cursor.startswith('-'):\n # cursor on a flag or after an explicit split (--)\n return None\n log.completion.debug(\"Before removing flags: {}\".format(before_cursor))\n before_cursor = [x for x in before_cursor if not x.startswith('-')]\n log.completion.debug(\"After removing flags: {}\".format(before_cursor))\n if not before_cursor:\n # '|' or 'set|'\n log.completion.debug('Starting command completion')\n return miscmodels.command\n try:\n cmd = cmdutils.cmd_dict[before_cursor[0]]\n except KeyError:\n log.completion.debug(\"No completion for unknown command: {}\"\n .format(before_cursor[0]))\n return None\n argpos = len(before_cursor) - 1\n try:\n func = cmd.get_pos_arg_info(argpos).completion\n except IndexError:\n log.completion.debug(\"No completion in position {}\".format(argpos))\n return None\n return func\n\n def _quote(self, s):\n \"\"\"Quote s if it needs quoting for the commandline.\n\n Note we don't use shlex.quote because that quotes a lot of shell\n metachars we don't need to have quoted.\n \"\"\"\n if not s:\n return \"''\"\n elif any(c in s for c in ' \"\\'\\t\\n\\\\'):\n # use single quotes, and put single quotes into double quotes\n # the string $'b is then quoted as '$'\"'\"'b'\n return \"'\" + s.replace(\"'\", \"'\\\"'\\\"'\") + \"'\"\n else:\n return s\n\n def _partition(self):\n \"\"\"Divide the commandline text into chunks around the cursor position.\n\n Return:\n ([parts_before_cursor], 'part_under_cursor', [parts_after_cursor])\n \"\"\"\n text = self._cmd.text()[len(self._cmd.prefix()):]\n 
if not text or not text.strip():\n # Only \":\", empty part under the cursor with nothing before/after\n return [], '', []\n parser = runners.CommandParser()\n result = parser.parse(text, fallback=True, keep=True)\n # pylint: disable=not-an-iterable\n parts = [x for x in result.cmdline if x]\n # pylint: enable=not-an-iterable\n pos = self._cmd.cursorPosition() - len(self._cmd.prefix())\n pos = min(pos, len(text)) # Qt treats 2-byte UTF-16 chars as 2 chars\n log.completion.debug('partitioning {} around position {}'.format(parts,\n pos))\n for i, part in enumerate(parts):\n pos -= len(part)\n if pos <= 0:\n if part[pos-1:pos+1].isspace():\n # cursor is in a space between two existing words\n parts.insert(i, '')\n prefix = [x.strip() for x in parts[:i]]\n center = parts[i].strip()\n # strip trailing whitepsace included as a separate token\n postfix = [x.strip() for x in parts[i+1:] if not x.isspace()]\n log.completion.debug(\n \"partitioned: {} '{}' {}\".format(prefix, center, postfix))\n return prefix, center, postfix\n\n # We should always return above\n assert False, parts\n\n @pyqtSlot(str)\n def on_selection_changed(self, text):\n \"\"\"Change the completed part if a new item was selected.\n\n Called from the views selectionChanged method.\n\n Args:\n text: Newly selected text.\n \"\"\"\n if text is None:\n return\n before, center, after = self._partition()\n log.completion.debug(\"Changing {} to '{}'\".format(center, text))\n try:\n maxsplit = cmdutils.cmd_dict[before[0]].maxsplit\n except (KeyError, IndexError):\n maxsplit = None\n if maxsplit is None:\n text = self._quote(text)\n model = self._model()\n if model.count() == 1 and config.val.completion.quick:\n # If we only have one item, we want to apply it immediately\n # and go on to the next part.\n self._change_completed_part(text, before, after, immediate=True)\n if maxsplit is not None and maxsplit < len(before):\n # If we are quick-completing the part after maxsplit, don't\n # keep offering completions (see issue #1519)\n self._ignore_change = True\n else:\n log.completion.debug(\"Will ignore next completion update.\")\n self._ignore_change = True\n self._change_completed_part(text, before, after)\n\n @pyqtSlot()\n def schedule_completion_update(self):\n \"\"\"Schedule updating/enabling completion.\n\n For performance reasons we don't want to block here, instead we do this\n in the background.\n\n We delay the update only if we've already input some text and ignore\n updates if the text is shorter than completion.min_chars (unless we're\n hitting backspace in which case updates won't be ignored).\n \"\"\"\n _cmd, _sep, rest = self._cmd.text().partition(' ')\n input_length = len(rest)\n if (0 < input_length < config.val.completion.min_chars and\n self._cmd.cursorPosition() > self._last_cursor_pos):\n log.completion.debug(\"Ignoring update because the length of \"\n \"the text is less than completion.min_chars.\")\n elif (self._cmd.cursorPosition() == self._last_cursor_pos and\n self._cmd.text() == self._last_text):\n log.completion.debug(\"Ignoring update because there were no \"\n \"changes.\")\n else:\n log.completion.debug(\"Scheduling completion update.\")\n start_delay = config.val.completion.delay if self._last_text else 0\n self._timer.start(start_delay)\n self._last_cursor_pos = self._cmd.cursorPosition()\n self._last_text = self._cmd.text()\n\n @pyqtSlot()\n def _update_completion(self):\n \"\"\"Check if completions are available and activate them.\"\"\"\n if self._ignore_change:\n log.completion.debug(\"Ignoring 
completion update because \"\n \"ignore_change is True.\")\n self._ignore_change = False\n return\n\n completion = self.parent()\n\n if self._cmd.prefix() != ':':\n # This is a search or gibberish, so we don't need to complete\n # anything (yet)\n # FIXME complete searches\n # https://github.com/qutebrowser/qutebrowser/issues/32\n completion.set_model(None)\n self._last_completion_func = None\n return\n\n before_cursor, pattern, after_cursor = self._partition()\n\n log.completion.debug(\"Updating completion: {} {} {}\".format(\n before_cursor, pattern, after_cursor))\n\n pattern = pattern.strip(\"'\\\"\")\n func = self._get_new_completion(before_cursor, pattern)\n\n if func is None:\n log.completion.debug('Clearing completion')\n completion.set_model(None)\n self._last_completion_func = None\n return\n\n if func != self._last_completion_func:\n self._last_completion_func = func\n args = (x for x in before_cursor[1:] if not x.startswith('-'))\n with debug.log_time(log.completion,\n 'Starting {} completion'.format(func.__name__)):\n info = CompletionInfo(config=config.instance,\n keyconf=config.key_instance)\n model = func(*args, info=info)\n with debug.log_time(log.completion, 'Set completion model'):\n completion.set_model(model)\n\n completion.set_pattern(pattern)\n\n def _change_completed_part(self, newtext, before, after, immediate=False):\n \"\"\"Change the part we're currently completing in the commandline.\n\n Args:\n text: The text to set (string) for the token under the cursor.\n before: Commandline tokens before the token under the cursor.\n after: Commandline tokens after the token under the cursor.\n immediate: True if the text should be completed immediately\n including a trailing space and we shouldn't continue\n completing the current item.\n \"\"\"\n text = self._cmd.prefix() + ' '.join(before + [newtext])\n pos = len(text) + (1 if immediate else 0)\n if after:\n text += ' ' + ' '.join(after)\n elif immediate:\n # pad with a space if quick-completing the last entry\n text += ' '\n log.completion.debug(\"setting text = '{}', pos = {}\".format(text, pos))\n self._cmd.setText(text)\n self._cmd.setCursorPosition(pos)\n self._cmd.setFocus()\n self._cmd.show_cmd.emit()\n", "path": "qutebrowser/completion/completer.py"}]}
3,384
360
gh_patches_debug_6937
rasdani/github-patches
git_diff
ivy-llc__ivy-22098
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- reshape_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ivy/functional/frontends/paddle/tensor/manipulation.py` Content: ``` 1 # global 2 import ivy 3 from ivy.functional.frontends.paddle.func_wrapper import ( 4 to_ivy_arrays_and_back, 5 ) 6 from ivy.func_wrapper import ( 7 with_unsupported_dtypes, 8 with_supported_dtypes, 9 with_supported_device_and_dtypes, 10 ) 11 12 13 @to_ivy_arrays_and_back 14 def reshape(x, shape): 15 return ivy.reshape(x, shape) 16 17 18 @with_unsupported_dtypes({"2.5.1 and below": ("float16", "bfloat16")}, "paddle") 19 @to_ivy_arrays_and_back 20 def abs(x, name=None): 21 return ivy.abs(x) 22 23 24 absolute = abs 25 26 27 @to_ivy_arrays_and_back 28 def stack(x, axis=0, name=None): 29 return ivy.stack(x, axis=axis) 30 31 32 @with_unsupported_dtypes({"2.5.1 and below": ("int8", "int16")}, "paddle") 33 @to_ivy_arrays_and_back 34 def concat(x, axis, name=None): 35 return ivy.concat(x, axis=axis) 36 37 38 @with_unsupported_dtypes( 39 {"2.5.1 and below": ("int8", "uint8", "int16", "float16")}, 40 "paddle", 41 ) 42 @to_ivy_arrays_and_back 43 def tile(x, repeat_times, name=None): 44 return ivy.tile(x, repeats=repeat_times) 45 46 47 @with_unsupported_dtypes( 48 {"2.5.1 and below": ("int8", "uint8", "int16", "float16")}, 49 "paddle", 50 ) 51 @to_ivy_arrays_and_back 52 def flip(x, axis, name=None): 53 return ivy.flip(x, axis=axis) 54 55 56 @with_unsupported_dtypes( 57 {"2.5.1 and below": ("int16", "complex64", "complex128")}, 58 "paddle", 59 ) 60 @to_ivy_arrays_and_back 61 def split(x, num_or_sections, axis=0, name=None): 62 return ivy.split(x, num_or_size_splits=num_or_sections, axis=axis) 63 64 65 @with_unsupported_dtypes( 66 {"2.5.1 and below": ("float16", "bfloat16", "int8", "int16")}, 67 "paddle", 68 ) 69 @to_ivy_arrays_and_back 70 def squeeze(x, axis=None, name=None): 71 return ivy.squeeze(x, axis=axis) 72 73 74 @with_supported_dtypes( 75 {"2.5.1 and below": ("bool", "float32", "float64", "int32", "int64")}, 76 "paddle", 77 ) 78 @to_ivy_arrays_and_back 79 def expand(x, shape, name=None): 80 return ivy.expand(x, shape) 81 82 83 @with_supported_dtypes( 84 { 85 "2.5.1 and below": ( 86 "bool", 87 "float16", 88 "float32", 89 "float64", 90 "int32", 91 "int64", 92 "uint8", 93 ) 94 }, 95 "paddle", 96 ) 97 @to_ivy_arrays_and_back 98 def cast(x, dtype): 99 return ivy.astype(x, dtype) 100 101 102 @with_supported_dtypes( 103 {"2.5.1 and below": ("bool", "float32", "float64", "int32", "int64")}, 104 "paddle", 105 ) 106 @to_ivy_arrays_and_back 107 def broadcast_to(x, shape, name=None): 108 return ivy.broadcast_to(x, shape) 109 110 111 @with_supported_dtypes( 112 {"2.5.1 and below": ("bool", "float32", "float64", "int32", "int64")}, 113 "paddle", 114 ) 115 @to_ivy_arrays_and_back 116 def gather(params, indices, axis=-1, batch_dims=0, name=None): 117 return ivy.gather(params, indices, axis=axis, batch_dims=batch_dims) 118 119 120 @with_supported_dtypes( 121 { 122 "2.5.0 and below": ( 123 "float32", 124 "float64", 125 "int32", 126 "int64", 127 "complex64", 128 "complex128", 129 ) 130 }, 131 "paddle", 132 ) 133 @to_ivy_arrays_and_back 134 def roll(x, shifts, axis=None, name=None): 135 return ivy.roll(x, shifts, axis=axis) 136 137 138 @with_supported_dtypes( 139 { 140 "2.5.1 and below": ( 141 "float32", 142 "float64", 143 "int32", 144 "int64", 145 ) 146 }, 147 "paddle", 148 ) 149 
@to_ivy_arrays_and_back 150 def take_along_axis(arr, indices, axis): 151 return ivy.take_along_axis(arr, indices, axis) 152 153 154 @with_supported_device_and_dtypes( 155 { 156 "2.5.1 and above": { 157 "cpu": ( 158 "bool", 159 "int32", 160 "int64", 161 "float32", 162 "float64", 163 ), 164 "gpu": ("float16",), 165 }, 166 }, 167 "paddle", 168 ) 169 @to_ivy_arrays_and_back 170 def rot90(x, k=1, axes=(0, 1), name=None): 171 return ivy.rot90(x, k=k, axes=axes) 172 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ivy/functional/frontends/paddle/tensor/manipulation.py b/ivy/functional/frontends/paddle/tensor/manipulation.py --- a/ivy/functional/frontends/paddle/tensor/manipulation.py +++ b/ivy/functional/frontends/paddle/tensor/manipulation.py @@ -15,6 +15,17 @@ return ivy.reshape(x, shape) +@with_unsupported_dtypes( + {"2.5.1 and below": ("int8", "uint8", "int16", "uint16", "float16", "bfloat16")}, + "paddle", +) +@to_ivy_arrays_and_back +def reshape_(x, shape): + ret = ivy.reshape(x, shape) + ivy.inplace_update(x, ret) + return x + + @with_unsupported_dtypes({"2.5.1 and below": ("float16", "bfloat16")}, "paddle") @to_ivy_arrays_and_back def abs(x, name=None):
{"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/manipulation.py b/ivy/functional/frontends/paddle/tensor/manipulation.py\n--- a/ivy/functional/frontends/paddle/tensor/manipulation.py\n+++ b/ivy/functional/frontends/paddle/tensor/manipulation.py\n@@ -15,6 +15,17 @@\n return ivy.reshape(x, shape)\n \n \n+@with_unsupported_dtypes(\n+ {\"2.5.1 and below\": (\"int8\", \"uint8\", \"int16\", \"uint16\", \"float16\", \"bfloat16\")},\n+ \"paddle\",\n+)\n+@to_ivy_arrays_and_back\n+def reshape_(x, shape):\n+ ret = ivy.reshape(x, shape)\n+ ivy.inplace_update(x, ret)\n+ return x\n+\n+\n @with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n @to_ivy_arrays_and_back\n def abs(x, name=None):\n", "issue": "reshape_\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\nfrom ivy.func_wrapper import (\n with_unsupported_dtypes,\n with_supported_dtypes,\n with_supported_device_and_dtypes,\n)\n\n\n@to_ivy_arrays_and_back\ndef reshape(x, shape):\n return ivy.reshape(x, shape)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef abs(x, name=None):\n return ivy.abs(x)\n\n\nabsolute = abs\n\n\n@to_ivy_arrays_and_back\ndef stack(x, axis=0, name=None):\n return ivy.stack(x, axis=axis)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"int8\", \"int16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef concat(x, axis, name=None):\n return ivy.concat(x, axis=axis)\n\n\n@with_unsupported_dtypes(\n {\"2.5.1 and below\": (\"int8\", \"uint8\", \"int16\", \"float16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef tile(x, repeat_times, name=None):\n return ivy.tile(x, repeats=repeat_times)\n\n\n@with_unsupported_dtypes(\n {\"2.5.1 and below\": (\"int8\", \"uint8\", \"int16\", \"float16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef flip(x, axis, name=None):\n return ivy.flip(x, axis=axis)\n\n\n@with_unsupported_dtypes(\n {\"2.5.1 and below\": (\"int16\", \"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef split(x, num_or_sections, axis=0, name=None):\n return ivy.split(x, num_or_size_splits=num_or_sections, axis=axis)\n\n\n@with_unsupported_dtypes(\n {\"2.5.1 and below\": (\"float16\", \"bfloat16\", \"int8\", \"int16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef squeeze(x, axis=None, name=None):\n return ivy.squeeze(x, axis=axis)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"bool\", \"float32\", \"float64\", \"int32\", \"int64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef expand(x, shape, name=None):\n return ivy.expand(x, shape)\n\n\n@with_supported_dtypes(\n {\n \"2.5.1 and below\": (\n \"bool\",\n \"float16\",\n \"float32\",\n \"float64\",\n \"int32\",\n \"int64\",\n \"uint8\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef cast(x, dtype):\n return ivy.astype(x, dtype)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"bool\", \"float32\", \"float64\", \"int32\", \"int64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef broadcast_to(x, shape, name=None):\n return ivy.broadcast_to(x, shape)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"bool\", \"float32\", \"float64\", \"int32\", \"int64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef gather(params, indices, axis=-1, batch_dims=0, name=None):\n return ivy.gather(params, indices, axis=axis, batch_dims=batch_dims)\n\n\n@with_supported_dtypes(\n {\n \"2.5.0 and below\": (\n 
\"float32\",\n \"float64\",\n \"int32\",\n \"int64\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef roll(x, shifts, axis=None, name=None):\n return ivy.roll(x, shifts, axis=axis)\n\n\n@with_supported_dtypes(\n {\n \"2.5.1 and below\": (\n \"float32\",\n \"float64\",\n \"int32\",\n \"int64\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef take_along_axis(arr, indices, axis):\n return ivy.take_along_axis(arr, indices, axis)\n\n\n@with_supported_device_and_dtypes(\n {\n \"2.5.1 and above\": {\n \"cpu\": (\n \"bool\",\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n ),\n \"gpu\": (\"float16\",),\n },\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef rot90(x, k=1, axes=(0, 1), name=None):\n return ivy.rot90(x, k=k, axes=axes)\n", "path": "ivy/functional/frontends/paddle/tensor/manipulation.py"}], "after_files": [{"content": "# global\nimport ivy\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\nfrom ivy.func_wrapper import (\n with_unsupported_dtypes,\n with_supported_dtypes,\n with_supported_device_and_dtypes,\n)\n\n\n@to_ivy_arrays_and_back\ndef reshape(x, shape):\n return ivy.reshape(x, shape)\n\n\n@with_unsupported_dtypes(\n {\"2.5.1 and below\": (\"int8\", \"uint8\", \"int16\", \"uint16\", \"float16\", \"bfloat16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef reshape_(x, shape):\n ret = ivy.reshape(x, shape)\n ivy.inplace_update(x, ret)\n return x\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef abs(x, name=None):\n return ivy.abs(x)\n\n\nabsolute = abs\n\n\n@to_ivy_arrays_and_back\ndef stack(x, axis=0, name=None):\n return ivy.stack(x, axis=axis)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"int8\", \"int16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef concat(x, axis, name=None):\n return ivy.concat(x, axis=axis)\n\n\n@with_unsupported_dtypes(\n {\"2.5.1 and below\": (\"int8\", \"uint8\", \"int16\", \"float16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef tile(x, repeat_times, name=None):\n return ivy.tile(x, repeats=repeat_times)\n\n\n@with_unsupported_dtypes(\n {\"2.5.1 and below\": (\"int8\", \"uint8\", \"int16\", \"float16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef flip(x, axis, name=None):\n return ivy.flip(x, axis=axis)\n\n\n@with_unsupported_dtypes(\n {\"2.5.1 and below\": (\"int16\", \"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef split(x, num_or_sections, axis=0, name=None):\n return ivy.split(x, num_or_size_splits=num_or_sections, axis=axis)\n\n\n@with_unsupported_dtypes(\n {\"2.5.1 and below\": (\"float16\", \"bfloat16\", \"int8\", \"int16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef squeeze(x, axis=None, name=None):\n return ivy.squeeze(x, axis=axis)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"bool\", \"float32\", \"float64\", \"int32\", \"int64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef expand(x, shape, name=None):\n return ivy.expand(x, shape)\n\n\n@with_supported_dtypes(\n {\n \"2.5.1 and below\": (\n \"bool\",\n \"float16\",\n \"float32\",\n \"float64\",\n \"int32\",\n \"int64\",\n \"uint8\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef cast(x, dtype):\n return ivy.astype(x, dtype)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"bool\", \"float32\", \"float64\", \"int32\", \"int64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef broadcast_to(x, shape, name=None):\n return 
ivy.broadcast_to(x, shape)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"bool\", \"float32\", \"float64\", \"int32\", \"int64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef gather(params, indices, axis=-1, batch_dims=0, name=None):\n return ivy.gather(params, indices, axis=axis, batch_dims=batch_dims)\n\n\n@with_supported_dtypes(\n {\n \"2.5.0 and below\": (\n \"float32\",\n \"float64\",\n \"int32\",\n \"int64\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef roll(x, shifts, axis=None, name=None):\n return ivy.roll(x, shifts, axis=axis)\n\n\n@with_supported_dtypes(\n {\n \"2.5.1 and below\": (\n \"float32\",\n \"float64\",\n \"int32\",\n \"int64\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef take_along_axis(arr, indices, axis):\n return ivy.take_along_axis(arr, indices, axis)\n\n\n@with_supported_device_and_dtypes(\n {\n \"2.5.1 and above\": {\n \"cpu\": (\n \"bool\",\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n ),\n \"gpu\": (\"float16\",),\n },\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef rot90(x, k=1, axes=(0, 1), name=None):\n return ivy.rot90(x, k=k, axes=axes)\n", "path": "ivy/functional/frontends/paddle/tensor/manipulation.py"}]}
1,857
239
gh_patches_debug_23414
rasdani/github-patches
git_diff
gammapy__gammapy-2558
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Log scale and grids This issue is triggered by https://github.com/gammapy/gammapy/issues/2270#issue-464031097 - there's a test fail with Numpy dev - the issue is that we are making an `nbins` float here: https://github.com/gammapy/gammapy/blob/7a19c4b3b3e82831c6c683db9799a1d803cf928f/gammapy/utils/energy.py#L34 @adonath - Could you please fix this? I'm not sure if the proper fix is to round up or down to the nearest int. There's other options as well, e.g. we could just remove the `per_decade` argument or that helper function completely and use this: https://github.com/gammapy/gammapy/blob/76ab409e6bee09436127ce67e11de1ad5b2d7bb0/gammapy/maps/geom.py#L479 Or introduce an equivalent of `numpy.arange` - Numpy offered a separate function for the case where the caller wants a stepsize instead of a given number of points - maybe we should do that as well to be symmetric, and to keep support for setting up grids of a desired step size easily. Finally, there's the option to attache the grid-making functionality to the existing scale classes here and to use those throughout Gammapy: https://github.com/gammapy/gammapy/blob/5a24919d91a9aa601f7a1ee3d677cf4bebfc6b3b/gammapy/utils/interpolation.py#L138 I think https://observablehq.com/@d3/scale-ticks and also matplotlib have the feature to make grids or ticks attached to the scales. Here in Astropy this wasn't needed/added so far http://docs.astropy.org/en/stable/visualization/normalization.html In principle having energy-specific scale and grid functions for log isn't needed or good design, on the other hand it's the only case (currently, but later?) where we need a log scale, so just fixing up the existing helper function is not too bad. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `gammapy/utils/nddata.py` Content: ``` 1 # Licensed under a 3-clause BSD style license - see LICENSE.rst 2 """Utility functions and classes for n-dimensional data and axes.""" 3 import numpy as np 4 from astropy.units import Quantity 5 from .array import array_stats_str 6 from .interpolation import ScaledRegularGridInterpolator 7 8 __all__ = ["NDDataArray", "sqrt_space"] 9 10 11 class NDDataArray: 12 """ND Data Array Base class 13 14 Parameters 15 ---------- 16 axes : list 17 List of `~gammapy.utils.nddata.DataAxis` 18 data : `~astropy.units.Quantity` 19 Data 20 meta : dict 21 Meta info 22 interp_kwargs : dict 23 TODO 24 """ 25 26 default_interp_kwargs = dict(bounds_error=False, values_scale="lin") 27 """Default interpolation kwargs used to initialize the 28 `scipy.interpolate.RegularGridInterpolator`. 
The interpolation behaviour 29 of an individual axis ('log', 'linear') can be passed to the axis on 30 initialization.""" 31 32 def __init__(self, axes, data=None, meta=None, interp_kwargs=None): 33 self._axes = axes 34 if data is not None: 35 self.data = data 36 self.meta = meta or {} 37 self.interp_kwargs = interp_kwargs or self.default_interp_kwargs 38 39 self._regular_grid_interp = None 40 41 def __str__(self): 42 ss = "NDDataArray summary info\n" 43 for axis in self.axes: 44 ss += str(axis) 45 ss += array_stats_str(self.data, "Data") 46 return ss 47 48 @property 49 def axes(self): 50 """Array holding the axes in correct order""" 51 return self._axes 52 53 def axis(self, name): 54 """Return axis by name""" 55 try: 56 idx = [_.name for _ in self.axes].index(name) 57 except ValueError: 58 raise ValueError(f"Axis {name} not found") 59 return self.axes[idx] 60 61 @property 62 def data(self): 63 """Array holding the n-dimensional data.""" 64 return self._data 65 66 @data.setter 67 def data(self, data): 68 """Set data. 69 70 Some sanity checks are performed to avoid an invalid array. 71 Also, the interpolator is set to None to avoid unwanted behaviour. 72 73 Parameters 74 ---------- 75 data : `~astropy.units.Quantity`, array-like 76 Data array 77 """ 78 data = Quantity(data) 79 dimension = len(data.shape) 80 if dimension != self.dim: 81 raise ValueError( 82 "Overall dimensions to not match. " 83 "Data: {}, Hist: {}".format(dimension, self.dim) 84 ) 85 86 for dim in np.arange(self.dim): 87 axis = self.axes[dim] 88 if axis.nbin != data.shape[dim]: 89 msg = "Data shape does not match in dimension {d}\n" 90 msg += "Axis {n} : {sa}, Data {sd}" 91 raise ValueError( 92 msg.format(d=dim, n=axis.name, sa=axis.nbin, sd=data.shape[dim]) 93 ) 94 self._regular_grid_interp = None 95 self._data = data 96 97 @property 98 def dim(self): 99 """Dimension (number of axes)""" 100 return len(self.axes) 101 102 def evaluate(self, method=None, **kwargs): 103 """Evaluate NDData Array 104 105 This function provides a uniform interface to several interpolators. 106 The evaluation nodes are given as ``kwargs``. 107 108 Currently available: 109 `~scipy.interpolate.RegularGridInterpolator`, methods: linear, nearest 110 111 Parameters 112 ---------- 113 method : str {'linear', 'nearest'}, optional 114 Interpolation method 115 kwargs : dict 116 Keys are the axis names, Values the evaluation points 117 118 Returns 119 ------- 120 array : `~astropy.units.Quantity` 121 Interpolated values, axis order is the same as for the NDData array 122 """ 123 values = [] 124 for idx, axis in enumerate(self.axes): 125 # Extract values for each axis, default: nodes 126 shape = [1] * len(self.axes) 127 shape[idx] = -1 128 default = axis.center.reshape(tuple(shape)) 129 temp = Quantity(kwargs.pop(axis.name, default)) 130 values.append(np.atleast_1d(temp)) 131 132 # This is to catch e.g. 
typos in axis names 133 if kwargs != {}: 134 raise ValueError(f"Input given for unknown axis: {kwargs}") 135 136 if self._regular_grid_interp is None: 137 self._add_regular_grid_interp() 138 139 return self._regular_grid_interp(values, method=method, **kwargs) 140 141 def _add_regular_grid_interp(self, interp_kwargs=None): 142 """Add `~scipy.interpolate.RegularGridInterpolator` 143 144 http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.interpolate.RegularGridInterpolator.html 145 146 Parameters 147 ---------- 148 interp_kwargs : dict, optional 149 Interpolation kwargs 150 """ 151 if interp_kwargs is None: 152 interp_kwargs = self.interp_kwargs 153 154 points = [a.center for a in self.axes] 155 points_scale = [a.interp for a in self.axes] 156 self._regular_grid_interp = ScaledRegularGridInterpolator( 157 points, self.data, points_scale=points_scale, **interp_kwargs 158 ) 159 160 161 def sqrt_space(start, stop, num): 162 """Return numbers spaced evenly on a square root scale. 163 164 This function is similar to `numpy.linspace` and `numpy.logspace`. 165 166 Parameters 167 ---------- 168 start : float 169 start is the starting value of the sequence 170 stop : float 171 stop is the final value of the sequence 172 num : int 173 Number of samples to generate. 174 175 Returns 176 ------- 177 samples : `~numpy.ndarray` 178 1D array with a square root scale 179 180 Examples 181 -------- 182 >>> from gammapy.utils.nddata import sqrt_space 183 >>> sqrt_space(0, 2, 5) 184 array([0. , 1. , 1.41421356, 1.73205081, 2. ]) 185 """ 186 return np.sqrt(np.linspace(start ** 2, stop ** 2, num)) 187 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/gammapy/utils/nddata.py b/gammapy/utils/nddata.py --- a/gammapy/utils/nddata.py +++ b/gammapy/utils/nddata.py @@ -5,7 +5,7 @@ from .array import array_stats_str from .interpolation import ScaledRegularGridInterpolator -__all__ = ["NDDataArray", "sqrt_space"] +__all__ = ["NDDataArray"] class NDDataArray: @@ -156,31 +156,3 @@ self._regular_grid_interp = ScaledRegularGridInterpolator( points, self.data, points_scale=points_scale, **interp_kwargs ) - - -def sqrt_space(start, stop, num): - """Return numbers spaced evenly on a square root scale. - - This function is similar to `numpy.linspace` and `numpy.logspace`. - - Parameters - ---------- - start : float - start is the starting value of the sequence - stop : float - stop is the final value of the sequence - num : int - Number of samples to generate. - - Returns - ------- - samples : `~numpy.ndarray` - 1D array with a square root scale - - Examples - -------- - >>> from gammapy.utils.nddata import sqrt_space - >>> sqrt_space(0, 2, 5) - array([0. , 1. , 1.41421356, 1.73205081, 2. ]) - """ - return np.sqrt(np.linspace(start ** 2, stop ** 2, num))
{"golden_diff": "diff --git a/gammapy/utils/nddata.py b/gammapy/utils/nddata.py\n--- a/gammapy/utils/nddata.py\n+++ b/gammapy/utils/nddata.py\n@@ -5,7 +5,7 @@\n from .array import array_stats_str\n from .interpolation import ScaledRegularGridInterpolator\n \n-__all__ = [\"NDDataArray\", \"sqrt_space\"]\n+__all__ = [\"NDDataArray\"]\n \n \n class NDDataArray:\n@@ -156,31 +156,3 @@\n self._regular_grid_interp = ScaledRegularGridInterpolator(\n points, self.data, points_scale=points_scale, **interp_kwargs\n )\n-\n-\n-def sqrt_space(start, stop, num):\n- \"\"\"Return numbers spaced evenly on a square root scale.\n-\n- This function is similar to `numpy.linspace` and `numpy.logspace`.\n-\n- Parameters\n- ----------\n- start : float\n- start is the starting value of the sequence\n- stop : float\n- stop is the final value of the sequence\n- num : int\n- Number of samples to generate.\n-\n- Returns\n- -------\n- samples : `~numpy.ndarray`\n- 1D array with a square root scale\n-\n- Examples\n- --------\n- >>> from gammapy.utils.nddata import sqrt_space\n- >>> sqrt_space(0, 2, 5)\n- array([0. , 1. , 1.41421356, 1.73205081, 2. ])\n- \"\"\"\n- return np.sqrt(np.linspace(start ** 2, stop ** 2, num))\n", "issue": "Log scale and grids\nThis issue is triggered by https://github.com/gammapy/gammapy/issues/2270#issue-464031097 - there's a test fail with Numpy dev - the issue is that we are making an `nbins` float here:\r\n\r\nhttps://github.com/gammapy/gammapy/blob/7a19c4b3b3e82831c6c683db9799a1d803cf928f/gammapy/utils/energy.py#L34\r\n\r\n@adonath - Could you please fix this? I'm not sure if the proper fix is to round up or down to the nearest int.\r\n\r\nThere's other options as well, e.g. we could just remove the `per_decade` argument or that helper function completely and use this:\r\nhttps://github.com/gammapy/gammapy/blob/76ab409e6bee09436127ce67e11de1ad5b2d7bb0/gammapy/maps/geom.py#L479\r\n\r\nOr introduce an equivalent of `numpy.arange` - Numpy offered a separate function for the case where the caller wants a stepsize instead of a given number of points - maybe we should do that as well to be symmetric, and to keep support for setting up grids of a desired step size easily.\r\n\r\nFinally, there's the option to attache the grid-making functionality to the existing scale classes here and to use those throughout Gammapy:\r\nhttps://github.com/gammapy/gammapy/blob/5a24919d91a9aa601f7a1ee3d677cf4bebfc6b3b/gammapy/utils/interpolation.py#L138\r\n\r\nI think https://observablehq.com/@d3/scale-ticks and also matplotlib have the feature to make grids or ticks attached to the scales. Here in Astropy this wasn't needed/added so far http://docs.astropy.org/en/stable/visualization/normalization.html\r\n\r\nIn principle having energy-specific scale and grid functions for log isn't needed or good design, on the other hand it's the only case (currently, but later?) 
where we need a log scale, so just fixing up the existing helper function is not too bad.\n", "before_files": [{"content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"Utility functions and classes for n-dimensional data and axes.\"\"\"\nimport numpy as np\nfrom astropy.units import Quantity\nfrom .array import array_stats_str\nfrom .interpolation import ScaledRegularGridInterpolator\n\n__all__ = [\"NDDataArray\", \"sqrt_space\"]\n\n\nclass NDDataArray:\n \"\"\"ND Data Array Base class\n\n Parameters\n ----------\n axes : list\n List of `~gammapy.utils.nddata.DataAxis`\n data : `~astropy.units.Quantity`\n Data\n meta : dict\n Meta info\n interp_kwargs : dict\n TODO\n \"\"\"\n\n default_interp_kwargs = dict(bounds_error=False, values_scale=\"lin\")\n \"\"\"Default interpolation kwargs used to initialize the\n `scipy.interpolate.RegularGridInterpolator`. The interpolation behaviour\n of an individual axis ('log', 'linear') can be passed to the axis on\n initialization.\"\"\"\n\n def __init__(self, axes, data=None, meta=None, interp_kwargs=None):\n self._axes = axes\n if data is not None:\n self.data = data\n self.meta = meta or {}\n self.interp_kwargs = interp_kwargs or self.default_interp_kwargs\n\n self._regular_grid_interp = None\n\n def __str__(self):\n ss = \"NDDataArray summary info\\n\"\n for axis in self.axes:\n ss += str(axis)\n ss += array_stats_str(self.data, \"Data\")\n return ss\n\n @property\n def axes(self):\n \"\"\"Array holding the axes in correct order\"\"\"\n return self._axes\n\n def axis(self, name):\n \"\"\"Return axis by name\"\"\"\n try:\n idx = [_.name for _ in self.axes].index(name)\n except ValueError:\n raise ValueError(f\"Axis {name} not found\")\n return self.axes[idx]\n\n @property\n def data(self):\n \"\"\"Array holding the n-dimensional data.\"\"\"\n return self._data\n\n @data.setter\n def data(self, data):\n \"\"\"Set data.\n\n Some sanity checks are performed to avoid an invalid array.\n Also, the interpolator is set to None to avoid unwanted behaviour.\n\n Parameters\n ----------\n data : `~astropy.units.Quantity`, array-like\n Data array\n \"\"\"\n data = Quantity(data)\n dimension = len(data.shape)\n if dimension != self.dim:\n raise ValueError(\n \"Overall dimensions to not match. 
\"\n \"Data: {}, Hist: {}\".format(dimension, self.dim)\n )\n\n for dim in np.arange(self.dim):\n axis = self.axes[dim]\n if axis.nbin != data.shape[dim]:\n msg = \"Data shape does not match in dimension {d}\\n\"\n msg += \"Axis {n} : {sa}, Data {sd}\"\n raise ValueError(\n msg.format(d=dim, n=axis.name, sa=axis.nbin, sd=data.shape[dim])\n )\n self._regular_grid_interp = None\n self._data = data\n\n @property\n def dim(self):\n \"\"\"Dimension (number of axes)\"\"\"\n return len(self.axes)\n\n def evaluate(self, method=None, **kwargs):\n \"\"\"Evaluate NDData Array\n\n This function provides a uniform interface to several interpolators.\n The evaluation nodes are given as ``kwargs``.\n\n Currently available:\n `~scipy.interpolate.RegularGridInterpolator`, methods: linear, nearest\n\n Parameters\n ----------\n method : str {'linear', 'nearest'}, optional\n Interpolation method\n kwargs : dict\n Keys are the axis names, Values the evaluation points\n\n Returns\n -------\n array : `~astropy.units.Quantity`\n Interpolated values, axis order is the same as for the NDData array\n \"\"\"\n values = []\n for idx, axis in enumerate(self.axes):\n # Extract values for each axis, default: nodes\n shape = [1] * len(self.axes)\n shape[idx] = -1\n default = axis.center.reshape(tuple(shape))\n temp = Quantity(kwargs.pop(axis.name, default))\n values.append(np.atleast_1d(temp))\n\n # This is to catch e.g. typos in axis names\n if kwargs != {}:\n raise ValueError(f\"Input given for unknown axis: {kwargs}\")\n\n if self._regular_grid_interp is None:\n self._add_regular_grid_interp()\n\n return self._regular_grid_interp(values, method=method, **kwargs)\n\n def _add_regular_grid_interp(self, interp_kwargs=None):\n \"\"\"Add `~scipy.interpolate.RegularGridInterpolator`\n\n http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.interpolate.RegularGridInterpolator.html\n\n Parameters\n ----------\n interp_kwargs : dict, optional\n Interpolation kwargs\n \"\"\"\n if interp_kwargs is None:\n interp_kwargs = self.interp_kwargs\n\n points = [a.center for a in self.axes]\n points_scale = [a.interp for a in self.axes]\n self._regular_grid_interp = ScaledRegularGridInterpolator(\n points, self.data, points_scale=points_scale, **interp_kwargs\n )\n\n\ndef sqrt_space(start, stop, num):\n \"\"\"Return numbers spaced evenly on a square root scale.\n\n This function is similar to `numpy.linspace` and `numpy.logspace`.\n\n Parameters\n ----------\n start : float\n start is the starting value of the sequence\n stop : float\n stop is the final value of the sequence\n num : int\n Number of samples to generate.\n\n Returns\n -------\n samples : `~numpy.ndarray`\n 1D array with a square root scale\n\n Examples\n --------\n >>> from gammapy.utils.nddata import sqrt_space\n >>> sqrt_space(0, 2, 5)\n array([0. , 1. , 1.41421356, 1.73205081, 2. 
])\n \"\"\"\n return np.sqrt(np.linspace(start ** 2, stop ** 2, num))\n", "path": "gammapy/utils/nddata.py"}], "after_files": [{"content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"Utility functions and classes for n-dimensional data and axes.\"\"\"\nimport numpy as np\nfrom astropy.units import Quantity\nfrom .array import array_stats_str\nfrom .interpolation import ScaledRegularGridInterpolator\n\n__all__ = [\"NDDataArray\"]\n\n\nclass NDDataArray:\n \"\"\"ND Data Array Base class\n\n Parameters\n ----------\n axes : list\n List of `~gammapy.utils.nddata.DataAxis`\n data : `~astropy.units.Quantity`\n Data\n meta : dict\n Meta info\n interp_kwargs : dict\n TODO\n \"\"\"\n\n default_interp_kwargs = dict(bounds_error=False, values_scale=\"lin\")\n \"\"\"Default interpolation kwargs used to initialize the\n `scipy.interpolate.RegularGridInterpolator`. The interpolation behaviour\n of an individual axis ('log', 'linear') can be passed to the axis on\n initialization.\"\"\"\n\n def __init__(self, axes, data=None, meta=None, interp_kwargs=None):\n self._axes = axes\n if data is not None:\n self.data = data\n self.meta = meta or {}\n self.interp_kwargs = interp_kwargs or self.default_interp_kwargs\n\n self._regular_grid_interp = None\n\n def __str__(self):\n ss = \"NDDataArray summary info\\n\"\n for axis in self.axes:\n ss += str(axis)\n ss += array_stats_str(self.data, \"Data\")\n return ss\n\n @property\n def axes(self):\n \"\"\"Array holding the axes in correct order\"\"\"\n return self._axes\n\n def axis(self, name):\n \"\"\"Return axis by name\"\"\"\n try:\n idx = [_.name for _ in self.axes].index(name)\n except ValueError:\n raise ValueError(f\"Axis {name} not found\")\n return self.axes[idx]\n\n @property\n def data(self):\n \"\"\"Array holding the n-dimensional data.\"\"\"\n return self._data\n\n @data.setter\n def data(self, data):\n \"\"\"Set data.\n\n Some sanity checks are performed to avoid an invalid array.\n Also, the interpolator is set to None to avoid unwanted behaviour.\n\n Parameters\n ----------\n data : `~astropy.units.Quantity`, array-like\n Data array\n \"\"\"\n data = Quantity(data)\n dimension = len(data.shape)\n if dimension != self.dim:\n raise ValueError(\n \"Overall dimensions to not match. 
\"\n \"Data: {}, Hist: {}\".format(dimension, self.dim)\n )\n\n for dim in np.arange(self.dim):\n axis = self.axes[dim]\n if axis.nbin != data.shape[dim]:\n msg = \"Data shape does not match in dimension {d}\\n\"\n msg += \"Axis {n} : {sa}, Data {sd}\"\n raise ValueError(\n msg.format(d=dim, n=axis.name, sa=axis.nbin, sd=data.shape[dim])\n )\n self._regular_grid_interp = None\n self._data = data\n\n @property\n def dim(self):\n \"\"\"Dimension (number of axes)\"\"\"\n return len(self.axes)\n\n def evaluate(self, method=None, **kwargs):\n \"\"\"Evaluate NDData Array\n\n This function provides a uniform interface to several interpolators.\n The evaluation nodes are given as ``kwargs``.\n\n Currently available:\n `~scipy.interpolate.RegularGridInterpolator`, methods: linear, nearest\n\n Parameters\n ----------\n method : str {'linear', 'nearest'}, optional\n Interpolation method\n kwargs : dict\n Keys are the axis names, Values the evaluation points\n\n Returns\n -------\n array : `~astropy.units.Quantity`\n Interpolated values, axis order is the same as for the NDData array\n \"\"\"\n values = []\n for idx, axis in enumerate(self.axes):\n # Extract values for each axis, default: nodes\n shape = [1] * len(self.axes)\n shape[idx] = -1\n default = axis.center.reshape(tuple(shape))\n temp = Quantity(kwargs.pop(axis.name, default))\n values.append(np.atleast_1d(temp))\n\n # This is to catch e.g. typos in axis names\n if kwargs != {}:\n raise ValueError(f\"Input given for unknown axis: {kwargs}\")\n\n if self._regular_grid_interp is None:\n self._add_regular_grid_interp()\n\n return self._regular_grid_interp(values, method=method, **kwargs)\n\n def _add_regular_grid_interp(self, interp_kwargs=None):\n \"\"\"Add `~scipy.interpolate.RegularGridInterpolator`\n\n http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.interpolate.RegularGridInterpolator.html\n\n Parameters\n ----------\n interp_kwargs : dict, optional\n Interpolation kwargs\n \"\"\"\n if interp_kwargs is None:\n interp_kwargs = self.interp_kwargs\n\n points = [a.center for a in self.axes]\n points_scale = [a.interp for a in self.axes]\n self._regular_grid_interp = ScaledRegularGridInterpolator(\n points, self.data, points_scale=points_scale, **interp_kwargs\n )\n", "path": "gammapy/utils/nddata.py"}]}
2,557
380
gh_patches_debug_3735
rasdani/github-patches
git_diff
spacetelescope__jwql-167
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- logging_functions should write logs from dev & test VMs to dev/ Currently `logging_functions.py` will write new log files to the `dev/` directory if the script being executed is not being executed from the admin account. This should be extended to write to `dev/` when the script is run from personal accounts from the dev or test VMs. In other words, all logs should be written to `dev/` unless they are being run from the admin account on one of the production VMs. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `jwql/utils/logging_functions.py` Content: ``` 1 2 """ Logging functions for the ``jwql`` automation platform. 3 4 This module provides decorators to log the execution of modules. Log 5 files are written to the ``logs/`` directory in the ``jwql`` central 6 storage area, named by module name and timestamp, e.g. 7 ``monitor_filesystem/monitor_filesystem_2018-06-20-15:22:51.log`` 8 9 10 Authors 11 ------- 12 13 - Catherine Martlin 2018 14 - Alex Viana, 2013 (WFC3 QL Version) 15 16 Use 17 --- 18 19 To log the execution of a module, use: 20 :: 21 22 import os 23 import logging 24 25 from jwql.logging.logging_functions import configure_logging 26 from jwql.logging.logging_functions import log_info 27 from jwql.logging.logging_functions import log_fail 28 29 @log_info 30 @log_fail 31 def my_main_function(): 32 pass 33 34 if __name__ == '__main__': 35 36 module = os.path.basename(__file__).replace('.py', '') 37 configure_logging(module) 38 39 my_main_function() 40 41 Dependencies 42 ------------ 43 44 The user must have a configuration file named ``config.json`` 45 placed in the ``utils`` directory. 46 47 48 References 49 ---------- 50 This code is adopted and updated from python routine 51 ``logging_functions.py`` written by Alex Viana, 2013 for the WFC3 52 Quicklook automation platform. 53 """ 54 55 import datetime 56 import getpass 57 import importlib 58 import logging 59 import os 60 import pwd 61 import socket 62 import sys 63 import time 64 import traceback 65 66 from functools import wraps 67 68 from jwql.utils.permissions import set_permissions 69 from jwql.utils.utils import get_config, ensure_dir_exists 70 71 LOG_FILE_LOC = '' 72 PRODUCTION_BOOL = '' 73 74 75 def configure_logging(module, production_mode=True, path='./'): 76 """Configure the log file with a standard logging format. 77 78 Parameters 79 ---------- 80 module : str 81 The name of the module being logged. 82 production_mode : bool 83 Whether or not the output should be written to the production 84 environement. 85 path : str 86 Where to write the log if user-supplied path; default to working dir. 87 """ 88 89 # Determine log file location 90 if production_mode: 91 log_file = make_log_file(module) 92 else: 93 log_file = make_log_file(module, production_mode=False, path=path) 94 global LOG_FILE_LOC 95 global PRODUCTION_BOOL 96 LOG_FILE_LOC = log_file 97 PRODUCTION_BOOL = production_mode 98 99 # Create the log file and set the permissions 100 logging.basicConfig(filename=log_file, 101 format='%(asctime)s %(levelname)s: %(message)s', 102 datefmt='%m/%d/%Y %H:%M:%S %p', 103 level=logging.INFO) 104 set_permissions(log_file) 105 106 107 def make_log_file(module, production_mode=True, path='./'): 108 """Create the log file name based on the module name. 
109 110 The name of the ``log_file`` is a combination of the name of the 111 module being logged and the current datetime. 112 113 Parameters 114 ---------- 115 module : str 116 The name of the module being logged. 117 production_mode : bool 118 Whether or not the output should be written to the production 119 environment. 120 path : str 121 Where to write the log if user-supplied path; default to 122 working dir. 123 124 Returns 125 ------- 126 log_file : str 127 The full path to where the log file will be written to. 128 """ 129 130 timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M') 131 filename = '{0}_{1}.log'.format(module, timestamp) 132 user = pwd.getpwuid(os.getuid()).pw_name 133 134 settings = get_config() 135 admin_account = settings['admin_account'] 136 log_path = settings['log_dir'] 137 138 exempt_modules = [] 139 if user != admin_account and module not in exempt_modules and production_mode: 140 module = os.path.join('dev', module) 141 142 if production_mode: 143 log_file = os.path.join(log_path, module, filename) 144 else: 145 log_file = os.path.join(path, filename) 146 147 ensure_dir_exists(os.path.dirname(log_file)) 148 149 return log_file 150 151 152 def log_info(func): 153 """Decorator to log useful system information. 154 155 This function can be used as a decorator to log user environment 156 and system information. Future packages we want to track can be 157 added or removed as necessary. 158 159 Parameters 160 ---------- 161 func : func 162 The function to decorate. 163 164 Returns 165 ------- 166 wrapped : func 167 The wrapped function. 168 """ 169 170 @wraps(func) 171 def wrapped(*a, **kw): 172 173 # Log environment information 174 logging.info('User: ' + getpass.getuser()) 175 logging.info('System: ' + socket.gethostname()) 176 logging.info('Python Version: ' + sys.version.replace('\n', '')) 177 logging.info('Python Executable Path: ' + sys.executable) 178 179 # Read in setup.py file to build list of required modules 180 settings = get_config() 181 setup_file_name = settings['setup_file'] 182 with open(setup_file_name) as setup: 183 for line in setup: 184 if line[0:8] == "REQUIRES": 185 module_required = line[12:-2] 186 module_list = module_required.split(',') 187 188 # Clean up the module list 189 module_list = [module.replace('"', '').replace("'", '').replace(' ', '') for module in module_list] 190 module_list = [module.split('=')[0] for module in module_list] 191 192 # Log common module version information 193 for module in module_list: 194 try: 195 mod = importlib.import_module(module) 196 logging.info(module + ' Version: ' + mod.__version__) 197 logging.info(module + ' Path: ' + mod.__path__[0]) 198 except ImportError as err: 199 logging.warning(err) 200 201 # Call the function and time it 202 t1_cpu = time.clock() 203 t1_time = time.time() 204 func(*a, **kw) 205 t2_cpu = time.clock() 206 t2_time = time.time() 207 208 # Log execution time 209 hours_cpu, remainder_cpu = divmod(t2_cpu - t1_cpu, 60 * 60) 210 minutes_cpu, seconds_cpu = divmod(remainder_cpu, 60) 211 hours_time, remainder_time = divmod(t2_time - t1_time, 60 * 60) 212 minutes_time, seconds_time = divmod(remainder_time, 60) 213 logging.info('Elapsed Real Time: {0:.0f}:{1:.0f}:{2:f}'.format(hours_time, minutes_time, seconds_time)) 214 logging.info('Elapsed CPU Time: {0:.0f}:{1:.0f}:{2:f}'.format(hours_cpu, minutes_cpu, seconds_cpu)) 215 216 return wrapped 217 218 219 def log_fail(func): 220 """Decorator to log crashes in the decorated code. 
221 222 Parameters 223 ---------- 224 func : func 225 The function to decorate. 226 227 Returns 228 ------- 229 wrapped : func 230 The wrapped function. 231 """ 232 233 @wraps(func) 234 def wrapped(*a, **kw): 235 236 try: 237 238 # Run the function 239 func(*a, **kw) 240 logging.info('Completed Successfully') 241 242 except Exception: 243 logging.critical(traceback.format_exc()) 244 logging.critical('CRASHED') 245 246 return wrapped 247 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/jwql/utils/logging_functions.py b/jwql/utils/logging_functions.py --- a/jwql/utils/logging_functions.py +++ b/jwql/utils/logging_functions.py @@ -135,8 +135,7 @@ admin_account = settings['admin_account'] log_path = settings['log_dir'] - exempt_modules = [] - if user != admin_account and module not in exempt_modules and production_mode: + if user != admin_account or not production_mode: module = os.path.join('dev', module) if production_mode:
{"golden_diff": "diff --git a/jwql/utils/logging_functions.py b/jwql/utils/logging_functions.py\n--- a/jwql/utils/logging_functions.py\n+++ b/jwql/utils/logging_functions.py\n@@ -135,8 +135,7 @@\n admin_account = settings['admin_account']\n log_path = settings['log_dir']\n \n- exempt_modules = []\n- if user != admin_account and module not in exempt_modules and production_mode:\n+ if user != admin_account or not production_mode:\n module = os.path.join('dev', module)\n \n if production_mode:\n", "issue": "logging_functions should write logs from dev & test VMs to dev/\nCurrently `logging_functions.py` will write new log files to the `dev/` directory if the script being executed is not being executed from the admin account. This should be extended to write to `dev/` when the script is run from personal accounts from the dev or test VMs. \r\n\r\nIn other words, all logs should be written to `dev/` unless they are being run from the admin account on one of the production VMs. \n", "before_files": [{"content": "\n\"\"\" Logging functions for the ``jwql`` automation platform.\n\nThis module provides decorators to log the execution of modules. Log\nfiles are written to the ``logs/`` directory in the ``jwql`` central\nstorage area, named by module name and timestamp, e.g.\n``monitor_filesystem/monitor_filesystem_2018-06-20-15:22:51.log``\n\n\nAuthors\n-------\n\n - Catherine Martlin 2018\n - Alex Viana, 2013 (WFC3 QL Version)\n\nUse\n---\n\n To log the execution of a module, use:\n ::\n\n import os\n import logging\n\n from jwql.logging.logging_functions import configure_logging\n from jwql.logging.logging_functions import log_info\n from jwql.logging.logging_functions import log_fail\n\n @log_info\n @log_fail\n def my_main_function():\n pass\n\n if __name__ == '__main__':\n\n module = os.path.basename(__file__).replace('.py', '')\n configure_logging(module)\n\n my_main_function()\n\nDependencies\n------------\n\n The user must have a configuration file named ``config.json``\n placed in the ``utils`` directory.\n\n\nReferences\n----------\n This code is adopted and updated from python routine\n ``logging_functions.py`` written by Alex Viana, 2013 for the WFC3\n Quicklook automation platform.\n\"\"\"\n\nimport datetime\nimport getpass\nimport importlib\nimport logging\nimport os\nimport pwd\nimport socket\nimport sys\nimport time\nimport traceback\n\nfrom functools import wraps\n\nfrom jwql.utils.permissions import set_permissions\nfrom jwql.utils.utils import get_config, ensure_dir_exists\n\nLOG_FILE_LOC = ''\nPRODUCTION_BOOL = ''\n\n\ndef configure_logging(module, production_mode=True, path='./'):\n \"\"\"Configure the log file with a standard logging format.\n\n Parameters\n ----------\n module : str\n The name of the module being logged.\n production_mode : bool\n Whether or not the output should be written to the production\n environement.\n path : str\n Where to write the log if user-supplied path; default to working dir.\n \"\"\"\n\n # Determine log file location\n if production_mode:\n log_file = make_log_file(module)\n else:\n log_file = make_log_file(module, production_mode=False, path=path)\n global LOG_FILE_LOC\n global PRODUCTION_BOOL\n LOG_FILE_LOC = log_file\n PRODUCTION_BOOL = production_mode\n\n # Create the log file and set the permissions\n logging.basicConfig(filename=log_file,\n format='%(asctime)s %(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S %p',\n level=logging.INFO)\n set_permissions(log_file)\n\n\ndef make_log_file(module, production_mode=True, path='./'):\n 
\"\"\"Create the log file name based on the module name.\n\n The name of the ``log_file`` is a combination of the name of the\n module being logged and the current datetime.\n\n Parameters\n ----------\n module : str\n The name of the module being logged.\n production_mode : bool\n Whether or not the output should be written to the production\n environment.\n path : str\n Where to write the log if user-supplied path; default to\n working dir.\n\n Returns\n -------\n log_file : str\n The full path to where the log file will be written to.\n \"\"\"\n\n timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M')\n filename = '{0}_{1}.log'.format(module, timestamp)\n user = pwd.getpwuid(os.getuid()).pw_name\n\n settings = get_config()\n admin_account = settings['admin_account']\n log_path = settings['log_dir']\n\n exempt_modules = []\n if user != admin_account and module not in exempt_modules and production_mode:\n module = os.path.join('dev', module)\n\n if production_mode:\n log_file = os.path.join(log_path, module, filename)\n else:\n log_file = os.path.join(path, filename)\n\n ensure_dir_exists(os.path.dirname(log_file))\n\n return log_file\n\n\ndef log_info(func):\n \"\"\"Decorator to log useful system information.\n\n This function can be used as a decorator to log user environment\n and system information. Future packages we want to track can be\n added or removed as necessary.\n\n Parameters\n ----------\n func : func\n The function to decorate.\n\n Returns\n -------\n wrapped : func\n The wrapped function.\n \"\"\"\n\n @wraps(func)\n def wrapped(*a, **kw):\n\n # Log environment information\n logging.info('User: ' + getpass.getuser())\n logging.info('System: ' + socket.gethostname())\n logging.info('Python Version: ' + sys.version.replace('\\n', ''))\n logging.info('Python Executable Path: ' + sys.executable)\n\n # Read in setup.py file to build list of required modules\n settings = get_config()\n setup_file_name = settings['setup_file']\n with open(setup_file_name) as setup:\n for line in setup:\n if line[0:8] == \"REQUIRES\":\n module_required = line[12:-2]\n module_list = module_required.split(',')\n\n # Clean up the module list\n module_list = [module.replace('\"', '').replace(\"'\", '').replace(' ', '') for module in module_list]\n module_list = [module.split('=')[0] for module in module_list]\n\n # Log common module version information\n for module in module_list:\n try:\n mod = importlib.import_module(module)\n logging.info(module + ' Version: ' + mod.__version__)\n logging.info(module + ' Path: ' + mod.__path__[0])\n except ImportError as err:\n logging.warning(err)\n\n # Call the function and time it\n t1_cpu = time.clock()\n t1_time = time.time()\n func(*a, **kw)\n t2_cpu = time.clock()\n t2_time = time.time()\n\n # Log execution time\n hours_cpu, remainder_cpu = divmod(t2_cpu - t1_cpu, 60 * 60)\n minutes_cpu, seconds_cpu = divmod(remainder_cpu, 60)\n hours_time, remainder_time = divmod(t2_time - t1_time, 60 * 60)\n minutes_time, seconds_time = divmod(remainder_time, 60)\n logging.info('Elapsed Real Time: {0:.0f}:{1:.0f}:{2:f}'.format(hours_time, minutes_time, seconds_time))\n logging.info('Elapsed CPU Time: {0:.0f}:{1:.0f}:{2:f}'.format(hours_cpu, minutes_cpu, seconds_cpu))\n\n return wrapped\n\n\ndef log_fail(func):\n \"\"\"Decorator to log crashes in the decorated code.\n\n Parameters\n ----------\n func : func\n The function to decorate.\n\n Returns\n -------\n wrapped : func\n The wrapped function.\n \"\"\"\n\n @wraps(func)\n def wrapped(*a, **kw):\n\n try:\n\n # Run 
the function\n func(*a, **kw)\n logging.info('Completed Successfully')\n\n except Exception:\n logging.critical(traceback.format_exc())\n logging.critical('CRASHED')\n\n return wrapped\n", "path": "jwql/utils/logging_functions.py"}], "after_files": [{"content": "\n\"\"\" Logging functions for the ``jwql`` automation platform.\n\nThis module provides decorators to log the execution of modules. Log\nfiles are written to the ``logs/`` directory in the ``jwql`` central\nstorage area, named by module name and timestamp, e.g.\n``monitor_filesystem/monitor_filesystem_2018-06-20-15:22:51.log``\n\n\nAuthors\n-------\n\n - Catherine Martlin 2018\n - Alex Viana, 2013 (WFC3 QL Version)\n\nUse\n---\n\n To log the execution of a module, use:\n ::\n\n import os\n import logging\n\n from jwql.logging.logging_functions import configure_logging\n from jwql.logging.logging_functions import log_info\n from jwql.logging.logging_functions import log_fail\n\n @log_info\n @log_fail\n def my_main_function():\n pass\n\n if __name__ == '__main__':\n\n module = os.path.basename(__file__).replace('.py', '')\n configure_logging(module)\n\n my_main_function()\n\nDependencies\n------------\n\n The user must have a configuration file named ``config.json``\n placed in the ``utils`` directory.\n\n\nReferences\n----------\n This code is adopted and updated from python routine\n ``logging_functions.py`` written by Alex Viana, 2013 for the WFC3\n Quicklook automation platform.\n\"\"\"\n\nimport datetime\nimport getpass\nimport importlib\nimport logging\nimport os\nimport pwd\nimport socket\nimport sys\nimport time\nimport traceback\n\nfrom functools import wraps\n\nfrom jwql.utils.permissions import set_permissions\nfrom jwql.utils.utils import get_config, ensure_dir_exists\n\nLOG_FILE_LOC = ''\nPRODUCTION_BOOL = ''\n\n\ndef configure_logging(module, production_mode=True, path='./'):\n \"\"\"Configure the log file with a standard logging format.\n\n Parameters\n ----------\n module : str\n The name of the module being logged.\n production_mode : bool\n Whether or not the output should be written to the production\n environement.\n path : str\n Where to write the log if user-supplied path; default to working dir.\n \"\"\"\n\n # Determine log file location\n if production_mode:\n log_file = make_log_file(module)\n else:\n log_file = make_log_file(module, production_mode=False, path=path)\n global LOG_FILE_LOC\n global PRODUCTION_BOOL\n LOG_FILE_LOC = log_file\n PRODUCTION_BOOL = production_mode\n\n # Create the log file and set the permissions\n logging.basicConfig(filename=log_file,\n format='%(asctime)s %(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S %p',\n level=logging.INFO)\n set_permissions(log_file)\n\n\ndef make_log_file(module, production_mode=True, path='./'):\n \"\"\"Create the log file name based on the module name.\n\n The name of the ``log_file`` is a combination of the name of the\n module being logged and the current datetime.\n\n Parameters\n ----------\n module : str\n The name of the module being logged.\n production_mode : bool\n Whether or not the output should be written to the production\n environment.\n path : str\n Where to write the log if user-supplied path; default to\n working dir.\n\n Returns\n -------\n log_file : str\n The full path to where the log file will be written to.\n \"\"\"\n\n timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M')\n filename = '{0}_{1}.log'.format(module, timestamp)\n user = pwd.getpwuid(os.getuid()).pw_name\n\n settings = get_config()\n admin_account = 
settings['admin_account']\n log_path = settings['log_dir']\n\n if user != admin_account or not production_mode:\n module = os.path.join('dev', module)\n\n if production_mode:\n log_file = os.path.join(log_path, module, filename)\n else:\n log_file = os.path.join(path, filename)\n\n ensure_dir_exists(os.path.dirname(log_file))\n\n return log_file\n\n\ndef log_info(func):\n \"\"\"Decorator to log useful system information.\n\n This function can be used as a decorator to log user environment\n and system information. Future packages we want to track can be\n added or removed as necessary.\n\n Parameters\n ----------\n func : func\n The function to decorate.\n\n Returns\n -------\n wrapped : func\n The wrapped function.\n \"\"\"\n\n @wraps(func)\n def wrapped(*a, **kw):\n\n # Log environment information\n logging.info('User: ' + getpass.getuser())\n logging.info('System: ' + socket.gethostname())\n logging.info('Python Version: ' + sys.version.replace('\\n', ''))\n logging.info('Python Executable Path: ' + sys.executable)\n\n # Read in setup.py file to build list of required modules\n settings = get_config()\n setup_file_name = settings['setup_file']\n with open(setup_file_name) as setup:\n for line in setup:\n if line[0:8] == \"REQUIRES\":\n module_required = line[12:-2]\n module_list = module_required.split(',')\n\n # Clean up the module list\n module_list = [module.replace('\"', '').replace(\"'\", '').replace(' ', '') for module in module_list]\n module_list = [module.split('=')[0] for module in module_list]\n\n # Log common module version information\n for module in module_list:\n try:\n mod = importlib.import_module(module)\n logging.info(module + ' Version: ' + mod.__version__)\n logging.info(module + ' Path: ' + mod.__path__[0])\n except ImportError as err:\n logging.warning(err)\n\n # Call the function and time it\n t1_cpu = time.clock()\n t1_time = time.time()\n func(*a, **kw)\n t2_cpu = time.clock()\n t2_time = time.time()\n\n # Log execution time\n hours_cpu, remainder_cpu = divmod(t2_cpu - t1_cpu, 60 * 60)\n minutes_cpu, seconds_cpu = divmod(remainder_cpu, 60)\n hours_time, remainder_time = divmod(t2_time - t1_time, 60 * 60)\n minutes_time, seconds_time = divmod(remainder_time, 60)\n logging.info('Elapsed Real Time: {0:.0f}:{1:.0f}:{2:f}'.format(hours_time, minutes_time, seconds_time))\n logging.info('Elapsed CPU Time: {0:.0f}:{1:.0f}:{2:f}'.format(hours_cpu, minutes_cpu, seconds_cpu))\n\n return wrapped\n\n\ndef log_fail(func):\n \"\"\"Decorator to log crashes in the decorated code.\n\n Parameters\n ----------\n func : func\n The function to decorate.\n\n Returns\n -------\n wrapped : func\n The wrapped function.\n \"\"\"\n\n @wraps(func)\n def wrapped(*a, **kw):\n\n try:\n\n # Run the function\n func(*a, **kw)\n logging.info('Completed Successfully')\n\n except Exception:\n logging.critical(traceback.format_exc())\n logging.critical('CRASHED')\n\n return wrapped\n", "path": "jwql/utils/logging_functions.py"}]}
2,627
125
gh_patches_debug_36890
rasdani/github-patches
git_diff
bokeh__bokeh-4021
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Allow for the injection of raw HTML code Currently, the Widget library contains a `Paragraph` and a `PreText` widget allowing the user to put basic text on the rendered page. Neither of these widgets allows for the inclusion of formatted text using HTML markup. A widget should be added to support the inclusion of raw HTML. The widget can be a simple div named div tag. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bokeh/models/widgets/markups.py` Content: ``` 1 """ Various kinds of markup (static content) widgets. 2 3 """ 4 from __future__ import absolute_import 5 6 from ...core.properties import abstract 7 from ...core.properties import Int, String 8 from .widget import Widget 9 10 @abstract 11 class Markup(Widget): 12 """ Base class for HTML markup widget models. """ 13 14 class Paragraph(Markup): 15 """ A block (paragraph) of text. 16 17 """ 18 19 text = String(default="", help=""" 20 The contents of the widget. 21 """) 22 23 width = Int(500, help=""" 24 The width of the block in pixels. 25 """) 26 27 height = Int(400, help=""" 28 The height of the block in pixels. 29 """) 30 31 32 class PreText(Paragraph): 33 """ A block (paragraph) of pre-formatted text. 34 35 """ 36 ``` Path: `examples/plotting/file/slider_callback_policy.py` Content: ``` 1 from bokeh.io import vform, output_file, show 2 from bokeh.models import CustomJS, Slider, Paragraph, PreText 3 4 # NOTE: the JS functions to forvide the format code for strings is found the answer 5 # from the user fearphage at http://stackoverflow.com/questions/610406/javascript-equivalent-to-printf-string-format 6 callback = CustomJS(code=""" 7 var s1 = slider1.get('value') 8 var s2 = slider2.get('value') 9 var s3 = slider3.get('value') 10 11 if (!String.prototype.format) { 12 String.prototype.format = function() { 13 var args = arguments; 14 return this.replace(/{(\d+)}/g, function(match, number) { 15 return typeof args[number] != 'undefined' 16 ? args[number] 17 : match 18 ; 19 }); 20 }; 21 } 22 23 para.set('text', "Slider Values\\n\\n Slider 1: {0}\\n Slider 2: {1}\\n Slider 3: {2}".format(s1, s2, s3)) 24 """) 25 26 para = PreText(text = "Slider Values:\n\n Slider 1: 0\n Slider 2: 0\n Slider 3: 0", width = 200, height = 150) 27 28 s1 = Slider(title="Slider 1 (Continuous)", start=0, end=1000, value=0, step=1, callback=callback, callback_policy="continuous") 29 s2 = Slider(title="Slider 2 (Throttle)", start=0, end=1000, value=0, step=1, callback=callback, callback_policy="throttle", callback_throttle=2000) 30 s3 = Slider(title="Slider 3 (Mouse Up)", start=0, end=1000, value=0, step=1, callback=callback, callback_policy="mouseup") 31 32 callback.args['para'] = para 33 callback.args['slider1'] = s1 34 callback.args['slider2'] = s2 35 callback.args['slider3'] = s3 36 37 output_file('slider_callback_policy.html') 38 39 show(vform(s1, s2, s3, para)) 40 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bokeh/models/widgets/markups.py b/bokeh/models/widgets/markups.py --- a/bokeh/models/widgets/markups.py +++ b/bokeh/models/widgets/markups.py @@ -4,18 +4,13 @@ from __future__ import absolute_import from ...core.properties import abstract -from ...core.properties import Int, String +from ...core.properties import Int, String, Bool from .widget import Widget @abstract class Markup(Widget): """ Base class for HTML markup widget models. """ -class Paragraph(Markup): - """ A block (paragraph) of text. - - """ - text = String(default="", help=""" The contents of the widget. """) @@ -29,6 +24,20 @@ """) +class Paragraph(Markup): + """ A block (paragraph) of text. + + """ + +class Div(Markup): + """ A block (div) of text. + + """ + + render_as_text = Bool(False, help=""" + Should the text be rendered as raw text (False, default), or should the text be interprited as an HTML string (True) + """) + class PreText(Paragraph): """ A block (paragraph) of pre-formatted text. diff --git a/examples/plotting/file/slider_callback_policy.py b/examples/plotting/file/slider_callback_policy.py --- a/examples/plotting/file/slider_callback_policy.py +++ b/examples/plotting/file/slider_callback_policy.py @@ -1,5 +1,5 @@ from bokeh.io import vform, output_file, show -from bokeh.models import CustomJS, Slider, Paragraph, PreText +from bokeh.models import CustomJS, Slider, Div # NOTE: the JS functions to forvide the format code for strings is found the answer # from the user fearphage at http://stackoverflow.com/questions/610406/javascript-equivalent-to-printf-string-format @@ -20,10 +20,10 @@ }; } - para.set('text', "Slider Values\\n\\n Slider 1: {0}\\n Slider 2: {1}\\n Slider 3: {2}".format(s1, s2, s3)) + para.set('text', "<h1>Slider Values</h1><p>Slider 1: {0}<p>Slider 2: {1}<p>Slider 3: {2}".format(s1, s2, s3)) """) -para = PreText(text = "Slider Values:\n\n Slider 1: 0\n Slider 2: 0\n Slider 3: 0", width = 200, height = 150) +para = Div(text="<h1>Slider Values:</h1><p>Slider 1: 0<p>Slider 2: 0<p>Slider 3: 0", width=200, height=150, render_as_text=False) s1 = Slider(title="Slider 1 (Continuous)", start=0, end=1000, value=0, step=1, callback=callback, callback_policy="continuous") s2 = Slider(title="Slider 2 (Throttle)", start=0, end=1000, value=0, step=1, callback=callback, callback_policy="throttle", callback_throttle=2000)
{"golden_diff": "diff --git a/bokeh/models/widgets/markups.py b/bokeh/models/widgets/markups.py\n--- a/bokeh/models/widgets/markups.py\n+++ b/bokeh/models/widgets/markups.py\n@@ -4,18 +4,13 @@\n from __future__ import absolute_import\n \n from ...core.properties import abstract\n-from ...core.properties import Int, String\n+from ...core.properties import Int, String, Bool\n from .widget import Widget\n \n @abstract\n class Markup(Widget):\n \"\"\" Base class for HTML markup widget models. \"\"\"\n \n-class Paragraph(Markup):\n- \"\"\" A block (paragraph) of text.\n-\n- \"\"\"\n-\n text = String(default=\"\", help=\"\"\"\n The contents of the widget.\n \"\"\")\n@@ -29,6 +24,20 @@\n \"\"\")\n \n \n+class Paragraph(Markup):\n+ \"\"\" A block (paragraph) of text.\n+\n+ \"\"\"\n+\n+class Div(Markup):\n+ \"\"\" A block (div) of text.\n+\n+ \"\"\"\n+\n+ render_as_text = Bool(False, help=\"\"\"\n+ Should the text be rendered as raw text (False, default), or should the text be interprited as an HTML string (True)\n+ \"\"\")\n+\n class PreText(Paragraph):\n \"\"\" A block (paragraph) of pre-formatted text.\n \ndiff --git a/examples/plotting/file/slider_callback_policy.py b/examples/plotting/file/slider_callback_policy.py\n--- a/examples/plotting/file/slider_callback_policy.py\n+++ b/examples/plotting/file/slider_callback_policy.py\n@@ -1,5 +1,5 @@\n from bokeh.io import vform, output_file, show\n-from bokeh.models import CustomJS, Slider, Paragraph, PreText\n+from bokeh.models import CustomJS, Slider, Div\n \n # NOTE: the JS functions to forvide the format code for strings is found the answer\n # from the user fearphage at http://stackoverflow.com/questions/610406/javascript-equivalent-to-printf-string-format\n@@ -20,10 +20,10 @@\n };\n }\n \n- para.set('text', \"Slider Values\\\\n\\\\n Slider 1: {0}\\\\n Slider 2: {1}\\\\n Slider 3: {2}\".format(s1, s2, s3))\n+ para.set('text', \"<h1>Slider Values</h1><p>Slider 1: {0}<p>Slider 2: {1}<p>Slider 3: {2}\".format(s1, s2, s3))\n \"\"\")\n \n-para = PreText(text = \"Slider Values:\\n\\n Slider 1: 0\\n Slider 2: 0\\n Slider 3: 0\", width = 200, height = 150)\n+para = Div(text=\"<h1>Slider Values:</h1><p>Slider 1: 0<p>Slider 2: 0<p>Slider 3: 0\", width=200, height=150, render_as_text=False)\n \n s1 = Slider(title=\"Slider 1 (Continuous)\", start=0, end=1000, value=0, step=1, callback=callback, callback_policy=\"continuous\")\n s2 = Slider(title=\"Slider 2 (Throttle)\", start=0, end=1000, value=0, step=1, callback=callback, callback_policy=\"throttle\", callback_throttle=2000)\n", "issue": "Allow for the injection of raw HTML code\nCurrently, the Widget library contains a `Paragraph` and a `PreText` widget allowing the user to put basic text on the rendered page. Neither of these widgets allows for the inclusion of formatted text using HTML markup. A widget should be added to support the inclusion of raw HTML. The widget can be a simple div named div tag.\n\n", "before_files": [{"content": "\"\"\" Various kinds of markup (static content) widgets.\n\n\"\"\"\nfrom __future__ import absolute_import\n\nfrom ...core.properties import abstract\nfrom ...core.properties import Int, String\nfrom .widget import Widget\n\n@abstract\nclass Markup(Widget):\n \"\"\" Base class for HTML markup widget models. 
\"\"\"\n\nclass Paragraph(Markup):\n \"\"\" A block (paragraph) of text.\n\n \"\"\"\n\n text = String(default=\"\", help=\"\"\"\n The contents of the widget.\n \"\"\")\n\n width = Int(500, help=\"\"\"\n The width of the block in pixels.\n \"\"\")\n\n height = Int(400, help=\"\"\"\n The height of the block in pixels.\n \"\"\")\n\n\nclass PreText(Paragraph):\n \"\"\" A block (paragraph) of pre-formatted text.\n\n \"\"\"\n", "path": "bokeh/models/widgets/markups.py"}, {"content": "from bokeh.io import vform, output_file, show\nfrom bokeh.models import CustomJS, Slider, Paragraph, PreText\n\n# NOTE: the JS functions to forvide the format code for strings is found the answer\n# from the user fearphage at http://stackoverflow.com/questions/610406/javascript-equivalent-to-printf-string-format\ncallback = CustomJS(code=\"\"\"\n var s1 = slider1.get('value')\n var s2 = slider2.get('value')\n var s3 = slider3.get('value')\n\n if (!String.prototype.format) {\n String.prototype.format = function() {\n var args = arguments;\n return this.replace(/{(\\d+)}/g, function(match, number) {\n return typeof args[number] != 'undefined'\n ? args[number]\n : match\n ;\n });\n };\n }\n\n para.set('text', \"Slider Values\\\\n\\\\n Slider 1: {0}\\\\n Slider 2: {1}\\\\n Slider 3: {2}\".format(s1, s2, s3))\n\"\"\")\n\npara = PreText(text = \"Slider Values:\\n\\n Slider 1: 0\\n Slider 2: 0\\n Slider 3: 0\", width = 200, height = 150)\n\ns1 = Slider(title=\"Slider 1 (Continuous)\", start=0, end=1000, value=0, step=1, callback=callback, callback_policy=\"continuous\")\ns2 = Slider(title=\"Slider 2 (Throttle)\", start=0, end=1000, value=0, step=1, callback=callback, callback_policy=\"throttle\", callback_throttle=2000)\ns3 = Slider(title=\"Slider 3 (Mouse Up)\", start=0, end=1000, value=0, step=1, callback=callback, callback_policy=\"mouseup\")\n\ncallback.args['para'] = para\ncallback.args['slider1'] = s1\ncallback.args['slider2'] = s2\ncallback.args['slider3'] = s3\n\noutput_file('slider_callback_policy.html')\n\nshow(vform(s1, s2, s3, para))\n", "path": "examples/plotting/file/slider_callback_policy.py"}], "after_files": [{"content": "\"\"\" Various kinds of markup (static content) widgets.\n\n\"\"\"\nfrom __future__ import absolute_import\n\nfrom ...core.properties import abstract\nfrom ...core.properties import Int, String, Bool\nfrom .widget import Widget\n\n@abstract\nclass Markup(Widget):\n \"\"\" Base class for HTML markup widget models. 
\"\"\"\n\n text = String(default=\"\", help=\"\"\"\n The contents of the widget.\n \"\"\")\n\n width = Int(500, help=\"\"\"\n The width of the block in pixels.\n \"\"\")\n\n height = Int(400, help=\"\"\"\n The height of the block in pixels.\n \"\"\")\n\n\nclass Paragraph(Markup):\n \"\"\" A block (paragraph) of text.\n\n \"\"\"\n\nclass Div(Markup):\n \"\"\" A block (div) of text.\n\n \"\"\"\n\n render_as_text = Bool(False, help=\"\"\"\n Should the text be rendered as raw text (False, default), or should the text be interprited as an HTML string (True)\n \"\"\")\n\nclass PreText(Paragraph):\n \"\"\" A block (paragraph) of pre-formatted text.\n\n \"\"\"\n", "path": "bokeh/models/widgets/markups.py"}, {"content": "from bokeh.io import vform, output_file, show\nfrom bokeh.models import CustomJS, Slider, Div\n\n# NOTE: the JS functions to forvide the format code for strings is found the answer\n# from the user fearphage at http://stackoverflow.com/questions/610406/javascript-equivalent-to-printf-string-format\ncallback = CustomJS(code=\"\"\"\n var s1 = slider1.get('value')\n var s2 = slider2.get('value')\n var s3 = slider3.get('value')\n\n if (!String.prototype.format) {\n String.prototype.format = function() {\n var args = arguments;\n return this.replace(/{(\\d+)}/g, function(match, number) {\n return typeof args[number] != 'undefined'\n ? args[number]\n : match\n ;\n });\n };\n }\n\n para.set('text', \"<h1>Slider Values</h1><p>Slider 1: {0}<p>Slider 2: {1}<p>Slider 3: {2}\".format(s1, s2, s3))\n\"\"\")\n\npara = Div(text=\"<h1>Slider Values:</h1><p>Slider 1: 0<p>Slider 2: 0<p>Slider 3: 0\", width=200, height=150, render_as_text=False)\n\ns1 = Slider(title=\"Slider 1 (Continuous)\", start=0, end=1000, value=0, step=1, callback=callback, callback_policy=\"continuous\")\ns2 = Slider(title=\"Slider 2 (Throttle)\", start=0, end=1000, value=0, step=1, callback=callback, callback_policy=\"throttle\", callback_throttle=2000)\ns3 = Slider(title=\"Slider 3 (Mouse Up)\", start=0, end=1000, value=0, step=1, callback=callback, callback_policy=\"mouseup\")\n\ncallback.args['para'] = para\ncallback.args['slider1'] = s1\ncallback.args['slider2'] = s2\ncallback.args['slider3'] = s3\n\noutput_file('slider_callback_policy.html')\n\nshow(vform(s1, s2, s3, para))\n", "path": "examples/plotting/file/slider_callback_policy.py"}]}
1,157
762
gh_patches_debug_14897
rasdani/github-patches
git_diff
qtile__qtile-3099
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Widget for updatable image I don't want to reinvent the wheel, so will check before. I use GenPollText for my keyboard layout indicator but instead I want to see the flag (image). As I change layout the image should be changed. Do Qtile has such widget or proper way to do that? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `libqtile/widget/image.py` Content: ``` 1 # Copyright (c) 2013 dequis 2 # Copyright (c) 2014 Sean Vig 3 # Copyright (c) 2014 Adi Sieker 4 # 5 # Permission is hereby granted, free of charge, to any person obtaining a copy 6 # of this software and associated documentation files (the "Software"), to deal 7 # in the Software without restriction, including without limitation the rights 8 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 # copies of the Software, and to permit persons to whom the Software is 10 # furnished to do so, subject to the following conditions: 11 # 12 # The above copyright notice and this permission notice shall be included in 13 # all copies or substantial portions of the Software. 14 # 15 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 # SOFTWARE. 22 import os 23 24 from libqtile import bar 25 from libqtile.images import Img 26 from libqtile.log_utils import logger 27 from libqtile.widget import base 28 29 30 class Image(base._Widget, base.MarginMixin): 31 """Display a PNG image on the bar""" 32 orientations = base.ORIENTATION_BOTH 33 defaults = [ 34 ("scale", True, "Enable/Disable image scaling"), 35 ("rotate", 0.0, "rotate the image in degrees counter-clockwise"), 36 ("filename", None, "Image filename. 
Can contain '~'"), 37 ] 38 39 def __init__(self, length=bar.CALCULATED, **config): 40 base._Widget.__init__(self, length, **config) 41 self.add_defaults(Image.defaults) 42 self.add_defaults(base.MarginMixin.defaults) 43 44 # make the default 0 instead 45 self._variable_defaults["margin"] = 0 46 47 def _configure(self, qtile, bar): 48 base._Widget._configure(self, qtile, bar) 49 self.img = None 50 51 if not self.filename: 52 logger.warning("Image filename not set!") 53 return 54 55 self.filename = os.path.expanduser(self.filename) 56 57 if not os.path.exists(self.filename): 58 logger.warning("Image does not exist: {}".format(self.filename)) 59 return 60 61 img = Img.from_path(self.filename) 62 self.img = img 63 img.theta = self.rotate 64 if not self.scale: 65 return 66 if self.bar.horizontal: 67 new_height = self.bar.height - (self.margin_y * 2) 68 img.resize(height=new_height) 69 else: 70 new_width = self.bar.width - (self.margin_x * 2) 71 img.resize(width=new_width) 72 73 def draw(self): 74 if self.img is None: 75 return 76 77 self.drawer.clear(self.background or self.bar.background) 78 self.drawer.ctx.save() 79 self.drawer.ctx.translate(self.margin_x, self.margin_y) 80 self.drawer.ctx.set_source(self.img.pattern) 81 self.drawer.ctx.paint() 82 self.drawer.ctx.restore() 83 84 if self.bar.horizontal: 85 self.drawer.draw(offsetx=self.offset, offsety=self.offsety, width=self.width) 86 else: 87 self.drawer.draw(offsety=self.offset, offsetx=self.offsetx, height=self.width) 88 89 def calculate_length(self): 90 if self.img is None: 91 return 0 92 93 if self.bar.horizontal: 94 return self.img.width + (self.margin_x * 2) 95 else: 96 return self.img.height + (self.margin_y * 2) 97 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/libqtile/widget/image.py b/libqtile/widget/image.py --- a/libqtile/widget/image.py +++ b/libqtile/widget/image.py @@ -46,6 +46,9 @@ def _configure(self, qtile, bar): base._Widget._configure(self, qtile, bar) + self._update_image() + + def _update_image(self): self.img = None if not self.filename: @@ -94,3 +97,13 @@ return self.img.width + (self.margin_x * 2) else: return self.img.height + (self.margin_y * 2) + + def cmd_update(self, filename): + old_length = self.calculate_length() + self.filename = filename + self._update_image() + + if self.calculate_length() == old_length: + self.draw() + else: + self.bar.draw()
{"golden_diff": "diff --git a/libqtile/widget/image.py b/libqtile/widget/image.py\n--- a/libqtile/widget/image.py\n+++ b/libqtile/widget/image.py\n@@ -46,6 +46,9 @@\n \n def _configure(self, qtile, bar):\n base._Widget._configure(self, qtile, bar)\n+ self._update_image()\n+\n+ def _update_image(self):\n self.img = None\n \n if not self.filename:\n@@ -94,3 +97,13 @@\n return self.img.width + (self.margin_x * 2)\n else:\n return self.img.height + (self.margin_y * 2)\n+\n+ def cmd_update(self, filename):\n+ old_length = self.calculate_length()\n+ self.filename = filename\n+ self._update_image()\n+\n+ if self.calculate_length() == old_length:\n+ self.draw()\n+ else:\n+ self.bar.draw()\n", "issue": "Widget for updatable image\nI don't want to reinvent the wheel, so will check before.\r\nI use GenPollText for my keyboard layout indicator but instead I want to see the flag (image). As I change layout the image should be changed. Do Qtile has such widget or proper way to do that?\n", "before_files": [{"content": "# Copyright (c) 2013 dequis\n# Copyright (c) 2014 Sean Vig\n# Copyright (c) 2014 Adi Sieker\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nimport os\n\nfrom libqtile import bar\nfrom libqtile.images import Img\nfrom libqtile.log_utils import logger\nfrom libqtile.widget import base\n\n\nclass Image(base._Widget, base.MarginMixin):\n \"\"\"Display a PNG image on the bar\"\"\"\n orientations = base.ORIENTATION_BOTH\n defaults = [\n (\"scale\", True, \"Enable/Disable image scaling\"),\n (\"rotate\", 0.0, \"rotate the image in degrees counter-clockwise\"),\n (\"filename\", None, \"Image filename. 
Can contain '~'\"),\n ]\n\n def __init__(self, length=bar.CALCULATED, **config):\n base._Widget.__init__(self, length, **config)\n self.add_defaults(Image.defaults)\n self.add_defaults(base.MarginMixin.defaults)\n\n # make the default 0 instead\n self._variable_defaults[\"margin\"] = 0\n\n def _configure(self, qtile, bar):\n base._Widget._configure(self, qtile, bar)\n self.img = None\n\n if not self.filename:\n logger.warning(\"Image filename not set!\")\n return\n\n self.filename = os.path.expanduser(self.filename)\n\n if not os.path.exists(self.filename):\n logger.warning(\"Image does not exist: {}\".format(self.filename))\n return\n\n img = Img.from_path(self.filename)\n self.img = img\n img.theta = self.rotate\n if not self.scale:\n return\n if self.bar.horizontal:\n new_height = self.bar.height - (self.margin_y * 2)\n img.resize(height=new_height)\n else:\n new_width = self.bar.width - (self.margin_x * 2)\n img.resize(width=new_width)\n\n def draw(self):\n if self.img is None:\n return\n\n self.drawer.clear(self.background or self.bar.background)\n self.drawer.ctx.save()\n self.drawer.ctx.translate(self.margin_x, self.margin_y)\n self.drawer.ctx.set_source(self.img.pattern)\n self.drawer.ctx.paint()\n self.drawer.ctx.restore()\n\n if self.bar.horizontal:\n self.drawer.draw(offsetx=self.offset, offsety=self.offsety, width=self.width)\n else:\n self.drawer.draw(offsety=self.offset, offsetx=self.offsetx, height=self.width)\n\n def calculate_length(self):\n if self.img is None:\n return 0\n\n if self.bar.horizontal:\n return self.img.width + (self.margin_x * 2)\n else:\n return self.img.height + (self.margin_y * 2)\n", "path": "libqtile/widget/image.py"}], "after_files": [{"content": "# Copyright (c) 2013 dequis\n# Copyright (c) 2014 Sean Vig\n# Copyright (c) 2014 Adi Sieker\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nimport os\n\nfrom libqtile import bar\nfrom libqtile.images import Img\nfrom libqtile.log_utils import logger\nfrom libqtile.widget import base\n\n\nclass Image(base._Widget, base.MarginMixin):\n \"\"\"Display a PNG image on the bar\"\"\"\n orientations = base.ORIENTATION_BOTH\n defaults = [\n (\"scale\", True, \"Enable/Disable image scaling\"),\n (\"rotate\", 0.0, \"rotate the image in degrees counter-clockwise\"),\n (\"filename\", None, \"Image filename. 
Can contain '~'\"),\n ]\n\n def __init__(self, length=bar.CALCULATED, **config):\n base._Widget.__init__(self, length, **config)\n self.add_defaults(Image.defaults)\n self.add_defaults(base.MarginMixin.defaults)\n\n # make the default 0 instead\n self._variable_defaults[\"margin\"] = 0\n\n def _configure(self, qtile, bar):\n base._Widget._configure(self, qtile, bar)\n self._update_image()\n\n def _update_image(self):\n self.img = None\n\n if not self.filename:\n logger.warning(\"Image filename not set!\")\n return\n\n self.filename = os.path.expanduser(self.filename)\n\n if not os.path.exists(self.filename):\n logger.warning(\"Image does not exist: {}\".format(self.filename))\n return\n\n img = Img.from_path(self.filename)\n self.img = img\n img.theta = self.rotate\n if not self.scale:\n return\n if self.bar.horizontal:\n new_height = self.bar.height - (self.margin_y * 2)\n img.resize(height=new_height)\n else:\n new_width = self.bar.width - (self.margin_x * 2)\n img.resize(width=new_width)\n\n def draw(self):\n if self.img is None:\n return\n\n self.drawer.clear(self.background or self.bar.background)\n self.drawer.ctx.save()\n self.drawer.ctx.translate(self.margin_x, self.margin_y)\n self.drawer.ctx.set_source(self.img.pattern)\n self.drawer.ctx.paint()\n self.drawer.ctx.restore()\n\n if self.bar.horizontal:\n self.drawer.draw(offsetx=self.offset, offsety=self.offsety, width=self.width)\n else:\n self.drawer.draw(offsety=self.offset, offsetx=self.offsetx, height=self.width)\n\n def calculate_length(self):\n if self.img is None:\n return 0\n\n if self.bar.horizontal:\n return self.img.width + (self.margin_x * 2)\n else:\n return self.img.height + (self.margin_y * 2)\n\n def cmd_update(self, filename):\n old_length = self.calculate_length()\n self.filename = filename\n self._update_image()\n\n if self.calculate_length() == old_length:\n self.draw()\n else:\n self.bar.draw()\n", "path": "libqtile/widget/image.py"}]}
1,312
205
gh_patches_debug_4946
rasdani/github-patches
git_diff
conan-io__conan-5387
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- conan updates all my software installed with yum Hi, my system: CentOs7, conan 1.16.0, packet manager yum ```python installer = SystemPackageTool() installer.update() ``` The above code will instuct conan to update ALL installed packets in the system. It was introduced here: https://github.com/conan-io/conan/pull/3181 This contradicts with the docu that states: > update(): Updates the system package manager database. It’s called automatically from the install() method by default. This would be achieved with `yum check-update` instead of `yum update`. In my opinion conan should not touch any installed packets, unless expicitly instructed to do so. So I would like to see the old behavior back, like it is discribed in the docu. Cheers --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `conans/client/tools/system_pm.py` Content: ``` 1 import os 2 import sys 3 4 from conans.client.runner import ConanRunner 5 from conans.client.tools.oss import OSInfo 6 from conans.client.tools.files import which 7 from conans.errors import ConanException 8 from conans.util.env_reader import get_env 9 from conans.util.fallbacks import default_output 10 11 12 class SystemPackageTool(object): 13 14 def __init__(self, runner=None, os_info=None, tool=None, recommends=False, output=None): 15 16 self._output = default_output(output, 'conans.client.tools.system_pm.SystemPackageTool') 17 os_info = os_info or OSInfo() 18 self._is_up_to_date = False 19 self._tool = tool or self._create_tool(os_info, output=self._output) 20 self._tool._sudo_str = self._get_sudo_str() 21 self._tool._runner = runner or ConanRunner(output=self._output) 22 self._tool._recommends = recommends 23 24 @staticmethod 25 def _get_sudo_str(): 26 if not SystemPackageTool._is_sudo_enabled(): 27 return "" 28 29 if hasattr(sys.stdout, "isatty") and not sys.stdout.isatty(): 30 return "sudo -A " 31 else: 32 return "sudo " 33 34 @staticmethod 35 def _is_sudo_enabled(): 36 if "CONAN_SYSREQUIRES_SUDO" not in os.environ: 37 if not which("sudo"): 38 return False 39 if os.name == 'posix' and os.geteuid() == 0: 40 return False 41 if os.name == 'nt': 42 return False 43 return get_env("CONAN_SYSREQUIRES_SUDO", True) 44 45 @staticmethod 46 def _get_sysrequire_mode(): 47 allowed_modes = ("enabled", "verify", "disabled") 48 mode = get_env("CONAN_SYSREQUIRES_MODE", "enabled") 49 mode_lower = mode.lower() 50 if mode_lower not in allowed_modes: 51 raise ConanException("CONAN_SYSREQUIRES_MODE=%s is not allowed, allowed modes=%r" 52 % (mode, allowed_modes)) 53 return mode_lower 54 55 @staticmethod 56 def _create_tool(os_info, output): 57 if os_info.with_apt: 58 return AptTool(output=output) 59 elif os_info.with_yum: 60 return YumTool(output=output) 61 elif os_info.with_pacman: 62 return PacManTool(output=output) 63 elif os_info.is_macos: 64 return BrewTool(output=output) 65 elif os_info.is_freebsd: 66 return PkgTool(output=output) 67 elif os_info.is_solaris: 68 return PkgUtilTool(output=output) 69 elif os_info.with_zypper: 70 return ZypperTool(output=output) 71 else: 72 return NullTool(output=output) 73 74 def add_repository(self, repository, repo_key=None, update=True): 75 self._tool.add_repository(repository, repo_key=repo_key) 76 if update: 77 self.update() 78 79 def update(self): 80 """ 81 Get the system package tool update command 82 """ 83 mode = 
self._get_sysrequire_mode() 84 if mode in ("disabled", "verify"): 85 self._output.info("Not updating system_requirements. CONAN_SYSREQUIRES_MODE=%s" % mode) 86 return 87 self._is_up_to_date = True 88 self._tool.update() 89 90 def install(self, packages, update=True, force=False): 91 """ 92 Get the system package tool install command. 93 '""" 94 packages = [packages] if isinstance(packages, str) else list(packages) 95 96 mode = self._get_sysrequire_mode() 97 98 if mode in ("verify", "disabled"): 99 # Report to output packages need to be installed 100 if mode == "disabled": 101 self._output.info("The following packages need to be installed:\n %s" 102 % "\n".join(packages)) 103 return 104 105 if mode == "verify" and not self._installed(packages): 106 self._output.error("The following packages need to be installed:\n %s" 107 % "\n".join(packages)) 108 raise ConanException("Aborted due to CONAN_SYSREQUIRES_MODE=%s. " 109 "Some system packages need to be installed" % mode) 110 111 if not force and self._installed(packages): 112 return 113 114 # From here system packages can be updated/modified 115 if update and not self._is_up_to_date: 116 self.update() 117 self._install_any(packages) 118 119 def _installed(self, packages): 120 if not packages: 121 return True 122 123 for pkg in packages: 124 if self._tool.installed(pkg): 125 self._output.info("Package already installed: %s" % pkg) 126 return True 127 return False 128 129 def _install_any(self, packages): 130 if len(packages) == 1: 131 return self._tool.install(packages[0]) 132 for pkg in packages: 133 try: 134 return self._tool.install(pkg) 135 except ConanException: 136 pass 137 raise ConanException("Could not install any of %s" % packages) 138 139 140 class BaseTool(object): 141 def __init__(self, output=None): 142 self._output = default_output(output, 'conans.client.tools.system_pm.BaseTool') 143 144 145 class NullTool(BaseTool): 146 def add_repository(self, repository, repo_key=None): 147 pass 148 149 def update(self): 150 pass 151 152 def install(self, package_name): 153 self._output.warn("Only available for linux with apt-get, yum, or pacman or OSX with brew or" 154 " FreeBSD with pkg or Solaris with pkgutil") 155 156 def installed(self, package_name): 157 return False 158 159 160 class AptTool(BaseTool): 161 def add_repository(self, repository, repo_key=None): 162 _run(self._runner, "%sapt-add-repository %s" % (self._sudo_str, repository), 163 output=self._output) 164 if repo_key: 165 _run(self._runner, "wget -qO - %s | %sapt-key add -" % (repo_key, self._sudo_str), 166 output=self._output) 167 168 def update(self): 169 _run(self._runner, "%sapt-get update" % self._sudo_str, output=self._output) 170 171 def install(self, package_name): 172 recommends_str = '' if self._recommends else '--no-install-recommends ' 173 _run(self._runner, 174 "%sapt-get install -y %s%s" % (self._sudo_str, recommends_str, package_name), 175 output=self._output) 176 177 def installed(self, package_name): 178 exit_code = self._runner("dpkg-query -W -f='${Status}' %s | grep -q \"ok installed\"" 179 % package_name, None) 180 return exit_code == 0 181 182 183 class YumTool(BaseTool): 184 def add_repository(self, repository, repo_key=None): 185 raise ConanException("YumTool::add_repository not implemented") 186 187 def update(self): 188 _run(self._runner, "%syum update -y" % self._sudo_str, accepted_returns=[0, 100], 189 output=self._output) 190 191 def install(self, package_name): 192 _run(self._runner, "%syum install -y %s" % (self._sudo_str, package_name), 193 
output=self._output) 194 195 def installed(self, package_name): 196 exit_code = self._runner("rpm -q %s" % package_name, None) 197 return exit_code == 0 198 199 200 class BrewTool(BaseTool): 201 def add_repository(self, repository, repo_key=None): 202 raise ConanException("BrewTool::add_repository not implemented") 203 204 def update(self): 205 _run(self._runner, "brew update", output=self._output) 206 207 def install(self, package_name): 208 _run(self._runner, "brew install %s" % package_name, output=self._output) 209 210 def installed(self, package_name): 211 exit_code = self._runner('test -n "$(brew ls --versions %s)"' % package_name, None) 212 return exit_code == 0 213 214 215 class PkgTool(BaseTool): 216 def add_repository(self, repository, repo_key=None): 217 raise ConanException("PkgTool::add_repository not implemented") 218 219 def update(self): 220 _run(self._runner, "%spkg update" % self._sudo_str, output=self._output) 221 222 def install(self, package_name): 223 _run(self._runner, "%spkg install -y %s" % (self._sudo_str, package_name), 224 output=self._output) 225 226 def installed(self, package_name): 227 exit_code = self._runner("pkg info %s" % package_name, None) 228 return exit_code == 0 229 230 231 class PkgUtilTool(BaseTool): 232 def add_repository(self, repository, repo_key=None): 233 raise ConanException("PkgUtilTool::add_repository not implemented") 234 235 def update(self): 236 _run(self._runner, "%spkgutil --catalog" % self._sudo_str, output=self._output) 237 238 def install(self, package_name): 239 _run(self._runner, "%spkgutil --install --yes %s" % (self._sudo_str, package_name), 240 output=self._output) 241 242 def installed(self, package_name): 243 exit_code = self._runner('test -n "`pkgutil --list %s`"' % package_name, None) 244 return exit_code == 0 245 246 247 class ChocolateyTool(BaseTool): 248 def add_repository(self, repository, repo_key=None): 249 raise ConanException("ChocolateyTool::add_repository not implemented") 250 251 def update(self): 252 _run(self._runner, "choco outdated", output=self._output) 253 254 def install(self, package_name): 255 _run(self._runner, "choco install --yes %s" % package_name, output=self._output) 256 257 def installed(self, package_name): 258 exit_code = self._runner('choco search --local-only --exact %s | ' 259 'findstr /c:"1 packages installed."' % package_name, None) 260 return exit_code == 0 261 262 263 class PacManTool(BaseTool): 264 def add_repository(self, repository, repo_key=None): 265 raise ConanException("PacManTool::add_repository not implemented") 266 267 def update(self): 268 _run(self._runner, "%spacman -Syyu --noconfirm" % self._sudo_str, output=self._output) 269 270 def install(self, package_name): 271 _run(self._runner, "%spacman -S --noconfirm %s" % (self._sudo_str, package_name), 272 output=self._output) 273 274 def installed(self, package_name): 275 exit_code = self._runner("pacman -Qi %s" % package_name, None) 276 return exit_code == 0 277 278 279 class ZypperTool(BaseTool): 280 def add_repository(self, repository, repo_key=None): 281 raise ConanException("ZypperTool::add_repository not implemented") 282 283 def update(self): 284 _run(self._runner, "%szypper --non-interactive ref" % self._sudo_str, output=self._output) 285 286 def install(self, package_name): 287 _run(self._runner, "%szypper --non-interactive in %s" % (self._sudo_str, package_name), 288 output=self._output) 289 290 def installed(self, package_name): 291 exit_code = self._runner("rpm -q %s" % package_name, None) 292 return exit_code == 0 
293 294 295 def _run(runner, command, output, accepted_returns=None): 296 accepted_returns = accepted_returns or [0, ] 297 output.info("Running: %s" % command) 298 if runner(command, True) not in accepted_returns: 299 raise ConanException("Command '%s' failed" % command) 300 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/conans/client/tools/system_pm.py b/conans/client/tools/system_pm.py --- a/conans/client/tools/system_pm.py +++ b/conans/client/tools/system_pm.py @@ -185,7 +185,7 @@ raise ConanException("YumTool::add_repository not implemented") def update(self): - _run(self._runner, "%syum update -y" % self._sudo_str, accepted_returns=[0, 100], + _run(self._runner, "%syum check-update -y" % self._sudo_str, accepted_returns=[0, 100], output=self._output) def install(self, package_name):
{"golden_diff": "diff --git a/conans/client/tools/system_pm.py b/conans/client/tools/system_pm.py\n--- a/conans/client/tools/system_pm.py\n+++ b/conans/client/tools/system_pm.py\n@@ -185,7 +185,7 @@\n raise ConanException(\"YumTool::add_repository not implemented\")\n \n def update(self):\n- _run(self._runner, \"%syum update -y\" % self._sudo_str, accepted_returns=[0, 100],\n+ _run(self._runner, \"%syum check-update -y\" % self._sudo_str, accepted_returns=[0, 100],\n output=self._output)\n \n def install(self, package_name):\n", "issue": "conan updates all my software installed with yum\nHi,\r\n\r\nmy system:\r\nCentOs7, conan 1.16.0, packet manager yum\r\n\r\n```python\r\ninstaller = SystemPackageTool()\r\ninstaller.update()\r\n```\r\nThe above code will instuct conan to update ALL installed packets in the system. It was introduced here: https://github.com/conan-io/conan/pull/3181\r\n\r\nThis contradicts with the docu that states:\r\n\r\n> update(): Updates the system package manager database. It\u2019s called automatically from the install() method by default.\r\n\r\nThis would be achieved with `yum check-update` instead of `yum update`.\r\nIn my opinion conan should not touch any installed packets, unless expicitly instructed to do so. So I would like to see the old behavior back, like it is discribed in the docu.\r\n\r\nCheers\n", "before_files": [{"content": "import os\nimport sys\n\nfrom conans.client.runner import ConanRunner\nfrom conans.client.tools.oss import OSInfo\nfrom conans.client.tools.files import which\nfrom conans.errors import ConanException\nfrom conans.util.env_reader import get_env\nfrom conans.util.fallbacks import default_output\n\n\nclass SystemPackageTool(object):\n\n def __init__(self, runner=None, os_info=None, tool=None, recommends=False, output=None):\n\n self._output = default_output(output, 'conans.client.tools.system_pm.SystemPackageTool')\n os_info = os_info or OSInfo()\n self._is_up_to_date = False\n self._tool = tool or self._create_tool(os_info, output=self._output)\n self._tool._sudo_str = self._get_sudo_str()\n self._tool._runner = runner or ConanRunner(output=self._output)\n self._tool._recommends = recommends\n\n @staticmethod\n def _get_sudo_str():\n if not SystemPackageTool._is_sudo_enabled():\n return \"\"\n\n if hasattr(sys.stdout, \"isatty\") and not sys.stdout.isatty():\n return \"sudo -A \"\n else:\n return \"sudo \"\n\n @staticmethod\n def _is_sudo_enabled():\n if \"CONAN_SYSREQUIRES_SUDO\" not in os.environ:\n if not which(\"sudo\"):\n return False\n if os.name == 'posix' and os.geteuid() == 0:\n return False\n if os.name == 'nt':\n return False\n return get_env(\"CONAN_SYSREQUIRES_SUDO\", True)\n\n @staticmethod\n def _get_sysrequire_mode():\n allowed_modes = (\"enabled\", \"verify\", \"disabled\")\n mode = get_env(\"CONAN_SYSREQUIRES_MODE\", \"enabled\")\n mode_lower = mode.lower()\n if mode_lower not in allowed_modes:\n raise ConanException(\"CONAN_SYSREQUIRES_MODE=%s is not allowed, allowed modes=%r\"\n % (mode, allowed_modes))\n return mode_lower\n\n @staticmethod\n def _create_tool(os_info, output):\n if os_info.with_apt:\n return AptTool(output=output)\n elif os_info.with_yum:\n return YumTool(output=output)\n elif os_info.with_pacman:\n return PacManTool(output=output)\n elif os_info.is_macos:\n return BrewTool(output=output)\n elif os_info.is_freebsd:\n return PkgTool(output=output)\n elif os_info.is_solaris:\n return PkgUtilTool(output=output)\n elif os_info.with_zypper:\n return ZypperTool(output=output)\n else:\n return 
NullTool(output=output)\n\n def add_repository(self, repository, repo_key=None, update=True):\n self._tool.add_repository(repository, repo_key=repo_key)\n if update:\n self.update()\n\n def update(self):\n \"\"\"\n Get the system package tool update command\n \"\"\"\n mode = self._get_sysrequire_mode()\n if mode in (\"disabled\", \"verify\"):\n self._output.info(\"Not updating system_requirements. CONAN_SYSREQUIRES_MODE=%s\" % mode)\n return\n self._is_up_to_date = True\n self._tool.update()\n\n def install(self, packages, update=True, force=False):\n \"\"\"\n Get the system package tool install command.\n '\"\"\"\n packages = [packages] if isinstance(packages, str) else list(packages)\n\n mode = self._get_sysrequire_mode()\n\n if mode in (\"verify\", \"disabled\"):\n # Report to output packages need to be installed\n if mode == \"disabled\":\n self._output.info(\"The following packages need to be installed:\\n %s\"\n % \"\\n\".join(packages))\n return\n\n if mode == \"verify\" and not self._installed(packages):\n self._output.error(\"The following packages need to be installed:\\n %s\"\n % \"\\n\".join(packages))\n raise ConanException(\"Aborted due to CONAN_SYSREQUIRES_MODE=%s. \"\n \"Some system packages need to be installed\" % mode)\n\n if not force and self._installed(packages):\n return\n\n # From here system packages can be updated/modified\n if update and not self._is_up_to_date:\n self.update()\n self._install_any(packages)\n\n def _installed(self, packages):\n if not packages:\n return True\n\n for pkg in packages:\n if self._tool.installed(pkg):\n self._output.info(\"Package already installed: %s\" % pkg)\n return True\n return False\n\n def _install_any(self, packages):\n if len(packages) == 1:\n return self._tool.install(packages[0])\n for pkg in packages:\n try:\n return self._tool.install(pkg)\n except ConanException:\n pass\n raise ConanException(\"Could not install any of %s\" % packages)\n\n\nclass BaseTool(object):\n def __init__(self, output=None):\n self._output = default_output(output, 'conans.client.tools.system_pm.BaseTool')\n\n\nclass NullTool(BaseTool):\n def add_repository(self, repository, repo_key=None):\n pass\n\n def update(self):\n pass\n\n def install(self, package_name):\n self._output.warn(\"Only available for linux with apt-get, yum, or pacman or OSX with brew or\"\n \" FreeBSD with pkg or Solaris with pkgutil\")\n\n def installed(self, package_name):\n return False\n\n\nclass AptTool(BaseTool):\n def add_repository(self, repository, repo_key=None):\n _run(self._runner, \"%sapt-add-repository %s\" % (self._sudo_str, repository),\n output=self._output)\n if repo_key:\n _run(self._runner, \"wget -qO - %s | %sapt-key add -\" % (repo_key, self._sudo_str),\n output=self._output)\n\n def update(self):\n _run(self._runner, \"%sapt-get update\" % self._sudo_str, output=self._output)\n\n def install(self, package_name):\n recommends_str = '' if self._recommends else '--no-install-recommends '\n _run(self._runner,\n \"%sapt-get install -y %s%s\" % (self._sudo_str, recommends_str, package_name),\n output=self._output)\n\n def installed(self, package_name):\n exit_code = self._runner(\"dpkg-query -W -f='${Status}' %s | grep -q \\\"ok installed\\\"\"\n % package_name, None)\n return exit_code == 0\n\n\nclass YumTool(BaseTool):\n def add_repository(self, repository, repo_key=None):\n raise ConanException(\"YumTool::add_repository not implemented\")\n\n def update(self):\n _run(self._runner, \"%syum update -y\" % self._sudo_str, accepted_returns=[0, 100],\n 
output=self._output)\n\n def install(self, package_name):\n _run(self._runner, \"%syum install -y %s\" % (self._sudo_str, package_name),\n output=self._output)\n\n def installed(self, package_name):\n exit_code = self._runner(\"rpm -q %s\" % package_name, None)\n return exit_code == 0\n\n\nclass BrewTool(BaseTool):\n def add_repository(self, repository, repo_key=None):\n raise ConanException(\"BrewTool::add_repository not implemented\")\n\n def update(self):\n _run(self._runner, \"brew update\", output=self._output)\n\n def install(self, package_name):\n _run(self._runner, \"brew install %s\" % package_name, output=self._output)\n\n def installed(self, package_name):\n exit_code = self._runner('test -n \"$(brew ls --versions %s)\"' % package_name, None)\n return exit_code == 0\n\n\nclass PkgTool(BaseTool):\n def add_repository(self, repository, repo_key=None):\n raise ConanException(\"PkgTool::add_repository not implemented\")\n\n def update(self):\n _run(self._runner, \"%spkg update\" % self._sudo_str, output=self._output)\n\n def install(self, package_name):\n _run(self._runner, \"%spkg install -y %s\" % (self._sudo_str, package_name),\n output=self._output)\n\n def installed(self, package_name):\n exit_code = self._runner(\"pkg info %s\" % package_name, None)\n return exit_code == 0\n\n\nclass PkgUtilTool(BaseTool):\n def add_repository(self, repository, repo_key=None):\n raise ConanException(\"PkgUtilTool::add_repository not implemented\")\n\n def update(self):\n _run(self._runner, \"%spkgutil --catalog\" % self._sudo_str, output=self._output)\n\n def install(self, package_name):\n _run(self._runner, \"%spkgutil --install --yes %s\" % (self._sudo_str, package_name),\n output=self._output)\n\n def installed(self, package_name):\n exit_code = self._runner('test -n \"`pkgutil --list %s`\"' % package_name, None)\n return exit_code == 0\n\n\nclass ChocolateyTool(BaseTool):\n def add_repository(self, repository, repo_key=None):\n raise ConanException(\"ChocolateyTool::add_repository not implemented\")\n\n def update(self):\n _run(self._runner, \"choco outdated\", output=self._output)\n\n def install(self, package_name):\n _run(self._runner, \"choco install --yes %s\" % package_name, output=self._output)\n\n def installed(self, package_name):\n exit_code = self._runner('choco search --local-only --exact %s | '\n 'findstr /c:\"1 packages installed.\"' % package_name, None)\n return exit_code == 0\n\n\nclass PacManTool(BaseTool):\n def add_repository(self, repository, repo_key=None):\n raise ConanException(\"PacManTool::add_repository not implemented\")\n\n def update(self):\n _run(self._runner, \"%spacman -Syyu --noconfirm\" % self._sudo_str, output=self._output)\n\n def install(self, package_name):\n _run(self._runner, \"%spacman -S --noconfirm %s\" % (self._sudo_str, package_name),\n output=self._output)\n\n def installed(self, package_name):\n exit_code = self._runner(\"pacman -Qi %s\" % package_name, None)\n return exit_code == 0\n\n\nclass ZypperTool(BaseTool):\n def add_repository(self, repository, repo_key=None):\n raise ConanException(\"ZypperTool::add_repository not implemented\")\n\n def update(self):\n _run(self._runner, \"%szypper --non-interactive ref\" % self._sudo_str, output=self._output)\n\n def install(self, package_name):\n _run(self._runner, \"%szypper --non-interactive in %s\" % (self._sudo_str, package_name),\n output=self._output)\n\n def installed(self, package_name):\n exit_code = self._runner(\"rpm -q %s\" % package_name, None)\n return exit_code == 0\n\n\ndef 
_run(runner, command, output, accepted_returns=None):\n accepted_returns = accepted_returns or [0, ]\n output.info(\"Running: %s\" % command)\n if runner(command, True) not in accepted_returns:\n raise ConanException(\"Command '%s' failed\" % command)\n", "path": "conans/client/tools/system_pm.py"}], "after_files": [{"content": "import os\nimport sys\n\nfrom conans.client.runner import ConanRunner\nfrom conans.client.tools.oss import OSInfo\nfrom conans.client.tools.files import which\nfrom conans.errors import ConanException\nfrom conans.util.env_reader import get_env\nfrom conans.util.fallbacks import default_output\n\n\nclass SystemPackageTool(object):\n\n def __init__(self, runner=None, os_info=None, tool=None, recommends=False, output=None):\n\n self._output = default_output(output, 'conans.client.tools.system_pm.SystemPackageTool')\n os_info = os_info or OSInfo()\n self._is_up_to_date = False\n self._tool = tool or self._create_tool(os_info, output=self._output)\n self._tool._sudo_str = self._get_sudo_str()\n self._tool._runner = runner or ConanRunner(output=self._output)\n self._tool._recommends = recommends\n\n @staticmethod\n def _get_sudo_str():\n if not SystemPackageTool._is_sudo_enabled():\n return \"\"\n\n if hasattr(sys.stdout, \"isatty\") and not sys.stdout.isatty():\n return \"sudo -A \"\n else:\n return \"sudo \"\n\n @staticmethod\n def _is_sudo_enabled():\n if \"CONAN_SYSREQUIRES_SUDO\" not in os.environ:\n if not which(\"sudo\"):\n return False\n if os.name == 'posix' and os.geteuid() == 0:\n return False\n if os.name == 'nt':\n return False\n return get_env(\"CONAN_SYSREQUIRES_SUDO\", True)\n\n @staticmethod\n def _get_sysrequire_mode():\n allowed_modes = (\"enabled\", \"verify\", \"disabled\")\n mode = get_env(\"CONAN_SYSREQUIRES_MODE\", \"enabled\")\n mode_lower = mode.lower()\n if mode_lower not in allowed_modes:\n raise ConanException(\"CONAN_SYSREQUIRES_MODE=%s is not allowed, allowed modes=%r\"\n % (mode, allowed_modes))\n return mode_lower\n\n @staticmethod\n def _create_tool(os_info, output):\n if os_info.with_apt:\n return AptTool(output=output)\n elif os_info.with_yum:\n return YumTool(output=output)\n elif os_info.with_pacman:\n return PacManTool(output=output)\n elif os_info.is_macos:\n return BrewTool(output=output)\n elif os_info.is_freebsd:\n return PkgTool(output=output)\n elif os_info.is_solaris:\n return PkgUtilTool(output=output)\n elif os_info.with_zypper:\n return ZypperTool(output=output)\n else:\n return NullTool(output=output)\n\n def add_repository(self, repository, repo_key=None, update=True):\n self._tool.add_repository(repository, repo_key=repo_key)\n if update:\n self.update()\n\n def update(self):\n \"\"\"\n Get the system package tool update command\n \"\"\"\n mode = self._get_sysrequire_mode()\n if mode in (\"disabled\", \"verify\"):\n self._output.info(\"Not updating system_requirements. 
CONAN_SYSREQUIRES_MODE=%s\" % mode)\n return\n self._is_up_to_date = True\n self._tool.update()\n\n def install(self, packages, update=True, force=False):\n \"\"\"\n Get the system package tool install command.\n '\"\"\"\n packages = [packages] if isinstance(packages, str) else list(packages)\n\n mode = self._get_sysrequire_mode()\n\n if mode in (\"verify\", \"disabled\"):\n # Report to output packages need to be installed\n if mode == \"disabled\":\n self._output.info(\"The following packages need to be installed:\\n %s\"\n % \"\\n\".join(packages))\n return\n\n if mode == \"verify\" and not self._installed(packages):\n self._output.error(\"The following packages need to be installed:\\n %s\"\n % \"\\n\".join(packages))\n raise ConanException(\"Aborted due to CONAN_SYSREQUIRES_MODE=%s. \"\n \"Some system packages need to be installed\" % mode)\n\n if not force and self._installed(packages):\n return\n\n # From here system packages can be updated/modified\n if update and not self._is_up_to_date:\n self.update()\n self._install_any(packages)\n\n def _installed(self, packages):\n if not packages:\n return True\n\n for pkg in packages:\n if self._tool.installed(pkg):\n self._output.info(\"Package already installed: %s\" % pkg)\n return True\n return False\n\n def _install_any(self, packages):\n if len(packages) == 1:\n return self._tool.install(packages[0])\n for pkg in packages:\n try:\n return self._tool.install(pkg)\n except ConanException:\n pass\n raise ConanException(\"Could not install any of %s\" % packages)\n\n\nclass BaseTool(object):\n def __init__(self, output=None):\n self._output = default_output(output, 'conans.client.tools.system_pm.BaseTool')\n\n\nclass NullTool(BaseTool):\n def add_repository(self, repository, repo_key=None):\n pass\n\n def update(self):\n pass\n\n def install(self, package_name):\n self._output.warn(\"Only available for linux with apt-get, yum, or pacman or OSX with brew or\"\n \" FreeBSD with pkg or Solaris with pkgutil\")\n\n def installed(self, package_name):\n return False\n\n\nclass AptTool(BaseTool):\n def add_repository(self, repository, repo_key=None):\n _run(self._runner, \"%sapt-add-repository %s\" % (self._sudo_str, repository),\n output=self._output)\n if repo_key:\n _run(self._runner, \"wget -qO - %s | %sapt-key add -\" % (repo_key, self._sudo_str),\n output=self._output)\n\n def update(self):\n _run(self._runner, \"%sapt-get update\" % self._sudo_str, output=self._output)\n\n def install(self, package_name):\n recommends_str = '' if self._recommends else '--no-install-recommends '\n _run(self._runner,\n \"%sapt-get install -y %s%s\" % (self._sudo_str, recommends_str, package_name),\n output=self._output)\n\n def installed(self, package_name):\n exit_code = self._runner(\"dpkg-query -W -f='${Status}' %s | grep -q \\\"ok installed\\\"\"\n % package_name, None)\n return exit_code == 0\n\n\nclass YumTool(BaseTool):\n def add_repository(self, repository, repo_key=None):\n raise ConanException(\"YumTool::add_repository not implemented\")\n\n def update(self):\n _run(self._runner, \"%syum check-update -y\" % self._sudo_str, accepted_returns=[0, 100],\n output=self._output)\n\n def install(self, package_name):\n _run(self._runner, \"%syum install -y %s\" % (self._sudo_str, package_name),\n output=self._output)\n\n def installed(self, package_name):\n exit_code = self._runner(\"rpm -q %s\" % package_name, None)\n return exit_code == 0\n\n\nclass BrewTool(BaseTool):\n def add_repository(self, repository, repo_key=None):\n raise 
ConanException(\"BrewTool::add_repository not implemented\")\n\n def update(self):\n _run(self._runner, \"brew update\", output=self._output)\n\n def install(self, package_name):\n _run(self._runner, \"brew install %s\" % package_name, output=self._output)\n\n def installed(self, package_name):\n exit_code = self._runner('test -n \"$(brew ls --versions %s)\"' % package_name, None)\n return exit_code == 0\n\n\nclass PkgTool(BaseTool):\n def add_repository(self, repository, repo_key=None):\n raise ConanException(\"PkgTool::add_repository not implemented\")\n\n def update(self):\n _run(self._runner, \"%spkg update\" % self._sudo_str, output=self._output)\n\n def install(self, package_name):\n _run(self._runner, \"%spkg install -y %s\" % (self._sudo_str, package_name),\n output=self._output)\n\n def installed(self, package_name):\n exit_code = self._runner(\"pkg info %s\" % package_name, None)\n return exit_code == 0\n\n\nclass PkgUtilTool(BaseTool):\n def add_repository(self, repository, repo_key=None):\n raise ConanException(\"PkgUtilTool::add_repository not implemented\")\n\n def update(self):\n _run(self._runner, \"%spkgutil --catalog\" % self._sudo_str, output=self._output)\n\n def install(self, package_name):\n _run(self._runner, \"%spkgutil --install --yes %s\" % (self._sudo_str, package_name),\n output=self._output)\n\n def installed(self, package_name):\n exit_code = self._runner('test -n \"`pkgutil --list %s`\"' % package_name, None)\n return exit_code == 0\n\n\nclass ChocolateyTool(BaseTool):\n def add_repository(self, repository, repo_key=None):\n raise ConanException(\"ChocolateyTool::add_repository not implemented\")\n\n def update(self):\n _run(self._runner, \"choco outdated\", output=self._output)\n\n def install(self, package_name):\n _run(self._runner, \"choco install --yes %s\" % package_name, output=self._output)\n\n def installed(self, package_name):\n exit_code = self._runner('choco search --local-only --exact %s | '\n 'findstr /c:\"1 packages installed.\"' % package_name, None)\n return exit_code == 0\n\n\nclass PacManTool(BaseTool):\n def add_repository(self, repository, repo_key=None):\n raise ConanException(\"PacManTool::add_repository not implemented\")\n\n def update(self):\n _run(self._runner, \"%spacman -Syyu --noconfirm\" % self._sudo_str, output=self._output)\n\n def install(self, package_name):\n _run(self._runner, \"%spacman -S --noconfirm %s\" % (self._sudo_str, package_name),\n output=self._output)\n\n def installed(self, package_name):\n exit_code = self._runner(\"pacman -Qi %s\" % package_name, None)\n return exit_code == 0\n\n\nclass ZypperTool(BaseTool):\n def add_repository(self, repository, repo_key=None):\n raise ConanException(\"ZypperTool::add_repository not implemented\")\n\n def update(self):\n _run(self._runner, \"%szypper --non-interactive ref\" % self._sudo_str, output=self._output)\n\n def install(self, package_name):\n _run(self._runner, \"%szypper --non-interactive in %s\" % (self._sudo_str, package_name),\n output=self._output)\n\n def installed(self, package_name):\n exit_code = self._runner(\"rpm -q %s\" % package_name, None)\n return exit_code == 0\n\n\ndef _run(runner, command, output, accepted_returns=None):\n accepted_returns = accepted_returns or [0, ]\n output.info(\"Running: %s\" % command)\n if runner(command, True) not in accepted_returns:\n raise ConanException(\"Command '%s' failed\" % command)\n", "path": "conans/client/tools/system_pm.py"}]}
3,768
152
gh_patches_debug_11910
rasdani/github-patches
git_diff
web2py__web2py-1682
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- heroku ADAPTERS error I'm looking to use Heroku for deployment of my web2py project. However, when I add `from gluon.contrib.heroku import get_db` `db = get_db(name=None, pool_size=myconf.get('db.pool_size'))` I get a ticket with the error: > File "/Users/huangyu/dev/web2py/gluon/contrib/heroku.py", line 10, in <module> from pydal.adapters import ADAPTERS, PostgreSQLAdapter ImportError: cannot import name ADAPTERS It looks like web2py has moved on from using ADAPTERS? Has that been replaced by `@adapters.register_for('postgres')` But the heroku file has not been updated. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `gluon/contrib/heroku.py` Content: ``` 1 """ 2 Usage: in web2py models/db.py 3 4 from gluon.contrib.heroku import get_db 5 db = get_db() 6 7 """ 8 import os 9 from gluon import * 10 from pydal.adapters import ADAPTERS, PostgreSQLAdapter 11 from pydal.helpers.classes import UseDatabaseStoredFile 12 13 class HerokuPostgresAdapter(UseDatabaseStoredFile,PostgreSQLAdapter): 14 drivers = ('psycopg2',) 15 uploads_in_blob = True 16 17 ADAPTERS['postgres'] = HerokuPostgresAdapter 18 19 def get_db(name = None, pool_size=10): 20 if not name: 21 names = [n for n in os.environ.keys() 22 if n[:18]+n[-4:]=='HEROKU_POSTGRESQL__URL'] 23 if names: 24 name = names[0] 25 if name: 26 db = DAL(os.environ[name], pool_size=pool_size) 27 current.session.connect(current.request, current.response, db=db) 28 else: 29 db = DAL('sqlite://heroku.test.sqlite') 30 return db 31 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/gluon/contrib/heroku.py b/gluon/contrib/heroku.py --- a/gluon/contrib/heroku.py +++ b/gluon/contrib/heroku.py @@ -7,15 +7,13 @@ """ import os from gluon import * -from pydal.adapters import ADAPTERS, PostgreSQLAdapter -from pydal.helpers.classes import UseDatabaseStoredFile +from pydal.adapters import adapters, PostgrePsyco +from pydal.helpers.classes import DatabaseStoredFile -class HerokuPostgresAdapter(UseDatabaseStoredFile,PostgreSQLAdapter): - drivers = ('psycopg2',) [email protected]_for('postgres') +class HerokuPostgresAdapter(DatabaseStoredFile, PostgrePsyco): uploads_in_blob = True -ADAPTERS['postgres'] = HerokuPostgresAdapter - def get_db(name = None, pool_size=10): if not name: names = [n for n in os.environ.keys()
{"golden_diff": "diff --git a/gluon/contrib/heroku.py b/gluon/contrib/heroku.py\n--- a/gluon/contrib/heroku.py\n+++ b/gluon/contrib/heroku.py\n@@ -7,15 +7,13 @@\n \"\"\"\n import os\n from gluon import *\n-from pydal.adapters import ADAPTERS, PostgreSQLAdapter\n-from pydal.helpers.classes import UseDatabaseStoredFile\n+from pydal.adapters import adapters, PostgrePsyco\n+from pydal.helpers.classes import DatabaseStoredFile\n \n-class HerokuPostgresAdapter(UseDatabaseStoredFile,PostgreSQLAdapter):\n- drivers = ('psycopg2',)\[email protected]_for('postgres')\n+class HerokuPostgresAdapter(DatabaseStoredFile, PostgrePsyco):\n uploads_in_blob = True\n \n-ADAPTERS['postgres'] = HerokuPostgresAdapter\n-\n def get_db(name = None, pool_size=10):\n if not name:\n names = [n for n in os.environ.keys()\n", "issue": "heroku ADAPTERS error\nI'm looking to use Heroku for deployment of my web2py project. However, when I add \r\n`from gluon.contrib.heroku import get_db`\r\n`db = get_db(name=None, pool_size=myconf.get('db.pool_size'))`\r\n\r\nI get a ticket with the error:\r\n\r\n> File \"/Users/huangyu/dev/web2py/gluon/contrib/heroku.py\", line 10, in <module>\r\n from pydal.adapters import ADAPTERS, PostgreSQLAdapter\r\nImportError: cannot import name ADAPTERS\r\n\r\nIt looks like web2py has moved on from using ADAPTERS? Has that been replaced by \r\n`@adapters.register_for('postgres')`\r\n\r\nBut the heroku file has not been updated. \n", "before_files": [{"content": "\"\"\"\nUsage: in web2py models/db.py\n\nfrom gluon.contrib.heroku import get_db\ndb = get_db()\n\n\"\"\"\nimport os\nfrom gluon import *\nfrom pydal.adapters import ADAPTERS, PostgreSQLAdapter\nfrom pydal.helpers.classes import UseDatabaseStoredFile\n\nclass HerokuPostgresAdapter(UseDatabaseStoredFile,PostgreSQLAdapter):\n drivers = ('psycopg2',)\n uploads_in_blob = True\n\nADAPTERS['postgres'] = HerokuPostgresAdapter\n\ndef get_db(name = None, pool_size=10):\n if not name:\n names = [n for n in os.environ.keys()\n if n[:18]+n[-4:]=='HEROKU_POSTGRESQL__URL']\n if names:\n name = names[0]\n if name:\n db = DAL(os.environ[name], pool_size=pool_size)\n current.session.connect(current.request, current.response, db=db)\n else:\n db = DAL('sqlite://heroku.test.sqlite')\n return db\n", "path": "gluon/contrib/heroku.py"}], "after_files": [{"content": "\"\"\"\nUsage: in web2py models/db.py\n\nfrom gluon.contrib.heroku import get_db\ndb = get_db()\n\n\"\"\"\nimport os\nfrom gluon import *\nfrom pydal.adapters import adapters, PostgrePsyco\nfrom pydal.helpers.classes import DatabaseStoredFile\n\[email protected]_for('postgres')\nclass HerokuPostgresAdapter(DatabaseStoredFile, PostgrePsyco):\n uploads_in_blob = True\n\ndef get_db(name = None, pool_size=10):\n if not name:\n names = [n for n in os.environ.keys()\n if n[:18]+n[-4:]=='HEROKU_POSTGRESQL__URL']\n if names:\n name = names[0]\n if name:\n db = DAL(os.environ[name], pool_size=pool_size)\n current.session.connect(current.request, current.response, db=db)\n else:\n db = DAL('sqlite://heroku.test.sqlite')\n return db\n", "path": "gluon/contrib/heroku.py"}]}
695
214
gh_patches_debug_34290
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-306
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Switch unit tests from `unittest.mock` to SDK & in-memory exporter See https://github.com/open-telemetry/opentelemetry-python/pull/290#issuecomment-558091283. Currently tests are cumbersome to write and actually we probably don't want to test which API calls are made but what Spans would result in most cases. For this a SDK with in-memory exporter would be better than using `unittest.mock`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ext/opentelemetry-ext-testutil/src/opentelemetry/ext/testutil/wsgitestutil.py` Content: ``` 1 import io 2 import unittest 3 import unittest.mock as mock 4 import wsgiref.util as wsgiref_util 5 6 from opentelemetry import trace as trace_api 7 8 9 class WsgiTestBase(unittest.TestCase): 10 def setUp(self): 11 self.span = mock.create_autospec(trace_api.Span, spec_set=True) 12 tracer = trace_api.Tracer() 13 self.get_tracer_patcher = mock.patch.object( 14 trace_api.TracerSource, 15 "get_tracer", 16 autospec=True, 17 spec_set=True, 18 return_value=tracer, 19 ) 20 self.get_tracer_patcher.start() 21 22 self.start_span_patcher = mock.patch.object( 23 tracer, 24 "start_span", 25 autospec=True, 26 spec_set=True, 27 return_value=self.span, 28 ) 29 self.start_span = self.start_span_patcher.start() 30 self.write_buffer = io.BytesIO() 31 self.write = self.write_buffer.write 32 33 self.environ = {} 34 wsgiref_util.setup_testing_defaults(self.environ) 35 36 self.status = None 37 self.response_headers = None 38 self.exc_info = None 39 40 def tearDown(self): 41 self.get_tracer_patcher.stop() 42 self.start_span_patcher.stop() 43 44 def start_response(self, status, response_headers, exc_info=None): 45 self.status = status 46 self.response_headers = response_headers 47 self.exc_info = exc_info 48 return self.write 49 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ext/opentelemetry-ext-testutil/src/opentelemetry/ext/testutil/wsgitestutil.py b/ext/opentelemetry-ext-testutil/src/opentelemetry/ext/testutil/wsgitestutil.py --- a/ext/opentelemetry-ext-testutil/src/opentelemetry/ext/testutil/wsgitestutil.py +++ b/ext/opentelemetry-ext-testutil/src/opentelemetry/ext/testutil/wsgitestutil.py @@ -1,32 +1,38 @@ import io import unittest -import unittest.mock as mock import wsgiref.util as wsgiref_util +from importlib import reload from opentelemetry import trace as trace_api +from opentelemetry.sdk.trace import TracerSource, export +from opentelemetry.sdk.trace.export.in_memory_span_exporter import ( + InMemorySpanExporter, +) + +_MEMORY_EXPORTER = None class WsgiTestBase(unittest.TestCase): - def setUp(self): - self.span = mock.create_autospec(trace_api.Span, spec_set=True) - tracer = trace_api.Tracer() - self.get_tracer_patcher = mock.patch.object( - trace_api.TracerSource, - "get_tracer", - autospec=True, - spec_set=True, - return_value=tracer, - ) - self.get_tracer_patcher.start() - - self.start_span_patcher = mock.patch.object( - tracer, - "start_span", - autospec=True, - spec_set=True, - return_value=self.span, + @classmethod + def setUpClass(cls): + global _MEMORY_EXPORTER # pylint:disable=global-statement + trace_api.set_preferred_tracer_source_implementation( + lambda T: TracerSource() ) - self.start_span = self.start_span_patcher.start() + tracer_source = trace_api.tracer_source() + _MEMORY_EXPORTER = InMemorySpanExporter() + span_processor = export.SimpleExportSpanProcessor(_MEMORY_EXPORTER) + tracer_source.add_span_processor(span_processor) + + @classmethod + def tearDownClass(cls): + reload(trace_api) + + def setUp(self): + + self.memory_exporter = _MEMORY_EXPORTER + self.memory_exporter.clear() + self.write_buffer = io.BytesIO() self.write = self.write_buffer.write @@ -37,10 +43,6 @@ self.response_headers = None self.exc_info = None - def tearDown(self): - self.get_tracer_patcher.stop() - self.start_span_patcher.stop() - def start_response(self, status, response_headers, exc_info=None): self.status = status self.response_headers = response_headers
{"golden_diff": "diff --git a/ext/opentelemetry-ext-testutil/src/opentelemetry/ext/testutil/wsgitestutil.py b/ext/opentelemetry-ext-testutil/src/opentelemetry/ext/testutil/wsgitestutil.py\n--- a/ext/opentelemetry-ext-testutil/src/opentelemetry/ext/testutil/wsgitestutil.py\n+++ b/ext/opentelemetry-ext-testutil/src/opentelemetry/ext/testutil/wsgitestutil.py\n@@ -1,32 +1,38 @@\n import io\n import unittest\n-import unittest.mock as mock\n import wsgiref.util as wsgiref_util\n+from importlib import reload\n \n from opentelemetry import trace as trace_api\n+from opentelemetry.sdk.trace import TracerSource, export\n+from opentelemetry.sdk.trace.export.in_memory_span_exporter import (\n+ InMemorySpanExporter,\n+)\n+\n+_MEMORY_EXPORTER = None\n \n \n class WsgiTestBase(unittest.TestCase):\n- def setUp(self):\n- self.span = mock.create_autospec(trace_api.Span, spec_set=True)\n- tracer = trace_api.Tracer()\n- self.get_tracer_patcher = mock.patch.object(\n- trace_api.TracerSource,\n- \"get_tracer\",\n- autospec=True,\n- spec_set=True,\n- return_value=tracer,\n- )\n- self.get_tracer_patcher.start()\n-\n- self.start_span_patcher = mock.patch.object(\n- tracer,\n- \"start_span\",\n- autospec=True,\n- spec_set=True,\n- return_value=self.span,\n+ @classmethod\n+ def setUpClass(cls):\n+ global _MEMORY_EXPORTER # pylint:disable=global-statement\n+ trace_api.set_preferred_tracer_source_implementation(\n+ lambda T: TracerSource()\n )\n- self.start_span = self.start_span_patcher.start()\n+ tracer_source = trace_api.tracer_source()\n+ _MEMORY_EXPORTER = InMemorySpanExporter()\n+ span_processor = export.SimpleExportSpanProcessor(_MEMORY_EXPORTER)\n+ tracer_source.add_span_processor(span_processor)\n+\n+ @classmethod\n+ def tearDownClass(cls):\n+ reload(trace_api)\n+\n+ def setUp(self):\n+\n+ self.memory_exporter = _MEMORY_EXPORTER\n+ self.memory_exporter.clear()\n+\n self.write_buffer = io.BytesIO()\n self.write = self.write_buffer.write\n \n@@ -37,10 +43,6 @@\n self.response_headers = None\n self.exc_info = None\n \n- def tearDown(self):\n- self.get_tracer_patcher.stop()\n- self.start_span_patcher.stop()\n-\n def start_response(self, status, response_headers, exc_info=None):\n self.status = status\n self.response_headers = response_headers\n", "issue": "Switch unit tests from `unittest.mock` to SDK & in-memory exporter\nSee https://github.com/open-telemetry/opentelemetry-python/pull/290#issuecomment-558091283.\r\nCurrently tests are cumbersome to write and actually we probably don't want to test which API calls are made but what Spans would result in most cases. 
For this a SDK with in-memory exporter would be better than using `unittest.mock`.\n", "before_files": [{"content": "import io\nimport unittest\nimport unittest.mock as mock\nimport wsgiref.util as wsgiref_util\n\nfrom opentelemetry import trace as trace_api\n\n\nclass WsgiTestBase(unittest.TestCase):\n def setUp(self):\n self.span = mock.create_autospec(trace_api.Span, spec_set=True)\n tracer = trace_api.Tracer()\n self.get_tracer_patcher = mock.patch.object(\n trace_api.TracerSource,\n \"get_tracer\",\n autospec=True,\n spec_set=True,\n return_value=tracer,\n )\n self.get_tracer_patcher.start()\n\n self.start_span_patcher = mock.patch.object(\n tracer,\n \"start_span\",\n autospec=True,\n spec_set=True,\n return_value=self.span,\n )\n self.start_span = self.start_span_patcher.start()\n self.write_buffer = io.BytesIO()\n self.write = self.write_buffer.write\n\n self.environ = {}\n wsgiref_util.setup_testing_defaults(self.environ)\n\n self.status = None\n self.response_headers = None\n self.exc_info = None\n\n def tearDown(self):\n self.get_tracer_patcher.stop()\n self.start_span_patcher.stop()\n\n def start_response(self, status, response_headers, exc_info=None):\n self.status = status\n self.response_headers = response_headers\n self.exc_info = exc_info\n return self.write\n", "path": "ext/opentelemetry-ext-testutil/src/opentelemetry/ext/testutil/wsgitestutil.py"}], "after_files": [{"content": "import io\nimport unittest\nimport wsgiref.util as wsgiref_util\nfrom importlib import reload\n\nfrom opentelemetry import trace as trace_api\nfrom opentelemetry.sdk.trace import TracerSource, export\nfrom opentelemetry.sdk.trace.export.in_memory_span_exporter import (\n InMemorySpanExporter,\n)\n\n_MEMORY_EXPORTER = None\n\n\nclass WsgiTestBase(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n global _MEMORY_EXPORTER # pylint:disable=global-statement\n trace_api.set_preferred_tracer_source_implementation(\n lambda T: TracerSource()\n )\n tracer_source = trace_api.tracer_source()\n _MEMORY_EXPORTER = InMemorySpanExporter()\n span_processor = export.SimpleExportSpanProcessor(_MEMORY_EXPORTER)\n tracer_source.add_span_processor(span_processor)\n\n @classmethod\n def tearDownClass(cls):\n reload(trace_api)\n\n def setUp(self):\n\n self.memory_exporter = _MEMORY_EXPORTER\n self.memory_exporter.clear()\n\n self.write_buffer = io.BytesIO()\n self.write = self.write_buffer.write\n\n self.environ = {}\n wsgiref_util.setup_testing_defaults(self.environ)\n\n self.status = None\n self.response_headers = None\n self.exc_info = None\n\n def start_response(self, status, response_headers, exc_info=None):\n self.status = status\n self.response_headers = response_headers\n self.exc_info = exc_info\n return self.write\n", "path": "ext/opentelemetry-ext-testutil/src/opentelemetry/ext/testutil/wsgitestutil.py"}]}
757
593
gh_patches_debug_7939
rasdani/github-patches
git_diff
mozilla__bugbug-3401
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Refactor logging statements to use lazy % formatting Example of logging statements that we want to refactor: https://github.com/mozilla/bugbug/blob/d53595391dbd75379bb49bff12dee4821e4b956c/bugbug/github.py#L61 https://github.com/mozilla/bugbug/blob/69972a1684f788319bf5c2944bbe8eeb79428c7d/scripts/regressor_finder.py#L396 More details can be found in the [pylint docs](https://pylint.readthedocs.io/en/latest/user_guide/messages/warning/logging-fstring-interpolation.html). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scripts/retrieve_training_metrics.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # This Source Code Form is subject to the terms of the Mozilla Public 3 # License, v. 2.0. If a copy of the MPL was not distributed with this file, 4 # You can obtain one at http://mozilla.org/MPL/2.0/. 5 6 import argparse 7 import logging 8 import os 9 import sys 10 from os.path import abspath, join 11 12 import requests 13 import taskcluster 14 15 from bugbug.utils import get_taskcluster_options 16 17 ROOT_URI = "train_{}.per_date" 18 DATE_URI = "train_{}.per_date.{}" 19 BASE_URL = "https://community-tc.services.mozilla.com/api/index/v1/task/{}/artifacts/public/metrics.json" 20 NAMESPACE_URI = "project.bugbug.{}" 21 22 LOGGER = logging.getLogger(__name__) 23 24 logging.basicConfig(level=logging.INFO) 25 26 27 def get_task_metrics_from_uri(index_uri): 28 index_url = BASE_URL.format(index_uri) 29 LOGGER.info("Retrieving metrics from %s", index_url) 30 r = requests.get(index_url) 31 32 if r.status_code == 404: 33 LOGGER.error(f"File not found for URL {index_url}, check your arguments") 34 sys.exit(1) 35 36 r.raise_for_status() 37 38 return r 39 40 41 def get_namespaces(index, index_uri): 42 index_namespaces = index.listNamespaces(index_uri) 43 44 return index_namespaces["namespaces"] 45 46 47 def is_later_or_equal(partial_date, from_date): 48 for partial_date_part, from_date_part in zip(partial_date, from_date): 49 if int(partial_date_part) > int(from_date_part): 50 return True 51 elif int(partial_date_part) < int(from_date_part): 52 return False 53 else: 54 continue 55 56 return True 57 58 59 def get_task_metrics_from_date(model, date, output_directory): 60 options = get_taskcluster_options() 61 62 index = taskcluster.Index(options) 63 64 index.ping() 65 66 # Split the date 67 from_date = date.split(".") 68 69 namespaces = [] 70 71 # Start at the root level 72 # We need an empty list in order to append namespaces part to it 73 namespaces.append([]) 74 75 # Recursively list all namespaces greater or equals than the given date 76 while namespaces: 77 current_ns = namespaces.pop() 78 79 # Handle version level namespaces 80 if not current_ns: 81 ns_uri = ROOT_URI.format(model) 82 else: 83 current_ns_date = ".".join(current_ns) 84 ns_uri = DATE_URI.format(model, current_ns_date) 85 86 ns_full_uri = NAMESPACE_URI.format(ns_uri) 87 88 tasks = index.listTasks(ns_full_uri) 89 for task in tasks["tasks"]: 90 task_uri = task["namespace"] 91 r = get_task_metrics_from_uri(task_uri) 92 93 # Write the file on disk 94 file_name = f"metric_{'_'.join(task_uri.split('.'))}.json" 95 file_path = abspath(join(output_directory, file_name)) 96 with open(file_path, "w") as metric_file: 97 metric_file.write(r.text) 98 LOGGER.info(f"Metrics saved to {file_path!r}") 99 100 for namespace in get_namespaces(index, 
ns_full_uri): 101 new_ns = current_ns.copy() 102 new_ns.append(namespace["name"]) 103 104 if not is_later_or_equal(new_ns, from_date): 105 LOGGER.debug("NEW namespace %s is before %s", new_ns, from_date) 106 continue 107 108 # Might not be efficient but size of `namespaces` shouldn't be too 109 # big as we are doing a depth-first traversal 110 if new_ns not in namespaces: 111 namespaces.append(new_ns) 112 113 114 def main(): 115 description = "Retrieve a model training metrics" 116 parser = argparse.ArgumentParser(description=description) 117 118 parser.add_argument( 119 "-d", 120 "--output-directory", 121 default=os.getcwd(), 122 help="In which directory the script should save the metrics file. The directory must exists", 123 ) 124 parser.add_argument("model", help="Which model to retrieve training metrics from.") 125 parser.add_argument( 126 "date", 127 nargs="?", 128 help="Which date should we retrieve training metrics from. Default to latest", 129 ) 130 131 args = parser.parse_args() 132 133 get_task_metrics_from_date(args.model, args.date, args.output_directory) 134 135 136 if __name__ == "__main__": 137 main() 138 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scripts/retrieve_training_metrics.py b/scripts/retrieve_training_metrics.py --- a/scripts/retrieve_training_metrics.py +++ b/scripts/retrieve_training_metrics.py @@ -95,7 +95,7 @@ file_path = abspath(join(output_directory, file_name)) with open(file_path, "w") as metric_file: metric_file.write(r.text) - LOGGER.info(f"Metrics saved to {file_path!r}") + LOGGER.info("Metrics saved to %r", file_path) for namespace in get_namespaces(index, ns_full_uri): new_ns = current_ns.copy()
{"golden_diff": "diff --git a/scripts/retrieve_training_metrics.py b/scripts/retrieve_training_metrics.py\n--- a/scripts/retrieve_training_metrics.py\n+++ b/scripts/retrieve_training_metrics.py\n@@ -95,7 +95,7 @@\n file_path = abspath(join(output_directory, file_name))\n with open(file_path, \"w\") as metric_file:\n metric_file.write(r.text)\n- LOGGER.info(f\"Metrics saved to {file_path!r}\")\n+ LOGGER.info(\"Metrics saved to %r\", file_path)\n \n for namespace in get_namespaces(index, ns_full_uri):\n new_ns = current_ns.copy()\n", "issue": "Refactor logging statements to use lazy % formatting\nExample of logging statements that we want to refactor:\r\n\r\nhttps://github.com/mozilla/bugbug/blob/d53595391dbd75379bb49bff12dee4821e4b956c/bugbug/github.py#L61\r\n\r\nhttps://github.com/mozilla/bugbug/blob/69972a1684f788319bf5c2944bbe8eeb79428c7d/scripts/regressor_finder.py#L396\r\n\r\nMore details can be found in the [pylint docs](https://pylint.readthedocs.io/en/latest/user_guide/messages/warning/logging-fstring-interpolation.html).\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport argparse\nimport logging\nimport os\nimport sys\nfrom os.path import abspath, join\n\nimport requests\nimport taskcluster\n\nfrom bugbug.utils import get_taskcluster_options\n\nROOT_URI = \"train_{}.per_date\"\nDATE_URI = \"train_{}.per_date.{}\"\nBASE_URL = \"https://community-tc.services.mozilla.com/api/index/v1/task/{}/artifacts/public/metrics.json\"\nNAMESPACE_URI = \"project.bugbug.{}\"\n\nLOGGER = logging.getLogger(__name__)\n\nlogging.basicConfig(level=logging.INFO)\n\n\ndef get_task_metrics_from_uri(index_uri):\n index_url = BASE_URL.format(index_uri)\n LOGGER.info(\"Retrieving metrics from %s\", index_url)\n r = requests.get(index_url)\n\n if r.status_code == 404:\n LOGGER.error(f\"File not found for URL {index_url}, check your arguments\")\n sys.exit(1)\n\n r.raise_for_status()\n\n return r\n\n\ndef get_namespaces(index, index_uri):\n index_namespaces = index.listNamespaces(index_uri)\n\n return index_namespaces[\"namespaces\"]\n\n\ndef is_later_or_equal(partial_date, from_date):\n for partial_date_part, from_date_part in zip(partial_date, from_date):\n if int(partial_date_part) > int(from_date_part):\n return True\n elif int(partial_date_part) < int(from_date_part):\n return False\n else:\n continue\n\n return True\n\n\ndef get_task_metrics_from_date(model, date, output_directory):\n options = get_taskcluster_options()\n\n index = taskcluster.Index(options)\n\n index.ping()\n\n # Split the date\n from_date = date.split(\".\")\n\n namespaces = []\n\n # Start at the root level\n # We need an empty list in order to append namespaces part to it\n namespaces.append([])\n\n # Recursively list all namespaces greater or equals than the given date\n while namespaces:\n current_ns = namespaces.pop()\n\n # Handle version level namespaces\n if not current_ns:\n ns_uri = ROOT_URI.format(model)\n else:\n current_ns_date = \".\".join(current_ns)\n ns_uri = DATE_URI.format(model, current_ns_date)\n\n ns_full_uri = NAMESPACE_URI.format(ns_uri)\n\n tasks = index.listTasks(ns_full_uri)\n for task in tasks[\"tasks\"]:\n task_uri = task[\"namespace\"]\n r = get_task_metrics_from_uri(task_uri)\n\n # Write the file on disk\n file_name = f\"metric_{'_'.join(task_uri.split('.'))}.json\"\n file_path = 
abspath(join(output_directory, file_name))\n with open(file_path, \"w\") as metric_file:\n metric_file.write(r.text)\n LOGGER.info(f\"Metrics saved to {file_path!r}\")\n\n for namespace in get_namespaces(index, ns_full_uri):\n new_ns = current_ns.copy()\n new_ns.append(namespace[\"name\"])\n\n if not is_later_or_equal(new_ns, from_date):\n LOGGER.debug(\"NEW namespace %s is before %s\", new_ns, from_date)\n continue\n\n # Might not be efficient but size of `namespaces` shouldn't be too\n # big as we are doing a depth-first traversal\n if new_ns not in namespaces:\n namespaces.append(new_ns)\n\n\ndef main():\n description = \"Retrieve a model training metrics\"\n parser = argparse.ArgumentParser(description=description)\n\n parser.add_argument(\n \"-d\",\n \"--output-directory\",\n default=os.getcwd(),\n help=\"In which directory the script should save the metrics file. The directory must exists\",\n )\n parser.add_argument(\"model\", help=\"Which model to retrieve training metrics from.\")\n parser.add_argument(\n \"date\",\n nargs=\"?\",\n help=\"Which date should we retrieve training metrics from. Default to latest\",\n )\n\n args = parser.parse_args()\n\n get_task_metrics_from_date(args.model, args.date, args.output_directory)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "scripts/retrieve_training_metrics.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport argparse\nimport logging\nimport os\nimport sys\nfrom os.path import abspath, join\n\nimport requests\nimport taskcluster\n\nfrom bugbug.utils import get_taskcluster_options\n\nROOT_URI = \"train_{}.per_date\"\nDATE_URI = \"train_{}.per_date.{}\"\nBASE_URL = \"https://community-tc.services.mozilla.com/api/index/v1/task/{}/artifacts/public/metrics.json\"\nNAMESPACE_URI = \"project.bugbug.{}\"\n\nLOGGER = logging.getLogger(__name__)\n\nlogging.basicConfig(level=logging.INFO)\n\n\ndef get_task_metrics_from_uri(index_uri):\n index_url = BASE_URL.format(index_uri)\n LOGGER.info(\"Retrieving metrics from %s\", index_url)\n r = requests.get(index_url)\n\n if r.status_code == 404:\n LOGGER.error(f\"File not found for URL {index_url}, check your arguments\")\n sys.exit(1)\n\n r.raise_for_status()\n\n return r\n\n\ndef get_namespaces(index, index_uri):\n index_namespaces = index.listNamespaces(index_uri)\n\n return index_namespaces[\"namespaces\"]\n\n\ndef is_later_or_equal(partial_date, from_date):\n for partial_date_part, from_date_part in zip(partial_date, from_date):\n if int(partial_date_part) > int(from_date_part):\n return True\n elif int(partial_date_part) < int(from_date_part):\n return False\n else:\n continue\n\n return True\n\n\ndef get_task_metrics_from_date(model, date, output_directory):\n options = get_taskcluster_options()\n\n index = taskcluster.Index(options)\n\n index.ping()\n\n # Split the date\n from_date = date.split(\".\")\n\n namespaces = []\n\n # Start at the root level\n # We need an empty list in order to append namespaces part to it\n namespaces.append([])\n\n # Recursively list all namespaces greater or equals than the given date\n while namespaces:\n current_ns = namespaces.pop()\n\n # Handle version level namespaces\n if not current_ns:\n ns_uri = ROOT_URI.format(model)\n else:\n current_ns_date = \".\".join(current_ns)\n ns_uri = DATE_URI.format(model, current_ns_date)\n\n ns_full_uri = 
NAMESPACE_URI.format(ns_uri)\n\n tasks = index.listTasks(ns_full_uri)\n for task in tasks[\"tasks\"]:\n task_uri = task[\"namespace\"]\n r = get_task_metrics_from_uri(task_uri)\n\n # Write the file on disk\n file_name = f\"metric_{'_'.join(task_uri.split('.'))}.json\"\n file_path = abspath(join(output_directory, file_name))\n with open(file_path, \"w\") as metric_file:\n metric_file.write(r.text)\n LOGGER.info(\"Metrics saved to %r\", file_path)\n\n for namespace in get_namespaces(index, ns_full_uri):\n new_ns = current_ns.copy()\n new_ns.append(namespace[\"name\"])\n\n if not is_later_or_equal(new_ns, from_date):\n LOGGER.debug(\"NEW namespace %s is before %s\", new_ns, from_date)\n continue\n\n # Might not be efficient but size of `namespaces` shouldn't be too\n # big as we are doing a depth-first traversal\n if new_ns not in namespaces:\n namespaces.append(new_ns)\n\n\ndef main():\n description = \"Retrieve a model training metrics\"\n parser = argparse.ArgumentParser(description=description)\n\n parser.add_argument(\n \"-d\",\n \"--output-directory\",\n default=os.getcwd(),\n help=\"In which directory the script should save the metrics file. The directory must exists\",\n )\n parser.add_argument(\"model\", help=\"Which model to retrieve training metrics from.\")\n parser.add_argument(\n \"date\",\n nargs=\"?\",\n help=\"Which date should we retrieve training metrics from. Default to latest\",\n )\n\n args = parser.parse_args()\n\n get_task_metrics_from_date(args.model, args.date, args.output_directory)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "scripts/retrieve_training_metrics.py"}]}
1668
132
gh_patches_debug_29495
rasdani/github-patches
git_diff
bridgecrewio__checkov-1215
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- False positive for CKV_AWS_143 ("Ensure S3 bucket has lock configuration enabled by default") **Describe the bug** Checkov expects argument `object_lock_configuration` to be an object, i.e. ```hcl object_lock_configuration = { object_lock_enabled = "Enabled" } ``` Terraform works with the above configuration, but when also declaring rules for the object lock configuration, it expects a block instead, e.g. ```hcl object_lock_configuration { object_lock_enabled = "Enabled" rule { default_retention { mode = "GOVERNANCE" days = 366 } } } ``` **Expected behavior** Checkov should pass for a `object_lock_configuration` argument block. **Desktop (please complete the following information):** - OS: macOS Big Sur 11.3.1 - Checkov Version: 2.0.135 - Terraform version: v0.14.8 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `checkov/terraform/checks/resource/aws/S3BucketObjectLock.py` Content: ``` 1 from checkov.common.models.enums import CheckCategories, CheckResult 2 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceCheck 3 4 5 class S3BucketObjectLock(BaseResourceCheck): 6 def __init__(self): 7 name = "Ensure that S3 bucket has lock configuration enabled by default" 8 id = "CKV_AWS_143" 9 supported_resources = ['aws_s3_bucket'] 10 categories = [CheckCategories.GENERAL_SECURITY] 11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) 12 13 def scan_resource_conf(self, conf): 14 if 'object_lock_configuration' in conf: 15 if 'object_lock_enabled' in conf['object_lock_configuration'][0]: 16 lock = conf['object_lock_configuration'][0]['object_lock_enabled'] 17 if lock == "Enabled": 18 return CheckResult.PASSED 19 else: 20 return CheckResult.FAILED 21 else: 22 return CheckResult.PASSED 23 24 25 check = S3BucketObjectLock() 26 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/checkov/terraform/checks/resource/aws/S3BucketObjectLock.py b/checkov/terraform/checks/resource/aws/S3BucketObjectLock.py --- a/checkov/terraform/checks/resource/aws/S3BucketObjectLock.py +++ b/checkov/terraform/checks/resource/aws/S3BucketObjectLock.py @@ -1,25 +1,26 @@ +from typing import Dict, List, Any + from checkov.common.models.enums import CheckCategories, CheckResult from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceCheck class S3BucketObjectLock(BaseResourceCheck): - def __init__(self): + def __init__(self) -> None: name = "Ensure that S3 bucket has lock configuration enabled by default" id = "CKV_AWS_143" - supported_resources = ['aws_s3_bucket'] + supported_resources = ["aws_s3_bucket"] categories = [CheckCategories.GENERAL_SECURITY] super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) - def scan_resource_conf(self, conf): - if 'object_lock_configuration' in conf: - if 'object_lock_enabled' in conf['object_lock_configuration'][0]: - lock = conf['object_lock_configuration'][0]['object_lock_enabled'] - if lock == "Enabled": - return CheckResult.PASSED - else: - return CheckResult.FAILED - else: - return CheckResult.PASSED + def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult: + lock_conf = conf.get("object_lock_configuration") + if lock_conf and lock_conf[0]: + lock_enabled = lock_conf[0].get("object_lock_enabled") + if lock_enabled in ["Enabled", ["Enabled"]]: + return CheckResult.PASSED + return CheckResult.FAILED + + return CheckResult.UNKNOWN check = S3BucketObjectLock()
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/S3BucketObjectLock.py b/checkov/terraform/checks/resource/aws/S3BucketObjectLock.py\n--- a/checkov/terraform/checks/resource/aws/S3BucketObjectLock.py\n+++ b/checkov/terraform/checks/resource/aws/S3BucketObjectLock.py\n@@ -1,25 +1,26 @@\n+from typing import Dict, List, Any\n+\n from checkov.common.models.enums import CheckCategories, CheckResult\n from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceCheck\n \n \n class S3BucketObjectLock(BaseResourceCheck):\n- def __init__(self):\n+ def __init__(self) -> None:\n name = \"Ensure that S3 bucket has lock configuration enabled by default\"\n id = \"CKV_AWS_143\"\n- supported_resources = ['aws_s3_bucket']\n+ supported_resources = [\"aws_s3_bucket\"]\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n- def scan_resource_conf(self, conf):\n- if 'object_lock_configuration' in conf:\n- if 'object_lock_enabled' in conf['object_lock_configuration'][0]:\n- lock = conf['object_lock_configuration'][0]['object_lock_enabled']\n- if lock == \"Enabled\":\n- return CheckResult.PASSED\n- else:\n- return CheckResult.FAILED\n- else:\n- return CheckResult.PASSED\n+ def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:\n+ lock_conf = conf.get(\"object_lock_configuration\")\n+ if lock_conf and lock_conf[0]:\n+ lock_enabled = lock_conf[0].get(\"object_lock_enabled\")\n+ if lock_enabled in [\"Enabled\", [\"Enabled\"]]:\n+ return CheckResult.PASSED\n+ return CheckResult.FAILED\n+\n+ return CheckResult.UNKNOWN\n \n \n check = S3BucketObjectLock()\n", "issue": "False positive for CKV_AWS_143 (\"Ensure S3 bucket has lock configuration enabled by default\")\n**Describe the bug**\r\n\r\nCheckov expects argument `object_lock_configuration` to be an object, i.e.\r\n\r\n```hcl\r\nobject_lock_configuration = {\r\n object_lock_enabled = \"Enabled\"\r\n}\r\n```\r\n\r\nTerraform works with the above configuration, but when also declaring rules for the object lock configuration, it expects a block instead, e.g.\r\n```hcl\r\nobject_lock_configuration {\r\n object_lock_enabled = \"Enabled\"\r\n\r\n rule {\r\n default_retention {\r\n mode = \"GOVERNANCE\"\r\n days = 366\r\n }\r\n }\r\n}\r\n```\r\n\r\n**Expected behavior**\r\nCheckov should pass for a `object_lock_configuration` argument block.\r\n\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: macOS Big Sur 11.3.1\r\n - Checkov Version: 2.0.135\r\n - Terraform version: v0.14.8\r\n\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceCheck\n\n\nclass S3BucketObjectLock(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure that S3 bucket has lock configuration enabled by default\"\n id = \"CKV_AWS_143\"\n supported_resources = ['aws_s3_bucket']\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n if 'object_lock_configuration' in conf:\n if 'object_lock_enabled' in conf['object_lock_configuration'][0]:\n lock = conf['object_lock_configuration'][0]['object_lock_enabled']\n if lock == \"Enabled\":\n return CheckResult.PASSED\n else:\n return CheckResult.FAILED\n else:\n return CheckResult.PASSED\n\n\ncheck = 
S3BucketObjectLock()\n", "path": "checkov/terraform/checks/resource/aws/S3BucketObjectLock.py"}], "after_files": [{"content": "from typing import Dict, List, Any\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceCheck\n\n\nclass S3BucketObjectLock(BaseResourceCheck):\n def __init__(self) -> None:\n name = \"Ensure that S3 bucket has lock configuration enabled by default\"\n id = \"CKV_AWS_143\"\n supported_resources = [\"aws_s3_bucket\"]\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:\n lock_conf = conf.get(\"object_lock_configuration\")\n if lock_conf and lock_conf[0]:\n lock_enabled = lock_conf[0].get(\"object_lock_enabled\")\n if lock_enabled in [\"Enabled\", [\"Enabled\"]]:\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n return CheckResult.UNKNOWN\n\n\ncheck = S3BucketObjectLock()\n", "path": "checkov/terraform/checks/resource/aws/S3BucketObjectLock.py"}]}
743
439
gh_patches_debug_39587
rasdani/github-patches
git_diff
ansible-collections__amazon.aws-1980
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `lookup` environment variables `aws_ec2.yml` file ### Summary I'm trying to `lookup` environment variables in `aws_ec2.yml` file but the `lookup` seems to be not working as expected, is this something intentionally not allowed? ### Issue Type Bug Report ### Component Name ec2_instnace ### Ansible Version ```console (paste below) $ ansible --version ansible [core 2.15.2] config file = /Users/<user>/.ansible.cfg configured module search path = ['/Users/<user>/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /opt/homebrew/lib/python3.11/site-packages/ansible ansible collection location = /Users/<user>/.ansible/collections:/usr/share/ansible/collections executable location = /opt/homebrew/bin/ansible python version = 3.11.6 (main, Oct 2 2023, 20:46:14) [Clang 14.0.3 (clang-1403.0.22.14.1)] (/opt/homebrew/opt/[email protected]/bin/python3.11) jinja version = 3.1.2 libyaml = True ``` ### Collection Versions ```console (paste below) $ ansible-galaxy collection list # /Users/<user>/.ansible/collections/ansible_collections Collection Version --------------------- ------- amazon.aws 6.5.0 ansible.windows 2.0.0 awx.awx 21.0.0 chocolatey.chocolatey 1.5.1 community.mysql 3.7.2 community.windows 2.0.0 lowlydba.sqlserver 0.7.0 microsoft.ad 1.4.1 **** ``` ### AWS SDK versions ```console (paste below) $ pip show boto boto3 botocore Name: boto3 Version: 1.28.21 Summary: The AWS SDK for Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /opt/homebrew/lib/python3.11/site-packages Requires: botocore, jmespath, s3transfer Required-by: --- Name: botocore Version: 1.31.21 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: License: Apache License 2.0 Location: /opt/homebrew/lib/python3.11/site-packages Requires: jmespath, python-dateutil, urllib3 Required-by: aws-secretsmanager-caching, boto3, **s3transfer** ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed CONFIG_FILE() = /Users/<user>/.ansible.cfg PAGER(env: PAGER) = less ``` ### OS / Environment macOS Ventura Version 13.6.2 (22G320) ### Steps to Reproduce <!--- Paste example playbooks or commands between quotes below --> ```yaml (paste below) plugin: aws_ec2 assume_role_arn: "arn:aws:iam::{{ lookup('env', 'AWS_ACCOUNT_ID') | default('XXXXXXXXXXX', true) }}:role/ansible-manager" regions: - "{{ lookup('env', 'AWS_REGION') | default('us-east-1', true) }}" ``` i have this `yml` file which is something i was expecting it would lookup the env variables and get the authorization ### Expected Results Expected result that `lookup` would happen and assume role/region would get set right, instead it is being parsed as plain text and `lookup` is not being recognized is what i understand looking at the result below ### Actual Results ```console (paste below) aws_ec2.yml with auto plugin: An error occurred (AccessDenied) when calling the AssumeRole operation: User: <UserName> is not authorized to perform: sts:AssumeRole on resource: arn:aws:iam::{{ lookup('ansible.builtin.env', 'AWS_ACCOUNT_ID') | default('XXXXXXXXXXX', True) }}:role/ansible-manager aws_ec2.yml with auto plugin: Couldn't connect to AWS: Provided region_name '{{ lookup('env', 'AWS_REGION') | default('us-east-1', true) }}' doesn't match a supported format. 
``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `plugins/plugin_utils/inventory.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 # Copyright: (c) 2022, Ansible Project 4 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) 5 6 try: 7 import boto3 8 import botocore 9 except ImportError: 10 pass # will be captured by imported HAS_BOTO3 11 12 from ansible.plugins.inventory import BaseInventoryPlugin 13 from ansible.plugins.inventory import Cacheable 14 from ansible.plugins.inventory import Constructable 15 16 from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code 17 from ansible_collections.amazon.aws.plugins.plugin_utils.base import AWSPluginBase 18 from ansible_collections.amazon.aws.plugins.plugin_utils.botocore import AnsibleBotocoreError 19 20 21 def _boto3_session(profile_name=None): 22 if profile_name is None: 23 return boto3.Session() 24 return boto3.session.Session(profile_name=profile_name) 25 26 27 class AWSInventoryBase(BaseInventoryPlugin, Constructable, Cacheable, AWSPluginBase): 28 class TemplatedOptions: 29 # When someone looks up the TEMPLATABLE_OPTIONS using get() any templates 30 # will be templated using the loader passed to parse. 31 TEMPLATABLE_OPTIONS = ( 32 "access_key", 33 "secret_key", 34 "session_token", 35 "profile", 36 "iam_role_name", 37 ) 38 39 def __init__(self, templar, options): 40 self.original_options = options 41 self.templar = templar 42 43 def __getitem__(self, *args): 44 return self.original_options.__getitem__(self, *args) 45 46 def __setitem__(self, *args): 47 return self.original_options.__setitem__(self, *args) 48 49 def get(self, *args): 50 value = self.original_options.get(*args) 51 if not value: 52 return value 53 if args[0] not in self.TEMPLATABLE_OPTIONS: 54 return value 55 if not self.templar.is_template(value): 56 return value 57 58 return self.templar.template(variable=value, disable_lookups=False) 59 60 def get_options(self, *args): 61 original_options = super().get_options(*args) 62 if not self.templar: 63 return original_options 64 return self.TemplatedOptions(self.templar, original_options) 65 66 def __init__(self): 67 super().__init__() 68 self._frozen_credentials = {} 69 70 # pylint: disable=too-many-arguments 71 def parse(self, inventory, loader, path, cache=True, botocore_version=None, boto3_version=None): 72 super().parse(inventory, loader, path) 73 self.require_aws_sdk(botocore_version=botocore_version, boto3_version=boto3_version) 74 self._read_config_data(path) 75 self._set_frozen_credentials() 76 77 def client(self, *args, **kwargs): 78 kw_args = dict(self._frozen_credentials) 79 kw_args.update(kwargs) 80 return super().client(*args, **kw_args) 81 82 def resource(self, *args, **kwargs): 83 kw_args = dict(self._frozen_credentials) 84 kw_args.update(kwargs) 85 return super().resource(*args, **kw_args) 86 87 def _freeze_iam_role(self, iam_role_arn): 88 if hasattr(self, "ansible_name"): 89 role_session_name = f"ansible_aws_{self.ansible_name}_dynamic_inventory" 90 else: 91 role_session_name = "ansible_aws_dynamic_inventory" 92 assume_params = {"RoleArn": iam_role_arn, "RoleSessionName": role_session_name} 93 94 try: 95 sts = self.client("sts") 96 assumed_role = sts.assume_role(**assume_params) 97 except AnsibleBotocoreError as e: 98 self.fail_aws(f"Unable to assume role {iam_role_arn}", 
exception=e) 99 100 credentials = assumed_role.get("Credentials") 101 if not credentials: 102 self.fail_aws(f"Unable to assume role {iam_role_arn}") 103 104 self._frozen_credentials = { 105 "profile_name": None, 106 "aws_access_key_id": credentials.get("AccessKeyId"), 107 "aws_secret_access_key": credentials.get("SecretAccessKey"), 108 "aws_session_token": credentials.get("SessionToken"), 109 } 110 111 def _set_frozen_credentials(self): 112 options = self.get_options() 113 iam_role_arn = options.get("assume_role_arn") 114 if iam_role_arn: 115 self._freeze_iam_role(iam_role_arn) 116 117 def _describe_regions(self, service): 118 # Try pulling a list of regions from the service 119 try: 120 initial_region = self.region or "us-east-1" 121 client = self.client(service, region=initial_region) 122 resp = client.describe_regions() 123 except AttributeError: 124 # Not all clients support describe 125 pass 126 except is_boto3_error_code("UnauthorizedOperation"): 127 self.warn(f"UnauthorizedOperation when trying to list {service} regions") 128 except botocore.exceptions.NoRegionError: 129 self.warn(f"NoRegionError when trying to list {service} regions") 130 except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: 131 self.warn(f"Unexpected error while trying to list {service} regions: {e}") 132 else: 133 regions = [x["RegionName"] for x in resp.get("Regions", [])] 134 if regions: 135 return regions 136 return None 137 138 def _boto3_regions(self, service): 139 options = self.get_options() 140 141 if options.get("regions"): 142 return options.get("regions") 143 144 # boto3 has hard coded lists of available regions for resources, however this does bit-rot 145 # As such we try to query the service, and fall back to ec2 for a list of regions 146 for resource_type in list({service, "ec2"}): 147 regions = self._describe_regions(resource_type) 148 if regions: 149 return regions 150 151 # fallback to local list hardcoded in boto3 if still no regions 152 session = _boto3_session(options.get("profile")) 153 regions = session.get_available_regions(service) 154 155 if not regions: 156 # I give up, now you MUST give me regions 157 self.fail_aws( 158 "Unable to get regions list from available methods, you must specify the 'regions' option to continue." 159 ) 160 161 return regions 162 163 def all_clients(self, service): 164 """ 165 Generator that yields a boto3 client and the region 166 167 :param service: The boto3 service to connect to. 168 169 Note: For services which don't support 'DescribeRegions' this may include bad 170 endpoints, and as such EndpointConnectionError should be cleanly handled as a non-fatal 171 error. 
172 """ 173 regions = self._boto3_regions(service=service) 174 175 for region in regions: 176 connection = self.client(service, region=region) 177 yield connection, region 178 179 def get_cached_result(self, path, cache): 180 # false when refresh_cache or --flush-cache is used 181 if not cache: 182 return False, None 183 # get the user-specified directive 184 if not self.get_option("cache"): 185 return False, None 186 187 cache_key = self.get_cache_key(path) 188 try: 189 cached_value = self._cache[cache_key] 190 except KeyError: 191 # if cache expires or cache file doesn"t exist 192 return False, None 193 194 return True, cached_value 195 196 def update_cached_result(self, path, cache, result): 197 if not self.get_option("cache"): 198 return 199 200 cache_key = self.get_cache_key(path) 201 # We weren't explicitly told to flush the cache, and there's already a cache entry, 202 # this means that the result we're being passed came from the cache. As such we don't 203 # want to "update" the cache as that could reset a TTL on the cache entry. 204 if cache and cache_key in self._cache: 205 return 206 207 self._cache[cache_key] = result 208 209 def verify_file(self, path): 210 """ 211 :param path: the path to the inventory config file 212 :return the contents of the config file 213 """ 214 if not super().verify_file(path): 215 return False 216 217 if hasattr(self, "INVENTORY_FILE_SUFFIXES"): 218 if not path.endswith(self.INVENTORY_FILE_SUFFIXES): 219 return False 220 221 return True 222 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/plugins/plugin_utils/inventory.py b/plugins/plugin_utils/inventory.py --- a/plugins/plugin_utils/inventory.py +++ b/plugins/plugin_utils/inventory.py @@ -33,7 +33,10 @@ "secret_key", "session_token", "profile", - "iam_role_name", + "endpoint_url", + "assume_role_arn", + "region", + "regions", ) def __init__(self, templar, options): @@ -48,20 +51,21 @@ def get(self, *args): value = self.original_options.get(*args) - if not value: - return value - if args[0] not in self.TEMPLATABLE_OPTIONS: - return value - if not self.templar.is_template(value): + if ( + not value + or not self.templar + or args[0] not in self.TEMPLATABLE_OPTIONS + or not self.templar.is_template(value) + ): return value return self.templar.template(variable=value, disable_lookups=False) def get_options(self, *args): - original_options = super().get_options(*args) - if not self.templar: - return original_options - return self.TemplatedOptions(self.templar, original_options) + return self.TemplatedOptions(self.templar, super().get_options(*args)) + + def get_option(self, option, hostvars=None): + return self.TemplatedOptions(self.templar, {option: super().get_option(option, hostvars)}).get(option) def __init__(self): super().__init__() @@ -109,8 +113,7 @@ } def _set_frozen_credentials(self): - options = self.get_options() - iam_role_arn = options.get("assume_role_arn") + iam_role_arn = self.get_option("assume_role_arn") if iam_role_arn: self._freeze_iam_role(iam_role_arn) @@ -136,10 +139,9 @@ return None def _boto3_regions(self, service): - options = self.get_options() - - if options.get("regions"): - return options.get("regions") + regions = self.get_option("regions") + if regions: + return regions # boto3 has hard coded lists of available regions for resources, however this does bit-rot # As such we try to query the service, and fall back to ec2 for a list of regions @@ -149,7 +151,7 @@ return regions # fallback to local list hardcoded in boto3 if still no regions - session = _boto3_session(options.get("profile")) + session = _boto3_session(self.get_option("profile")) regions = session.get_available_regions(service) if not regions:
{"golden_diff": "diff --git a/plugins/plugin_utils/inventory.py b/plugins/plugin_utils/inventory.py\n--- a/plugins/plugin_utils/inventory.py\n+++ b/plugins/plugin_utils/inventory.py\n@@ -33,7 +33,10 @@\n \"secret_key\",\n \"session_token\",\n \"profile\",\n- \"iam_role_name\",\n+ \"endpoint_url\",\n+ \"assume_role_arn\",\n+ \"region\",\n+ \"regions\",\n )\n \n def __init__(self, templar, options):\n@@ -48,20 +51,21 @@\n \n def get(self, *args):\n value = self.original_options.get(*args)\n- if not value:\n- return value\n- if args[0] not in self.TEMPLATABLE_OPTIONS:\n- return value\n- if not self.templar.is_template(value):\n+ if (\n+ not value\n+ or not self.templar\n+ or args[0] not in self.TEMPLATABLE_OPTIONS\n+ or not self.templar.is_template(value)\n+ ):\n return value\n \n return self.templar.template(variable=value, disable_lookups=False)\n \n def get_options(self, *args):\n- original_options = super().get_options(*args)\n- if not self.templar:\n- return original_options\n- return self.TemplatedOptions(self.templar, original_options)\n+ return self.TemplatedOptions(self.templar, super().get_options(*args))\n+\n+ def get_option(self, option, hostvars=None):\n+ return self.TemplatedOptions(self.templar, {option: super().get_option(option, hostvars)}).get(option)\n \n def __init__(self):\n super().__init__()\n@@ -109,8 +113,7 @@\n }\n \n def _set_frozen_credentials(self):\n- options = self.get_options()\n- iam_role_arn = options.get(\"assume_role_arn\")\n+ iam_role_arn = self.get_option(\"assume_role_arn\")\n if iam_role_arn:\n self._freeze_iam_role(iam_role_arn)\n \n@@ -136,10 +139,9 @@\n return None\n \n def _boto3_regions(self, service):\n- options = self.get_options()\n-\n- if options.get(\"regions\"):\n- return options.get(\"regions\")\n+ regions = self.get_option(\"regions\")\n+ if regions:\n+ return regions\n \n # boto3 has hard coded lists of available regions for resources, however this does bit-rot\n # As such we try to query the service, and fall back to ec2 for a list of regions\n@@ -149,7 +151,7 @@\n return regions\n \n # fallback to local list hardcoded in boto3 if still no regions\n- session = _boto3_session(options.get(\"profile\"))\n+ session = _boto3_session(self.get_option(\"profile\"))\n regions = session.get_available_regions(service)\n \n if not regions:\n", "issue": "`lookup` environment variables `aws_ec2.yml` file\n### Summary\n\nI'm trying to `lookup` environment variables in `aws_ec2.yml` file but the `lookup` seems to be not working as expected, is this something intentionally not allowed? 
\n\n### Issue Type\n\nBug Report\n\n### Component Name\n\nec2_instnace\n\n### Ansible Version\n\n```console (paste below)\r\n$ ansible --version\r\nansible [core 2.15.2]\r\n config file = /Users/<user>/.ansible.cfg\r\n configured module search path = ['/Users/<user>/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']\r\n ansible python module location = /opt/homebrew/lib/python3.11/site-packages/ansible\r\n ansible collection location = /Users/<user>/.ansible/collections:/usr/share/ansible/collections\r\n executable location = /opt/homebrew/bin/ansible\r\n python version = 3.11.6 (main, Oct 2 2023, 20:46:14) [Clang 14.0.3 (clang-1403.0.22.14.1)] (/opt/homebrew/opt/[email protected]/bin/python3.11)\r\n jinja version = 3.1.2\r\n libyaml = True\r\n```\r\n\n\n### Collection Versions\n\n```console (paste below)\r\n$ ansible-galaxy collection list\r\n\r\n# /Users/<user>/.ansible/collections/ansible_collections\r\nCollection Version\r\n--------------------- -------\r\namazon.aws 6.5.0 \r\nansible.windows 2.0.0 \r\nawx.awx 21.0.0 \r\nchocolatey.chocolatey 1.5.1 \r\ncommunity.mysql 3.7.2 \r\ncommunity.windows 2.0.0 \r\nlowlydba.sqlserver 0.7.0 \r\nmicrosoft.ad 1.4.1 ****\r\n```\r\n\n\n### AWS SDK versions\n\n```console (paste below)\r\n$ pip show boto boto3 botocore\r\nName: boto3\r\nVersion: 1.28.21\r\nSummary: The AWS SDK for Python\r\nHome-page: https://github.com/boto/boto3\r\nAuthor: Amazon Web Services\r\nAuthor-email: \r\nLicense: Apache License 2.0\r\nLocation: /opt/homebrew/lib/python3.11/site-packages\r\nRequires: botocore, jmespath, s3transfer\r\nRequired-by: \r\n---\r\nName: botocore\r\nVersion: 1.31.21\r\nSummary: Low-level, data-driven core of boto 3.\r\nHome-page: https://github.com/boto/botocore\r\nAuthor: Amazon Web Services\r\nAuthor-email: \r\nLicense: Apache License 2.0\r\nLocation: /opt/homebrew/lib/python3.11/site-packages\r\nRequires: jmespath, python-dateutil, urllib3\r\nRequired-by: aws-secretsmanager-caching, boto3, **s3transfer**\r\n```\r\n\n\n### Configuration\n\n```console (paste below)\r\n$ ansible-config dump --only-changed\r\nCONFIG_FILE() = /Users/<user>/.ansible.cfg\r\nPAGER(env: PAGER) = less\r\n```\r\n\n\n### OS / Environment\n\nmacOS Ventura Version 13.6.2 (22G320)\n\n### Steps to Reproduce\n\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml (paste below)\r\nplugin: aws_ec2\r\nassume_role_arn: \"arn:aws:iam::{{ lookup('env', 'AWS_ACCOUNT_ID') | default('XXXXXXXXXXX', true) }}:role/ansible-manager\"\r\nregions:\r\n - \"{{ lookup('env', 'AWS_REGION') | default('us-east-1', true) }}\"\r\n```\r\n\r\ni have this `yml` file which is something i was expecting it would lookup the env variables and get the authorization \n\n### Expected Results\n\nExpected result that `lookup` would happen and assume role/region would get set right, instead it is being parsed as plain text and `lookup` is not being recognized is what i understand looking at the result below\r\n\n\n### Actual Results\n\n```console (paste below)\r\naws_ec2.yml with auto plugin: An error occurred (AccessDenied) when calling the AssumeRole operation: User:\r\n<UserName> is not\r\nauthorized to perform: sts:AssumeRole on resource: arn:aws:iam::{{ lookup('ansible.builtin.env', 'AWS_ACCOUNT_ID') |\r\ndefault('XXXXXXXXXXX', True) }}:role/ansible-manager\r\n\r\naws_ec2.yml with auto plugin: Couldn't connect to AWS: Provided region_name '{{ lookup('env', 'AWS_REGION') |\r\ndefault('us-east-1', true) }}' doesn't match a supported format.\r\n```\r\n\n\n### Code of 
Conduct\n\n- [X] I agree to follow the Ansible Code of Conduct\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2022, Ansible Project\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\ntry:\n import boto3\n import botocore\nexcept ImportError:\n pass # will be captured by imported HAS_BOTO3\n\nfrom ansible.plugins.inventory import BaseInventoryPlugin\nfrom ansible.plugins.inventory import Cacheable\nfrom ansible.plugins.inventory import Constructable\n\nfrom ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code\nfrom ansible_collections.amazon.aws.plugins.plugin_utils.base import AWSPluginBase\nfrom ansible_collections.amazon.aws.plugins.plugin_utils.botocore import AnsibleBotocoreError\n\n\ndef _boto3_session(profile_name=None):\n if profile_name is None:\n return boto3.Session()\n return boto3.session.Session(profile_name=profile_name)\n\n\nclass AWSInventoryBase(BaseInventoryPlugin, Constructable, Cacheable, AWSPluginBase):\n class TemplatedOptions:\n # When someone looks up the TEMPLATABLE_OPTIONS using get() any templates\n # will be templated using the loader passed to parse.\n TEMPLATABLE_OPTIONS = (\n \"access_key\",\n \"secret_key\",\n \"session_token\",\n \"profile\",\n \"iam_role_name\",\n )\n\n def __init__(self, templar, options):\n self.original_options = options\n self.templar = templar\n\n def __getitem__(self, *args):\n return self.original_options.__getitem__(self, *args)\n\n def __setitem__(self, *args):\n return self.original_options.__setitem__(self, *args)\n\n def get(self, *args):\n value = self.original_options.get(*args)\n if not value:\n return value\n if args[0] not in self.TEMPLATABLE_OPTIONS:\n return value\n if not self.templar.is_template(value):\n return value\n\n return self.templar.template(variable=value, disable_lookups=False)\n\n def get_options(self, *args):\n original_options = super().get_options(*args)\n if not self.templar:\n return original_options\n return self.TemplatedOptions(self.templar, original_options)\n\n def __init__(self):\n super().__init__()\n self._frozen_credentials = {}\n\n # pylint: disable=too-many-arguments\n def parse(self, inventory, loader, path, cache=True, botocore_version=None, boto3_version=None):\n super().parse(inventory, loader, path)\n self.require_aws_sdk(botocore_version=botocore_version, boto3_version=boto3_version)\n self._read_config_data(path)\n self._set_frozen_credentials()\n\n def client(self, *args, **kwargs):\n kw_args = dict(self._frozen_credentials)\n kw_args.update(kwargs)\n return super().client(*args, **kw_args)\n\n def resource(self, *args, **kwargs):\n kw_args = dict(self._frozen_credentials)\n kw_args.update(kwargs)\n return super().resource(*args, **kw_args)\n\n def _freeze_iam_role(self, iam_role_arn):\n if hasattr(self, \"ansible_name\"):\n role_session_name = f\"ansible_aws_{self.ansible_name}_dynamic_inventory\"\n else:\n role_session_name = \"ansible_aws_dynamic_inventory\"\n assume_params = {\"RoleArn\": iam_role_arn, \"RoleSessionName\": role_session_name}\n\n try:\n sts = self.client(\"sts\")\n assumed_role = sts.assume_role(**assume_params)\n except AnsibleBotocoreError as e:\n self.fail_aws(f\"Unable to assume role {iam_role_arn}\", exception=e)\n\n credentials = assumed_role.get(\"Credentials\")\n if not credentials:\n self.fail_aws(f\"Unable to assume role {iam_role_arn}\")\n\n self._frozen_credentials = {\n \"profile_name\": None,\n \"aws_access_key_id\": 
credentials.get(\"AccessKeyId\"),\n \"aws_secret_access_key\": credentials.get(\"SecretAccessKey\"),\n \"aws_session_token\": credentials.get(\"SessionToken\"),\n }\n\n def _set_frozen_credentials(self):\n options = self.get_options()\n iam_role_arn = options.get(\"assume_role_arn\")\n if iam_role_arn:\n self._freeze_iam_role(iam_role_arn)\n\n def _describe_regions(self, service):\n # Try pulling a list of regions from the service\n try:\n initial_region = self.region or \"us-east-1\"\n client = self.client(service, region=initial_region)\n resp = client.describe_regions()\n except AttributeError:\n # Not all clients support describe\n pass\n except is_boto3_error_code(\"UnauthorizedOperation\"):\n self.warn(f\"UnauthorizedOperation when trying to list {service} regions\")\n except botocore.exceptions.NoRegionError:\n self.warn(f\"NoRegionError when trying to list {service} regions\")\n except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:\n self.warn(f\"Unexpected error while trying to list {service} regions: {e}\")\n else:\n regions = [x[\"RegionName\"] for x in resp.get(\"Regions\", [])]\n if regions:\n return regions\n return None\n\n def _boto3_regions(self, service):\n options = self.get_options()\n\n if options.get(\"regions\"):\n return options.get(\"regions\")\n\n # boto3 has hard coded lists of available regions for resources, however this does bit-rot\n # As such we try to query the service, and fall back to ec2 for a list of regions\n for resource_type in list({service, \"ec2\"}):\n regions = self._describe_regions(resource_type)\n if regions:\n return regions\n\n # fallback to local list hardcoded in boto3 if still no regions\n session = _boto3_session(options.get(\"profile\"))\n regions = session.get_available_regions(service)\n\n if not regions:\n # I give up, now you MUST give me regions\n self.fail_aws(\n \"Unable to get regions list from available methods, you must specify the 'regions' option to continue.\"\n )\n\n return regions\n\n def all_clients(self, service):\n \"\"\"\n Generator that yields a boto3 client and the region\n\n :param service: The boto3 service to connect to.\n\n Note: For services which don't support 'DescribeRegions' this may include bad\n endpoints, and as such EndpointConnectionError should be cleanly handled as a non-fatal\n error.\n \"\"\"\n regions = self._boto3_regions(service=service)\n\n for region in regions:\n connection = self.client(service, region=region)\n yield connection, region\n\n def get_cached_result(self, path, cache):\n # false when refresh_cache or --flush-cache is used\n if not cache:\n return False, None\n # get the user-specified directive\n if not self.get_option(\"cache\"):\n return False, None\n\n cache_key = self.get_cache_key(path)\n try:\n cached_value = self._cache[cache_key]\n except KeyError:\n # if cache expires or cache file doesn\"t exist\n return False, None\n\n return True, cached_value\n\n def update_cached_result(self, path, cache, result):\n if not self.get_option(\"cache\"):\n return\n\n cache_key = self.get_cache_key(path)\n # We weren't explicitly told to flush the cache, and there's already a cache entry,\n # this means that the result we're being passed came from the cache. 
As such we don't\n # want to \"update\" the cache as that could reset a TTL on the cache entry.\n if cache and cache_key in self._cache:\n return\n\n self._cache[cache_key] = result\n\n def verify_file(self, path):\n \"\"\"\n :param path: the path to the inventory config file\n :return the contents of the config file\n \"\"\"\n if not super().verify_file(path):\n return False\n\n if hasattr(self, \"INVENTORY_FILE_SUFFIXES\"):\n if not path.endswith(self.INVENTORY_FILE_SUFFIXES):\n return False\n\n return True\n", "path": "plugins/plugin_utils/inventory.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2022, Ansible Project\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\ntry:\n import boto3\n import botocore\nexcept ImportError:\n pass # will be captured by imported HAS_BOTO3\n\nfrom ansible.plugins.inventory import BaseInventoryPlugin\nfrom ansible.plugins.inventory import Cacheable\nfrom ansible.plugins.inventory import Constructable\n\nfrom ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code\nfrom ansible_collections.amazon.aws.plugins.plugin_utils.base import AWSPluginBase\nfrom ansible_collections.amazon.aws.plugins.plugin_utils.botocore import AnsibleBotocoreError\n\n\ndef _boto3_session(profile_name=None):\n if profile_name is None:\n return boto3.Session()\n return boto3.session.Session(profile_name=profile_name)\n\n\nclass AWSInventoryBase(BaseInventoryPlugin, Constructable, Cacheable, AWSPluginBase):\n class TemplatedOptions:\n # When someone looks up the TEMPLATABLE_OPTIONS using get() any templates\n # will be templated using the loader passed to parse.\n TEMPLATABLE_OPTIONS = (\n \"access_key\",\n \"secret_key\",\n \"session_token\",\n \"profile\",\n \"endpoint_url\",\n \"assume_role_arn\",\n \"region\",\n \"regions\",\n )\n\n def __init__(self, templar, options):\n self.original_options = options\n self.templar = templar\n\n def __getitem__(self, *args):\n return self.original_options.__getitem__(self, *args)\n\n def __setitem__(self, *args):\n return self.original_options.__setitem__(self, *args)\n\n def get(self, *args):\n value = self.original_options.get(*args)\n if (\n not value\n or not self.templar\n or args[0] not in self.TEMPLATABLE_OPTIONS\n or not self.templar.is_template(value)\n ):\n return value\n\n return self.templar.template(variable=value, disable_lookups=False)\n\n def get_options(self, *args):\n return self.TemplatedOptions(self.templar, super().get_options(*args))\n\n def get_option(self, option, hostvars=None):\n return self.TemplatedOptions(self.templar, {option: super().get_option(option, hostvars)}).get(option)\n\n def __init__(self):\n super().__init__()\n self._frozen_credentials = {}\n\n # pylint: disable=too-many-arguments\n def parse(self, inventory, loader, path, cache=True, botocore_version=None, boto3_version=None):\n super().parse(inventory, loader, path)\n self.require_aws_sdk(botocore_version=botocore_version, boto3_version=boto3_version)\n self._read_config_data(path)\n self._set_frozen_credentials()\n\n def client(self, *args, **kwargs):\n kw_args = dict(self._frozen_credentials)\n kw_args.update(kwargs)\n return super().client(*args, **kw_args)\n\n def resource(self, *args, **kwargs):\n kw_args = dict(self._frozen_credentials)\n kw_args.update(kwargs)\n return super().resource(*args, **kw_args)\n\n def _freeze_iam_role(self, iam_role_arn):\n if hasattr(self, \"ansible_name\"):\n role_session_name = 
f\"ansible_aws_{self.ansible_name}_dynamic_inventory\"\n else:\n role_session_name = \"ansible_aws_dynamic_inventory\"\n assume_params = {\"RoleArn\": iam_role_arn, \"RoleSessionName\": role_session_name}\n\n try:\n sts = self.client(\"sts\")\n assumed_role = sts.assume_role(**assume_params)\n except AnsibleBotocoreError as e:\n self.fail_aws(f\"Unable to assume role {iam_role_arn}\", exception=e)\n\n credentials = assumed_role.get(\"Credentials\")\n if not credentials:\n self.fail_aws(f\"Unable to assume role {iam_role_arn}\")\n\n self._frozen_credentials = {\n \"profile_name\": None,\n \"aws_access_key_id\": credentials.get(\"AccessKeyId\"),\n \"aws_secret_access_key\": credentials.get(\"SecretAccessKey\"),\n \"aws_session_token\": credentials.get(\"SessionToken\"),\n }\n\n def _set_frozen_credentials(self):\n iam_role_arn = self.get_option(\"assume_role_arn\")\n if iam_role_arn:\n self._freeze_iam_role(iam_role_arn)\n\n def _describe_regions(self, service):\n # Try pulling a list of regions from the service\n try:\n initial_region = self.region or \"us-east-1\"\n client = self.client(service, region=initial_region)\n resp = client.describe_regions()\n except AttributeError:\n # Not all clients support describe\n pass\n except is_boto3_error_code(\"UnauthorizedOperation\"):\n self.warn(f\"UnauthorizedOperation when trying to list {service} regions\")\n except botocore.exceptions.NoRegionError:\n self.warn(f\"NoRegionError when trying to list {service} regions\")\n except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:\n self.warn(f\"Unexpected error while trying to list {service} regions: {e}\")\n else:\n regions = [x[\"RegionName\"] for x in resp.get(\"Regions\", [])]\n if regions:\n return regions\n return None\n\n def _boto3_regions(self, service):\n regions = self.get_option(\"regions\")\n if regions:\n return regions\n\n # boto3 has hard coded lists of available regions for resources, however this does bit-rot\n # As such we try to query the service, and fall back to ec2 for a list of regions\n for resource_type in list({service, \"ec2\"}):\n regions = self._describe_regions(resource_type)\n if regions:\n return regions\n\n # fallback to local list hardcoded in boto3 if still no regions\n session = _boto3_session(self.get_option(\"profile\"))\n regions = session.get_available_regions(service)\n\n if not regions:\n # I give up, now you MUST give me regions\n self.fail_aws(\n \"Unable to get regions list from available methods, you must specify the 'regions' option to continue.\"\n )\n\n return regions\n\n def all_clients(self, service):\n \"\"\"\n Generator that yields a boto3 client and the region\n\n :param service: The boto3 service to connect to.\n\n Note: For services which don't support 'DescribeRegions' this may include bad\n endpoints, and as such EndpointConnectionError should be cleanly handled as a non-fatal\n error.\n \"\"\"\n regions = self._boto3_regions(service=service)\n\n for region in regions:\n connection = self.client(service, region=region)\n yield connection, region\n\n def get_cached_result(self, path, cache):\n # false when refresh_cache or --flush-cache is used\n if not cache:\n return False, None\n # get the user-specified directive\n if not self.get_option(\"cache\"):\n return False, None\n\n cache_key = self.get_cache_key(path)\n try:\n cached_value = self._cache[cache_key]\n except KeyError:\n # if cache expires or cache file doesn\"t exist\n return False, None\n\n return True, cached_value\n\n def update_cached_result(self, 
path, cache, result):\n if not self.get_option(\"cache\"):\n return\n\n cache_key = self.get_cache_key(path)\n # We weren't explicitly told to flush the cache, and there's already a cache entry,\n # this means that the result we're being passed came from the cache. As such we don't\n # want to \"update\" the cache as that could reset a TTL on the cache entry.\n if cache and cache_key in self._cache:\n return\n\n self._cache[cache_key] = result\n\n def verify_file(self, path):\n \"\"\"\n :param path: the path to the inventory config file\n :return the contents of the config file\n \"\"\"\n if not super().verify_file(path):\n return False\n\n if hasattr(self, \"INVENTORY_FILE_SUFFIXES\"):\n if not path.endswith(self.INVENTORY_FILE_SUFFIXES):\n return False\n\n return True\n", "path": "plugins/plugin_utils/inventory.py"}]}
3667
665
gh_patches_debug_15777
rasdani/github-patches
git_diff
python-telegram-bot__python-telegram-bot-1112
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Tests missing for `User.mention_markdown` and `User.mention_html` And while we're at it. Maybe `helpers.mention_markdown/html` too. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `telegram/utils/helpers.py` Content: ``` 1 #!/usr/bin/env python 2 # 3 # A library that provides a Python interface to the Telegram Bot API 4 # Copyright (C) 2015-2018 5 # Leandro Toledo de Souza <[email protected]> 6 # 7 # This program is free software: you can redistribute it and/or modify 8 # it under the terms of the GNU Lesser Public License as published by 9 # the Free Software Foundation, either version 3 of the License, or 10 # (at your option) any later version. 11 # 12 # This program is distributed in the hope that it will be useful, 13 # but WITHOUT ANY WARRANTY; without even the implied warranty of 14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 # GNU Lesser Public License for more details. 16 # 17 # You should have received a copy of the GNU Lesser Public License 18 # along with this program. If not, see [http://www.gnu.org/licenses/]. 19 """This module contains helper functions.""" 20 from html import escape 21 22 import re 23 import signal 24 from datetime import datetime 25 26 # From https://stackoverflow.com/questions/2549939/get-signal-names-from-numbers-in-python 27 _signames = {v: k 28 for k, v in reversed(sorted(vars(signal).items())) 29 if k.startswith('SIG') and not k.startswith('SIG_')} 30 31 32 def get_signal_name(signum): 33 """Returns the signal name of the given signal number.""" 34 return _signames[signum] 35 36 37 # Not using future.backports.datetime here as datetime value might be an input from the user, 38 # making every isinstace() call more delicate. So we just use our own compat layer. 39 if hasattr(datetime, 'timestamp'): 40 # Python 3.3+ 41 def _timestamp(dt_obj): 42 return dt_obj.timestamp() 43 else: 44 # Python < 3.3 (incl 2.7) 45 from time import mktime 46 47 def _timestamp(dt_obj): 48 return mktime(dt_obj.timetuple()) 49 50 51 def escape_markdown(text): 52 """Helper function to escape telegram markup symbols.""" 53 escape_chars = '\*_`\[' 54 return re.sub(r'([%s])' % escape_chars, r'\\\1', text) 55 56 57 def to_timestamp(dt_obj): 58 """ 59 Args: 60 dt_obj (:class:`datetime.datetime`): 61 62 Returns: 63 int: 64 65 """ 66 if not dt_obj: 67 return None 68 69 return int(_timestamp(dt_obj)) 70 71 72 def from_timestamp(unixtime): 73 """ 74 Args: 75 unixtime (int): 76 77 Returns: 78 datetime.datetime: 79 80 """ 81 if not unixtime: 82 return None 83 84 return datetime.fromtimestamp(unixtime) 85 86 87 def mention_html(user_id, name): 88 """ 89 Args: 90 user_id (:obj:`int`) The user's id which you want to mention. 91 name (:obj:`str`) The name the mention is showing. 92 93 Returns: 94 :obj:`str`: The inline mention for the user as html. 95 """ 96 if isinstance(user_id, int): 97 return '<a href="tg://user?id={}">{}</a>'.format(user_id, escape(name)) 98 99 100 def mention_markdown(user_id, name): 101 """ 102 Args: 103 user_id (:obj:`int`) The user's id which you want to mention. 104 name (:obj:`str`) The name the mention is showing. 105 106 Returns: 107 :obj:`str`: The inline mention for the user as markdown. 
108 """ 109 if isinstance(user_id, int): 110 return '[{}](tg://user?id={})'.format(escape_markdown(name), user_id) 111 112 113 def effective_message_type(entity): 114 """ 115 Extracts the type of message as a string identifier from a :class:`telegram.Message` or a 116 :class:`telegram.Update`. 117 118 Args: 119 entity (:obj:`Update` | :obj:`Message`) The ``update`` or ``message`` to extract from 120 121 Returns: 122 str: One of ``Message.MESSAGE_TYPES`` 123 124 """ 125 126 # Importing on file-level yields cyclic Import Errors 127 from telegram import Message 128 from telegram import Update 129 130 if isinstance(entity, Message): 131 message = entity 132 elif isinstance(entity, Update): 133 message = entity.effective_message 134 else: 135 raise TypeError("entity is not Message or Update (got: {})".format(type(entity))) 136 137 for i in Message.MESSAGE_TYPES: 138 if getattr(message, i, None): 139 return i 140 141 return None 142 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/telegram/utils/helpers.py b/telegram/utils/helpers.py --- a/telegram/utils/helpers.py +++ b/telegram/utils/helpers.py @@ -94,7 +94,7 @@ :obj:`str`: The inline mention for the user as html. """ if isinstance(user_id, int): - return '<a href="tg://user?id={}">{}</a>'.format(user_id, escape(name)) + return u'<a href="tg://user?id={}">{}</a>'.format(user_id, escape(name)) def mention_markdown(user_id, name): @@ -107,7 +107,7 @@ :obj:`str`: The inline mention for the user as markdown. """ if isinstance(user_id, int): - return '[{}](tg://user?id={})'.format(escape_markdown(name), user_id) + return u'[{}](tg://user?id={})'.format(escape_markdown(name), user_id) def effective_message_type(entity):
{"golden_diff": "diff --git a/telegram/utils/helpers.py b/telegram/utils/helpers.py\n--- a/telegram/utils/helpers.py\n+++ b/telegram/utils/helpers.py\n@@ -94,7 +94,7 @@\n :obj:`str`: The inline mention for the user as html.\n \"\"\"\n if isinstance(user_id, int):\n- return '<a href=\"tg://user?id={}\">{}</a>'.format(user_id, escape(name))\n+ return u'<a href=\"tg://user?id={}\">{}</a>'.format(user_id, escape(name))\n \n \n def mention_markdown(user_id, name):\n@@ -107,7 +107,7 @@\n :obj:`str`: The inline mention for the user as markdown.\n \"\"\"\n if isinstance(user_id, int):\n- return '[{}](tg://user?id={})'.format(escape_markdown(name), user_id)\n+ return u'[{}](tg://user?id={})'.format(escape_markdown(name), user_id)\n \n \n def effective_message_type(entity):\n", "issue": "Tests missing for `User.mention_markdown` and `User.mention_html`\nAnd while we're at it. Maybe `helpers.mention_markdown/html` too.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2018\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains helper functions.\"\"\"\nfrom html import escape\n\nimport re\nimport signal\nfrom datetime import datetime\n\n# From https://stackoverflow.com/questions/2549939/get-signal-names-from-numbers-in-python\n_signames = {v: k\n for k, v in reversed(sorted(vars(signal).items()))\n if k.startswith('SIG') and not k.startswith('SIG_')}\n\n\ndef get_signal_name(signum):\n \"\"\"Returns the signal name of the given signal number.\"\"\"\n return _signames[signum]\n\n\n# Not using future.backports.datetime here as datetime value might be an input from the user,\n# making every isinstace() call more delicate. 
So we just use our own compat layer.\nif hasattr(datetime, 'timestamp'):\n # Python 3.3+\n def _timestamp(dt_obj):\n return dt_obj.timestamp()\nelse:\n # Python < 3.3 (incl 2.7)\n from time import mktime\n\n def _timestamp(dt_obj):\n return mktime(dt_obj.timetuple())\n\n\ndef escape_markdown(text):\n \"\"\"Helper function to escape telegram markup symbols.\"\"\"\n escape_chars = '\\*_`\\['\n return re.sub(r'([%s])' % escape_chars, r'\\\\\\1', text)\n\n\ndef to_timestamp(dt_obj):\n \"\"\"\n Args:\n dt_obj (:class:`datetime.datetime`):\n\n Returns:\n int:\n\n \"\"\"\n if not dt_obj:\n return None\n\n return int(_timestamp(dt_obj))\n\n\ndef from_timestamp(unixtime):\n \"\"\"\n Args:\n unixtime (int):\n\n Returns:\n datetime.datetime:\n\n \"\"\"\n if not unixtime:\n return None\n\n return datetime.fromtimestamp(unixtime)\n\n\ndef mention_html(user_id, name):\n \"\"\"\n Args:\n user_id (:obj:`int`) The user's id which you want to mention.\n name (:obj:`str`) The name the mention is showing.\n\n Returns:\n :obj:`str`: The inline mention for the user as html.\n \"\"\"\n if isinstance(user_id, int):\n return '<a href=\"tg://user?id={}\">{}</a>'.format(user_id, escape(name))\n\n\ndef mention_markdown(user_id, name):\n \"\"\"\n Args:\n user_id (:obj:`int`) The user's id which you want to mention.\n name (:obj:`str`) The name the mention is showing.\n\n Returns:\n :obj:`str`: The inline mention for the user as markdown.\n \"\"\"\n if isinstance(user_id, int):\n return '[{}](tg://user?id={})'.format(escape_markdown(name), user_id)\n\n\ndef effective_message_type(entity):\n \"\"\"\n Extracts the type of message as a string identifier from a :class:`telegram.Message` or a\n :class:`telegram.Update`.\n\n Args:\n entity (:obj:`Update` | :obj:`Message`) The ``update`` or ``message`` to extract from\n\n Returns:\n str: One of ``Message.MESSAGE_TYPES``\n\n \"\"\"\n\n # Importing on file-level yields cyclic Import Errors\n from telegram import Message\n from telegram import Update\n\n if isinstance(entity, Message):\n message = entity\n elif isinstance(entity, Update):\n message = entity.effective_message\n else:\n raise TypeError(\"entity is not Message or Update (got: {})\".format(type(entity)))\n\n for i in Message.MESSAGE_TYPES:\n if getattr(message, i, None):\n return i\n\n return None\n", "path": "telegram/utils/helpers.py"}], "after_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2018\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. 
If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains helper functions.\"\"\"\nfrom html import escape\n\nimport re\nimport signal\nfrom datetime import datetime\n\n# From https://stackoverflow.com/questions/2549939/get-signal-names-from-numbers-in-python\n_signames = {v: k\n for k, v in reversed(sorted(vars(signal).items()))\n if k.startswith('SIG') and not k.startswith('SIG_')}\n\n\ndef get_signal_name(signum):\n \"\"\"Returns the signal name of the given signal number.\"\"\"\n return _signames[signum]\n\n\n# Not using future.backports.datetime here as datetime value might be an input from the user,\n# making every isinstace() call more delicate. So we just use our own compat layer.\nif hasattr(datetime, 'timestamp'):\n # Python 3.3+\n def _timestamp(dt_obj):\n return dt_obj.timestamp()\nelse:\n # Python < 3.3 (incl 2.7)\n from time import mktime\n\n def _timestamp(dt_obj):\n return mktime(dt_obj.timetuple())\n\n\ndef escape_markdown(text):\n \"\"\"Helper function to escape telegram markup symbols.\"\"\"\n escape_chars = '\\*_`\\['\n return re.sub(r'([%s])' % escape_chars, r'\\\\\\1', text)\n\n\ndef to_timestamp(dt_obj):\n \"\"\"\n Args:\n dt_obj (:class:`datetime.datetime`):\n\n Returns:\n int:\n\n \"\"\"\n if not dt_obj:\n return None\n\n return int(_timestamp(dt_obj))\n\n\ndef from_timestamp(unixtime):\n \"\"\"\n Args:\n unixtime (int):\n\n Returns:\n datetime.datetime:\n\n \"\"\"\n if not unixtime:\n return None\n\n return datetime.fromtimestamp(unixtime)\n\n\ndef mention_html(user_id, name):\n \"\"\"\n Args:\n user_id (:obj:`int`) The user's id which you want to mention.\n name (:obj:`str`) The name the mention is showing.\n\n Returns:\n :obj:`str`: The inline mention for the user as html.\n \"\"\"\n if isinstance(user_id, int):\n return u'<a href=\"tg://user?id={}\">{}</a>'.format(user_id, escape(name))\n\n\ndef mention_markdown(user_id, name):\n \"\"\"\n Args:\n user_id (:obj:`int`) The user's id which you want to mention.\n name (:obj:`str`) The name the mention is showing.\n\n Returns:\n :obj:`str`: The inline mention for the user as markdown.\n \"\"\"\n if isinstance(user_id, int):\n return u'[{}](tg://user?id={})'.format(escape_markdown(name), user_id)\n\n\ndef effective_message_type(entity):\n \"\"\"\n Extracts the type of message as a string identifier from a :class:`telegram.Message` or a\n :class:`telegram.Update`.\n\n Args:\n entity (:obj:`Update` | :obj:`Message`) The ``update`` or ``message`` to extract from\n\n Returns:\n str: One of ``Message.MESSAGE_TYPES``\n\n \"\"\"\n\n # Importing on file-level yields cyclic Import Errors\n from telegram import Message\n from telegram import Update\n\n if isinstance(entity, Message):\n message = entity\n elif isinstance(entity, Update):\n message = entity.effective_message\n else:\n raise TypeError(\"entity is not Message or Update (got: {})\".format(type(entity)))\n\n for i in Message.MESSAGE_TYPES:\n if getattr(message, i, None):\n return i\n\n return None\n", "path": "telegram/utils/helpers.py"}]}
1572
219
gh_patches_debug_2678
rasdani/github-patches
git_diff
pretalx__pretalx-381
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- installation crashes when there are no config files ## Current Behavior ``` $ cd pretalx $ pip-3.6 install . --user (...) File "<frozen importlib._bootstrap>", line 994, in _gcd_import File "<frozen importlib._bootstrap>", line 971, in _find_and_load File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked File "<frozen importlib._bootstrap>", line 665, in _load_unlocked File "<frozen importlib._bootstrap_external>", line 678, in exec_module File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed File "/tmp/pip-xa87l9tk-build/pretalx/settings.py", line 460, in <module> plugins=PLUGINS File "/tmp/pip-xa87l9tk-build/pretalx/common/settings/utils.py", line 11, in log_initial (f'Read from: {", ".join(config_files)}', False), TypeError: can only join an iterable ``` if there are no config files at all, the installation crashes, because `config_files` is `None`. ## Your Environment * Version used: master * Operating System and version (desktop or mobile): FreeBSD --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/pretalx/common/settings/config.py` Content: ``` 1 import configparser 2 import os 3 import sys 4 5 from pretalx.common.settings.utils import reduce_dict 6 7 CONFIG = { 8 'filesystem': { 9 'base': { 10 'default': os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))), 11 }, 12 'logs': { 13 'default': None, 14 'env': os.getenv('PRETALX_FILESYSTEM_LOGS'), 15 }, 16 'media': { 17 'default': None, 18 'env': os.getenv('PRETALX_FILESYSTEM_MEDIA'), 19 }, 20 'static': { 21 'default': None, 22 'env': os.getenv('PRETALX_FILESYSTEM_STATIC'), 23 }, 24 }, 25 'site': { 26 'debug': { 27 'default': 'runserver' in sys.argv, 28 'env': os.getenv('PRETALX_DEBUG'), 29 }, 30 'url': { 31 'default': 'http://localhost', 32 'env': os.getenv('PRETALX_SITE_URL'), 33 }, 34 'https': { 35 'env': os.getenv('PRETALX_HTTPS'), 36 }, 37 'cookie_domain': { 38 'default': '', 39 'env': os.getenv('PRETALX_COOKIE_DOMAIN'), 40 }, 41 }, 42 'database': { 43 'backend': { 44 'default': 'sqlite3', 45 'env': os.getenv('PRETALX_DB_TYPE'), 46 }, 47 'name': { 48 'env': os.getenv('PRETALX_DB_NAME'), 49 }, 50 'user': { 51 'default': '', 52 'env': os.getenv('PRETALX_DB_USER'), 53 }, 54 'password': { 55 'default': '', 56 'env': os.getenv('PRETALX_DB_PASS'), 57 }, 58 'host': { 59 'default': '', 60 'env': os.getenv('PRETALX_DB_HOST'), 61 }, 62 'port': { 63 'default': '', 64 'env': os.getenv('PRETALX_DB_PORT'), 65 }, 66 }, 67 'mail': { 68 'from': { 69 'default': 'admin@localhost', 70 'env': os.getenv('PRETALX_MAIL_FROM'), 71 }, 72 'host': { 73 'default': 'localhost', 74 'env': os.getenv('PRETALX_MAIL_HOST'), 75 }, 76 'port': { 77 'default': '25', 78 'env': os.getenv('PRETALX_MAIL_PORT'), 79 }, 80 'user': { 81 'default': '', 82 'env': os.getenv('PRETALX_MAIL_USER'), 83 }, 84 'password': { 85 'default': '', 86 'env': os.getenv('PRETALX_MAIL_PASSWORD'), 87 }, 88 'tls': { 89 'default': 'False', 90 'env': os.getenv('PRETALX_MAIL_TLS'), 91 }, 92 'ssl': { 93 'default': 'False', 94 'env': os.getenv('PRETALX_MAIL_SSL'), 95 }, 96 }, 97 'cache': { 98 }, 99 'celery': { 100 'broker': { 101 'default': '', 102 'env': os.getenv('PRETALX_CELERY_BROKER'), 103 }, 104 'backend': { 105 'default': '', 106 'env': os.getenv('PRETALX_CELERY_BACKEND'), 107 }, 108 }, 109 'logging': { 110 'email': { 111 
'default': '', 112 'env': os.getenv('PRETALX_LOGGING_EMAIL'), 113 }, 114 'email_level': { 115 'default': '', 116 'env': os.getenv('PRETALX_LOGGING_EMAIL_LEVEL'), 117 }, 118 }, 119 } 120 121 122 def read_config_files(config): 123 if 'PRETALX_CONFIG_FILE' in os.environ: 124 config_files = config.read_file(open(os.environ.get('PRETALX_CONFIG_FILE'), encoding='utf-8')) 125 else: 126 config_files = config.read([ 127 '/etc/pretalx/pretalx.cfg', 128 os.path.expanduser('~/.pretalx.cfg'), 129 'pretalx.cfg', 130 ], encoding='utf-8') 131 return config, config_files 132 133 134 def read_layer(layer_name, config): 135 config_dict = reduce_dict({ 136 section_name: { 137 key: value.get(layer_name) 138 for key, value in section_content.items() 139 } 140 for section_name, section_content in CONFIG.items() 141 }) 142 config.read_dict(config_dict) 143 return config 144 145 146 def build_config(): 147 config = configparser.RawConfigParser() 148 config = read_layer('default', config) 149 config, config_files = read_config_files(config) 150 config = read_layer('env', config) 151 return config, config_files 152 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/pretalx/common/settings/config.py b/src/pretalx/common/settings/config.py --- a/src/pretalx/common/settings/config.py +++ b/src/pretalx/common/settings/config.py @@ -128,7 +128,7 @@ os.path.expanduser('~/.pretalx.cfg'), 'pretalx.cfg', ], encoding='utf-8') - return config, config_files + return config, config_files or [] # .read() returns None, if there are no config files def read_layer(layer_name, config):
{"golden_diff": "diff --git a/src/pretalx/common/settings/config.py b/src/pretalx/common/settings/config.py\n--- a/src/pretalx/common/settings/config.py\n+++ b/src/pretalx/common/settings/config.py\n@@ -128,7 +128,7 @@\n os.path.expanduser('~/.pretalx.cfg'),\n 'pretalx.cfg',\n ], encoding='utf-8')\n- return config, config_files\n+ return config, config_files or [] # .read() returns None, if there are no config files\n \n \n def read_layer(layer_name, config):\n", "issue": "installation crashes when there are no config files\n## Current Behavior\r\n```\r\n$ cd pretalx\r\n$ pip-3.6 install . --user\r\n(...)\r\n File \"<frozen importlib._bootstrap>\", line 994, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 971, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 955, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 665, in _load_unlocked\r\n File \"<frozen importlib._bootstrap_external>\", line 678, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 219, in _call_with_frames_removed\r\n File \"/tmp/pip-xa87l9tk-build/pretalx/settings.py\", line 460, in <module>\r\n plugins=PLUGINS\r\n File \"/tmp/pip-xa87l9tk-build/pretalx/common/settings/utils.py\", line 11, in log_initial\r\n (f'Read from: {\", \".join(config_files)}', False),\r\n TypeError: can only join an iterable\r\n```\r\n\r\nif there are no config files at all, the installation crashes, because `config_files` is `None`.\r\n\r\n## Your Environment\r\n\r\n* Version used: master\r\n* Operating System and version (desktop or mobile): FreeBSD\r\n\n", "before_files": [{"content": "import configparser\nimport os\nimport sys\n\nfrom pretalx.common.settings.utils import reduce_dict\n\nCONFIG = {\n 'filesystem': {\n 'base': {\n 'default': os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))),\n },\n 'logs': {\n 'default': None,\n 'env': os.getenv('PRETALX_FILESYSTEM_LOGS'),\n },\n 'media': {\n 'default': None,\n 'env': os.getenv('PRETALX_FILESYSTEM_MEDIA'),\n },\n 'static': {\n 'default': None,\n 'env': os.getenv('PRETALX_FILESYSTEM_STATIC'),\n },\n },\n 'site': {\n 'debug': {\n 'default': 'runserver' in sys.argv,\n 'env': os.getenv('PRETALX_DEBUG'),\n },\n 'url': {\n 'default': 'http://localhost',\n 'env': os.getenv('PRETALX_SITE_URL'),\n },\n 'https': {\n 'env': os.getenv('PRETALX_HTTPS'),\n },\n 'cookie_domain': {\n 'default': '',\n 'env': os.getenv('PRETALX_COOKIE_DOMAIN'),\n },\n },\n 'database': {\n 'backend': {\n 'default': 'sqlite3',\n 'env': os.getenv('PRETALX_DB_TYPE'),\n },\n 'name': {\n 'env': os.getenv('PRETALX_DB_NAME'),\n },\n 'user': {\n 'default': '',\n 'env': os.getenv('PRETALX_DB_USER'),\n },\n 'password': {\n 'default': '',\n 'env': os.getenv('PRETALX_DB_PASS'),\n },\n 'host': {\n 'default': '',\n 'env': os.getenv('PRETALX_DB_HOST'),\n },\n 'port': {\n 'default': '',\n 'env': os.getenv('PRETALX_DB_PORT'),\n },\n },\n 'mail': {\n 'from': {\n 'default': 'admin@localhost',\n 'env': os.getenv('PRETALX_MAIL_FROM'),\n },\n 'host': {\n 'default': 'localhost',\n 'env': os.getenv('PRETALX_MAIL_HOST'),\n },\n 'port': {\n 'default': '25',\n 'env': os.getenv('PRETALX_MAIL_PORT'),\n },\n 'user': {\n 'default': '',\n 'env': os.getenv('PRETALX_MAIL_USER'),\n },\n 'password': {\n 'default': '',\n 'env': os.getenv('PRETALX_MAIL_PASSWORD'),\n },\n 'tls': {\n 'default': 'False',\n 'env': os.getenv('PRETALX_MAIL_TLS'),\n },\n 'ssl': {\n 'default': 'False',\n 'env': os.getenv('PRETALX_MAIL_SSL'),\n },\n },\n 'cache': {\n },\n 'celery': {\n 'broker': 
{\n 'default': '',\n 'env': os.getenv('PRETALX_CELERY_BROKER'),\n },\n 'backend': {\n 'default': '',\n 'env': os.getenv('PRETALX_CELERY_BACKEND'),\n },\n },\n 'logging': {\n 'email': {\n 'default': '',\n 'env': os.getenv('PRETALX_LOGGING_EMAIL'),\n },\n 'email_level': {\n 'default': '',\n 'env': os.getenv('PRETALX_LOGGING_EMAIL_LEVEL'),\n },\n },\n}\n\n\ndef read_config_files(config):\n if 'PRETALX_CONFIG_FILE' in os.environ:\n config_files = config.read_file(open(os.environ.get('PRETALX_CONFIG_FILE'), encoding='utf-8'))\n else:\n config_files = config.read([\n '/etc/pretalx/pretalx.cfg',\n os.path.expanduser('~/.pretalx.cfg'),\n 'pretalx.cfg',\n ], encoding='utf-8')\n return config, config_files\n\n\ndef read_layer(layer_name, config):\n config_dict = reduce_dict({\n section_name: {\n key: value.get(layer_name)\n for key, value in section_content.items()\n }\n for section_name, section_content in CONFIG.items()\n })\n config.read_dict(config_dict)\n return config\n\n\ndef build_config():\n config = configparser.RawConfigParser()\n config = read_layer('default', config)\n config, config_files = read_config_files(config)\n config = read_layer('env', config)\n return config, config_files\n", "path": "src/pretalx/common/settings/config.py"}], "after_files": [{"content": "import configparser\nimport os\nimport sys\n\nfrom pretalx.common.settings.utils import reduce_dict\n\nCONFIG = {\n 'filesystem': {\n 'base': {\n 'default': os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))),\n },\n 'logs': {\n 'default': None,\n 'env': os.getenv('PRETALX_FILESYSTEM_LOGS'),\n },\n 'media': {\n 'default': None,\n 'env': os.getenv('PRETALX_FILESYSTEM_MEDIA'),\n },\n 'static': {\n 'default': None,\n 'env': os.getenv('PRETALX_FILESYSTEM_STATIC'),\n },\n },\n 'site': {\n 'debug': {\n 'default': 'runserver' in sys.argv,\n 'env': os.getenv('PRETALX_DEBUG'),\n },\n 'url': {\n 'default': 'http://localhost',\n 'env': os.getenv('PRETALX_SITE_URL'),\n },\n 'https': {\n 'env': os.getenv('PRETALX_HTTPS'),\n },\n 'cookie_domain': {\n 'default': '',\n 'env': os.getenv('PRETALX_COOKIE_DOMAIN'),\n },\n },\n 'database': {\n 'backend': {\n 'default': 'sqlite3',\n 'env': os.getenv('PRETALX_DB_TYPE'),\n },\n 'name': {\n 'env': os.getenv('PRETALX_DB_NAME'),\n },\n 'user': {\n 'default': '',\n 'env': os.getenv('PRETALX_DB_USER'),\n },\n 'password': {\n 'default': '',\n 'env': os.getenv('PRETALX_DB_PASS'),\n },\n 'host': {\n 'default': '',\n 'env': os.getenv('PRETALX_DB_HOST'),\n },\n 'port': {\n 'default': '',\n 'env': os.getenv('PRETALX_DB_PORT'),\n },\n },\n 'mail': {\n 'from': {\n 'default': 'admin@localhost',\n 'env': os.getenv('PRETALX_MAIL_FROM'),\n },\n 'host': {\n 'default': 'localhost',\n 'env': os.getenv('PRETALX_MAIL_HOST'),\n },\n 'port': {\n 'default': '25',\n 'env': os.getenv('PRETALX_MAIL_PORT'),\n },\n 'user': {\n 'default': '',\n 'env': os.getenv('PRETALX_MAIL_USER'),\n },\n 'password': {\n 'default': '',\n 'env': os.getenv('PRETALX_MAIL_PASSWORD'),\n },\n 'tls': {\n 'default': 'False',\n 'env': os.getenv('PRETALX_MAIL_TLS'),\n },\n 'ssl': {\n 'default': 'False',\n 'env': os.getenv('PRETALX_MAIL_SSL'),\n },\n },\n 'cache': {\n },\n 'celery': {\n 'broker': {\n 'default': '',\n 'env': os.getenv('PRETALX_CELERY_BROKER'),\n },\n 'backend': {\n 'default': '',\n 'env': os.getenv('PRETALX_CELERY_BACKEND'),\n },\n },\n 'logging': {\n 'email': {\n 'default': '',\n 'env': os.getenv('PRETALX_LOGGING_EMAIL'),\n },\n 'email_level': {\n 'default': '',\n 'env': os.getenv('PRETALX_LOGGING_EMAIL_LEVEL'),\n 
},\n },\n}\n\n\ndef read_config_files(config):\n if 'PRETALX_CONFIG_FILE' in os.environ:\n config_files = config.read_file(open(os.environ.get('PRETALX_CONFIG_FILE'), encoding='utf-8'))\n else:\n config_files = config.read([\n '/etc/pretalx/pretalx.cfg',\n os.path.expanduser('~/.pretalx.cfg'),\n 'pretalx.cfg',\n ], encoding='utf-8')\n return config, config_files or [] # .read() returns None, if there are no config files\n\n\ndef read_layer(layer_name, config):\n config_dict = reduce_dict({\n section_name: {\n key: value.get(layer_name)\n for key, value in section_content.items()\n }\n for section_name, section_content in CONFIG.items()\n })\n config.read_dict(config_dict)\n return config\n\n\ndef build_config():\n config = configparser.RawConfigParser()\n config = read_layer('default', config)\n config, config_files = read_config_files(config)\n config = read_layer('env', config)\n return config, config_files\n", "path": "src/pretalx/common/settings/config.py"}]}
1904
130
gh_patches_debug_29983
rasdani/github-patches
git_diff
liqd__a4-meinberlin-3146
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- testing interactive event: remove + on call to action **URL:** https://meinberlin-dev.liqd.net/projekte/module/interaktive-veranstaltung-2/ **user:** unregistered user **expected behaviour:** buttons should be same all over platform **behaviour:** there is a + on the button **important screensize:** **device & browser:** **Comment/Question:** please take out the + before add question Screenshot? <img width="692" alt="Bildschirmfoto 2020-09-22 um 17 51 38" src="https://user-images.githubusercontent.com/35491681/93906276-494d9200-fcfc-11ea-9614-3a9359b5ec97.png"> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `meinberlin/apps/projects/templatetags/meinberlin_project_tags.py` Content: ``` 1 from django import template 2 3 from adhocracy4.comments.models import Comment 4 from meinberlin.apps.budgeting.models import Proposal as budget_proposal 5 from meinberlin.apps.ideas.models import Idea 6 from meinberlin.apps.kiezkasse.models import Proposal as kiezkasse_proposal 7 from meinberlin.apps.mapideas.models import MapIdea 8 from meinberlin.apps.polls.models import Vote 9 from meinberlin.apps.projects import get_project_type 10 11 register = template.Library() 12 13 14 @register.filter 15 def project_url(project): 16 if (project.project_type == 'meinberlin_bplan.Bplan' 17 or project.project_type == 18 'meinberlin_extprojects.ExternalProject'): 19 return project.externalproject.url 20 return project.get_absolute_url() 21 22 23 @register.filter 24 def project_type(project): 25 return get_project_type(project) 26 27 28 @register.filter 29 def is_external(project): 30 return (project.project_type == 'meinberlin_bplan.Bplan' 31 or project.project_type == 32 'meinberlin_extprojects.ExternalProject') 33 34 35 @register.filter 36 def is_a4_project(project): 37 return (project.project_type == 'a4projects.Project') 38 39 40 @register.simple_tag 41 def get_num_entries(module): 42 """Count all user-generated items.""" 43 item_count = \ 44 Idea.objects.filter(module=module).count() \ 45 + MapIdea.objects.filter(module=module).count() \ 46 + budget_proposal.objects.filter(module=module).count() \ 47 + kiezkasse_proposal.objects.filter(module=module).count() \ 48 + Comment.objects.filter(idea__module=module).count() \ 49 + Comment.objects.filter(mapidea__module=module).count() \ 50 + Comment.objects.filter(budget_proposal__module=module).count() \ 51 + Comment.objects.filter(kiezkasse_proposal__module=module).count() \ 52 + Comment.objects.filter(topic__module=module).count() \ 53 + Comment.objects.filter(maptopic__module=module).count() \ 54 + Comment.objects.filter(paragraph__chapter__module=module).count() \ 55 + Comment.objects.filter(chapter__module=module).count() \ 56 + Comment.objects.filter(poll__module=module).count() \ 57 + Vote.objects.filter(choice__question__poll__module=module).count() 58 return item_count 59 ``` Path: `meinberlin/apps/livequestions/phases.py` Content: ``` 1 from django.utils.translation import ugettext_lazy as _ 2 3 from adhocracy4 import phases 4 5 from . import apps 6 from . import models 7 from . 
import views 8 9 10 class IssuePhase(phases.PhaseContent): 11 app = apps.Config.label 12 phase = 'issue' 13 view = views.LiveQuestionModuleDetail 14 15 name = _('Issue phase') 16 description = _('Add question.') 17 module_name = _('Interactive Event') 18 icon = 'lightbulb-o' 19 20 features = { 21 'crud': (models.LiveQuestion,), 22 'like': (models.LiveQuestion,) 23 } 24 25 26 phases.content.register(IssuePhase()) 27 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/meinberlin/apps/livequestions/phases.py b/meinberlin/apps/livequestions/phases.py --- a/meinberlin/apps/livequestions/phases.py +++ b/meinberlin/apps/livequestions/phases.py @@ -13,7 +13,7 @@ view = views.LiveQuestionModuleDetail name = _('Issue phase') - description = _('Add question.') + description = _('Add questions and support.') module_name = _('Interactive Event') icon = 'lightbulb-o' diff --git a/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py b/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py --- a/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py +++ b/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py @@ -4,6 +4,8 @@ from meinberlin.apps.budgeting.models import Proposal as budget_proposal from meinberlin.apps.ideas.models import Idea from meinberlin.apps.kiezkasse.models import Proposal as kiezkasse_proposal +from meinberlin.apps.likes.models import Like +from meinberlin.apps.livequestions.models import LiveQuestion from meinberlin.apps.mapideas.models import MapIdea from meinberlin.apps.polls.models import Vote from meinberlin.apps.projects import get_project_type @@ -54,5 +56,7 @@ + Comment.objects.filter(paragraph__chapter__module=module).count() \ + Comment.objects.filter(chapter__module=module).count() \ + Comment.objects.filter(poll__module=module).count() \ - + Vote.objects.filter(choice__question__poll__module=module).count() + + Vote.objects.filter(choice__question__poll__module=module).count() \ + + LiveQuestion.objects.filter(module=module).count() \ + + Like.objects.filter(question__module=module).count() return item_count
{"golden_diff": "diff --git a/meinberlin/apps/livequestions/phases.py b/meinberlin/apps/livequestions/phases.py\n--- a/meinberlin/apps/livequestions/phases.py\n+++ b/meinberlin/apps/livequestions/phases.py\n@@ -13,7 +13,7 @@\n view = views.LiveQuestionModuleDetail\n \n name = _('Issue phase')\n- description = _('Add question.')\n+ description = _('Add questions and support.')\n module_name = _('Interactive Event')\n icon = 'lightbulb-o'\n \ndiff --git a/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py b/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py\n--- a/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py\n+++ b/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py\n@@ -4,6 +4,8 @@\n from meinberlin.apps.budgeting.models import Proposal as budget_proposal\n from meinberlin.apps.ideas.models import Idea\n from meinberlin.apps.kiezkasse.models import Proposal as kiezkasse_proposal\n+from meinberlin.apps.likes.models import Like\n+from meinberlin.apps.livequestions.models import LiveQuestion\n from meinberlin.apps.mapideas.models import MapIdea\n from meinberlin.apps.polls.models import Vote\n from meinberlin.apps.projects import get_project_type\n@@ -54,5 +56,7 @@\n + Comment.objects.filter(paragraph__chapter__module=module).count() \\\n + Comment.objects.filter(chapter__module=module).count() \\\n + Comment.objects.filter(poll__module=module).count() \\\n- + Vote.objects.filter(choice__question__poll__module=module).count()\n+ + Vote.objects.filter(choice__question__poll__module=module).count() \\\n+ + LiveQuestion.objects.filter(module=module).count() \\\n+ + Like.objects.filter(question__module=module).count()\n return item_count\n", "issue": "testing interactive event: remove + on call to action\n**URL:** https://meinberlin-dev.liqd.net/projekte/module/interaktive-veranstaltung-2/\r\n**user:** unregistered user\r\n**expected behaviour:** buttons should be same all over platform\r\n**behaviour:** there is a + on the button\r\n**important screensize:**\r\n**device & browser:** \r\n**Comment/Question:** please take out the + before add question\r\n\r\n\r\nScreenshot?\r\n<img width=\"692\" alt=\"Bildschirmfoto 2020-09-22 um 17 51 38\" src=\"https://user-images.githubusercontent.com/35491681/93906276-494d9200-fcfc-11ea-9614-3a9359b5ec97.png\">\r\n\n", "before_files": [{"content": "from django import template\n\nfrom adhocracy4.comments.models import Comment\nfrom meinberlin.apps.budgeting.models import Proposal as budget_proposal\nfrom meinberlin.apps.ideas.models import Idea\nfrom meinberlin.apps.kiezkasse.models import Proposal as kiezkasse_proposal\nfrom meinberlin.apps.mapideas.models import MapIdea\nfrom meinberlin.apps.polls.models import Vote\nfrom meinberlin.apps.projects import get_project_type\n\nregister = template.Library()\n\n\[email protected]\ndef project_url(project):\n if (project.project_type == 'meinberlin_bplan.Bplan'\n or project.project_type ==\n 'meinberlin_extprojects.ExternalProject'):\n return project.externalproject.url\n return project.get_absolute_url()\n\n\[email protected]\ndef project_type(project):\n return get_project_type(project)\n\n\[email protected]\ndef is_external(project):\n return (project.project_type == 'meinberlin_bplan.Bplan'\n or project.project_type ==\n 'meinberlin_extprojects.ExternalProject')\n\n\[email protected]\ndef is_a4_project(project):\n return (project.project_type == 'a4projects.Project')\n\n\[email protected]_tag\ndef get_num_entries(module):\n \"\"\"Count all 
user-generated items.\"\"\"\n item_count = \\\n Idea.objects.filter(module=module).count() \\\n + MapIdea.objects.filter(module=module).count() \\\n + budget_proposal.objects.filter(module=module).count() \\\n + kiezkasse_proposal.objects.filter(module=module).count() \\\n + Comment.objects.filter(idea__module=module).count() \\\n + Comment.objects.filter(mapidea__module=module).count() \\\n + Comment.objects.filter(budget_proposal__module=module).count() \\\n + Comment.objects.filter(kiezkasse_proposal__module=module).count() \\\n + Comment.objects.filter(topic__module=module).count() \\\n + Comment.objects.filter(maptopic__module=module).count() \\\n + Comment.objects.filter(paragraph__chapter__module=module).count() \\\n + Comment.objects.filter(chapter__module=module).count() \\\n + Comment.objects.filter(poll__module=module).count() \\\n + Vote.objects.filter(choice__question__poll__module=module).count()\n return item_count\n", "path": "meinberlin/apps/projects/templatetags/meinberlin_project_tags.py"}, {"content": "from django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4 import phases\n\nfrom . import apps\nfrom . import models\nfrom . import views\n\n\nclass IssuePhase(phases.PhaseContent):\n app = apps.Config.label\n phase = 'issue'\n view = views.LiveQuestionModuleDetail\n\n name = _('Issue phase')\n description = _('Add question.')\n module_name = _('Interactive Event')\n icon = 'lightbulb-o'\n\n features = {\n 'crud': (models.LiveQuestion,),\n 'like': (models.LiveQuestion,)\n }\n\n\nphases.content.register(IssuePhase())\n", "path": "meinberlin/apps/livequestions/phases.py"}], "after_files": [{"content": "from django import template\n\nfrom adhocracy4.comments.models import Comment\nfrom meinberlin.apps.budgeting.models import Proposal as budget_proposal\nfrom meinberlin.apps.ideas.models import Idea\nfrom meinberlin.apps.kiezkasse.models import Proposal as kiezkasse_proposal\nfrom meinberlin.apps.likes.models import Like\nfrom meinberlin.apps.livequestions.models import LiveQuestion\nfrom meinberlin.apps.mapideas.models import MapIdea\nfrom meinberlin.apps.polls.models import Vote\nfrom meinberlin.apps.projects import get_project_type\n\nregister = template.Library()\n\n\[email protected]\ndef project_url(project):\n if (project.project_type == 'meinberlin_bplan.Bplan'\n or project.project_type ==\n 'meinberlin_extprojects.ExternalProject'):\n return project.externalproject.url\n return project.get_absolute_url()\n\n\[email protected]\ndef project_type(project):\n return get_project_type(project)\n\n\[email protected]\ndef is_external(project):\n return (project.project_type == 'meinberlin_bplan.Bplan'\n or project.project_type ==\n 'meinberlin_extprojects.ExternalProject')\n\n\[email protected]\ndef is_a4_project(project):\n return (project.project_type == 'a4projects.Project')\n\n\[email protected]_tag\ndef get_num_entries(module):\n \"\"\"Count all user-generated items.\"\"\"\n item_count = \\\n Idea.objects.filter(module=module).count() \\\n + MapIdea.objects.filter(module=module).count() \\\n + budget_proposal.objects.filter(module=module).count() \\\n + kiezkasse_proposal.objects.filter(module=module).count() \\\n + Comment.objects.filter(idea__module=module).count() \\\n + Comment.objects.filter(mapidea__module=module).count() \\\n + Comment.objects.filter(budget_proposal__module=module).count() \\\n + Comment.objects.filter(kiezkasse_proposal__module=module).count() \\\n + Comment.objects.filter(topic__module=module).count() \\\n + 
Comment.objects.filter(maptopic__module=module).count() \\\n + Comment.objects.filter(paragraph__chapter__module=module).count() \\\n + Comment.objects.filter(chapter__module=module).count() \\\n + Comment.objects.filter(poll__module=module).count() \\\n + Vote.objects.filter(choice__question__poll__module=module).count() \\\n + LiveQuestion.objects.filter(module=module).count() \\\n + Like.objects.filter(question__module=module).count()\n return item_count\n", "path": "meinberlin/apps/projects/templatetags/meinberlin_project_tags.py"}, {"content": "from django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4 import phases\n\nfrom . import apps\nfrom . import models\nfrom . import views\n\n\nclass IssuePhase(phases.PhaseContent):\n app = apps.Config.label\n phase = 'issue'\n view = views.LiveQuestionModuleDetail\n\n name = _('Issue phase')\n description = _('Add questions and support.')\n module_name = _('Interactive Event')\n icon = 'lightbulb-o'\n\n features = {\n 'crud': (models.LiveQuestion,),\n 'like': (models.LiveQuestion,)\n }\n\n\nphases.content.register(IssuePhase())\n", "path": "meinberlin/apps/livequestions/phases.py"}]}
1278
445
gh_patches_debug_17824
rasdani/github-patches
git_diff
hydroshare__hydroshare-5083
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Track user agent for metrics **Describe the feature you'd like and what it will do** In HS v2.5.4, we don't track user_agent in our metrics. This makes it difficult to tell when requests to HS are occurring via direct UI interactions, or via other tools like hsclient. **Why is this feature important?** We need more insight into how HS' ecosystem of tools are being used. This information should drive our continued development on existing tools and our consideration of additions for future use. **Is your feature request related to a problem? Please describe.** It is difficult to make decisions without information. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `hs_tracking/utils.py` Content: ``` 1 import robot_detection 2 from ipware.ip import get_ip 3 from hs_tools_resource.models import RequestUrlBase, RequestUrlBaseAggregation, RequestUrlBaseFile 4 from urllib.parse import urlparse 5 6 7 def get_client_ip(request): 8 return get_ip(request) 9 10 11 def get_user_type(session): 12 try: 13 user = session.visitor.user 14 usertype = user.userprofile.user_type 15 except AttributeError: 16 usertype = None 17 return usertype 18 19 20 def get_user_email_domain(session): 21 try: 22 user = session.visitor.user 23 emaildomain = user.email.split('@')[-1] 24 except AttributeError: 25 emaildomain = None 26 return emaildomain 27 28 29 def get_user_email_tld(session, emaildomain=None): 30 try: 31 if not emaildomain: 32 emaildomain = get_user_email_domain(session) 33 if emaildomain: 34 shortdomain = '.'.join(emaildomain.split('.')[1:]) 35 return shortdomain 36 except AttributeError: 37 return None 38 39 40 def is_human(user_agent): 41 if robot_detection.is_robot(user_agent): 42 return False 43 return True 44 45 46 def get_std_log_fields(request, session=None): 47 """ returns a standard set of metadata that to each receiver function. 48 This ensures that all activities are reporting a consistent set of metrics 49 """ 50 user_type = None 51 user_email_tld = None 52 full_domain = None 53 if session is not None: 54 user_type = get_user_type(session) 55 full_domain = get_user_email_domain(session) 56 user_email_tld = get_user_email_tld(session, full_domain) 57 58 return { 59 'user_ip': get_client_ip(request), 60 'user_type': user_type, 61 'user_email_domain': user_email_tld, 62 'user_email_domain_full': full_domain 63 } 64 65 66 def authentic_redirect_url(url): 67 """ Validates a url scheme and netloc is in an existing web app 68 :param url: String of a url 69 :return: Boolean, True if the url exists in a web app 70 """ 71 if not url: 72 return False 73 u = urlparse(url) 74 url_base = "{}://{}".format(u.scheme, u.netloc) 75 return RequestUrlBase.objects.filter(value__startswith=url_base).exists() \ 76 or RequestUrlBaseAggregation.objects.filter(value__startswith=url_base).exists() \ 77 or RequestUrlBaseFile.objects.filter(value__startswith=url_base).exists() 78 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/hs_tracking/utils.py b/hs_tracking/utils.py --- a/hs_tracking/utils.py +++ b/hs_tracking/utils.py @@ -47,6 +47,12 @@ """ returns a standard set of metadata that to each receiver function. This ensures that all activities are reporting a consistent set of metrics """ + try: + user_agent = request.META['HTTP_USER_AGENT'] + human = is_human(user_agent) + except KeyError: + user_agent = None + human = None user_type = None user_email_tld = None full_domain = None @@ -59,7 +65,9 @@ 'user_ip': get_client_ip(request), 'user_type': user_type, 'user_email_domain': user_email_tld, - 'user_email_domain_full': full_domain + 'user_email_domain_full': full_domain, + 'is_human': human, + 'user_agent': user_agent }
{"golden_diff": "diff --git a/hs_tracking/utils.py b/hs_tracking/utils.py\n--- a/hs_tracking/utils.py\n+++ b/hs_tracking/utils.py\n@@ -47,6 +47,12 @@\n \"\"\" returns a standard set of metadata that to each receiver function.\n This ensures that all activities are reporting a consistent set of metrics\n \"\"\"\n+ try:\n+ user_agent = request.META['HTTP_USER_AGENT']\n+ human = is_human(user_agent)\n+ except KeyError:\n+ user_agent = None\n+ human = None\n user_type = None\n user_email_tld = None\n full_domain = None\n@@ -59,7 +65,9 @@\n 'user_ip': get_client_ip(request),\n 'user_type': user_type,\n 'user_email_domain': user_email_tld,\n- 'user_email_domain_full': full_domain\n+ 'user_email_domain_full': full_domain,\n+ 'is_human': human,\n+ 'user_agent': user_agent\n }\n", "issue": "Track user agent for metrics\n**Describe the feature you'd like and what it will do**\r\nIn HS v2.5.4, we don't track user_agent in our metrics. This makes it difficult to tell when requests to HS are occurring via direct UI interactions, or via other tools like hsclient.\r\n\r\n\r\n**Why is this feature important?**\r\nWe need more insight into how HS' ecosystem of tools are being used. This information should drive our continued development on existing tools and our consideration of additions for future use.\r\n\r\n**Is your feature request related to a problem? Please describe.**\r\nIt is difficult to make decisions without information.\r\n\n", "before_files": [{"content": "import robot_detection\nfrom ipware.ip import get_ip\nfrom hs_tools_resource.models import RequestUrlBase, RequestUrlBaseAggregation, RequestUrlBaseFile\nfrom urllib.parse import urlparse\n\n\ndef get_client_ip(request):\n return get_ip(request)\n\n\ndef get_user_type(session):\n try:\n user = session.visitor.user\n usertype = user.userprofile.user_type\n except AttributeError:\n usertype = None\n return usertype\n\n\ndef get_user_email_domain(session):\n try:\n user = session.visitor.user\n emaildomain = user.email.split('@')[-1]\n except AttributeError:\n emaildomain = None\n return emaildomain\n\n\ndef get_user_email_tld(session, emaildomain=None):\n try:\n if not emaildomain:\n emaildomain = get_user_email_domain(session)\n if emaildomain:\n shortdomain = '.'.join(emaildomain.split('.')[1:])\n return shortdomain\n except AttributeError:\n return None\n\n\ndef is_human(user_agent):\n if robot_detection.is_robot(user_agent):\n return False\n return True\n\n\ndef get_std_log_fields(request, session=None):\n \"\"\" returns a standard set of metadata that to each receiver function.\n This ensures that all activities are reporting a consistent set of metrics\n \"\"\"\n user_type = None\n user_email_tld = None\n full_domain = None\n if session is not None:\n user_type = get_user_type(session)\n full_domain = get_user_email_domain(session)\n user_email_tld = get_user_email_tld(session, full_domain)\n\n return {\n 'user_ip': get_client_ip(request),\n 'user_type': user_type,\n 'user_email_domain': user_email_tld,\n 'user_email_domain_full': full_domain\n }\n\n\ndef authentic_redirect_url(url):\n \"\"\" Validates a url scheme and netloc is in an existing web app\n :param url: String of a url\n :return: Boolean, True if the url exists in a web app\n \"\"\"\n if not url:\n return False\n u = urlparse(url)\n url_base = \"{}://{}\".format(u.scheme, u.netloc)\n return RequestUrlBase.objects.filter(value__startswith=url_base).exists() \\\n or RequestUrlBaseAggregation.objects.filter(value__startswith=url_base).exists() \\\n or 
RequestUrlBaseFile.objects.filter(value__startswith=url_base).exists()\n", "path": "hs_tracking/utils.py"}], "after_files": [{"content": "import robot_detection\nfrom ipware.ip import get_ip\nfrom hs_tools_resource.models import RequestUrlBase, RequestUrlBaseAggregation, RequestUrlBaseFile\nfrom urllib.parse import urlparse\n\n\ndef get_client_ip(request):\n return get_ip(request)\n\n\ndef get_user_type(session):\n try:\n user = session.visitor.user\n usertype = user.userprofile.user_type\n except AttributeError:\n usertype = None\n return usertype\n\n\ndef get_user_email_domain(session):\n try:\n user = session.visitor.user\n emaildomain = user.email.split('@')[-1]\n except AttributeError:\n emaildomain = None\n return emaildomain\n\n\ndef get_user_email_tld(session, emaildomain=None):\n try:\n if not emaildomain:\n emaildomain = get_user_email_domain(session)\n if emaildomain:\n shortdomain = '.'.join(emaildomain.split('.')[1:])\n return shortdomain\n except AttributeError:\n return None\n\n\ndef is_human(user_agent):\n if robot_detection.is_robot(user_agent):\n return False\n return True\n\n\ndef get_std_log_fields(request, session=None):\n \"\"\" returns a standard set of metadata that to each receiver function.\n This ensures that all activities are reporting a consistent set of metrics\n \"\"\"\n try:\n user_agent = request.META['HTTP_USER_AGENT']\n human = is_human(user_agent)\n except KeyError:\n user_agent = None\n human = None\n user_type = None\n user_email_tld = None\n full_domain = None\n if session is not None:\n user_type = get_user_type(session)\n full_domain = get_user_email_domain(session)\n user_email_tld = get_user_email_tld(session, full_domain)\n\n return {\n 'user_ip': get_client_ip(request),\n 'user_type': user_type,\n 'user_email_domain': user_email_tld,\n 'user_email_domain_full': full_domain,\n 'is_human': human,\n 'user_agent': user_agent\n }\n\n\ndef authentic_redirect_url(url):\n \"\"\" Validates a url scheme and netloc is in an existing web app\n :param url: String of a url\n :return: Boolean, True if the url exists in a web app\n \"\"\"\n if not url:\n return False\n u = urlparse(url)\n url_base = \"{}://{}\".format(u.scheme, u.netloc)\n return RequestUrlBase.objects.filter(value__startswith=url_base).exists() \\\n or RequestUrlBaseAggregation.objects.filter(value__startswith=url_base).exists() \\\n or RequestUrlBaseFile.objects.filter(value__startswith=url_base).exists()\n", "path": "hs_tracking/utils.py"}]}
1,050
221
gh_patches_debug_19918
rasdani/github-patches
git_diff
deepchecks__deepchecks-971
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [BUG] Drift titles are not clearly connected to their plots ![image](https://user-images.githubusercontent.com/17730502/156195682-d5949e14-b152-41c8-b16c-222c4ff071ea.png) This example (taken from https://docs.deepchecks.com/en/latest/examples/vision/guides/quickstart_in_5_minutes.html) shows that it is very hard to understand that the "area" title is the property that is drifted. This happens in the common code for drift plots. Need to find a clearer way to indicate the drifted feature, either: 1. Find a way to center and enlarge the title / add bold / underscore (while still having the "Additional output" title be superior to it) 2. Add the feature name also to the title of the plots themselves. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `deepchecks/utils/distribution/drift.py` Content: ``` 1 # ---------------------------------------------------------------------------- 2 # Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com) 3 # 4 # This file is part of Deepchecks. 5 # Deepchecks is distributed under the terms of the GNU Affero General 6 # Public License (version 3 or later). 7 # You should have received a copy of the GNU Affero General Public License 8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>. 9 # ---------------------------------------------------------------------------- 10 # 11 """Common utilities for distribution checks.""" 12 13 from typing import Tuple, Union, Hashable, Callable 14 15 from scipy.stats import wasserstein_distance 16 import numpy as np 17 import pandas as pd 18 19 import plotly.graph_objects as go 20 from plotly.subplots import make_subplots 21 22 from deepchecks.utils.distribution.plot import drift_score_bar_traces, feature_distribution_traces 23 from deepchecks.utils.distribution.preprocessing import preprocess_2_cat_cols_to_same_bins 24 from deepchecks.core.errors import DeepchecksValueError 25 26 27 PSI_MIN_PERCENTAGE = 0.01 28 29 30 __all__ = ['calc_drift_and_plot'] 31 32 33 def psi(expected_percents: np.ndarray, actual_percents: np.ndarray): 34 """ 35 Calculate the PSI (Population Stability Index). 36 37 See https://www.lexjansen.com/wuss/2017/47_Final_Paper_PDF.pdf 38 39 Parameters 40 ---------- 41 expected_percents: np.ndarray 42 array of percentages of each value in the expected distribution. 43 actual_percents: : np.ndarray 44 array of percentages of each value in the actual distribution. 45 Returns 46 ------- 47 psi 48 The PSI score 49 50 """ 51 psi_value = 0 52 for i in range(len(expected_percents)): 53 # In order for the value not to diverge, we cap our min percentage value 54 e_perc = max(expected_percents[i], PSI_MIN_PERCENTAGE) 55 a_perc = max(actual_percents[i], PSI_MIN_PERCENTAGE) 56 value = (e_perc - a_perc) * np.log(e_perc / a_perc) 57 psi_value += value 58 59 return psi_value 60 61 62 def earth_movers_distance(dist1: Union[np.ndarray, pd.Series], dist2: Union[np.ndarray, pd.Series]): 63 """ 64 Calculate the Earth Movers Distance (Wasserstein distance). 65 66 See https://en.wikipedia.org/wiki/Wasserstein_metric 67 68 Function is for numerical data only. 69 70 Parameters 71 ---------- 72 dist1 : Union[np.ndarray, pd.Series] 73 array of numberical values. 74 dist2 : Union[np.ndarray, pd.Series] 75 array of numberical values to compare dist1 to. 
76 Returns 77 ------- 78 Any 79 the Wasserstein distance between the two distributions. 80 81 """ 82 unique1 = np.unique(dist1) 83 unique2 = np.unique(dist2) 84 85 sample_space = list(set(unique1).union(set(unique2))) 86 87 val_max = max(sample_space) 88 val_min = min(sample_space) 89 90 if val_max == val_min: 91 return 0 92 93 dist1 = (dist1 - val_min) / (val_max - val_min) 94 dist2 = (dist2 - val_min) / (val_max - val_min) 95 96 return wasserstein_distance(dist1, dist2) 97 98 99 def calc_drift_and_plot(train_column: pd.Series, test_column: pd.Series, plot_title: Hashable, 100 column_type: str, max_num_categories: int = 10) -> Tuple[float, str, Callable]: 101 """ 102 Calculate drift score per column. 103 104 Parameters 105 ---------- 106 train_column : pd.Series 107 column from train dataset 108 test_column : pd.Series 109 same column from test dataset 110 plot_title : Hashable 111 title of plot 112 column_type : str 113 type of column (either "numerical" or "categorical") 114 max_num_categories : int , default: 10 115 Max number of allowed categories. If there are more, they are binned into an "Other" category. 116 Returns 117 ------- 118 Tuple[float, str, Callable] 119 drift score of the difference between the two columns' distributions (Earth movers distance for 120 numerical, PSI for categorical) 121 graph comparing the two distributions (density for numerical, stack bar for categorical) 122 """ 123 train_dist = train_column.dropna().values.reshape(-1) 124 test_dist = test_column.dropna().values.reshape(-1) 125 126 if column_type == 'numerical': 127 scorer_name = "Earth Mover's Distance" 128 129 train_dist = train_dist.astype('float') 130 test_dist = test_dist.astype('float') 131 132 score = earth_movers_distance(dist1=train_dist, dist2=test_dist) 133 134 bar_traces, bar_x_axis, bar_y_axis = drift_score_bar_traces(score) 135 dist_traces, dist_x_axis, dist_y_axis = feature_distribution_traces(train_dist, test_dist) 136 137 elif column_type == 'categorical': 138 scorer_name = 'PSI' 139 expected_percents, actual_percents, _ = \ 140 preprocess_2_cat_cols_to_same_bins(dist1=train_dist, dist2=test_dist, max_num_categories=max_num_categories) 141 score = psi(expected_percents=expected_percents, actual_percents=actual_percents) 142 143 bar_traces, bar_x_axis, bar_y_axis = drift_score_bar_traces(score, bar_max=1) 144 dist_traces, dist_x_axis, dist_y_axis = feature_distribution_traces(train_dist, test_dist, is_categorical=True, 145 max_num_categories=max_num_categories) 146 else: 147 # Should never reach here 148 raise DeepchecksValueError(f'Unsupported column type for drift: {column_type}') 149 150 fig = make_subplots(rows=2, cols=1, vertical_spacing=0.2, shared_yaxes=False, shared_xaxes=False, 151 row_heights=[0.1, 0.9], 152 subplot_titles=['Drift Score - ' + scorer_name, 'Distribution Plot']) 153 154 fig.add_traces(bar_traces, rows=[1] * len(bar_traces), cols=[1] * len(bar_traces)) 155 fig.add_traces(dist_traces, rows=[2] * len(dist_traces), cols=[1] * len(dist_traces)) 156 157 shared_layout = go.Layout( 158 xaxis=bar_x_axis, 159 yaxis=bar_y_axis, 160 xaxis2=dist_x_axis, 161 yaxis2=dist_y_axis, 162 legend=dict( 163 title='Dataset', 164 yanchor='top', 165 y=0.6), 166 width=700, 167 height=400, 168 title=plot_title 169 ) 170 171 fig.update_layout(shared_layout) 172 173 return score, scorer_name, fig 174 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/deepchecks/utils/distribution/drift.py b/deepchecks/utils/distribution/drift.py
--- a/deepchecks/utils/distribution/drift.py
+++ b/deepchecks/utils/distribution/drift.py
@@ -149,7 +149,7 @@
 
     fig = make_subplots(rows=2, cols=1, vertical_spacing=0.2, shared_yaxes=False, shared_xaxes=False,
                         row_heights=[0.1, 0.9],
-                        subplot_titles=['Drift Score - ' + scorer_name, 'Distribution Plot'])
+                        subplot_titles=[f'Drift Score ({scorer_name})', 'Distribution Plot'])
 
     fig.add_traces(bar_traces, rows=[1] * len(bar_traces), cols=[1] * len(bar_traces))
     fig.add_traces(dist_traces, rows=[2] * len(dist_traces), cols=[1] * len(dist_traces))
@@ -165,7 +165,7 @@
             y=0.6),
         width=700,
         height=400,
-        title=plot_title
+        title=dict(text=plot_title, x=0.5, xanchor='center')
     )
 
     fig.update_layout(shared_layout)
{"golden_diff": "diff --git a/deepchecks/utils/distribution/drift.py b/deepchecks/utils/distribution/drift.py\n--- a/deepchecks/utils/distribution/drift.py\n+++ b/deepchecks/utils/distribution/drift.py\n@@ -149,7 +149,7 @@\n \n fig = make_subplots(rows=2, cols=1, vertical_spacing=0.2, shared_yaxes=False, shared_xaxes=False,\n row_heights=[0.1, 0.9],\n- subplot_titles=['Drift Score - ' + scorer_name, 'Distribution Plot'])\n+ subplot_titles=[f'Drift Score ({scorer_name})', 'Distribution Plot'])\n \n fig.add_traces(bar_traces, rows=[1] * len(bar_traces), cols=[1] * len(bar_traces))\n fig.add_traces(dist_traces, rows=[2] * len(dist_traces), cols=[1] * len(dist_traces))\n@@ -165,7 +165,7 @@\n y=0.6),\n width=700,\n height=400,\n- title=plot_title\n+ title=dict(text=plot_title, x=0.5, xanchor='center')\n )\n \n fig.update_layout(shared_layout)\n", "issue": "[BUG] Drift titles are not clearly connected to their plots\n![image](https://user-images.githubusercontent.com/17730502/156195682-d5949e14-b152-41c8-b16c-222c4ff071ea.png)\r\n\r\nThis example (taken from https://docs.deepchecks.com/en/latest/examples/vision/guides/quickstart_in_5_minutes.html) shows that it is very hard to understand that the \"area\" title is the property that is drifted. This happens in the common code for drift plots. Need to find a clearer way to indicate the drifted feature, either:\r\n1. Find a way to center and enlarge the title / add bold / underscore (while still having the \"Additional output\" title be superior to it)\r\n2. Add the feature name also to the title of the plots themselves. \n", "before_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. 
If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"Common utilities for distribution checks.\"\"\"\n\nfrom typing import Tuple, Union, Hashable, Callable\n\nfrom scipy.stats import wasserstein_distance\nimport numpy as np\nimport pandas as pd\n\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom deepchecks.utils.distribution.plot import drift_score_bar_traces, feature_distribution_traces\nfrom deepchecks.utils.distribution.preprocessing import preprocess_2_cat_cols_to_same_bins\nfrom deepchecks.core.errors import DeepchecksValueError\n\n\nPSI_MIN_PERCENTAGE = 0.01\n\n\n__all__ = ['calc_drift_and_plot']\n\n\ndef psi(expected_percents: np.ndarray, actual_percents: np.ndarray):\n \"\"\"\n Calculate the PSI (Population Stability Index).\n\n See https://www.lexjansen.com/wuss/2017/47_Final_Paper_PDF.pdf\n\n Parameters\n ----------\n expected_percents: np.ndarray\n array of percentages of each value in the expected distribution.\n actual_percents: : np.ndarray\n array of percentages of each value in the actual distribution.\n Returns\n -------\n psi\n The PSI score\n\n \"\"\"\n psi_value = 0\n for i in range(len(expected_percents)):\n # In order for the value not to diverge, we cap our min percentage value\n e_perc = max(expected_percents[i], PSI_MIN_PERCENTAGE)\n a_perc = max(actual_percents[i], PSI_MIN_PERCENTAGE)\n value = (e_perc - a_perc) * np.log(e_perc / a_perc)\n psi_value += value\n\n return psi_value\n\n\ndef earth_movers_distance(dist1: Union[np.ndarray, pd.Series], dist2: Union[np.ndarray, pd.Series]):\n \"\"\"\n Calculate the Earth Movers Distance (Wasserstein distance).\n\n See https://en.wikipedia.org/wiki/Wasserstein_metric\n\n Function is for numerical data only.\n\n Parameters\n ----------\n dist1 : Union[np.ndarray, pd.Series]\n array of numberical values.\n dist2 : Union[np.ndarray, pd.Series]\n array of numberical values to compare dist1 to.\n Returns\n -------\n Any\n the Wasserstein distance between the two distributions.\n\n \"\"\"\n unique1 = np.unique(dist1)\n unique2 = np.unique(dist2)\n\n sample_space = list(set(unique1).union(set(unique2)))\n\n val_max = max(sample_space)\n val_min = min(sample_space)\n\n if val_max == val_min:\n return 0\n\n dist1 = (dist1 - val_min) / (val_max - val_min)\n dist2 = (dist2 - val_min) / (val_max - val_min)\n\n return wasserstein_distance(dist1, dist2)\n\n\ndef calc_drift_and_plot(train_column: pd.Series, test_column: pd.Series, plot_title: Hashable,\n column_type: str, max_num_categories: int = 10) -> Tuple[float, str, Callable]:\n \"\"\"\n Calculate drift score per column.\n\n Parameters\n ----------\n train_column : pd.Series\n column from train dataset\n test_column : pd.Series\n same column from test dataset\n plot_title : Hashable\n title of plot\n column_type : str\n type of column (either \"numerical\" or \"categorical\")\n max_num_categories : int , default: 10\n Max number of allowed categories. 
If there are more, they are binned into an \"Other\" category.\n Returns\n -------\n Tuple[float, str, Callable]\n drift score of the difference between the two columns' distributions (Earth movers distance for\n numerical, PSI for categorical)\n graph comparing the two distributions (density for numerical, stack bar for categorical)\n \"\"\"\n train_dist = train_column.dropna().values.reshape(-1)\n test_dist = test_column.dropna().values.reshape(-1)\n\n if column_type == 'numerical':\n scorer_name = \"Earth Mover's Distance\"\n\n train_dist = train_dist.astype('float')\n test_dist = test_dist.astype('float')\n\n score = earth_movers_distance(dist1=train_dist, dist2=test_dist)\n\n bar_traces, bar_x_axis, bar_y_axis = drift_score_bar_traces(score)\n dist_traces, dist_x_axis, dist_y_axis = feature_distribution_traces(train_dist, test_dist)\n\n elif column_type == 'categorical':\n scorer_name = 'PSI'\n expected_percents, actual_percents, _ = \\\n preprocess_2_cat_cols_to_same_bins(dist1=train_dist, dist2=test_dist, max_num_categories=max_num_categories)\n score = psi(expected_percents=expected_percents, actual_percents=actual_percents)\n\n bar_traces, bar_x_axis, bar_y_axis = drift_score_bar_traces(score, bar_max=1)\n dist_traces, dist_x_axis, dist_y_axis = feature_distribution_traces(train_dist, test_dist, is_categorical=True,\n max_num_categories=max_num_categories)\n else:\n # Should never reach here\n raise DeepchecksValueError(f'Unsupported column type for drift: {column_type}')\n\n fig = make_subplots(rows=2, cols=1, vertical_spacing=0.2, shared_yaxes=False, shared_xaxes=False,\n row_heights=[0.1, 0.9],\n subplot_titles=['Drift Score - ' + scorer_name, 'Distribution Plot'])\n\n fig.add_traces(bar_traces, rows=[1] * len(bar_traces), cols=[1] * len(bar_traces))\n fig.add_traces(dist_traces, rows=[2] * len(dist_traces), cols=[1] * len(dist_traces))\n\n shared_layout = go.Layout(\n xaxis=bar_x_axis,\n yaxis=bar_y_axis,\n xaxis2=dist_x_axis,\n yaxis2=dist_y_axis,\n legend=dict(\n title='Dataset',\n yanchor='top',\n y=0.6),\n width=700,\n height=400,\n title=plot_title\n )\n\n fig.update_layout(shared_layout)\n\n return score, scorer_name, fig\n", "path": "deepchecks/utils/distribution/drift.py"}], "after_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. 
If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"Common utilities for distribution checks.\"\"\"\n\nfrom typing import Tuple, Union, Hashable, Callable\n\nfrom scipy.stats import wasserstein_distance\nimport numpy as np\nimport pandas as pd\n\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom deepchecks.utils.distribution.plot import drift_score_bar_traces, feature_distribution_traces\nfrom deepchecks.utils.distribution.preprocessing import preprocess_2_cat_cols_to_same_bins\nfrom deepchecks.core.errors import DeepchecksValueError\n\n\nPSI_MIN_PERCENTAGE = 0.01\n\n\n__all__ = ['calc_drift_and_plot']\n\n\ndef psi(expected_percents: np.ndarray, actual_percents: np.ndarray):\n \"\"\"\n Calculate the PSI (Population Stability Index).\n\n See https://www.lexjansen.com/wuss/2017/47_Final_Paper_PDF.pdf\n\n Parameters\n ----------\n expected_percents: np.ndarray\n array of percentages of each value in the expected distribution.\n actual_percents: : np.ndarray\n array of percentages of each value in the actual distribution.\n Returns\n -------\n psi\n The PSI score\n\n \"\"\"\n psi_value = 0\n for i in range(len(expected_percents)):\n # In order for the value not to diverge, we cap our min percentage value\n e_perc = max(expected_percents[i], PSI_MIN_PERCENTAGE)\n a_perc = max(actual_percents[i], PSI_MIN_PERCENTAGE)\n value = (e_perc - a_perc) * np.log(e_perc / a_perc)\n psi_value += value\n\n return psi_value\n\n\ndef earth_movers_distance(dist1: Union[np.ndarray, pd.Series], dist2: Union[np.ndarray, pd.Series]):\n \"\"\"\n Calculate the Earth Movers Distance (Wasserstein distance).\n\n See https://en.wikipedia.org/wiki/Wasserstein_metric\n\n Function is for numerical data only.\n\n Parameters\n ----------\n dist1 : Union[np.ndarray, pd.Series]\n array of numberical values.\n dist2 : Union[np.ndarray, pd.Series]\n array of numberical values to compare dist1 to.\n Returns\n -------\n Any\n the Wasserstein distance between the two distributions.\n\n \"\"\"\n unique1 = np.unique(dist1)\n unique2 = np.unique(dist2)\n\n sample_space = list(set(unique1).union(set(unique2)))\n\n val_max = max(sample_space)\n val_min = min(sample_space)\n\n if val_max == val_min:\n return 0\n\n dist1 = (dist1 - val_min) / (val_max - val_min)\n dist2 = (dist2 - val_min) / (val_max - val_min)\n\n return wasserstein_distance(dist1, dist2)\n\n\ndef calc_drift_and_plot(train_column: pd.Series, test_column: pd.Series, plot_title: Hashable,\n column_type: str, max_num_categories: int = 10) -> Tuple[float, str, Callable]:\n \"\"\"\n Calculate drift score per column.\n\n Parameters\n ----------\n train_column : pd.Series\n column from train dataset\n test_column : pd.Series\n same column from test dataset\n plot_title : Hashable\n title of plot\n column_type : str\n type of column (either \"numerical\" or \"categorical\")\n max_num_categories : int , default: 10\n Max number of allowed categories. 
If there are more, they are binned into an \"Other\" category.\n Returns\n -------\n Tuple[float, str, Callable]\n drift score of the difference between the two columns' distributions (Earth movers distance for\n numerical, PSI for categorical)\n graph comparing the two distributions (density for numerical, stack bar for categorical)\n \"\"\"\n train_dist = train_column.dropna().values.reshape(-1)\n test_dist = test_column.dropna().values.reshape(-1)\n\n if column_type == 'numerical':\n scorer_name = \"Earth Mover's Distance\"\n\n train_dist = train_dist.astype('float')\n test_dist = test_dist.astype('float')\n\n score = earth_movers_distance(dist1=train_dist, dist2=test_dist)\n\n bar_traces, bar_x_axis, bar_y_axis = drift_score_bar_traces(score)\n dist_traces, dist_x_axis, dist_y_axis = feature_distribution_traces(train_dist, test_dist)\n\n elif column_type == 'categorical':\n scorer_name = 'PSI'\n expected_percents, actual_percents, _ = \\\n preprocess_2_cat_cols_to_same_bins(dist1=train_dist, dist2=test_dist, max_num_categories=max_num_categories)\n score = psi(expected_percents=expected_percents, actual_percents=actual_percents)\n\n bar_traces, bar_x_axis, bar_y_axis = drift_score_bar_traces(score, bar_max=1)\n dist_traces, dist_x_axis, dist_y_axis = feature_distribution_traces(train_dist, test_dist, is_categorical=True,\n max_num_categories=max_num_categories)\n else:\n # Should never reach here\n raise DeepchecksValueError(f'Unsupported column type for drift: {column_type}')\n\n fig = make_subplots(rows=2, cols=1, vertical_spacing=0.2, shared_yaxes=False, shared_xaxes=False,\n row_heights=[0.1, 0.9],\n subplot_titles=[f'Drift Score ({scorer_name})', 'Distribution Plot'])\n\n fig.add_traces(bar_traces, rows=[1] * len(bar_traces), cols=[1] * len(bar_traces))\n fig.add_traces(dist_traces, rows=[2] * len(dist_traces), cols=[1] * len(dist_traces))\n\n shared_layout = go.Layout(\n xaxis=bar_x_axis,\n yaxis=bar_y_axis,\n xaxis2=dist_x_axis,\n yaxis2=dist_y_axis,\n legend=dict(\n title='Dataset',\n yanchor='top',\n y=0.6),\n width=700,\n height=400,\n title=dict(text=plot_title, x=0.5, xanchor='center')\n )\n\n fig.update_layout(shared_layout)\n\n return score, scorer_name, fig\n", "path": "deepchecks/utils/distribution/drift.py"}]}
2,307
276
gh_patches_debug_28599
rasdani/github-patches
git_diff
google__mobly-538
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Errors in `teardown_class` are not properly recorded If Mobly encounters an error in the cleanup stage of `teardown_class`, the error would not be recorded anywhere except the cli output, which makes debugging difficult. This points to a bigger problem: the errors occurred in base test and runner themselves are not clearly handled. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mobly/controller_manager.py` Content: ``` 1 # Copyright 2018 Google Inc. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 """ Module for Mobly controller management.""" 15 import collections 16 import copy 17 import logging 18 import yaml 19 20 from mobly import records 21 from mobly import signals 22 23 24 def verify_controller_module(module): 25 """Verifies a module object follows the required interface for 26 controllers. 27 28 The interface is explained in the docstring of 29 `base_test.BaseTestClass.register_controller`. 30 31 Args: 32 module: An object that is a controller module. This is usually 33 imported with import statements or loaded by importlib. 34 35 Raises: 36 ControllerError: if the module does not match the Mobly controller 37 interface, or one of the required members is null. 38 """ 39 required_attributes = ('create', 'destroy', 'MOBLY_CONTROLLER_CONFIG_NAME') 40 for attr in required_attributes: 41 if not hasattr(module, attr): 42 raise signals.ControllerError( 43 'Module %s missing required controller module attribute' 44 ' %s.' % (module.__name__, attr)) 45 if not getattr(module, attr): 46 raise signals.ControllerError( 47 'Controller interface %s in %s cannot be null.' % 48 (attr, module.__name__)) 49 50 51 class ControllerManager(object): 52 """Manages the controller objects for Mobly. 53 54 This manages the life cycles and info retrieval of all controller objects 55 used in a test. 56 57 Attributes: 58 controller_configs: dict, controller configs provided by the user via 59 test bed config. 60 """ 61 62 def __init__(self, class_name, controller_configs): 63 # Controller object management. 64 self._controller_objects = collections.OrderedDict( 65 ) # controller_name: objects 66 self._controller_modules = {} # controller_name: module 67 self._class_name = class_name 68 self.controller_configs = controller_configs 69 70 def register_controller(self, module, required=True, min_number=1): 71 """Loads a controller module and returns its loaded devices. 72 73 This is to be used in a mobly test class. 74 75 Args: 76 module: A module that follows the controller module interface. 77 required: A bool. If True, failing to register the specified 78 controller module raises exceptions. If False, the objects 79 failed to instantiate will be skipped. 80 min_number: An integer that is the minimum number of controller 81 objects to be created. 
Default is one, since you should not 82 register a controller module without expecting at least one 83 object. 84 85 Returns: 86 A list of controller objects instantiated from controller_module, or 87 None if no config existed for this controller and it was not a 88 required controller. 89 90 Raises: 91 ControllerError: 92 * The controller module has already been registered. 93 * The actual number of objects instantiated is less than the 94 * `min_number`. 95 * `required` is True and no corresponding config can be found. 96 * Any other error occurred in the registration process. 97 """ 98 verify_controller_module(module) 99 # Use the module's name as the ref name 100 module_ref_name = module.__name__.split('.')[-1] 101 if module_ref_name in self._controller_objects: 102 raise signals.ControllerError( 103 'Controller module %s has already been registered. It cannot ' 104 'be registered again.' % module_ref_name) 105 # Create controller objects. 106 module_config_name = module.MOBLY_CONTROLLER_CONFIG_NAME 107 if module_config_name not in self.controller_configs: 108 if required: 109 raise signals.ControllerError( 110 'No corresponding config found for %s' % 111 module_config_name) 112 logging.warning( 113 'No corresponding config found for optional controller %s', 114 module_config_name) 115 return None 116 try: 117 # Make a deep copy of the config to pass to the controller module, 118 # in case the controller module modifies the config internally. 119 original_config = self.controller_configs[module_config_name] 120 controller_config = copy.deepcopy(original_config) 121 objects = module.create(controller_config) 122 except: 123 logging.exception( 124 'Failed to initialize objects for controller %s, abort!', 125 module_config_name) 126 raise 127 if not isinstance(objects, list): 128 raise signals.ControllerError( 129 'Controller module %s did not return a list of objects, abort.' 130 % module_ref_name) 131 # Check we got enough controller objects to continue. 132 actual_number = len(objects) 133 if actual_number < min_number: 134 module.destroy(objects) 135 raise signals.ControllerError( 136 'Expected to get at least %d controller objects, got %d.' % 137 (min_number, actual_number)) 138 # Save a shallow copy of the list for internal usage, so tests can't 139 # affect internal registry by manipulating the object list. 140 self._controller_objects[module_ref_name] = copy.copy(objects) 141 logging.debug('Found %d objects for controller %s', len(objects), 142 module_config_name) 143 self._controller_modules[module_ref_name] = module 144 return objects 145 146 def unregister_controllers(self): 147 """Destroy controller objects and clear internal registry. 148 149 This will be called after each test class. 150 """ 151 # TODO(xpconanfan): actually record these errors instead of just 152 # logging them. 153 for name, module in self._controller_modules.items(): 154 logging.debug('Destroying %s.', name) 155 try: 156 module.destroy(self._controller_objects[name]) 157 except: 158 logging.exception('Exception occurred destroying %s.', name) 159 self._controller_objects = collections.OrderedDict() 160 self._controller_modules = {} 161 162 def _create_controller_info_record(self, controller_module_name): 163 """Creates controller info record for a particular controller type. 164 165 Info is retrieved from all the controller objects spawned from the 166 specified module, using the controller module's `get_info` function. 
167 168 Args: 169 controller_module_name: string, the name of the controller module 170 to retrieve info from. 171 172 Returns: 173 A records.ControllerInfoRecord object. 174 """ 175 module = self._controller_modules[controller_module_name] 176 controller_info = None 177 try: 178 controller_info = module.get_info( 179 copy.copy(self._controller_objects[controller_module_name])) 180 except AttributeError: 181 logging.warning('No optional debug info found for controller ' 182 '%s. To provide it, implement `get_info`.', 183 controller_module_name) 184 try: 185 yaml.dump(controller_info) 186 except TypeError: 187 logging.warning('The info of controller %s in class "%s" is not ' 188 'YAML serializable! Coercing it to string.', 189 controller_module_name, self._class_name) 190 controller_info = str(controller_info) 191 return records.ControllerInfoRecord( 192 self._class_name, module.MOBLY_CONTROLLER_CONFIG_NAME, 193 controller_info) 194 195 def get_controller_info_records(self): 196 """Get the info records for all the controller objects in the manager. 197 198 New info records for each controller object are created for every call 199 so the latest info is included. 200 201 Returns: 202 List of records.ControllerInfoRecord objects. Each opject conatins 203 the info of a type of controller 204 """ 205 info_records = [] 206 for controller_module_name in self._controller_objects.keys(): 207 record = self._create_controller_info_record( 208 controller_module_name) 209 if record: 210 info_records.append(record) 211 return info_records 212 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mobly/controller_manager.py b/mobly/controller_manager.py
--- a/mobly/controller_manager.py
+++ b/mobly/controller_manager.py
@@ -17,6 +17,7 @@
 import logging
 import yaml
 
+from mobly import expects
 from mobly import records
 from mobly import signals
 
@@ -152,10 +153,9 @@
         # logging them.
         for name, module in self._controller_modules.items():
             logging.debug('Destroying %s.', name)
-            try:
+            with expects.expect_no_raises(
+                    'Exception occurred destroying %s.' % name):
                 module.destroy(self._controller_objects[name])
-            except:
-                logging.exception('Exception occurred destroying %s.', name)
         self._controller_objects = collections.OrderedDict()
         self._controller_modules = {}
 
@@ -204,8 +204,11 @@
         """
        info_records = []
         for controller_module_name in self._controller_objects.keys():
-            record = self._create_controller_info_record(
-                controller_module_name)
-            if record:
-                info_records.append(record)
+            with expects.expect_no_raises(
+                    'Failed to collect controller info from %s' %
+                    controller_module_name):
+                record = self._create_controller_info_record(
+                    controller_module_name)
+                if record:
+                    info_records.append(record)
         return info_records
{"golden_diff": "diff --git a/mobly/controller_manager.py b/mobly/controller_manager.py\n--- a/mobly/controller_manager.py\n+++ b/mobly/controller_manager.py\n@@ -17,6 +17,7 @@\n import logging\n import yaml\n \n+from mobly import expects\n from mobly import records\n from mobly import signals\n \n@@ -152,10 +153,9 @@\n # logging them.\n for name, module in self._controller_modules.items():\n logging.debug('Destroying %s.', name)\n- try:\n+ with expects.expect_no_raises(\n+ 'Exception occurred destroying %s.' % name):\n module.destroy(self._controller_objects[name])\n- except:\n- logging.exception('Exception occurred destroying %s.', name)\n self._controller_objects = collections.OrderedDict()\n self._controller_modules = {}\n \n@@ -204,8 +204,11 @@\n \"\"\"\n info_records = []\n for controller_module_name in self._controller_objects.keys():\n- record = self._create_controller_info_record(\n- controller_module_name)\n- if record:\n- info_records.append(record)\n+ with expects.expect_no_raises(\n+ 'Failed to collect controller info from %s' %\n+ controller_module_name):\n+ record = self._create_controller_info_record(\n+ controller_module_name)\n+ if record:\n+ info_records.append(record)\n return info_records\n", "issue": "Errors in `teardown_class` are not properly recorded\nIf Mobly encounters an error in the cleanup stage of `teardown_class`, the error would not be recorded anywhere except the cli output, which makes debugging difficult.\r\n\r\nThis points to a bigger problem: the errors occurred in base test and runner themselves are not clearly handled.\n", "before_files": [{"content": "# Copyright 2018 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Module for Mobly controller management.\"\"\"\nimport collections\nimport copy\nimport logging\nimport yaml\n\nfrom mobly import records\nfrom mobly import signals\n\n\ndef verify_controller_module(module):\n \"\"\"Verifies a module object follows the required interface for\n controllers.\n\n The interface is explained in the docstring of\n `base_test.BaseTestClass.register_controller`.\n\n Args:\n module: An object that is a controller module. This is usually\n imported with import statements or loaded by importlib.\n\n Raises:\n ControllerError: if the module does not match the Mobly controller\n interface, or one of the required members is null.\n \"\"\"\n required_attributes = ('create', 'destroy', 'MOBLY_CONTROLLER_CONFIG_NAME')\n for attr in required_attributes:\n if not hasattr(module, attr):\n raise signals.ControllerError(\n 'Module %s missing required controller module attribute'\n ' %s.' % (module.__name__, attr))\n if not getattr(module, attr):\n raise signals.ControllerError(\n 'Controller interface %s in %s cannot be null.' 
%\n (attr, module.__name__))\n\n\nclass ControllerManager(object):\n \"\"\"Manages the controller objects for Mobly.\n\n This manages the life cycles and info retrieval of all controller objects\n used in a test.\n\n Attributes:\n controller_configs: dict, controller configs provided by the user via\n test bed config.\n \"\"\"\n\n def __init__(self, class_name, controller_configs):\n # Controller object management.\n self._controller_objects = collections.OrderedDict(\n ) # controller_name: objects\n self._controller_modules = {} # controller_name: module\n self._class_name = class_name\n self.controller_configs = controller_configs\n\n def register_controller(self, module, required=True, min_number=1):\n \"\"\"Loads a controller module and returns its loaded devices.\n\n This is to be used in a mobly test class.\n\n Args:\n module: A module that follows the controller module interface.\n required: A bool. If True, failing to register the specified\n controller module raises exceptions. If False, the objects\n failed to instantiate will be skipped.\n min_number: An integer that is the minimum number of controller\n objects to be created. Default is one, since you should not\n register a controller module without expecting at least one\n object.\n\n Returns:\n A list of controller objects instantiated from controller_module, or\n None if no config existed for this controller and it was not a\n required controller.\n\n Raises:\n ControllerError:\n * The controller module has already been registered.\n * The actual number of objects instantiated is less than the\n * `min_number`.\n * `required` is True and no corresponding config can be found.\n * Any other error occurred in the registration process.\n \"\"\"\n verify_controller_module(module)\n # Use the module's name as the ref name\n module_ref_name = module.__name__.split('.')[-1]\n if module_ref_name in self._controller_objects:\n raise signals.ControllerError(\n 'Controller module %s has already been registered. It cannot '\n 'be registered again.' % module_ref_name)\n # Create controller objects.\n module_config_name = module.MOBLY_CONTROLLER_CONFIG_NAME\n if module_config_name not in self.controller_configs:\n if required:\n raise signals.ControllerError(\n 'No corresponding config found for %s' %\n module_config_name)\n logging.warning(\n 'No corresponding config found for optional controller %s',\n module_config_name)\n return None\n try:\n # Make a deep copy of the config to pass to the controller module,\n # in case the controller module modifies the config internally.\n original_config = self.controller_configs[module_config_name]\n controller_config = copy.deepcopy(original_config)\n objects = module.create(controller_config)\n except:\n logging.exception(\n 'Failed to initialize objects for controller %s, abort!',\n module_config_name)\n raise\n if not isinstance(objects, list):\n raise signals.ControllerError(\n 'Controller module %s did not return a list of objects, abort.'\n % module_ref_name)\n # Check we got enough controller objects to continue.\n actual_number = len(objects)\n if actual_number < min_number:\n module.destroy(objects)\n raise signals.ControllerError(\n 'Expected to get at least %d controller objects, got %d.' 
%\n (min_number, actual_number))\n # Save a shallow copy of the list for internal usage, so tests can't\n # affect internal registry by manipulating the object list.\n self._controller_objects[module_ref_name] = copy.copy(objects)\n logging.debug('Found %d objects for controller %s', len(objects),\n module_config_name)\n self._controller_modules[module_ref_name] = module\n return objects\n\n def unregister_controllers(self):\n \"\"\"Destroy controller objects and clear internal registry.\n\n This will be called after each test class.\n \"\"\"\n # TODO(xpconanfan): actually record these errors instead of just\n # logging them.\n for name, module in self._controller_modules.items():\n logging.debug('Destroying %s.', name)\n try:\n module.destroy(self._controller_objects[name])\n except:\n logging.exception('Exception occurred destroying %s.', name)\n self._controller_objects = collections.OrderedDict()\n self._controller_modules = {}\n\n def _create_controller_info_record(self, controller_module_name):\n \"\"\"Creates controller info record for a particular controller type.\n\n Info is retrieved from all the controller objects spawned from the\n specified module, using the controller module's `get_info` function.\n\n Args:\n controller_module_name: string, the name of the controller module\n to retrieve info from.\n\n Returns:\n A records.ControllerInfoRecord object.\n \"\"\"\n module = self._controller_modules[controller_module_name]\n controller_info = None\n try:\n controller_info = module.get_info(\n copy.copy(self._controller_objects[controller_module_name]))\n except AttributeError:\n logging.warning('No optional debug info found for controller '\n '%s. To provide it, implement `get_info`.',\n controller_module_name)\n try:\n yaml.dump(controller_info)\n except TypeError:\n logging.warning('The info of controller %s in class \"%s\" is not '\n 'YAML serializable! Coercing it to string.',\n controller_module_name, self._class_name)\n controller_info = str(controller_info)\n return records.ControllerInfoRecord(\n self._class_name, module.MOBLY_CONTROLLER_CONFIG_NAME,\n controller_info)\n\n def get_controller_info_records(self):\n \"\"\"Get the info records for all the controller objects in the manager.\n\n New info records for each controller object are created for every call\n so the latest info is included.\n\n Returns:\n List of records.ControllerInfoRecord objects. 
Each opject conatins\n the info of a type of controller\n \"\"\"\n info_records = []\n for controller_module_name in self._controller_objects.keys():\n record = self._create_controller_info_record(\n controller_module_name)\n if record:\n info_records.append(record)\n return info_records\n", "path": "mobly/controller_manager.py"}], "after_files": [{"content": "# Copyright 2018 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Module for Mobly controller management.\"\"\"\nimport collections\nimport copy\nimport logging\nimport yaml\n\nfrom mobly import expects\nfrom mobly import records\nfrom mobly import signals\n\n\ndef verify_controller_module(module):\n \"\"\"Verifies a module object follows the required interface for\n controllers.\n\n The interface is explained in the docstring of\n `base_test.BaseTestClass.register_controller`.\n\n Args:\n module: An object that is a controller module. This is usually\n imported with import statements or loaded by importlib.\n\n Raises:\n ControllerError: if the module does not match the Mobly controller\n interface, or one of the required members is null.\n \"\"\"\n required_attributes = ('create', 'destroy', 'MOBLY_CONTROLLER_CONFIG_NAME')\n for attr in required_attributes:\n if not hasattr(module, attr):\n raise signals.ControllerError(\n 'Module %s missing required controller module attribute'\n ' %s.' % (module.__name__, attr))\n if not getattr(module, attr):\n raise signals.ControllerError(\n 'Controller interface %s in %s cannot be null.' %\n (attr, module.__name__))\n\n\nclass ControllerManager(object):\n \"\"\"Manages the controller objects for Mobly.\n\n This manages the life cycles and info retrieval of all controller objects\n used in a test.\n\n Attributes:\n controller_configs: dict, controller configs provided by the user via\n test bed config.\n \"\"\"\n\n def __init__(self, class_name, controller_configs):\n # Controller object management.\n self._controller_objects = collections.OrderedDict(\n ) # controller_name: objects\n self._controller_modules = {} # controller_name: module\n self._class_name = class_name\n self.controller_configs = controller_configs\n\n def register_controller(self, module, required=True, min_number=1):\n \"\"\"Loads a controller module and returns its loaded devices.\n\n This is to be used in a mobly test class.\n\n Args:\n module: A module that follows the controller module interface.\n required: A bool. If True, failing to register the specified\n controller module raises exceptions. If False, the objects\n failed to instantiate will be skipped.\n min_number: An integer that is the minimum number of controller\n objects to be created. 
Default is one, since you should not\n register a controller module without expecting at least one\n object.\n\n Returns:\n A list of controller objects instantiated from controller_module, or\n None if no config existed for this controller and it was not a\n required controller.\n\n Raises:\n ControllerError:\n * The controller module has already been registered.\n * The actual number of objects instantiated is less than the\n * `min_number`.\n * `required` is True and no corresponding config can be found.\n * Any other error occurred in the registration process.\n \"\"\"\n verify_controller_module(module)\n # Use the module's name as the ref name\n module_ref_name = module.__name__.split('.')[-1]\n if module_ref_name in self._controller_objects:\n raise signals.ControllerError(\n 'Controller module %s has already been registered. It cannot '\n 'be registered again.' % module_ref_name)\n # Create controller objects.\n module_config_name = module.MOBLY_CONTROLLER_CONFIG_NAME\n if module_config_name not in self.controller_configs:\n if required:\n raise signals.ControllerError(\n 'No corresponding config found for %s' %\n module_config_name)\n logging.warning(\n 'No corresponding config found for optional controller %s',\n module_config_name)\n return None\n try:\n # Make a deep copy of the config to pass to the controller module,\n # in case the controller module modifies the config internally.\n original_config = self.controller_configs[module_config_name]\n controller_config = copy.deepcopy(original_config)\n objects = module.create(controller_config)\n except:\n logging.exception(\n 'Failed to initialize objects for controller %s, abort!',\n module_config_name)\n raise\n if not isinstance(objects, list):\n raise signals.ControllerError(\n 'Controller module %s did not return a list of objects, abort.'\n % module_ref_name)\n # Check we got enough controller objects to continue.\n actual_number = len(objects)\n if actual_number < min_number:\n module.destroy(objects)\n raise signals.ControllerError(\n 'Expected to get at least %d controller objects, got %d.' %\n (min_number, actual_number))\n # Save a shallow copy of the list for internal usage, so tests can't\n # affect internal registry by manipulating the object list.\n self._controller_objects[module_ref_name] = copy.copy(objects)\n logging.debug('Found %d objects for controller %s', len(objects),\n module_config_name)\n self._controller_modules[module_ref_name] = module\n return objects\n\n def unregister_controllers(self):\n \"\"\"Destroy controller objects and clear internal registry.\n\n This will be called after each test class.\n \"\"\"\n # TODO(xpconanfan): actually record these errors instead of just\n # logging them.\n for name, module in self._controller_modules.items():\n logging.debug('Destroying %s.', name)\n with expects.expect_no_raises(\n 'Exception occurred destroying %s.' 
% name):\n module.destroy(self._controller_objects[name])\n self._controller_objects = collections.OrderedDict()\n self._controller_modules = {}\n\n def _create_controller_info_record(self, controller_module_name):\n \"\"\"Creates controller info record for a particular controller type.\n\n Info is retrieved from all the controller objects spawned from the\n specified module, using the controller module's `get_info` function.\n\n Args:\n controller_module_name: string, the name of the controller module\n to retrieve info from.\n\n Returns:\n A records.ControllerInfoRecord object.\n \"\"\"\n module = self._controller_modules[controller_module_name]\n controller_info = None\n try:\n controller_info = module.get_info(\n copy.copy(self._controller_objects[controller_module_name]))\n except AttributeError:\n logging.warning('No optional debug info found for controller '\n '%s. To provide it, implement `get_info`.',\n controller_module_name)\n try:\n yaml.dump(controller_info)\n except TypeError:\n logging.warning('The info of controller %s in class \"%s\" is not '\n 'YAML serializable! Coercing it to string.',\n controller_module_name, self._class_name)\n controller_info = str(controller_info)\n return records.ControllerInfoRecord(\n self._class_name, module.MOBLY_CONTROLLER_CONFIG_NAME,\n controller_info)\n\n def get_controller_info_records(self):\n \"\"\"Get the info records for all the controller objects in the manager.\n\n New info records for each controller object are created for every call\n so the latest info is included.\n\n Returns:\n List of records.ControllerInfoRecord objects. Each opject conatins\n the info of a type of controller\n \"\"\"\n info_records = []\n for controller_module_name in self._controller_objects.keys():\n with expects.expect_no_raises(\n 'Failed to collect controller info from %s' %\n controller_module_name):\n record = self._create_controller_info_record(\n controller_module_name)\n if record:\n info_records.append(record)\n return info_records\n", "path": "mobly/controller_manager.py"}]}
2,522
309
gh_patches_debug_12997
rasdani/github-patches
git_diff
conan-io__conan-center-index-18559
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [package] util-linux-libuuid uses wrong cmake target ### Description In the following lines, the `util-linux-libuuid` recipe sets the cmake target to be `LibUUID::LibUUID` with a filename of `LibUUID-config.cmake`: https://github.com/conan-io/conan-center-index/blob/61c4f7819e6cd3594a57f6c3847f94ab86de623f/recipes/util-linux-libuuid/all/conanfile.py#L112-L113 This was based on the internal practice that Kitware has for their internal libuuid cmake module, however this is not public and a number of packages (czmq, cppcommon) seem to assume a `libuuid::libuuid` target. These change should be reverted such that these packages can utilise util-linux-libuuid without a requirement to be patched. ### Package and Environment Details N/A ### Conan profile N/A ### Steps to reproduce N/A ### Logs <details><summary>Click to expand log</summary> ``` Put your log output here ``` </details> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `recipes/util-linux-libuuid/all/conanfile.py` Content: ``` 1 from conan import ConanFile 2 from conan.errors import ConanInvalidConfiguration 3 from conan.tools.apple import fix_apple_shared_install_name 4 from conan.tools.files import copy, get, rm, rmdir 5 from conan.tools.gnu import Autotools, AutotoolsToolchain, AutotoolsDeps 6 from conan.tools.layout import basic_layout 7 from conan.tools.scm import Version 8 import os 9 10 required_conan_version = ">=1.53.0" 11 12 13 class UtilLinuxLibuuidConan(ConanFile): 14 name = "util-linux-libuuid" 15 description = "Universally unique id library" 16 url = "https://github.com/conan-io/conan-center-index" 17 homepage = "https://github.com/util-linux/util-linux.git" 18 license = "BSD-3-Clause" 19 topics = "id", "identifier", "unique", "uuid" 20 package_type = "library" 21 provides = "libuuid" 22 settings = "os", "arch", "compiler", "build_type" 23 options = { 24 "shared": [True, False], 25 "fPIC": [True, False], 26 } 27 default_options = { 28 "shared": False, 29 "fPIC": True, 30 } 31 32 @property 33 def _has_sys_file_header(self): 34 return self.settings.os in ["FreeBSD", "Linux", "Macos"] 35 36 def config_options(self): 37 if self.settings.os == "Windows": 38 del self.options.fPIC 39 40 def configure(self): 41 if self.options.shared: 42 self.options.rm_safe("fPIC") 43 self.settings.rm_safe("compiler.cppstd") 44 self.settings.rm_safe("compiler.libcxx") 45 46 def layout(self): 47 basic_layout(self, src_folder="src") 48 49 def _minimum_compiler_version(self, compiler, build_type): 50 min_version = { 51 "gcc": { 52 "Release": "4", 53 "Debug": "8", 54 }, 55 "clang": { 56 "Release": "3", 57 "Debug": "3", 58 }, 59 "apple-clang": { 60 "Release": "5", 61 "Debug": "5", 62 }, 63 } 64 return min_version.get(str(compiler), {}).get(str(build_type), "0") 65 66 def validate(self): 67 min_version = self._minimum_compiler_version(self.settings.compiler, self.settings.build_type) 68 if Version(self.settings.compiler.version) < min_version: 69 raise ConanInvalidConfiguration(f"{self.settings.compiler} {self.settings.compiler.version} does not meet the minimum version requirement of version {min_version}") 70 if self.settings.os == "Windows": 71 raise ConanInvalidConfiguration(f"{self.ref} is not supported on Windows") 72 73 def requirements(self): 74 if self.settings.os == "Macos": 75 # Required because libintl.{a,dylib} is not distributed 
via libc on Macos 76 self.requires("libgettext/0.21") 77 78 def source(self): 79 get(self, **self.conan_data["sources"][self.version], strip_root=True) 80 81 def generate(self): 82 tc = AutotoolsToolchain(self) 83 tc.configure_args.append("--disable-all-programs") 84 tc.configure_args.append("--enable-libuuid") 85 if self._has_sys_file_header: 86 tc.extra_defines.append("HAVE_SYS_FILE_H") 87 if "x86" in self.settings.arch: 88 tc.extra_cflags.append("-mstackrealign") 89 tc.generate() 90 91 deps = AutotoolsDeps(self) 92 deps.generate() 93 94 def build(self): 95 autotools = Autotools(self) 96 autotools.configure() 97 autotools.make() 98 99 def package(self): 100 copy(self, "COPYING.BSD-3-Clause", src=os.path.join(self.source_folder, "Documentation", "licenses"), dst=os.path.join(self.package_folder, "licenses")) 101 autotools = Autotools(self) 102 autotools.install() 103 rm(self, "*.la", os.path.join(self.package_folder, "lib")) 104 rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig")) 105 rmdir(self, os.path.join(self.package_folder, "bin")) 106 rmdir(self, os.path.join(self.package_folder, "sbin")) 107 rmdir(self, os.path.join(self.package_folder, "share")) 108 fix_apple_shared_install_name(self) 109 110 def package_info(self): 111 self.cpp_info.set_property("pkg_config_name", "uuid") 112 self.cpp_info.set_property("cmake_target_name", "LibUUID::LibUUID") 113 self.cpp_info.set_property("cmake_file_name", "LibUUID") 114 self.cpp_info.libs = ["uuid"] 115 self.cpp_info.includedirs.append(os.path.join("include", "uuid")) 116 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/recipes/util-linux-libuuid/all/conanfile.py b/recipes/util-linux-libuuid/all/conanfile.py --- a/recipes/util-linux-libuuid/all/conanfile.py +++ b/recipes/util-linux-libuuid/all/conanfile.py @@ -109,7 +109,10 @@ def package_info(self): self.cpp_info.set_property("pkg_config_name", "uuid") - self.cpp_info.set_property("cmake_target_name", "LibUUID::LibUUID") - self.cpp_info.set_property("cmake_file_name", "LibUUID") + self.cpp_info.set_property("cmake_target_name", "libuuid::libuuid") + self.cpp_info.set_property("cmake_file_name", "libuuid") + # Maintain alias to `LibUUID::LibUUID` for previous version of the recipe + self.cpp_info.set_property("cmake_target_aliases", ["LibUUID::LibUUID"]) + self.cpp_info.libs = ["uuid"] self.cpp_info.includedirs.append(os.path.join("include", "uuid"))
{"golden_diff": "diff --git a/recipes/util-linux-libuuid/all/conanfile.py b/recipes/util-linux-libuuid/all/conanfile.py\n--- a/recipes/util-linux-libuuid/all/conanfile.py\n+++ b/recipes/util-linux-libuuid/all/conanfile.py\n@@ -109,7 +109,10 @@\n \n def package_info(self):\n self.cpp_info.set_property(\"pkg_config_name\", \"uuid\")\n- self.cpp_info.set_property(\"cmake_target_name\", \"LibUUID::LibUUID\")\n- self.cpp_info.set_property(\"cmake_file_name\", \"LibUUID\")\n+ self.cpp_info.set_property(\"cmake_target_name\", \"libuuid::libuuid\")\n+ self.cpp_info.set_property(\"cmake_file_name\", \"libuuid\")\n+ # Maintain alias to `LibUUID::LibUUID` for previous version of the recipe\n+ self.cpp_info.set_property(\"cmake_target_aliases\", [\"LibUUID::LibUUID\"])\n+\n self.cpp_info.libs = [\"uuid\"]\n self.cpp_info.includedirs.append(os.path.join(\"include\", \"uuid\"))\n", "issue": "[package] util-linux-libuuid uses wrong cmake target\n### Description\n\nIn the following lines, the `util-linux-libuuid` recipe sets the cmake target to be `LibUUID::LibUUID` with a filename of `LibUUID-config.cmake`:\r\n\r\nhttps://github.com/conan-io/conan-center-index/blob/61c4f7819e6cd3594a57f6c3847f94ab86de623f/recipes/util-linux-libuuid/all/conanfile.py#L112-L113\r\n\r\nThis was based on the internal practice that Kitware has for their internal libuuid cmake module, however this is not public and a number of packages (czmq, cppcommon) seem to assume a `libuuid::libuuid` target. These change should be reverted such that these packages can utilise util-linux-libuuid without a requirement to be patched.\n\n### Package and Environment Details\n\nN/A\n\n### Conan profile\n\nN/A\r\n\n\n### Steps to reproduce\n\nN/A\n\n### Logs\n\n<details><summary>Click to expand log</summary>\r\n\r\n```\r\nPut your log output here\r\n```\r\n\r\n</details>\r\n\n", "before_files": [{"content": "from conan import ConanFile\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.apple import fix_apple_shared_install_name\nfrom conan.tools.files import copy, get, rm, rmdir\nfrom conan.tools.gnu import Autotools, AutotoolsToolchain, AutotoolsDeps\nfrom conan.tools.layout import basic_layout\nfrom conan.tools.scm import Version\nimport os\n\nrequired_conan_version = \">=1.53.0\"\n\n\nclass UtilLinuxLibuuidConan(ConanFile):\n name = \"util-linux-libuuid\"\n description = \"Universally unique id library\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/util-linux/util-linux.git\"\n license = \"BSD-3-Clause\"\n topics = \"id\", \"identifier\", \"unique\", \"uuid\"\n package_type = \"library\"\n provides = \"libuuid\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n }\n\n @property\n def _has_sys_file_header(self):\n return self.settings.os in [\"FreeBSD\", \"Linux\", \"Macos\"]\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n self.options.rm_safe(\"fPIC\")\n self.settings.rm_safe(\"compiler.cppstd\")\n self.settings.rm_safe(\"compiler.libcxx\")\n\n def layout(self):\n basic_layout(self, src_folder=\"src\")\n\n def _minimum_compiler_version(self, compiler, build_type):\n min_version = {\n \"gcc\": {\n \"Release\": \"4\",\n \"Debug\": \"8\",\n },\n \"clang\": {\n \"Release\": \"3\",\n \"Debug\": \"3\",\n },\n \"apple-clang\": {\n \"Release\": 
\"5\",\n \"Debug\": \"5\",\n },\n }\n return min_version.get(str(compiler), {}).get(str(build_type), \"0\")\n\n def validate(self):\n min_version = self._minimum_compiler_version(self.settings.compiler, self.settings.build_type)\n if Version(self.settings.compiler.version) < min_version:\n raise ConanInvalidConfiguration(f\"{self.settings.compiler} {self.settings.compiler.version} does not meet the minimum version requirement of version {min_version}\")\n if self.settings.os == \"Windows\":\n raise ConanInvalidConfiguration(f\"{self.ref} is not supported on Windows\")\n\n def requirements(self):\n if self.settings.os == \"Macos\":\n # Required because libintl.{a,dylib} is not distributed via libc on Macos\n self.requires(\"libgettext/0.21\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version], strip_root=True)\n\n def generate(self):\n tc = AutotoolsToolchain(self)\n tc.configure_args.append(\"--disable-all-programs\")\n tc.configure_args.append(\"--enable-libuuid\")\n if self._has_sys_file_header:\n tc.extra_defines.append(\"HAVE_SYS_FILE_H\")\n if \"x86\" in self.settings.arch:\n tc.extra_cflags.append(\"-mstackrealign\")\n tc.generate()\n\n deps = AutotoolsDeps(self)\n deps.generate()\n\n def build(self):\n autotools = Autotools(self)\n autotools.configure()\n autotools.make()\n\n def package(self):\n copy(self, \"COPYING.BSD-3-Clause\", src=os.path.join(self.source_folder, \"Documentation\", \"licenses\"), dst=os.path.join(self.package_folder, \"licenses\"))\n autotools = Autotools(self)\n autotools.install()\n rm(self, \"*.la\", os.path.join(self.package_folder, \"lib\"))\n rmdir(self, os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n rmdir(self, os.path.join(self.package_folder, \"bin\"))\n rmdir(self, os.path.join(self.package_folder, \"sbin\"))\n rmdir(self, os.path.join(self.package_folder, \"share\"))\n fix_apple_shared_install_name(self)\n\n def package_info(self):\n self.cpp_info.set_property(\"pkg_config_name\", \"uuid\")\n self.cpp_info.set_property(\"cmake_target_name\", \"LibUUID::LibUUID\")\n self.cpp_info.set_property(\"cmake_file_name\", \"LibUUID\")\n self.cpp_info.libs = [\"uuid\"]\n self.cpp_info.includedirs.append(os.path.join(\"include\", \"uuid\"))\n", "path": "recipes/util-linux-libuuid/all/conanfile.py"}], "after_files": [{"content": "from conan import ConanFile\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.apple import fix_apple_shared_install_name\nfrom conan.tools.files import copy, get, rm, rmdir\nfrom conan.tools.gnu import Autotools, AutotoolsToolchain, AutotoolsDeps\nfrom conan.tools.layout import basic_layout\nfrom conan.tools.scm import Version\nimport os\n\nrequired_conan_version = \">=1.53.0\"\n\n\nclass UtilLinuxLibuuidConan(ConanFile):\n name = \"util-linux-libuuid\"\n description = \"Universally unique id library\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/util-linux/util-linux.git\"\n license = \"BSD-3-Clause\"\n topics = \"id\", \"identifier\", \"unique\", \"uuid\"\n package_type = \"library\"\n provides = \"libuuid\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n }\n\n @property\n def _has_sys_file_header(self):\n return self.settings.os in [\"FreeBSD\", \"Linux\", \"Macos\"]\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def 
configure(self):\n if self.options.shared:\n self.options.rm_safe(\"fPIC\")\n self.settings.rm_safe(\"compiler.cppstd\")\n self.settings.rm_safe(\"compiler.libcxx\")\n\n def layout(self):\n basic_layout(self, src_folder=\"src\")\n\n def _minimum_compiler_version(self, compiler, build_type):\n min_version = {\n \"gcc\": {\n \"Release\": \"4\",\n \"Debug\": \"8\",\n },\n \"clang\": {\n \"Release\": \"3\",\n \"Debug\": \"3\",\n },\n \"apple-clang\": {\n \"Release\": \"5\",\n \"Debug\": \"5\",\n },\n }\n return min_version.get(str(compiler), {}).get(str(build_type), \"0\")\n\n def validate(self):\n min_version = self._minimum_compiler_version(self.settings.compiler, self.settings.build_type)\n if Version(self.settings.compiler.version) < min_version:\n raise ConanInvalidConfiguration(f\"{self.settings.compiler} {self.settings.compiler.version} does not meet the minimum version requirement of version {min_version}\")\n if self.settings.os == \"Windows\":\n raise ConanInvalidConfiguration(f\"{self.ref} is not supported on Windows\")\n\n def requirements(self):\n if self.settings.os == \"Macos\":\n # Required because libintl.{a,dylib} is not distributed via libc on Macos\n self.requires(\"libgettext/0.21\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version], strip_root=True)\n\n def generate(self):\n tc = AutotoolsToolchain(self)\n tc.configure_args.append(\"--disable-all-programs\")\n tc.configure_args.append(\"--enable-libuuid\")\n if self._has_sys_file_header:\n tc.extra_defines.append(\"HAVE_SYS_FILE_H\")\n if \"x86\" in self.settings.arch:\n tc.extra_cflags.append(\"-mstackrealign\")\n tc.generate()\n\n deps = AutotoolsDeps(self)\n deps.generate()\n\n def build(self):\n autotools = Autotools(self)\n autotools.configure()\n autotools.make()\n\n def package(self):\n copy(self, \"COPYING.BSD-3-Clause\", src=os.path.join(self.source_folder, \"Documentation\", \"licenses\"), dst=os.path.join(self.package_folder, \"licenses\"))\n autotools = Autotools(self)\n autotools.install()\n rm(self, \"*.la\", os.path.join(self.package_folder, \"lib\"))\n rmdir(self, os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n rmdir(self, os.path.join(self.package_folder, \"bin\"))\n rmdir(self, os.path.join(self.package_folder, \"sbin\"))\n rmdir(self, os.path.join(self.package_folder, \"share\"))\n fix_apple_shared_install_name(self)\n\n def package_info(self):\n self.cpp_info.set_property(\"pkg_config_name\", \"uuid\")\n self.cpp_info.set_property(\"cmake_target_name\", \"libuuid::libuuid\")\n self.cpp_info.set_property(\"cmake_file_name\", \"libuuid\")\n # Maintain alias to `LibUUID::LibUUID` for previous version of the recipe\n self.cpp_info.set_property(\"cmake_target_aliases\", [\"LibUUID::LibUUID\"])\n\n self.cpp_info.libs = [\"uuid\"]\n self.cpp_info.includedirs.append(os.path.join(\"include\", \"uuid\"))\n", "path": "recipes/util-linux-libuuid/all/conanfile.py"}]}
1,759
233
gh_patches_debug_959
rasdani/github-patches
git_diff
getsentry__sentry-52329
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- fix(django): Disable admin on prod Reported here: https://forum.sentry.io/t/sentry-django-admin-portal/12787?u=byk --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/sentry/conf/urls.py` Content: ``` 1 from __future__ import annotations 2 3 from django.conf import settings 4 from django.urls import URLPattern, URLResolver, re_path 5 6 from sentry.web.frontend import csrf_failure 7 from sentry.web.frontend.error_404 import Error404View 8 from sentry.web.frontend.error_500 import Error500View 9 from sentry.web.urls import urlpatterns as web_urlpatterns 10 11 handler404 = Error404View.as_view() 12 handler500 = Error500View.as_view() 13 14 urlpatterns: list[URLResolver | URLPattern] = [ 15 re_path( 16 r"^500/", 17 handler500, 18 name="error-500", 19 ), 20 re_path( 21 r"^404/", 22 handler404, 23 name="error-404", 24 ), 25 re_path( 26 r"^403-csrf-failure/", 27 csrf_failure.view, 28 name="error-403-csrf-failure", 29 ), 30 ] 31 32 if "django.contrib.admin" in settings.INSTALLED_APPS: 33 from sentry import django_admin 34 35 urlpatterns += django_admin.urlpatterns 36 37 urlpatterns += web_urlpatterns 38 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/sentry/conf/urls.py b/src/sentry/conf/urls.py --- a/src/sentry/conf/urls.py +++ b/src/sentry/conf/urls.py @@ -29,7 +29,7 @@ ), ] -if "django.contrib.admin" in settings.INSTALLED_APPS: +if "django.contrib.admin" in settings.INSTALLED_APPS and settings.ADMIN_ENABLED: from sentry import django_admin urlpatterns += django_admin.urlpatterns
{"golden_diff": "diff --git a/src/sentry/conf/urls.py b/src/sentry/conf/urls.py\n--- a/src/sentry/conf/urls.py\n+++ b/src/sentry/conf/urls.py\n@@ -29,7 +29,7 @@\n ),\n ]\n \n-if \"django.contrib.admin\" in settings.INSTALLED_APPS:\n+if \"django.contrib.admin\" in settings.INSTALLED_APPS and settings.ADMIN_ENABLED:\n from sentry import django_admin\n \n urlpatterns += django_admin.urlpatterns\n", "issue": "fix(django): Disable admin on prod\nReported here: https://forum.sentry.io/t/sentry-django-admin-portal/12787?u=byk\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom django.conf import settings\nfrom django.urls import URLPattern, URLResolver, re_path\n\nfrom sentry.web.frontend import csrf_failure\nfrom sentry.web.frontend.error_404 import Error404View\nfrom sentry.web.frontend.error_500 import Error500View\nfrom sentry.web.urls import urlpatterns as web_urlpatterns\n\nhandler404 = Error404View.as_view()\nhandler500 = Error500View.as_view()\n\nurlpatterns: list[URLResolver | URLPattern] = [\n re_path(\n r\"^500/\",\n handler500,\n name=\"error-500\",\n ),\n re_path(\n r\"^404/\",\n handler404,\n name=\"error-404\",\n ),\n re_path(\n r\"^403-csrf-failure/\",\n csrf_failure.view,\n name=\"error-403-csrf-failure\",\n ),\n]\n\nif \"django.contrib.admin\" in settings.INSTALLED_APPS:\n from sentry import django_admin\n\n urlpatterns += django_admin.urlpatterns\n\nurlpatterns += web_urlpatterns\n", "path": "src/sentry/conf/urls.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom django.conf import settings\nfrom django.urls import URLPattern, URLResolver, re_path\n\nfrom sentry.web.frontend import csrf_failure\nfrom sentry.web.frontend.error_404 import Error404View\nfrom sentry.web.frontend.error_500 import Error500View\nfrom sentry.web.urls import urlpatterns as web_urlpatterns\n\nhandler404 = Error404View.as_view()\nhandler500 = Error500View.as_view()\n\nurlpatterns: list[URLResolver | URLPattern] = [\n re_path(\n r\"^500/\",\n handler500,\n name=\"error-500\",\n ),\n re_path(\n r\"^404/\",\n handler404,\n name=\"error-404\",\n ),\n re_path(\n r\"^403-csrf-failure/\",\n csrf_failure.view,\n name=\"error-403-csrf-failure\",\n ),\n]\n\nif \"django.contrib.admin\" in settings.INSTALLED_APPS and settings.ADMIN_ENABLED:\n from sentry import django_admin\n\n urlpatterns += django_admin.urlpatterns\n\nurlpatterns += web_urlpatterns\n", "path": "src/sentry/conf/urls.py"}]}
625
103
gh_patches_debug_26704
rasdani/github-patches
git_diff
ManimCommunity__manim-1209
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ManimDirective needs a `ref_methods` Some of the current documentation uses `:ref_functions:` in the example documentation which is supposed to link to the proper function; however, it is often the case that the function is actually a method. In these instances, a ref_methods would be appropriate to have, and the usage of this in the examples should be updated accordingly. See https://github.com/ManimCommunity/manim/blob/master/docs/source/manim_directive.py#L133-L135 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docs/source/manim_directive.py` Content: ``` 1 r""" 2 A directive for including Manim videos in a Sphinx document 3 =========================================================== 4 5 When rendering the HTML documentation, the ``.. manim::`` directive 6 implemented here allows to include rendered videos. 7 8 Its basic usage that allows processing **inline content** 9 looks as follows:: 10 11 .. manim:: MyScene 12 13 class MyScene(Scene): 14 def construct(self): 15 ... 16 17 It is required to pass the name of the class representing the 18 scene to be rendered to the directive. 19 20 As a second application, the directive can also be used to 21 render scenes that are defined within doctests, for example:: 22 23 .. manim:: DirectiveDoctestExample 24 :ref_classes: Dot 25 26 >>> dot = Dot(color=RED) 27 >>> dot.color 28 <Color #fc6255> 29 >>> class DirectiveDoctestExample(Scene): 30 ... def construct(self): 31 ... self.play(Create(dot)) 32 33 34 Options 35 ------- 36 37 Options can be passed as follows:: 38 39 .. manim:: <Class name> 40 :<option name>: <value> 41 42 The following configuration options are supported by the 43 directive: 44 45 hide_source 46 If this flag is present without argument, 47 the source code is not displayed above the rendered video. 48 49 quality : {'low', 'medium', 'high', 'fourk'} 50 Controls render quality of the video, in analogy to 51 the corresponding command line flags. 52 53 save_as_gif 54 If this flag is present without argument, 55 the scene is rendered as a gif. 56 57 save_last_frame 58 If this flag is present without argument, 59 an image representing the last frame of the scene will 60 be rendered and displayed, instead of a video. 61 62 ref_classes 63 A list of classes, separated by spaces, that is 64 rendered in a reference block after the source code. 65 66 ref_functions 67 A list of functions and methods, separated by spaces, 68 that is rendered in a reference block after the source code. 69 70 """ 71 from docutils import nodes 72 from docutils.parsers.rst import directives, Directive 73 from docutils.statemachine import StringList 74 75 import jinja2 76 import os 77 from os.path import relpath 78 from pathlib import Path 79 from typing import List 80 81 import shutil 82 83 from manim import QUALITIES 84 85 classnamedict = {} 86 87 88 class skip_manim_node(nodes.Admonition, nodes.Element): 89 pass 90 91 92 def visit(self, node, name=""): 93 self.visit_admonition(node, name) 94 95 96 def depart(self, node): 97 self.depart_admonition(node) 98 99 100 def process_name_list(option_input: str, reference_type: str) -> List[str]: 101 r"""Reformats a string of space separated class names 102 as a list of strings containing valid Sphinx references. 
103 104 Tests 105 ----- 106 107 :: 108 109 >>> process_name_list("Tex TexTemplate", "class") 110 [":class:`~.Tex`", ":class:`~.TexTemplate`"] 111 >>> process_name_list("Scene.play Mobject.rotate", "func") 112 [":func:`~.Scene.play`", ":func:`~.Mobject.rotate`"] 113 """ 114 return [f":{reference_type}:`~.{name}`" for name in option_input.split()] 115 116 117 class ManimDirective(Directive): 118 r"""The manim directive, rendering videos while building 119 the documentation. 120 121 See the module docstring for documentation. 122 """ 123 has_content = True 124 required_arguments = 1 125 optional_arguments = 0 126 option_spec = { 127 "hide_source": bool, 128 "quality": lambda arg: directives.choice( 129 arg, ("low", "medium", "high", "fourk") 130 ), 131 "save_as_gif": bool, 132 "save_last_frame": bool, 133 "ref_modules": lambda arg: process_name_list(arg, "mod"), 134 "ref_classes": lambda arg: process_name_list(arg, "class"), 135 "ref_functions": lambda arg: process_name_list(arg, "func"), 136 } 137 final_argument_whitespace = True 138 139 def run(self): 140 if "skip-manim" in self.state.document.settings.env.app.builder.tags.tags: 141 node = skip_manim_node() 142 self.state.nested_parse( 143 StringList(self.content[0]), self.content_offset, node 144 ) 145 return [node] 146 147 from manim import config 148 149 global classnamedict 150 151 clsname = self.arguments[0] 152 if clsname not in classnamedict: 153 classnamedict[clsname] = 1 154 else: 155 classnamedict[clsname] += 1 156 157 hide_source = "hide_source" in self.options 158 save_as_gif = "save_as_gif" in self.options 159 save_last_frame = "save_last_frame" in self.options 160 assert not (save_as_gif and save_last_frame) 161 162 ref_content = ( 163 self.options.get("ref_modules", []) 164 + self.options.get("ref_classes", []) 165 + self.options.get("ref_functions", []) 166 ) 167 if ref_content: 168 ref_block = f""" 169 .. admonition:: Example References 170 :class: example-reference 171 172 {' '.join(ref_content)}""" 173 else: 174 ref_block = "" 175 176 if "quality" in self.options: 177 quality = f'{self.options["quality"]}_quality' 178 else: 179 quality = "example_quality" 180 frame_rate = QUALITIES[quality]["frame_rate"] 181 pixel_height = QUALITIES[quality]["pixel_height"] 182 pixel_width = QUALITIES[quality]["pixel_width"] 183 qualitydir = f"{pixel_height}p{frame_rate}" 184 185 state_machine = self.state_machine 186 document = state_machine.document 187 188 source_file_name = document.attributes["source"] 189 source_rel_name = relpath(source_file_name, setup.confdir) 190 source_rel_dir = os.path.dirname(source_rel_name) 191 while source_rel_dir.startswith(os.path.sep): 192 source_rel_dir = source_rel_dir[1:] 193 194 dest_dir = os.path.abspath( 195 os.path.join(setup.app.builder.outdir, source_rel_dir) 196 ) 197 if not os.path.exists(dest_dir): 198 os.makedirs(dest_dir) 199 200 source_block = [ 201 ".. 
code-block:: python", 202 "", 203 *[" " + line for line in self.content], 204 ] 205 source_block = "\n".join(source_block) 206 207 config.media_dir = Path(setup.confdir) / "media" 208 config.images_dir = "{media_dir}/images" 209 config.video_dir = "{media_dir}/videos/{quality}" 210 output_file = f"{clsname}-{classnamedict[clsname]}" 211 config.assets_dir = Path("_static") 212 213 config_code = [ 214 f'config["frame_rate"] = {frame_rate}', 215 f'config["pixel_height"] = {pixel_height}', 216 f'config["pixel_width"] = {pixel_width}', 217 f'config["save_last_frame"] = {save_last_frame}', 218 f'config["save_as_gif"] = {save_as_gif}', 219 f'config["output_file"] = r"{output_file}"', 220 ] 221 222 user_code = self.content 223 if user_code[0].startswith(">>> "): # check whether block comes from doctest 224 user_code = [ 225 line[4:] for line in user_code if line.startswith((">>> ", "... ")) 226 ] 227 228 code = [ 229 "from manim import *", 230 *config_code, 231 *user_code, 232 f"{clsname}().render()", 233 ] 234 exec("\n".join(code), globals()) 235 236 # copy video file to output directory 237 if not (save_as_gif or save_last_frame): 238 filename = f"{output_file}.mp4" 239 filesrc = config.get_dir("video_dir") / filename 240 destfile = os.path.join(dest_dir, filename) 241 shutil.copyfile(filesrc, destfile) 242 elif save_as_gif: 243 filename = f"{output_file}.gif" 244 filesrc = config.get_dir("video_dir") / filename 245 elif save_last_frame: 246 filename = f"{output_file}.png" 247 filesrc = config.get_dir("images_dir") / filename 248 else: 249 raise ValueError("Invalid combination of render flags received.") 250 251 rendered_template = jinja2.Template(TEMPLATE).render( 252 clsname=clsname, 253 clsname_lowercase=clsname.lower(), 254 hide_source=hide_source, 255 filesrc_rel=os.path.relpath(filesrc, setup.confdir), 256 output_file=output_file, 257 save_last_frame=save_last_frame, 258 save_as_gif=save_as_gif, 259 source_block=source_block, 260 ref_block=ref_block, 261 ) 262 state_machine.insert_input( 263 rendered_template.split("\n"), source=document.attributes["source"] 264 ) 265 266 return [] 267 268 269 def setup(app): 270 import manim 271 272 app.add_node(skip_manim_node, html=(visit, depart)) 273 274 setup.app = app 275 setup.config = app.config 276 setup.confdir = app.confdir 277 278 app.add_directive("manim", ManimDirective) 279 280 metadata = {"parallel_read_safe": False, "parallel_write_safe": True} 281 return metadata 282 283 284 TEMPLATE = r""" 285 {% if not hide_source %} 286 .. raw:: html 287 288 <div class="manim-example"> 289 290 {% endif %} 291 292 {% if not (save_as_gif or save_last_frame) %} 293 .. raw:: html 294 295 <video id="{{ clsname_lowercase }}" class="manim-video" controls loop autoplay src="./{{ output_file }}.mp4"></video> 296 {% elif save_as_gif %} 297 .. image:: /{{ filesrc_rel }} 298 :align: center 299 :name: {{ clsname_lowercase }} 300 {% elif save_last_frame %} 301 .. image:: /{{ filesrc_rel }} 302 :align: center 303 :name: {{ clsname_lowercase }} 304 {% endif %} 305 {% if not hide_source %} 306 .. raw:: html 307 308 <h5 class="example-header">{{ clsname }}<a class="headerlink" href="#{{ clsname_lowercase }}">¶</a></h5> 309 310 {{ source_block }} 311 {{ ref_block }} 312 {% endif %} 313 314 .. raw:: html 315 316 </div> 317 """ 318 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/source/manim_directive.py b/docs/source/manim_directive.py --- a/docs/source/manim_directive.py +++ b/docs/source/manim_directive.py @@ -64,7 +64,11 @@ rendered in a reference block after the source code. ref_functions - A list of functions and methods, separated by spaces, + A list of functions, separated by spaces, + that is rendered in a reference block after the source code. + + ref_methods + A list of methods, separated by spaces, that is rendered in a reference block after the source code. """ @@ -133,6 +137,7 @@ "ref_modules": lambda arg: process_name_list(arg, "mod"), "ref_classes": lambda arg: process_name_list(arg, "class"), "ref_functions": lambda arg: process_name_list(arg, "func"), + "ref_methods": lambda arg: process_name_list(arg, "meth"), } final_argument_whitespace = True @@ -163,6 +168,7 @@ self.options.get("ref_modules", []) + self.options.get("ref_classes", []) + self.options.get("ref_functions", []) + + self.options.get("ref_methods", []) ) if ref_content: ref_block = f"""
{"golden_diff": "diff --git a/docs/source/manim_directive.py b/docs/source/manim_directive.py\n--- a/docs/source/manim_directive.py\n+++ b/docs/source/manim_directive.py\n@@ -64,7 +64,11 @@\n rendered in a reference block after the source code.\n \n ref_functions\n- A list of functions and methods, separated by spaces,\n+ A list of functions, separated by spaces,\n+ that is rendered in a reference block after the source code.\n+\n+ ref_methods\n+ A list of methods, separated by spaces,\n that is rendered in a reference block after the source code.\n \n \"\"\"\n@@ -133,6 +137,7 @@\n \"ref_modules\": lambda arg: process_name_list(arg, \"mod\"),\n \"ref_classes\": lambda arg: process_name_list(arg, \"class\"),\n \"ref_functions\": lambda arg: process_name_list(arg, \"func\"),\n+ \"ref_methods\": lambda arg: process_name_list(arg, \"meth\"),\n }\n final_argument_whitespace = True\n \n@@ -163,6 +168,7 @@\n self.options.get(\"ref_modules\", [])\n + self.options.get(\"ref_classes\", [])\n + self.options.get(\"ref_functions\", [])\n+ + self.options.get(\"ref_methods\", [])\n )\n if ref_content:\n ref_block = f\"\"\"\n", "issue": "ManimDirective needs a `ref_methods`\nSome of the current documentation uses `:ref_functions:` in the example documentation which is supposed to link to the proper function; however, it is often the case that the function is actually a method. In these instances, a ref_methods would be appropriate to have, and the usage of this in the examples should be updated accordingly.\r\n\r\nSee https://github.com/ManimCommunity/manim/blob/master/docs/source/manim_directive.py#L133-L135\n", "before_files": [{"content": "r\"\"\"\nA directive for including Manim videos in a Sphinx document\n===========================================================\n\nWhen rendering the HTML documentation, the ``.. manim::`` directive\nimplemented here allows to include rendered videos.\n\nIts basic usage that allows processing **inline content**\nlooks as follows::\n\n .. manim:: MyScene\n\n class MyScene(Scene):\n def construct(self):\n ...\n\nIt is required to pass the name of the class representing the\nscene to be rendered to the directive.\n\nAs a second application, the directive can also be used to\nrender scenes that are defined within doctests, for example::\n\n .. manim:: DirectiveDoctestExample\n :ref_classes: Dot\n\n >>> dot = Dot(color=RED)\n >>> dot.color\n <Color #fc6255>\n >>> class DirectiveDoctestExample(Scene):\n ... def construct(self):\n ... self.play(Create(dot))\n\n\nOptions\n-------\n\nOptions can be passed as follows::\n\n .. 
manim:: <Class name>\n :<option name>: <value>\n\nThe following configuration options are supported by the\ndirective:\n\n hide_source\n If this flag is present without argument,\n the source code is not displayed above the rendered video.\n\n quality : {'low', 'medium', 'high', 'fourk'}\n Controls render quality of the video, in analogy to\n the corresponding command line flags.\n\n save_as_gif\n If this flag is present without argument,\n the scene is rendered as a gif.\n\n save_last_frame\n If this flag is present without argument,\n an image representing the last frame of the scene will\n be rendered and displayed, instead of a video.\n\n ref_classes\n A list of classes, separated by spaces, that is\n rendered in a reference block after the source code.\n\n ref_functions\n A list of functions and methods, separated by spaces,\n that is rendered in a reference block after the source code.\n\n\"\"\"\nfrom docutils import nodes\nfrom docutils.parsers.rst import directives, Directive\nfrom docutils.statemachine import StringList\n\nimport jinja2\nimport os\nfrom os.path import relpath\nfrom pathlib import Path\nfrom typing import List\n\nimport shutil\n\nfrom manim import QUALITIES\n\nclassnamedict = {}\n\n\nclass skip_manim_node(nodes.Admonition, nodes.Element):\n pass\n\n\ndef visit(self, node, name=\"\"):\n self.visit_admonition(node, name)\n\n\ndef depart(self, node):\n self.depart_admonition(node)\n\n\ndef process_name_list(option_input: str, reference_type: str) -> List[str]:\n r\"\"\"Reformats a string of space separated class names\n as a list of strings containing valid Sphinx references.\n\n Tests\n -----\n\n ::\n\n >>> process_name_list(\"Tex TexTemplate\", \"class\")\n [\":class:`~.Tex`\", \":class:`~.TexTemplate`\"]\n >>> process_name_list(\"Scene.play Mobject.rotate\", \"func\")\n [\":func:`~.Scene.play`\", \":func:`~.Mobject.rotate`\"]\n \"\"\"\n return [f\":{reference_type}:`~.{name}`\" for name in option_input.split()]\n\n\nclass ManimDirective(Directive):\n r\"\"\"The manim directive, rendering videos while building\n the documentation.\n\n See the module docstring for documentation.\n \"\"\"\n has_content = True\n required_arguments = 1\n optional_arguments = 0\n option_spec = {\n \"hide_source\": bool,\n \"quality\": lambda arg: directives.choice(\n arg, (\"low\", \"medium\", \"high\", \"fourk\")\n ),\n \"save_as_gif\": bool,\n \"save_last_frame\": bool,\n \"ref_modules\": lambda arg: process_name_list(arg, \"mod\"),\n \"ref_classes\": lambda arg: process_name_list(arg, \"class\"),\n \"ref_functions\": lambda arg: process_name_list(arg, \"func\"),\n }\n final_argument_whitespace = True\n\n def run(self):\n if \"skip-manim\" in self.state.document.settings.env.app.builder.tags.tags:\n node = skip_manim_node()\n self.state.nested_parse(\n StringList(self.content[0]), self.content_offset, node\n )\n return [node]\n\n from manim import config\n\n global classnamedict\n\n clsname = self.arguments[0]\n if clsname not in classnamedict:\n classnamedict[clsname] = 1\n else:\n classnamedict[clsname] += 1\n\n hide_source = \"hide_source\" in self.options\n save_as_gif = \"save_as_gif\" in self.options\n save_last_frame = \"save_last_frame\" in self.options\n assert not (save_as_gif and save_last_frame)\n\n ref_content = (\n self.options.get(\"ref_modules\", [])\n + self.options.get(\"ref_classes\", [])\n + self.options.get(\"ref_functions\", [])\n )\n if ref_content:\n ref_block = f\"\"\"\n.. 
admonition:: Example References\n :class: example-reference\n\n {' '.join(ref_content)}\"\"\"\n else:\n ref_block = \"\"\n\n if \"quality\" in self.options:\n quality = f'{self.options[\"quality\"]}_quality'\n else:\n quality = \"example_quality\"\n frame_rate = QUALITIES[quality][\"frame_rate\"]\n pixel_height = QUALITIES[quality][\"pixel_height\"]\n pixel_width = QUALITIES[quality][\"pixel_width\"]\n qualitydir = f\"{pixel_height}p{frame_rate}\"\n\n state_machine = self.state_machine\n document = state_machine.document\n\n source_file_name = document.attributes[\"source\"]\n source_rel_name = relpath(source_file_name, setup.confdir)\n source_rel_dir = os.path.dirname(source_rel_name)\n while source_rel_dir.startswith(os.path.sep):\n source_rel_dir = source_rel_dir[1:]\n\n dest_dir = os.path.abspath(\n os.path.join(setup.app.builder.outdir, source_rel_dir)\n )\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n\n source_block = [\n \".. code-block:: python\",\n \"\",\n *[\" \" + line for line in self.content],\n ]\n source_block = \"\\n\".join(source_block)\n\n config.media_dir = Path(setup.confdir) / \"media\"\n config.images_dir = \"{media_dir}/images\"\n config.video_dir = \"{media_dir}/videos/{quality}\"\n output_file = f\"{clsname}-{classnamedict[clsname]}\"\n config.assets_dir = Path(\"_static\")\n\n config_code = [\n f'config[\"frame_rate\"] = {frame_rate}',\n f'config[\"pixel_height\"] = {pixel_height}',\n f'config[\"pixel_width\"] = {pixel_width}',\n f'config[\"save_last_frame\"] = {save_last_frame}',\n f'config[\"save_as_gif\"] = {save_as_gif}',\n f'config[\"output_file\"] = r\"{output_file}\"',\n ]\n\n user_code = self.content\n if user_code[0].startswith(\">>> \"): # check whether block comes from doctest\n user_code = [\n line[4:] for line in user_code if line.startswith((\">>> \", \"... \"))\n ]\n\n code = [\n \"from manim import *\",\n *config_code,\n *user_code,\n f\"{clsname}().render()\",\n ]\n exec(\"\\n\".join(code), globals())\n\n # copy video file to output directory\n if not (save_as_gif or save_last_frame):\n filename = f\"{output_file}.mp4\"\n filesrc = config.get_dir(\"video_dir\") / filename\n destfile = os.path.join(dest_dir, filename)\n shutil.copyfile(filesrc, destfile)\n elif save_as_gif:\n filename = f\"{output_file}.gif\"\n filesrc = config.get_dir(\"video_dir\") / filename\n elif save_last_frame:\n filename = f\"{output_file}.png\"\n filesrc = config.get_dir(\"images_dir\") / filename\n else:\n raise ValueError(\"Invalid combination of render flags received.\")\n\n rendered_template = jinja2.Template(TEMPLATE).render(\n clsname=clsname,\n clsname_lowercase=clsname.lower(),\n hide_source=hide_source,\n filesrc_rel=os.path.relpath(filesrc, setup.confdir),\n output_file=output_file,\n save_last_frame=save_last_frame,\n save_as_gif=save_as_gif,\n source_block=source_block,\n ref_block=ref_block,\n )\n state_machine.insert_input(\n rendered_template.split(\"\\n\"), source=document.attributes[\"source\"]\n )\n\n return []\n\n\ndef setup(app):\n import manim\n\n app.add_node(skip_manim_node, html=(visit, depart))\n\n setup.app = app\n setup.config = app.config\n setup.confdir = app.confdir\n\n app.add_directive(\"manim\", ManimDirective)\n\n metadata = {\"parallel_read_safe\": False, \"parallel_write_safe\": True}\n return metadata\n\n\nTEMPLATE = r\"\"\"\n{% if not hide_source %}\n.. raw:: html\n\n <div class=\"manim-example\">\n\n{% endif %}\n\n{% if not (save_as_gif or save_last_frame) %}\n.. 
raw:: html\n\n <video id=\"{{ clsname_lowercase }}\" class=\"manim-video\" controls loop autoplay src=\"./{{ output_file }}.mp4\"></video>\n{% elif save_as_gif %}\n.. image:: /{{ filesrc_rel }}\n :align: center\n :name: {{ clsname_lowercase }}\n{% elif save_last_frame %}\n.. image:: /{{ filesrc_rel }}\n :align: center\n :name: {{ clsname_lowercase }}\n{% endif %}\n{% if not hide_source %}\n.. raw:: html\n\n <h5 class=\"example-header\">{{ clsname }}<a class=\"headerlink\" href=\"#{{ clsname_lowercase }}\">\u00b6</a></h5>\n\n{{ source_block }}\n{{ ref_block }}\n{% endif %}\n\n.. raw:: html\n\n </div>\n\"\"\"\n", "path": "docs/source/manim_directive.py"}], "after_files": [{"content": "r\"\"\"\nA directive for including Manim videos in a Sphinx document\n===========================================================\n\nWhen rendering the HTML documentation, the ``.. manim::`` directive\nimplemented here allows to include rendered videos.\n\nIts basic usage that allows processing **inline content**\nlooks as follows::\n\n .. manim:: MyScene\n\n class MyScene(Scene):\n def construct(self):\n ...\n\nIt is required to pass the name of the class representing the\nscene to be rendered to the directive.\n\nAs a second application, the directive can also be used to\nrender scenes that are defined within doctests, for example::\n\n .. manim:: DirectiveDoctestExample\n :ref_classes: Dot\n\n >>> dot = Dot(color=RED)\n >>> dot.color\n <Color #fc6255>\n >>> class DirectiveDoctestExample(Scene):\n ... def construct(self):\n ... self.play(Create(dot))\n\n\nOptions\n-------\n\nOptions can be passed as follows::\n\n .. manim:: <Class name>\n :<option name>: <value>\n\nThe following configuration options are supported by the\ndirective:\n\n hide_source\n If this flag is present without argument,\n the source code is not displayed above the rendered video.\n\n quality : {'low', 'medium', 'high', 'fourk'}\n Controls render quality of the video, in analogy to\n the corresponding command line flags.\n\n save_as_gif\n If this flag is present without argument,\n the scene is rendered as a gif.\n\n save_last_frame\n If this flag is present without argument,\n an image representing the last frame of the scene will\n be rendered and displayed, instead of a video.\n\n ref_classes\n A list of classes, separated by spaces, that is\n rendered in a reference block after the source code.\n\n ref_functions\n A list of functions, separated by spaces,\n that is rendered in a reference block after the source code.\n\n ref_methods\n A list of methods, separated by spaces,\n that is rendered in a reference block after the source code.\n\n\"\"\"\nfrom docutils import nodes\nfrom docutils.parsers.rst import directives, Directive\nfrom docutils.statemachine import StringList\n\nimport jinja2\nimport os\nfrom os.path import relpath\nfrom pathlib import Path\nfrom typing import List\n\nimport shutil\n\nfrom manim import QUALITIES\n\nclassnamedict = {}\n\n\nclass skip_manim_node(nodes.Admonition, nodes.Element):\n pass\n\n\ndef visit(self, node, name=\"\"):\n self.visit_admonition(node, name)\n\n\ndef depart(self, node):\n self.depart_admonition(node)\n\n\ndef process_name_list(option_input: str, reference_type: str) -> List[str]:\n r\"\"\"Reformats a string of space separated class names\n as a list of strings containing valid Sphinx references.\n\n Tests\n -----\n\n ::\n\n >>> process_name_list(\"Tex TexTemplate\", \"class\")\n [\":class:`~.Tex`\", \":class:`~.TexTemplate`\"]\n >>> process_name_list(\"Scene.play Mobject.rotate\", 
\"func\")\n [\":func:`~.Scene.play`\", \":func:`~.Mobject.rotate`\"]\n \"\"\"\n return [f\":{reference_type}:`~.{name}`\" for name in option_input.split()]\n\n\nclass ManimDirective(Directive):\n r\"\"\"The manim directive, rendering videos while building\n the documentation.\n\n See the module docstring for documentation.\n \"\"\"\n has_content = True\n required_arguments = 1\n optional_arguments = 0\n option_spec = {\n \"hide_source\": bool,\n \"quality\": lambda arg: directives.choice(\n arg, (\"low\", \"medium\", \"high\", \"fourk\")\n ),\n \"save_as_gif\": bool,\n \"save_last_frame\": bool,\n \"ref_modules\": lambda arg: process_name_list(arg, \"mod\"),\n \"ref_classes\": lambda arg: process_name_list(arg, \"class\"),\n \"ref_functions\": lambda arg: process_name_list(arg, \"func\"),\n \"ref_methods\": lambda arg: process_name_list(arg, \"meth\"),\n }\n final_argument_whitespace = True\n\n def run(self):\n if \"skip-manim\" in self.state.document.settings.env.app.builder.tags.tags:\n node = skip_manim_node()\n self.state.nested_parse(\n StringList(self.content[0]), self.content_offset, node\n )\n return [node]\n\n from manim import config\n\n global classnamedict\n\n clsname = self.arguments[0]\n if clsname not in classnamedict:\n classnamedict[clsname] = 1\n else:\n classnamedict[clsname] += 1\n\n hide_source = \"hide_source\" in self.options\n save_as_gif = \"save_as_gif\" in self.options\n save_last_frame = \"save_last_frame\" in self.options\n assert not (save_as_gif and save_last_frame)\n\n ref_content = (\n self.options.get(\"ref_modules\", [])\n + self.options.get(\"ref_classes\", [])\n + self.options.get(\"ref_functions\", [])\n + self.options.get(\"ref_methods\", [])\n )\n if ref_content:\n ref_block = f\"\"\"\n.. admonition:: Example References\n :class: example-reference\n\n {' '.join(ref_content)}\"\"\"\n else:\n ref_block = \"\"\n\n if \"quality\" in self.options:\n quality = f'{self.options[\"quality\"]}_quality'\n else:\n quality = \"example_quality\"\n frame_rate = QUALITIES[quality][\"frame_rate\"]\n pixel_height = QUALITIES[quality][\"pixel_height\"]\n pixel_width = QUALITIES[quality][\"pixel_width\"]\n qualitydir = f\"{pixel_height}p{frame_rate}\"\n\n state_machine = self.state_machine\n document = state_machine.document\n\n source_file_name = document.attributes[\"source\"]\n source_rel_name = relpath(source_file_name, setup.confdir)\n source_rel_dir = os.path.dirname(source_rel_name)\n while source_rel_dir.startswith(os.path.sep):\n source_rel_dir = source_rel_dir[1:]\n\n dest_dir = os.path.abspath(\n os.path.join(setup.app.builder.outdir, source_rel_dir)\n )\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n\n source_block = [\n \".. 
code-block:: python\",\n \"\",\n *[\" \" + line for line in self.content],\n ]\n source_block = \"\\n\".join(source_block)\n\n config.media_dir = Path(setup.confdir) / \"media\"\n config.images_dir = \"{media_dir}/images\"\n config.video_dir = \"{media_dir}/videos/{quality}\"\n output_file = f\"{clsname}-{classnamedict[clsname]}\"\n config.assets_dir = Path(\"_static\")\n\n config_code = [\n f'config[\"frame_rate\"] = {frame_rate}',\n f'config[\"pixel_height\"] = {pixel_height}',\n f'config[\"pixel_width\"] = {pixel_width}',\n f'config[\"save_last_frame\"] = {save_last_frame}',\n f'config[\"save_as_gif\"] = {save_as_gif}',\n f'config[\"output_file\"] = r\"{output_file}\"',\n ]\n\n user_code = self.content\n if user_code[0].startswith(\">>> \"): # check whether block comes from doctest\n user_code = [\n line[4:] for line in user_code if line.startswith((\">>> \", \"... \"))\n ]\n\n code = [\n \"from manim import *\",\n *config_code,\n *user_code,\n f\"{clsname}().render()\",\n ]\n exec(\"\\n\".join(code), globals())\n\n # copy video file to output directory\n if not (save_as_gif or save_last_frame):\n filename = f\"{output_file}.mp4\"\n filesrc = config.get_dir(\"video_dir\") / filename\n destfile = os.path.join(dest_dir, filename)\n shutil.copyfile(filesrc, destfile)\n elif save_as_gif:\n filename = f\"{output_file}.gif\"\n filesrc = config.get_dir(\"video_dir\") / filename\n elif save_last_frame:\n filename = f\"{output_file}.png\"\n filesrc = config.get_dir(\"images_dir\") / filename\n else:\n raise ValueError(\"Invalid combination of render flags received.\")\n\n rendered_template = jinja2.Template(TEMPLATE).render(\n clsname=clsname,\n clsname_lowercase=clsname.lower(),\n hide_source=hide_source,\n filesrc_rel=os.path.relpath(filesrc, setup.confdir),\n output_file=output_file,\n save_last_frame=save_last_frame,\n save_as_gif=save_as_gif,\n source_block=source_block,\n ref_block=ref_block,\n )\n state_machine.insert_input(\n rendered_template.split(\"\\n\"), source=document.attributes[\"source\"]\n )\n\n return []\n\n\ndef setup(app):\n import manim\n\n app.add_node(skip_manim_node, html=(visit, depart))\n\n setup.app = app\n setup.config = app.config\n setup.confdir = app.confdir\n\n app.add_directive(\"manim\", ManimDirective)\n\n metadata = {\"parallel_read_safe\": False, \"parallel_write_safe\": True}\n return metadata\n\n\nTEMPLATE = r\"\"\"\n{% if not hide_source %}\n.. raw:: html\n\n <div class=\"manim-example\">\n\n{% endif %}\n\n{% if not (save_as_gif or save_last_frame) %}\n.. raw:: html\n\n <video id=\"{{ clsname_lowercase }}\" class=\"manim-video\" controls loop autoplay src=\"./{{ output_file }}.mp4\"></video>\n{% elif save_as_gif %}\n.. image:: /{{ filesrc_rel }}\n :align: center\n :name: {{ clsname_lowercase }}\n{% elif save_last_frame %}\n.. image:: /{{ filesrc_rel }}\n :align: center\n :name: {{ clsname_lowercase }}\n{% endif %}\n{% if not hide_source %}\n.. raw:: html\n\n <h5 class=\"example-header\">{{ clsname }}<a class=\"headerlink\" href=\"#{{ clsname_lowercase }}\">\u00b6</a></h5>\n\n{{ source_block }}\n{{ ref_block }}\n{% endif %}\n\n.. raw:: html\n\n </div>\n\"\"\"\n", "path": "docs/source/manim_directive.py"}]}
3,395
294
gh_patches_debug_19972
rasdani/github-patches
git_diff
CTFd__CTFd-1059
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Feature Request : Search users by IP address It's really hard to search an IP address on the UI now. It'd be nice to have a search feature in `admin/users` search a user by an IP address. CTFd already logs IP addresses of each login so it'll be very useful when wanting to ban certain users for abusing rules. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `CTFd/admin/users.py` Content: ``` 1 from flask import render_template, request 2 from CTFd.utils import get_config 3 from CTFd.utils.decorators import admins_only 4 from CTFd.utils.modes import TEAMS_MODE 5 from CTFd.models import db, Users, Challenges, Tracking 6 from CTFd.admin import admin 7 from CTFd.utils.helpers import get_errors 8 9 from sqlalchemy.sql import not_ 10 11 12 @admin.route("/admin/users") 13 @admins_only 14 def users_listing(): 15 page = abs(request.args.get("page", 1, type=int)) 16 q = request.args.get("q") 17 if q: 18 field = request.args.get("field") 19 users = [] 20 errors = get_errors() 21 if field == "id": 22 if q.isnumeric(): 23 users = Users.query.filter(Users.id == q).order_by(Users.id.asc()).all() 24 else: 25 users = [] 26 errors.append("Your ID search term is not numeric") 27 elif field == "name": 28 users = ( 29 Users.query.filter(Users.name.like("%{}%".format(q))) 30 .order_by(Users.id.asc()) 31 .all() 32 ) 33 elif field == "email": 34 users = ( 35 Users.query.filter(Users.email.like("%{}%".format(q))) 36 .order_by(Users.id.asc()) 37 .all() 38 ) 39 elif field == "affiliation": 40 users = ( 41 Users.query.filter(Users.affiliation.like("%{}%".format(q))) 42 .order_by(Users.id.asc()) 43 .all() 44 ) 45 return render_template( 46 "admin/users/users.html", 47 users=users, 48 pages=None, 49 curr_page=None, 50 q=q, 51 field=field, 52 ) 53 54 page = abs(int(page)) 55 results_per_page = 50 56 page_start = results_per_page * (page - 1) 57 page_end = results_per_page * (page - 1) + results_per_page 58 59 users = Users.query.order_by(Users.id.asc()).slice(page_start, page_end).all() 60 count = db.session.query(db.func.count(Users.id)).first()[0] 61 pages = int(count / results_per_page) + (count % results_per_page > 0) 62 63 return render_template( 64 "admin/users/users.html", users=users, pages=pages, curr_page=page 65 ) 66 67 68 @admin.route("/admin/users/new") 69 @admins_only 70 def users_new(): 71 return render_template("admin/users/new.html") 72 73 74 @admin.route("/admin/users/<int:user_id>") 75 @admins_only 76 def users_detail(user_id): 77 # Get user object 78 user = Users.query.filter_by(id=user_id).first_or_404() 79 80 # Get the user's solves 81 solves = user.get_solves(admin=True) 82 83 # Get challenges that the user is missing 84 if get_config("user_mode") == TEAMS_MODE: 85 if user.team: 86 all_solves = user.team.get_solves(admin=True) 87 else: 88 all_solves = user.get_solves(admin=True) 89 else: 90 all_solves = user.get_solves(admin=True) 91 92 solve_ids = [s.challenge_id for s in all_solves] 93 missing = Challenges.query.filter(not_(Challenges.id.in_(solve_ids))).all() 94 95 # Get IP addresses that the User has used 96 addrs = ( 97 Tracking.query.filter_by(user_id=user_id).order_by(Tracking.date.desc()).all() 98 ) 99 100 # Get Fails 101 fails = user.get_fails(admin=True) 102 103 # Get Awards 104 awards = user.get_awards(admin=True) 105 106 # Get user properties 107 score = user.get_score(admin=True) 
108 place = user.get_place(admin=True) 109 110 return render_template( 111 "admin/users/user.html", 112 solves=solves, 113 user=user, 114 addrs=addrs, 115 score=score, 116 missing=missing, 117 place=place, 118 fails=fails, 119 awards=awards, 120 ) 121 ``` Path: `CTFd/admin/teams.py` Content: ``` 1 from flask import render_template, request 2 from CTFd.utils.decorators import admins_only 3 from CTFd.models import db, Teams, Challenges, Tracking 4 from CTFd.admin import admin 5 from CTFd.utils.helpers import get_errors 6 7 from sqlalchemy.sql import not_ 8 9 10 @admin.route("/admin/teams") 11 @admins_only 12 def teams_listing(): 13 page = abs(request.args.get("page", 1, type=int)) 14 q = request.args.get("q") 15 if q: 16 field = request.args.get("field") 17 teams = [] 18 errors = get_errors() 19 if field == "id": 20 if q.isnumeric(): 21 teams = Teams.query.filter(Teams.id == q).order_by(Teams.id.asc()).all() 22 else: 23 teams = [] 24 errors.append("Your ID search term is not numeric") 25 elif field == "name": 26 teams = ( 27 Teams.query.filter(Teams.name.like("%{}%".format(q))) 28 .order_by(Teams.id.asc()) 29 .all() 30 ) 31 elif field == "email": 32 teams = ( 33 Teams.query.filter(Teams.email.like("%{}%".format(q))) 34 .order_by(Teams.id.asc()) 35 .all() 36 ) 37 elif field == "affiliation": 38 teams = ( 39 Teams.query.filter(Teams.affiliation.like("%{}%".format(q))) 40 .order_by(Teams.id.asc()) 41 .all() 42 ) 43 return render_template( 44 "admin/teams/teams.html", 45 teams=teams, 46 pages=None, 47 curr_page=None, 48 q=q, 49 field=field, 50 ) 51 52 page = abs(int(page)) 53 results_per_page = 50 54 page_start = results_per_page * (page - 1) 55 page_end = results_per_page * (page - 1) + results_per_page 56 57 teams = Teams.query.order_by(Teams.id.asc()).slice(page_start, page_end).all() 58 count = db.session.query(db.func.count(Teams.id)).first()[0] 59 pages = int(count / results_per_page) + (count % results_per_page > 0) 60 return render_template( 61 "admin/teams/teams.html", teams=teams, pages=pages, curr_page=page 62 ) 63 64 65 @admin.route("/admin/teams/new") 66 @admins_only 67 def teams_new(): 68 return render_template("admin/teams/new.html") 69 70 71 @admin.route("/admin/teams/<int:team_id>") 72 @admins_only 73 def teams_detail(team_id): 74 team = Teams.query.filter_by(id=team_id).first_or_404() 75 76 # Get members 77 members = team.members 78 member_ids = [member.id for member in members] 79 80 # Get Solves for all members 81 solves = team.get_solves(admin=True) 82 fails = team.get_fails(admin=True) 83 awards = team.get_awards(admin=True) 84 score = team.get_score(admin=True) 85 place = team.get_place(admin=True) 86 87 # Get missing Challenges for all members 88 # TODO: How do you mark a missing challenge for a team? 89 solve_ids = [s.challenge_id for s in solves] 90 missing = Challenges.query.filter(not_(Challenges.id.in_(solve_ids))).all() 91 92 # Get addresses for all members 93 addrs = ( 94 Tracking.query.filter(Tracking.user_id.in_(member_ids)) 95 .order_by(Tracking.date.desc()) 96 .all() 97 ) 98 99 return render_template( 100 "admin/teams/team.html", 101 team=team, 102 members=members, 103 score=score, 104 place=place, 105 solves=solves, 106 fails=fails, 107 missing=missing, 108 awards=awards, 109 addrs=addrs, 110 ) 111 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/CTFd/admin/teams.py b/CTFd/admin/teams.py --- a/CTFd/admin/teams.py +++ b/CTFd/admin/teams.py @@ -43,7 +43,7 @@ return render_template( "admin/teams/teams.html", teams=teams, - pages=None, + pages=0, curr_page=None, q=q, field=field, diff --git a/CTFd/admin/users.py b/CTFd/admin/users.py --- a/CTFd/admin/users.py +++ b/CTFd/admin/users.py @@ -42,10 +42,18 @@ .order_by(Users.id.asc()) .all() ) + elif field == "ip": + users = ( + Users.query.join(Tracking, Users.id == Tracking.user_id) + .filter(Tracking.ip.like("%{}%".format(q))) + .order_by(Users.id.asc()) + .all() + ) + return render_template( "admin/users/users.html", users=users, - pages=None, + pages=0, curr_page=None, q=q, field=field,
{"golden_diff": "diff --git a/CTFd/admin/teams.py b/CTFd/admin/teams.py\n--- a/CTFd/admin/teams.py\n+++ b/CTFd/admin/teams.py\n@@ -43,7 +43,7 @@\n return render_template(\n \"admin/teams/teams.html\",\n teams=teams,\n- pages=None,\n+ pages=0,\n curr_page=None,\n q=q,\n field=field,\ndiff --git a/CTFd/admin/users.py b/CTFd/admin/users.py\n--- a/CTFd/admin/users.py\n+++ b/CTFd/admin/users.py\n@@ -42,10 +42,18 @@\n .order_by(Users.id.asc())\n .all()\n )\n+ elif field == \"ip\":\n+ users = (\n+ Users.query.join(Tracking, Users.id == Tracking.user_id)\n+ .filter(Tracking.ip.like(\"%{}%\".format(q)))\n+ .order_by(Users.id.asc())\n+ .all()\n+ )\n+\n return render_template(\n \"admin/users/users.html\",\n users=users,\n- pages=None,\n+ pages=0,\n curr_page=None,\n q=q,\n field=field,\n", "issue": "Feature Request : Search users by IP address\nIt's really hard to search an IP address on the UI now.\r\n\r\nIt'd be nice to have a search feature in `admin/users` search a user by an IP address. CTFd already logs IP addresses of each login so it'll be very useful when wanting to ban certain users for abusing rules.\r\n\n", "before_files": [{"content": "from flask import render_template, request\nfrom CTFd.utils import get_config\nfrom CTFd.utils.decorators import admins_only\nfrom CTFd.utils.modes import TEAMS_MODE\nfrom CTFd.models import db, Users, Challenges, Tracking\nfrom CTFd.admin import admin\nfrom CTFd.utils.helpers import get_errors\n\nfrom sqlalchemy.sql import not_\n\n\[email protected](\"/admin/users\")\n@admins_only\ndef users_listing():\n page = abs(request.args.get(\"page\", 1, type=int))\n q = request.args.get(\"q\")\n if q:\n field = request.args.get(\"field\")\n users = []\n errors = get_errors()\n if field == \"id\":\n if q.isnumeric():\n users = Users.query.filter(Users.id == q).order_by(Users.id.asc()).all()\n else:\n users = []\n errors.append(\"Your ID search term is not numeric\")\n elif field == \"name\":\n users = (\n Users.query.filter(Users.name.like(\"%{}%\".format(q)))\n .order_by(Users.id.asc())\n .all()\n )\n elif field == \"email\":\n users = (\n Users.query.filter(Users.email.like(\"%{}%\".format(q)))\n .order_by(Users.id.asc())\n .all()\n )\n elif field == \"affiliation\":\n users = (\n Users.query.filter(Users.affiliation.like(\"%{}%\".format(q)))\n .order_by(Users.id.asc())\n .all()\n )\n return render_template(\n \"admin/users/users.html\",\n users=users,\n pages=None,\n curr_page=None,\n q=q,\n field=field,\n )\n\n page = abs(int(page))\n results_per_page = 50\n page_start = results_per_page * (page - 1)\n page_end = results_per_page * (page - 1) + results_per_page\n\n users = Users.query.order_by(Users.id.asc()).slice(page_start, page_end).all()\n count = db.session.query(db.func.count(Users.id)).first()[0]\n pages = int(count / results_per_page) + (count % results_per_page > 0)\n\n return render_template(\n \"admin/users/users.html\", users=users, pages=pages, curr_page=page\n )\n\n\[email protected](\"/admin/users/new\")\n@admins_only\ndef users_new():\n return render_template(\"admin/users/new.html\")\n\n\[email protected](\"/admin/users/<int:user_id>\")\n@admins_only\ndef users_detail(user_id):\n # Get user object\n user = Users.query.filter_by(id=user_id).first_or_404()\n\n # Get the user's solves\n solves = user.get_solves(admin=True)\n\n # Get challenges that the user is missing\n if get_config(\"user_mode\") == TEAMS_MODE:\n if user.team:\n all_solves = user.team.get_solves(admin=True)\n else:\n all_solves = user.get_solves(admin=True)\n else:\n 
all_solves = user.get_solves(admin=True)\n\n solve_ids = [s.challenge_id for s in all_solves]\n missing = Challenges.query.filter(not_(Challenges.id.in_(solve_ids))).all()\n\n # Get IP addresses that the User has used\n addrs = (\n Tracking.query.filter_by(user_id=user_id).order_by(Tracking.date.desc()).all()\n )\n\n # Get Fails\n fails = user.get_fails(admin=True)\n\n # Get Awards\n awards = user.get_awards(admin=True)\n\n # Get user properties\n score = user.get_score(admin=True)\n place = user.get_place(admin=True)\n\n return render_template(\n \"admin/users/user.html\",\n solves=solves,\n user=user,\n addrs=addrs,\n score=score,\n missing=missing,\n place=place,\n fails=fails,\n awards=awards,\n )\n", "path": "CTFd/admin/users.py"}, {"content": "from flask import render_template, request\nfrom CTFd.utils.decorators import admins_only\nfrom CTFd.models import db, Teams, Challenges, Tracking\nfrom CTFd.admin import admin\nfrom CTFd.utils.helpers import get_errors\n\nfrom sqlalchemy.sql import not_\n\n\[email protected](\"/admin/teams\")\n@admins_only\ndef teams_listing():\n page = abs(request.args.get(\"page\", 1, type=int))\n q = request.args.get(\"q\")\n if q:\n field = request.args.get(\"field\")\n teams = []\n errors = get_errors()\n if field == \"id\":\n if q.isnumeric():\n teams = Teams.query.filter(Teams.id == q).order_by(Teams.id.asc()).all()\n else:\n teams = []\n errors.append(\"Your ID search term is not numeric\")\n elif field == \"name\":\n teams = (\n Teams.query.filter(Teams.name.like(\"%{}%\".format(q)))\n .order_by(Teams.id.asc())\n .all()\n )\n elif field == \"email\":\n teams = (\n Teams.query.filter(Teams.email.like(\"%{}%\".format(q)))\n .order_by(Teams.id.asc())\n .all()\n )\n elif field == \"affiliation\":\n teams = (\n Teams.query.filter(Teams.affiliation.like(\"%{}%\".format(q)))\n .order_by(Teams.id.asc())\n .all()\n )\n return render_template(\n \"admin/teams/teams.html\",\n teams=teams,\n pages=None,\n curr_page=None,\n q=q,\n field=field,\n )\n\n page = abs(int(page))\n results_per_page = 50\n page_start = results_per_page * (page - 1)\n page_end = results_per_page * (page - 1) + results_per_page\n\n teams = Teams.query.order_by(Teams.id.asc()).slice(page_start, page_end).all()\n count = db.session.query(db.func.count(Teams.id)).first()[0]\n pages = int(count / results_per_page) + (count % results_per_page > 0)\n return render_template(\n \"admin/teams/teams.html\", teams=teams, pages=pages, curr_page=page\n )\n\n\[email protected](\"/admin/teams/new\")\n@admins_only\ndef teams_new():\n return render_template(\"admin/teams/new.html\")\n\n\[email protected](\"/admin/teams/<int:team_id>\")\n@admins_only\ndef teams_detail(team_id):\n team = Teams.query.filter_by(id=team_id).first_or_404()\n\n # Get members\n members = team.members\n member_ids = [member.id for member in members]\n\n # Get Solves for all members\n solves = team.get_solves(admin=True)\n fails = team.get_fails(admin=True)\n awards = team.get_awards(admin=True)\n score = team.get_score(admin=True)\n place = team.get_place(admin=True)\n\n # Get missing Challenges for all members\n # TODO: How do you mark a missing challenge for a team?\n solve_ids = [s.challenge_id for s in solves]\n missing = Challenges.query.filter(not_(Challenges.id.in_(solve_ids))).all()\n\n # Get addresses for all members\n addrs = (\n Tracking.query.filter(Tracking.user_id.in_(member_ids))\n .order_by(Tracking.date.desc())\n .all()\n )\n\n return render_template(\n \"admin/teams/team.html\",\n team=team,\n members=members,\n 
score=score,\n place=place,\n solves=solves,\n fails=fails,\n missing=missing,\n awards=awards,\n addrs=addrs,\n )\n", "path": "CTFd/admin/teams.py"}], "after_files": [{"content": "from flask import render_template, request\nfrom CTFd.utils import get_config\nfrom CTFd.utils.decorators import admins_only\nfrom CTFd.utils.modes import TEAMS_MODE\nfrom CTFd.models import db, Users, Challenges, Tracking\nfrom CTFd.admin import admin\nfrom CTFd.utils.helpers import get_errors\n\nfrom sqlalchemy.sql import not_\n\n\[email protected](\"/admin/users\")\n@admins_only\ndef users_listing():\n page = abs(request.args.get(\"page\", 1, type=int))\n q = request.args.get(\"q\")\n if q:\n field = request.args.get(\"field\")\n users = []\n errors = get_errors()\n if field == \"id\":\n if q.isnumeric():\n users = Users.query.filter(Users.id == q).order_by(Users.id.asc()).all()\n else:\n users = []\n errors.append(\"Your ID search term is not numeric\")\n elif field == \"name\":\n users = (\n Users.query.filter(Users.name.like(\"%{}%\".format(q)))\n .order_by(Users.id.asc())\n .all()\n )\n elif field == \"email\":\n users = (\n Users.query.filter(Users.email.like(\"%{}%\".format(q)))\n .order_by(Users.id.asc())\n .all()\n )\n elif field == \"affiliation\":\n users = (\n Users.query.filter(Users.affiliation.like(\"%{}%\".format(q)))\n .order_by(Users.id.asc())\n .all()\n )\n elif field == \"ip\":\n users = (\n Users.query.join(Tracking, Users.id == Tracking.user_id)\n .filter(Tracking.ip.like(\"%{}%\".format(q)))\n .order_by(Users.id.asc())\n .all()\n )\n\n return render_template(\n \"admin/users/users.html\",\n users=users,\n pages=0,\n curr_page=None,\n q=q,\n field=field,\n )\n\n page = abs(int(page))\n results_per_page = 50\n page_start = results_per_page * (page - 1)\n page_end = results_per_page * (page - 1) + results_per_page\n\n users = Users.query.order_by(Users.id.asc()).slice(page_start, page_end).all()\n count = db.session.query(db.func.count(Users.id)).first()[0]\n pages = int(count / results_per_page) + (count % results_per_page > 0)\n\n return render_template(\n \"admin/users/users.html\", users=users, pages=pages, curr_page=page\n )\n\n\[email protected](\"/admin/users/new\")\n@admins_only\ndef users_new():\n return render_template(\"admin/users/new.html\")\n\n\[email protected](\"/admin/users/<int:user_id>\")\n@admins_only\ndef users_detail(user_id):\n # Get user object\n user = Users.query.filter_by(id=user_id).first_or_404()\n\n # Get the user's solves\n solves = user.get_solves(admin=True)\n\n # Get challenges that the user is missing\n if get_config(\"user_mode\") == TEAMS_MODE:\n if user.team:\n all_solves = user.team.get_solves(admin=True)\n else:\n all_solves = user.get_solves(admin=True)\n else:\n all_solves = user.get_solves(admin=True)\n\n solve_ids = [s.challenge_id for s in all_solves]\n missing = Challenges.query.filter(not_(Challenges.id.in_(solve_ids))).all()\n\n # Get IP addresses that the User has used\n addrs = (\n Tracking.query.filter_by(user_id=user_id).order_by(Tracking.date.desc()).all()\n )\n\n # Get Fails\n fails = user.get_fails(admin=True)\n\n # Get Awards\n awards = user.get_awards(admin=True)\n\n # Get user properties\n score = user.get_score(admin=True)\n place = user.get_place(admin=True)\n\n return render_template(\n \"admin/users/user.html\",\n solves=solves,\n user=user,\n addrs=addrs,\n score=score,\n missing=missing,\n place=place,\n fails=fails,\n awards=awards,\n )\n", "path": "CTFd/admin/users.py"}, {"content": "from flask import render_template, 
request\nfrom CTFd.utils.decorators import admins_only\nfrom CTFd.models import db, Teams, Challenges, Tracking\nfrom CTFd.admin import admin\nfrom CTFd.utils.helpers import get_errors\n\nfrom sqlalchemy.sql import not_\n\n\[email protected](\"/admin/teams\")\n@admins_only\ndef teams_listing():\n page = abs(request.args.get(\"page\", 1, type=int))\n q = request.args.get(\"q\")\n if q:\n field = request.args.get(\"field\")\n teams = []\n errors = get_errors()\n if field == \"id\":\n if q.isnumeric():\n teams = Teams.query.filter(Teams.id == q).order_by(Teams.id.asc()).all()\n else:\n teams = []\n errors.append(\"Your ID search term is not numeric\")\n elif field == \"name\":\n teams = (\n Teams.query.filter(Teams.name.like(\"%{}%\".format(q)))\n .order_by(Teams.id.asc())\n .all()\n )\n elif field == \"email\":\n teams = (\n Teams.query.filter(Teams.email.like(\"%{}%\".format(q)))\n .order_by(Teams.id.asc())\n .all()\n )\n elif field == \"affiliation\":\n teams = (\n Teams.query.filter(Teams.affiliation.like(\"%{}%\".format(q)))\n .order_by(Teams.id.asc())\n .all()\n )\n return render_template(\n \"admin/teams/teams.html\",\n teams=teams,\n pages=0,\n curr_page=None,\n q=q,\n field=field,\n )\n\n page = abs(int(page))\n results_per_page = 50\n page_start = results_per_page * (page - 1)\n page_end = results_per_page * (page - 1) + results_per_page\n\n teams = Teams.query.order_by(Teams.id.asc()).slice(page_start, page_end).all()\n count = db.session.query(db.func.count(Teams.id)).first()[0]\n pages = int(count / results_per_page) + (count % results_per_page > 0)\n return render_template(\n \"admin/teams/teams.html\", teams=teams, pages=pages, curr_page=page\n )\n\n\[email protected](\"/admin/teams/new\")\n@admins_only\ndef teams_new():\n return render_template(\"admin/teams/new.html\")\n\n\[email protected](\"/admin/teams/<int:team_id>\")\n@admins_only\ndef teams_detail(team_id):\n team = Teams.query.filter_by(id=team_id).first_or_404()\n\n # Get members\n members = team.members\n member_ids = [member.id for member in members]\n\n # Get Solves for all members\n solves = team.get_solves(admin=True)\n fails = team.get_fails(admin=True)\n awards = team.get_awards(admin=True)\n score = team.get_score(admin=True)\n place = team.get_place(admin=True)\n\n # Get missing Challenges for all members\n # TODO: How do you mark a missing challenge for a team?\n solve_ids = [s.challenge_id for s in solves]\n missing = Challenges.query.filter(not_(Challenges.id.in_(solve_ids))).all()\n\n # Get addresses for all members\n addrs = (\n Tracking.query.filter(Tracking.user_id.in_(member_ids))\n .order_by(Tracking.date.desc())\n .all()\n )\n\n return render_template(\n \"admin/teams/team.html\",\n team=team,\n members=members,\n score=score,\n place=place,\n solves=solves,\n fails=fails,\n missing=missing,\n awards=awards,\n addrs=addrs,\n )\n", "path": "CTFd/admin/teams.py"}]}
2,487
265
gh_patches_debug_1305
rasdani/github-patches
git_diff
oppia__oppia-7459
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Upgrade @typescript-eslint/eslint-plugin `eslint-utils` is currently out of date, https://github.com/oppia/oppia/pull/7451 provides a temporary fix, but we need to upgrade the main package that requires `eslint-utils` to ensure that we have a long term fix. When fixing this, please make sure that the lint tests run successfully. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `core/domain/feedback_jobs_one_off.py` Content: ``` 1 # Copyright 2019 The Oppia Authors. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS-IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """One-off jobs for feedback models.""" 16 17 from core import jobs 18 from core.platform import models 19 20 (feedback_models,) = models.Registry.import_models([models.NAMES.feedback]) 21 22 23 class GeneralFeedbackThreadUserOneOffJob(jobs.BaseMapReduceOneOffJobManager): 24 """One-off job for setting user_id and thread_id for all 25 GeneralFeedbackThreadUserModels. 26 """ 27 @classmethod 28 def entity_classes_to_map_over(cls): 29 """Return a list of datastore class references to map over.""" 30 return [feedback_models.GeneralFeedbackThreadUserModel] 31 32 @staticmethod 33 def map(model_instance): 34 """Implements the map function for this job.""" 35 user_id, thread_id = model_instance.id.split('.', 1) 36 if model_instance.user_id is None: 37 model_instance.user_id = user_id 38 if model_instance.thread_id is None: 39 model_instance.thread_id = thread_id 40 model_instance.put(update_last_updated_time=False) 41 yield ('SUCCESS', model_instance.id) 42 43 @staticmethod 44 def reduce(key, values): 45 yield (key, len(values)) 46 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/core/domain/feedback_jobs_one_off.py b/core/domain/feedback_jobs_one_off.py --- a/core/domain/feedback_jobs_one_off.py +++ b/core/domain/feedback_jobs_one_off.py @@ -13,6 +13,7 @@ # limitations under the License. """One-off jobs for feedback models.""" +from __future__ import absolute_import # pylint: disable=import-only-modules from core import jobs from core.platform import models
{"golden_diff": "diff --git a/core/domain/feedback_jobs_one_off.py b/core/domain/feedback_jobs_one_off.py\n--- a/core/domain/feedback_jobs_one_off.py\n+++ b/core/domain/feedback_jobs_one_off.py\n@@ -13,6 +13,7 @@\n # limitations under the License.\n \n \"\"\"One-off jobs for feedback models.\"\"\"\n+from __future__ import absolute_import # pylint: disable=import-only-modules\n \n from core import jobs\n from core.platform import models\n", "issue": "Upgrade @typescript-eslint/eslint-plugin\n`eslint-utils` is currently out of date, https://github.com/oppia/oppia/pull/7451 provides a temporary fix, but we need to upgrade the main package that requires `eslint-utils` to ensure that we have a long term fix. \r\n\r\nWhen fixing this, please make sure that the lint tests run successfully.\n", "before_files": [{"content": "# Copyright 2019 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"One-off jobs for feedback models.\"\"\"\n\nfrom core import jobs\nfrom core.platform import models\n\n(feedback_models,) = models.Registry.import_models([models.NAMES.feedback])\n\n\nclass GeneralFeedbackThreadUserOneOffJob(jobs.BaseMapReduceOneOffJobManager):\n \"\"\"One-off job for setting user_id and thread_id for all\n GeneralFeedbackThreadUserModels.\n \"\"\"\n @classmethod\n def entity_classes_to_map_over(cls):\n \"\"\"Return a list of datastore class references to map over.\"\"\"\n return [feedback_models.GeneralFeedbackThreadUserModel]\n\n @staticmethod\n def map(model_instance):\n \"\"\"Implements the map function for this job.\"\"\"\n user_id, thread_id = model_instance.id.split('.', 1)\n if model_instance.user_id is None:\n model_instance.user_id = user_id\n if model_instance.thread_id is None:\n model_instance.thread_id = thread_id\n model_instance.put(update_last_updated_time=False)\n yield ('SUCCESS', model_instance.id)\n\n @staticmethod\n def reduce(key, values):\n yield (key, len(values))\n", "path": "core/domain/feedback_jobs_one_off.py"}], "after_files": [{"content": "# Copyright 2019 The Oppia Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"One-off jobs for feedback models.\"\"\"\nfrom __future__ import absolute_import # pylint: disable=import-only-modules\n\nfrom core import jobs\nfrom core.platform import models\n\n(feedback_models,) = models.Registry.import_models([models.NAMES.feedback])\n\n\nclass GeneralFeedbackThreadUserOneOffJob(jobs.BaseMapReduceOneOffJobManager):\n \"\"\"One-off job for setting user_id and thread_id for all\n GeneralFeedbackThreadUserModels.\n \"\"\"\n @classmethod\n def entity_classes_to_map_over(cls):\n \"\"\"Return a list of datastore class references to map over.\"\"\"\n return [feedback_models.GeneralFeedbackThreadUserModel]\n\n @staticmethod\n def map(model_instance):\n \"\"\"Implements the map function for this job.\"\"\"\n user_id, thread_id = model_instance.id.split('.', 1)\n if model_instance.user_id is None:\n model_instance.user_id = user_id\n if model_instance.thread_id is None:\n model_instance.thread_id = thread_id\n model_instance.put(update_last_updated_time=False)\n yield ('SUCCESS', model_instance.id)\n\n @staticmethod\n def reduce(key, values):\n yield (key, len(values))\n", "path": "core/domain/feedback_jobs_one_off.py"}]}
790
99
gh_patches_debug_21117
rasdani/github-patches
git_diff
mlcommons__GaNDLF-614
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- CCA failure when enabled **Describe the bug** The CCA (Largest Connected Component Analysis) function was implemented as a standalone function, which causes it to fail when called in the segmentation pipeline with post-processing enabled. The expected behavior is a likely failure due to this issue. **To Reproduce** Run a segmentation pipeline with CCA enabled for the post-processing. **Expected behavior** The CCA function should be corrected and integrated with the segmentation pipeline to work correctly and tested **GaNDLF Version** <!-- Put the output of the following command: python -c 'import GANDLF as g;print(g.__version__)' --> Version information of the GaNDLF package in the virtual environment. 0.0.16-dev **Desktop (please complete the following information):** - OS: Linux, Ubuntu - Version (including Build information, if any): 22.04 **Additional context** None --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `GANDLF/data/post_process/morphology.py` Content: ``` 1 import torch 2 import torch.nn.functional as F 3 from skimage.measure import label 4 import numpy as np 5 from scipy.ndimage import binary_fill_holes, binary_closing 6 from GANDLF.utils.generic import get_array_from_image_or_tensor 7 8 9 def torch_morphological(input_image, kernel_size=1, mode="dilation"): 10 """ 11 This function enables morphological operations using torch. Adapted from https://github.com/DIVA-DIA/Generating-Synthetic-Handwritten-Historical-Documents/blob/e6a798dc2b374f338804222747c56cb44869af5b/HTR_ctc/utils/auxilary_functions.py#L10. 12 13 Args: 14 input_image (torch.Tensor): The input image. 15 kernel_size (list): The size of the window to take a max over. 16 mode (str): The type of morphological operation to perform. 17 18 Returns: 19 torch.Tensor: The output image after morphological operations. 20 """ 21 22 if len(input_image.shape) == 4: 23 max_pool = F.max_pool2d 24 elif len(input_image.shape) == 5: 25 max_pool = F.max_pool3d 26 else: 27 raise ValueError("Input image has invalid shape for morphological operations.") 28 29 if mode == "dilation": 30 output_image = max_pool( 31 input_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2 32 ) 33 elif mode == "erosion": 34 output_image = -max_pool( 35 -input_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2 36 ) 37 elif mode == "closing": 38 output_image = max_pool( 39 input_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2 40 ) 41 output_image = -max_pool( 42 -output_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2 43 ) 44 elif mode == "opening": 45 output_image = -max_pool( 46 -input_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2 47 ) 48 output_image = max_pool( 49 output_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2 50 ) 51 52 return output_image 53 54 55 def fill_holes(input_image, params=None): 56 """ 57 This function fills holes in masks. 58 59 Args: 60 input_image (torch.Tensor): The input image. 61 params (dict): The parameters dict; unused. 62 63 Returns: 64 torch.Tensor: The output image after morphological operations. 
65 """ 66 input_image_array = get_array_from_image_or_tensor(input_image).astype(int) 67 input_image_array_closed = binary_closing(input_image_array) 68 # Fill the holes in binary objects 69 output_array = binary_fill_holes(input_image_array_closed).astype(int) 70 71 return torch.from_numpy(output_array) 72 73 74 def cca(input_image): 75 """ 76 This function performs connected component analysis on the input image. 77 78 Args: 79 input_image (torch.Tensor): The input image. 80 params (dict): The parameters dict; 81 82 Returns: 83 torch.Tensor: The output image after morphological operations. 84 """ 85 seg = get_array_from_image_or_tensor(input_image) 86 mask = seg != 0 87 88 connectivity = input_image.dim() - 1 89 labels_connected = label(mask, connectivity=connectivity) 90 labels_connected_sizes = [ 91 np.sum(labels_connected == i) for i in np.unique(labels_connected) 92 ] 93 largest_region = np.argmax(labels_connected_sizes[1:]) + 1 94 seg[labels_connected != largest_region] = 0 95 return seg 96 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/GANDLF/data/post_process/morphology.py b/GANDLF/data/post_process/morphology.py --- a/GANDLF/data/post_process/morphology.py +++ b/GANDLF/data/post_process/morphology.py @@ -71,7 +71,7 @@ return torch.from_numpy(output_array) -def cca(input_image): +def cca(input_image, params=None): """ This function performs connected component analysis on the input image. @@ -85,11 +85,15 @@ seg = get_array_from_image_or_tensor(input_image) mask = seg != 0 - connectivity = input_image.dim() - 1 + connectivity = input_image.ndim - 1 labels_connected = label(mask, connectivity=connectivity) labels_connected_sizes = [ np.sum(labels_connected == i) for i in np.unique(labels_connected) ] - largest_region = np.argmax(labels_connected_sizes[1:]) + 1 + largest_region = 0 + if len(labels_connected_sizes) > 1: + largest_region = np.argmax(labels_connected_sizes[1:]) + 1 seg[labels_connected != largest_region] = 0 + return seg +
{"golden_diff": "diff --git a/GANDLF/data/post_process/morphology.py b/GANDLF/data/post_process/morphology.py\n--- a/GANDLF/data/post_process/morphology.py\n+++ b/GANDLF/data/post_process/morphology.py\n@@ -71,7 +71,7 @@\n return torch.from_numpy(output_array)\n \n \n-def cca(input_image):\n+def cca(input_image, params=None):\n \"\"\"\n This function performs connected component analysis on the input image.\n \n@@ -85,11 +85,15 @@\n seg = get_array_from_image_or_tensor(input_image)\n mask = seg != 0\n \n- connectivity = input_image.dim() - 1\n+ connectivity = input_image.ndim - 1\n labels_connected = label(mask, connectivity=connectivity)\n labels_connected_sizes = [\n np.sum(labels_connected == i) for i in np.unique(labels_connected)\n ]\n- largest_region = np.argmax(labels_connected_sizes[1:]) + 1\n+ largest_region = 0\n+ if len(labels_connected_sizes) > 1:\n+ largest_region = np.argmax(labels_connected_sizes[1:]) + 1\n seg[labels_connected != largest_region] = 0\n+\n return seg\n+\n", "issue": "CCA failure when enabled\n**Describe the bug**\r\nThe CCA (Largest Connected Component Analysis) function was implemented as a standalone function, which causes it to fail when called in the segmentation pipeline with post-processing enabled. The expected behavior is a likely failure due to this issue.\r\n\r\n**To Reproduce**\r\nRun a segmentation pipeline with CCA enabled for the post-processing.\r\n\r\n**Expected behavior**\r\nThe CCA function should be corrected and integrated with the segmentation pipeline to work correctly and tested\r\n\r\n**GaNDLF Version**\r\n<!-- Put the output of the following command:\r\npython -c 'import GANDLF as g;print(g.__version__)'\r\n-->\r\nVersion information of the GaNDLF package in the virtual environment. 0.0.16-dev\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Linux, Ubuntu\r\n - Version (including Build information, if any): 22.04\r\n\r\n**Additional context**\r\nNone\r\n\n", "before_files": [{"content": "import torch\nimport torch.nn.functional as F\nfrom skimage.measure import label\nimport numpy as np\nfrom scipy.ndimage import binary_fill_holes, binary_closing\nfrom GANDLF.utils.generic import get_array_from_image_or_tensor\n\n\ndef torch_morphological(input_image, kernel_size=1, mode=\"dilation\"):\n \"\"\"\n This function enables morphological operations using torch. 
Adapted from https://github.com/DIVA-DIA/Generating-Synthetic-Handwritten-Historical-Documents/blob/e6a798dc2b374f338804222747c56cb44869af5b/HTR_ctc/utils/auxilary_functions.py#L10.\n\n Args:\n input_image (torch.Tensor): The input image.\n kernel_size (list): The size of the window to take a max over.\n mode (str): The type of morphological operation to perform.\n\n Returns:\n torch.Tensor: The output image after morphological operations.\n \"\"\"\n\n if len(input_image.shape) == 4:\n max_pool = F.max_pool2d\n elif len(input_image.shape) == 5:\n max_pool = F.max_pool3d\n else:\n raise ValueError(\"Input image has invalid shape for morphological operations.\")\n\n if mode == \"dilation\":\n output_image = max_pool(\n input_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2\n )\n elif mode == \"erosion\":\n output_image = -max_pool(\n -input_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2\n )\n elif mode == \"closing\":\n output_image = max_pool(\n input_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2\n )\n output_image = -max_pool(\n -output_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2\n )\n elif mode == \"opening\":\n output_image = -max_pool(\n -input_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2\n )\n output_image = max_pool(\n output_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2\n )\n\n return output_image\n\n\ndef fill_holes(input_image, params=None):\n \"\"\"\n This function fills holes in masks.\n\n Args:\n input_image (torch.Tensor): The input image.\n params (dict): The parameters dict; unused.\n\n Returns:\n torch.Tensor: The output image after morphological operations.\n \"\"\"\n input_image_array = get_array_from_image_or_tensor(input_image).astype(int)\n input_image_array_closed = binary_closing(input_image_array)\n # Fill the holes in binary objects\n output_array = binary_fill_holes(input_image_array_closed).astype(int)\n\n return torch.from_numpy(output_array)\n\n\ndef cca(input_image):\n \"\"\"\n This function performs connected component analysis on the input image.\n\n Args:\n input_image (torch.Tensor): The input image.\n params (dict): The parameters dict;\n\n Returns:\n torch.Tensor: The output image after morphological operations.\n \"\"\"\n seg = get_array_from_image_or_tensor(input_image)\n mask = seg != 0\n\n connectivity = input_image.dim() - 1\n labels_connected = label(mask, connectivity=connectivity)\n labels_connected_sizes = [\n np.sum(labels_connected == i) for i in np.unique(labels_connected)\n ]\n largest_region = np.argmax(labels_connected_sizes[1:]) + 1\n seg[labels_connected != largest_region] = 0\n return seg\n", "path": "GANDLF/data/post_process/morphology.py"}], "after_files": [{"content": "import torch\nimport torch.nn.functional as F\nfrom skimage.measure import label\nimport numpy as np\nfrom scipy.ndimage import binary_fill_holes, binary_closing\nfrom GANDLF.utils.generic import get_array_from_image_or_tensor\n\n\ndef torch_morphological(input_image, kernel_size=1, mode=\"dilation\"):\n \"\"\"\n This function enables morphological operations using torch. 
Adapted from https://github.com/DIVA-DIA/Generating-Synthetic-Handwritten-Historical-Documents/blob/e6a798dc2b374f338804222747c56cb44869af5b/HTR_ctc/utils/auxilary_functions.py#L10.\n\n Args:\n input_image (torch.Tensor): The input image.\n kernel_size (list): The size of the window to take a max over.\n mode (str): The type of morphological operation to perform.\n\n Returns:\n torch.Tensor: The output image after morphological operations.\n \"\"\"\n\n if len(input_image.shape) == 4:\n max_pool = F.max_pool2d\n elif len(input_image.shape) == 5:\n max_pool = F.max_pool3d\n else:\n raise ValueError(\"Input image has invalid shape for morphological operations.\")\n\n if mode == \"dilation\":\n output_image = max_pool(\n input_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2\n )\n elif mode == \"erosion\":\n output_image = -max_pool(\n -input_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2\n )\n elif mode == \"closing\":\n output_image = max_pool(\n input_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2\n )\n output_image = -max_pool(\n -output_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2\n )\n elif mode == \"opening\":\n output_image = -max_pool(\n -input_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2\n )\n output_image = max_pool(\n output_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2\n )\n\n return output_image\n\n\ndef fill_holes(input_image, params=None):\n \"\"\"\n This function fills holes in masks.\n\n Args:\n input_image (torch.Tensor): The input image.\n params (dict): The parameters dict; unused.\n\n Returns:\n torch.Tensor: The output image after morphological operations.\n \"\"\"\n input_image_array = get_array_from_image_or_tensor(input_image).astype(int)\n input_image_array_closed = binary_closing(input_image_array)\n # Fill the holes in binary objects\n output_array = binary_fill_holes(input_image_array_closed).astype(int)\n\n return torch.from_numpy(output_array)\n\n\ndef cca(input_image, params=None):\n \"\"\"\n This function performs connected component analysis on the input image.\n\n Args:\n input_image (torch.Tensor): The input image.\n params (dict): The parameters dict;\n\n Returns:\n torch.Tensor: The output image after morphological operations.\n \"\"\"\n seg = get_array_from_image_or_tensor(input_image)\n mask = seg != 0\n\n connectivity = input_image.ndim - 1\n labels_connected = label(mask, connectivity=connectivity)\n labels_connected_sizes = [\n np.sum(labels_connected == i) for i in np.unique(labels_connected)\n ]\n largest_region = 0\n if len(labels_connected_sizes) > 1:\n largest_region = np.argmax(labels_connected_sizes[1:]) + 1\n seg[labels_connected != largest_region] = 0\n\n return seg\n\n", "path": "GANDLF/data/post_process/morphology.py"}]}
1,447
270
gh_patches_debug_17211
rasdani/github-patches
git_diff
facebookresearch__ParlAI-2264
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Can not download p6tyohj.tgz when I run this command shown at https://parl.ai/#getstarted: > python examples/eval_model.py -m ir_baseline -t "#moviedd-reddit" -dt valid It can correctly downloads moviedialog.tar.gz, but can't downlad p6tyohj.tgz. as following: ``` [ downloading: http://parl.ai/downloads/moviedialog/moviedialog.tar.gz to path/ParlAI/data/MovieDialog/moviedialog.tar.gz ] Downloading moviedialog.tar.gz: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████| 224M/224M [00:23<00:00, 9.33MB/s] [ Checksum Successful ] unpacking moviedialog.tar.gz [ downloading: http://tinyurl.com/p6tyohj to path/ParlAI/data/MovieDialog/movie_dialog_dataset/task4_reddit/p6tyohj.tgz ] Connection error, retrying. (4 retries left) Connection error, retrying. (3 retries left) Connection error, retrying. (2 retries left) Connection error, retrying. (1 retries left) Connection error, retrying. (0 retries left) Retried too many times, stopped retrying. Traceback (most recent call last): File "path/ParlAI/parlai/core/worlds.py", line 1116, in _create_task_agents task_agents = my_module.create_agents(opt, task) AttributeError: module 'parlai.tasks.moviedialog.agents' has no attribute 'create_agents' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "examples/eval_model.py", line 17, in <module> eval_model(opt, print_parser=parser) File "path/ParlAI/parlai/scripts/eval_model.py", line 117, in eval_model task_report = _eval_single_world(opt, agent, task) File "path/ParlAI/parlai/scripts/eval_model.py", line 69, in _eval_single_world world = create_task(task_opt, agent) # create worlds for tasks File "path/ParlAI/parlai/core/worlds.py", line 1221, in create_task world = create_task_world(opt, user_agents, default_world=default_world) File "path/ParlAI/parlai/core/worlds.py", line 1178, in create_task_world opt, user_agents, default_world=default_world File "path/ParlAI/parlai/core/worlds.py", line 1126, in _get_task_world task_agents = _create_task_agents(opt) File "path/ParlAI/parlai/core/worlds.py", line 1119, in _create_task_agents return create_task_agent_from_taskname(opt) File "path/ParlAI/parlai/core/agents.py", line 837, in create_task_agent_from_taskname task_agents = teacher_class(opt) File "path/ParlAI/parlai/tasks/moviedialog/agents.py", line 88, in __init__ opt['datafile'], opt['cands_datafile'] = _path(self.task, opt) File "path/ParlAI/parlai/tasks/moviedialog/agents.py", line 36, in _path build(opt) File "path/ParlAI/parlai/tasks/moviedialog/build.py", line 43, in build RESOURCES[1].download_file(dpath2) File "path/ParlAI/parlai/core/build_data.py", line 81, in download_file download(self.url, dpath, self.file_name) File "path/ParlAI/parlai/core/build_data.py", line 216, in download raise RuntimeWarning('Connection broken too many times. Stopped retrying.') RuntimeWarning: Connection broken too many times. Stopped retrying. Downloading p6tyohj.tgz: 0.00B [00:31, ?B/s] ``` ` RESOURCES = [ DownloadableFile( 'http://parl.ai/downloads/moviedialog/moviedialog.tar.gz', 'moviedialog.tar.gz', '9b168d30111f13b9cc50e6a15885adae8f86bc0bb7a124d435c43fd0f7e2a9c7', ), DownloadableFile( 'http://tinyurl.com/p6tyohj', # maybe this URL is wrong. 
'p6tyohj.tgz', '6316a6a5c563bc3c133a4a1e611d8ca638c61582f331c500697d9090efd215bb', ), ] ` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `parlai/tasks/moviedialog/build.py` Content: ``` 1 #!/usr/bin/env python3 2 3 # Copyright (c) Facebook, Inc. and its affiliates. 4 # This source code is licensed under the MIT license found in the 5 # LICENSE file in the root directory of this source tree. 6 # Download and build the data if it does not exist. 7 8 import parlai.core.build_data as build_data 9 import os 10 from parlai.core.build_data import DownloadableFile 11 12 RESOURCES = [ 13 DownloadableFile( 14 'http://parl.ai/downloads/moviedialog/moviedialog.tar.gz', 15 'moviedialog.tar.gz', 16 '9b168d30111f13b9cc50e6a15885adae8f86bc0bb7a124d435c43fd0f7e2a9c7', 17 ), 18 DownloadableFile( 19 'http://tinyurl.com/p6tyohj', 20 'p6tyohj.tgz', 21 '6316a6a5c563bc3c133a4a1e611d8ca638c61582f331c500697d9090efd215bb', 22 ), 23 ] 24 25 26 def build(opt): 27 dpath = os.path.join(opt['datapath'], 'MovieDialog') 28 version = '3' 29 30 if not build_data.built(dpath, version_string=version): 31 print('[building data: ' + dpath + ']') 32 if build_data.built(dpath): 33 # An older version exists, so remove these outdated files. 34 build_data.remove_dir(dpath) 35 build_data.make_dir(dpath) 36 37 # Download the data. 38 RESOURCES[0].download_file(dpath) 39 40 dpath2 = os.path.join(dpath, 'movie_dialog_dataset', 'task4_reddit') 41 build_data.make_dir(dpath2) 42 43 RESOURCES[1].download_file(dpath2) 44 45 # remove pipes from task 4 labels, only one label per example 46 for root, _subfolder, files in os.walk(os.path.join(dpath2, 'task4_reddit')): 47 for f in files: 48 if f.endswith('txt'): 49 read_fn = os.path.join(root, f) 50 head = 'task4_reddit_' 51 tail = f[len(head) :] 52 write_fn = os.path.join(root, head + 'pipeless_' + tail) 53 with open(read_fn) as read, open(write_fn, 'w') as write: 54 for line in read: 55 write.write(line.replace('|', ' __PIPE__ ') + '\n') 56 57 # Mark the data as built. 58 build_data.mark_done(dpath, version_string=version) 59 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/parlai/tasks/moviedialog/build.py b/parlai/tasks/moviedialog/build.py --- a/parlai/tasks/moviedialog/build.py +++ b/parlai/tasks/moviedialog/build.py @@ -16,8 +16,8 @@ '9b168d30111f13b9cc50e6a15885adae8f86bc0bb7a124d435c43fd0f7e2a9c7', ), DownloadableFile( - 'http://tinyurl.com/p6tyohj', - 'p6tyohj.tgz', + 'http://cs.nyu.edu/~xiang/task4_reddit.tgz', + 'task4_reddit.tgz', '6316a6a5c563bc3c133a4a1e611d8ca638c61582f331c500697d9090efd215bb', ), ] @@ -25,7 +25,8 @@ def build(opt): dpath = os.path.join(opt['datapath'], 'MovieDialog') - version = '3' + # 2019-12-11 bump version with changed url + version = '3.01' if not build_data.built(dpath, version_string=version): print('[building data: ' + dpath + ']')
{"golden_diff": "diff --git a/parlai/tasks/moviedialog/build.py b/parlai/tasks/moviedialog/build.py\n--- a/parlai/tasks/moviedialog/build.py\n+++ b/parlai/tasks/moviedialog/build.py\n@@ -16,8 +16,8 @@\n '9b168d30111f13b9cc50e6a15885adae8f86bc0bb7a124d435c43fd0f7e2a9c7',\n ),\n DownloadableFile(\n- 'http://tinyurl.com/p6tyohj',\n- 'p6tyohj.tgz',\n+ 'http://cs.nyu.edu/~xiang/task4_reddit.tgz',\n+ 'task4_reddit.tgz',\n '6316a6a5c563bc3c133a4a1e611d8ca638c61582f331c500697d9090efd215bb',\n ),\n ]\n@@ -25,7 +25,8 @@\n \n def build(opt):\n dpath = os.path.join(opt['datapath'], 'MovieDialog')\n- version = '3'\n+ # 2019-12-11 bump version with changed url\n+ version = '3.01'\n \n if not build_data.built(dpath, version_string=version):\n print('[building data: ' + dpath + ']')\n", "issue": "Can not download p6tyohj.tgz\n\r\nwhen I run this command shown at https://parl.ai/#getstarted:\r\n\r\n> python examples/eval_model.py -m ir_baseline -t \"#moviedd-reddit\" -dt valid\r\n\r\nIt can correctly downloads moviedialog.tar.gz, but can't downlad p6tyohj.tgz.\r\nas following: \r\n```\r\n[ downloading: http://parl.ai/downloads/moviedialog/moviedialog.tar.gz to path/ParlAI/data/MovieDialog/moviedialog.tar.gz ]\r\nDownloading moviedialog.tar.gz: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 224M/224M [00:23<00:00, 9.33MB/s]\r\n[ Checksum Successful ]\r\nunpacking moviedialog.tar.gz\r\n[ downloading: http://tinyurl.com/p6tyohj to path/ParlAI/data/MovieDialog/movie_dialog_dataset/task4_reddit/p6tyohj.tgz ]\r\nConnection error, retrying. (4 retries left)\r\nConnection error, retrying. (3 retries left)\r\nConnection error, retrying. (2 retries left)\r\nConnection error, retrying. (1 retries left)\r\nConnection error, retrying. 
(0 retries left)\r\nRetried too many times, stopped retrying.\r\nTraceback (most recent call last):\r\n File \"path/ParlAI/parlai/core/worlds.py\", line 1116, in _create_task_agents\r\n task_agents = my_module.create_agents(opt, task)\r\nAttributeError: module 'parlai.tasks.moviedialog.agents' has no attribute 'create_agents'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"examples/eval_model.py\", line 17, in <module>\r\n eval_model(opt, print_parser=parser)\r\n File \"path/ParlAI/parlai/scripts/eval_model.py\", line 117, in eval_model\r\n task_report = _eval_single_world(opt, agent, task)\r\n File \"path/ParlAI/parlai/scripts/eval_model.py\", line 69, in _eval_single_world\r\n world = create_task(task_opt, agent) # create worlds for tasks\r\n File \"path/ParlAI/parlai/core/worlds.py\", line 1221, in create_task\r\n world = create_task_world(opt, user_agents, default_world=default_world)\r\n File \"path/ParlAI/parlai/core/worlds.py\", line 1178, in create_task_world\r\n opt, user_agents, default_world=default_world\r\n File \"path/ParlAI/parlai/core/worlds.py\", line 1126, in _get_task_world\r\n task_agents = _create_task_agents(opt)\r\n File \"path/ParlAI/parlai/core/worlds.py\", line 1119, in _create_task_agents\r\n return create_task_agent_from_taskname(opt)\r\n File \"path/ParlAI/parlai/core/agents.py\", line 837, in create_task_agent_from_taskname\r\n task_agents = teacher_class(opt)\r\n File \"path/ParlAI/parlai/tasks/moviedialog/agents.py\", line 88, in __init__\r\n opt['datafile'], opt['cands_datafile'] = _path(self.task, opt)\r\n File \"path/ParlAI/parlai/tasks/moviedialog/agents.py\", line 36, in _path\r\n build(opt)\r\n File \"path/ParlAI/parlai/tasks/moviedialog/build.py\", line 43, in build\r\n RESOURCES[1].download_file(dpath2)\r\n File \"path/ParlAI/parlai/core/build_data.py\", line 81, in download_file\r\n download(self.url, dpath, self.file_name)\r\n File \"path/ParlAI/parlai/core/build_data.py\", line 216, in download\r\n raise RuntimeWarning('Connection broken too many times. Stopped retrying.')\r\nRuntimeWarning: Connection broken too many times. Stopped retrying.\r\nDownloading p6tyohj.tgz: 0.00B [00:31, ?B/s]\r\n```\r\n\r\n`\r\nRESOURCES = [\r\n DownloadableFile(\r\n 'http://parl.ai/downloads/moviedialog/moviedialog.tar.gz',\r\n 'moviedialog.tar.gz',\r\n '9b168d30111f13b9cc50e6a15885adae8f86bc0bb7a124d435c43fd0f7e2a9c7',\r\n ),\r\n DownloadableFile(\r\n 'http://tinyurl.com/p6tyohj', # maybe this URL is wrong.\r\n 'p6tyohj.tgz',\r\n '6316a6a5c563bc3c133a4a1e611d8ca638c61582f331c500697d9090efd215bb',\r\n ),\r\n]\r\n`\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. 
and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n# Download and build the data if it does not exist.\n\nimport parlai.core.build_data as build_data\nimport os\nfrom parlai.core.build_data import DownloadableFile\n\nRESOURCES = [\n DownloadableFile(\n 'http://parl.ai/downloads/moviedialog/moviedialog.tar.gz',\n 'moviedialog.tar.gz',\n '9b168d30111f13b9cc50e6a15885adae8f86bc0bb7a124d435c43fd0f7e2a9c7',\n ),\n DownloadableFile(\n 'http://tinyurl.com/p6tyohj',\n 'p6tyohj.tgz',\n '6316a6a5c563bc3c133a4a1e611d8ca638c61582f331c500697d9090efd215bb',\n ),\n]\n\n\ndef build(opt):\n dpath = os.path.join(opt['datapath'], 'MovieDialog')\n version = '3'\n\n if not build_data.built(dpath, version_string=version):\n print('[building data: ' + dpath + ']')\n if build_data.built(dpath):\n # An older version exists, so remove these outdated files.\n build_data.remove_dir(dpath)\n build_data.make_dir(dpath)\n\n # Download the data.\n RESOURCES[0].download_file(dpath)\n\n dpath2 = os.path.join(dpath, 'movie_dialog_dataset', 'task4_reddit')\n build_data.make_dir(dpath2)\n\n RESOURCES[1].download_file(dpath2)\n\n # remove pipes from task 4 labels, only one label per example\n for root, _subfolder, files in os.walk(os.path.join(dpath2, 'task4_reddit')):\n for f in files:\n if f.endswith('txt'):\n read_fn = os.path.join(root, f)\n head = 'task4_reddit_'\n tail = f[len(head) :]\n write_fn = os.path.join(root, head + 'pipeless_' + tail)\n with open(read_fn) as read, open(write_fn, 'w') as write:\n for line in read:\n write.write(line.replace('|', ' __PIPE__ ') + '\\n')\n\n # Mark the data as built.\n build_data.mark_done(dpath, version_string=version)\n", "path": "parlai/tasks/moviedialog/build.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. 
and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n# Download and build the data if it does not exist.\n\nimport parlai.core.build_data as build_data\nimport os\nfrom parlai.core.build_data import DownloadableFile\n\nRESOURCES = [\n DownloadableFile(\n 'http://parl.ai/downloads/moviedialog/moviedialog.tar.gz',\n 'moviedialog.tar.gz',\n '9b168d30111f13b9cc50e6a15885adae8f86bc0bb7a124d435c43fd0f7e2a9c7',\n ),\n DownloadableFile(\n 'http://cs.nyu.edu/~xiang/task4_reddit.tgz',\n 'task4_reddit.tgz',\n '6316a6a5c563bc3c133a4a1e611d8ca638c61582f331c500697d9090efd215bb',\n ),\n]\n\n\ndef build(opt):\n dpath = os.path.join(opt['datapath'], 'MovieDialog')\n # 2019-12-11 bump version with changed url\n version = '3.01'\n\n if not build_data.built(dpath, version_string=version):\n print('[building data: ' + dpath + ']')\n if build_data.built(dpath):\n # An older version exists, so remove these outdated files.\n build_data.remove_dir(dpath)\n build_data.make_dir(dpath)\n\n # Download the data.\n RESOURCES[0].download_file(dpath)\n\n dpath2 = os.path.join(dpath, 'movie_dialog_dataset', 'task4_reddit')\n build_data.make_dir(dpath2)\n\n RESOURCES[1].download_file(dpath2)\n\n # remove pipes from task 4 labels, only one label per example\n for root, _subfolder, files in os.walk(os.path.join(dpath2, 'task4_reddit')):\n for f in files:\n if f.endswith('txt'):\n read_fn = os.path.join(root, f)\n head = 'task4_reddit_'\n tail = f[len(head) :]\n write_fn = os.path.join(root, head + 'pipeless_' + tail)\n with open(read_fn) as read, open(write_fn, 'w') as write:\n for line in read:\n write.write(line.replace('|', ' __PIPE__ ') + '\\n')\n\n # Mark the data as built.\n build_data.mark_done(dpath, version_string=version)\n", "path": "parlai/tasks/moviedialog/build.py"}]}
2,140
354
gh_patches_debug_42751
rasdani/github-patches
git_diff
cloudtools__troposphere-1703
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- AWS::ImageBuilder::* some wrong data types and attribut missing In imagebuilder.py (2.6.1 release). * In AWS::ImageBuilder::* "Tags" are "json_checker" but should be dict When encoded in the structure it becomes a string (validator.py, ligne 258,` json.dumps(prop)`) which is creating an issue with CloudFormation that expect a struct like : `"Tags" : {Key : Value, ...}` * AWS::ImageBuilder::DistributionConfiguration::Distribution "AmiDistributionConfiguration" is "json_checker" but should be dict. For the same as above "Tags" * In AWS::ImageBuilder::Component "Data" is missing. And should be "json_checker" in that case. * In AWS::ImageBuilder::ImageRecipe::InstanceBlockDeviceMapping "NoDevice" is boolean but should be a string --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `troposphere/imagebuilder.py` Content: ``` 1 # Copyright (c) 2020, Mark Peek <[email protected]> 2 # All rights reserved. 3 # 4 # See LICENSE file for full license. 5 6 from . import AWSObject, AWSProperty 7 from .validators import (integer, boolean, json_checker, 8 component_platforms, imagepipeline_status, 9 schedule_pipelineexecutionstartcondition, 10 ebsinstanceblockdevicespecification_volume_type) 11 12 13 class S3Logs(AWSProperty): 14 props = { 15 "S3BucketName": (basestring, False), 16 "S3KeyPrefix": (basestring, False), 17 } 18 19 20 class Logging(AWSProperty): 21 props = { 22 'S3Logs': (S3Logs, False), 23 } 24 25 26 class InfrastructureConfiguration(AWSObject): 27 resource_type = "AWS::ImageBuilder::InfrastructureConfiguration" 28 29 props = { 30 'Description': (basestring, False), 31 'InstanceProfileName': (basestring, True), 32 'InstanceTypes': ([basestring], False), 33 'KeyPair': (basestring, False), 34 'Logging': (Logging, False), 35 'Name': (basestring, True), 36 'SecurityGroupIds': ([basestring], False), 37 'SnsTopicArn': (basestring, False), 38 'SubnetId': (basestring, False), 39 'Tags': (json_checker, False), 40 'TerminateInstanceOnFailure': (boolean, False) 41 } 42 43 44 class EbsInstanceBlockDeviceSpecification(AWSProperty): 45 props = { 46 'DeleteOnTermination': (boolean, False), 47 'Encrypted': (boolean, False), 48 'Iops': (integer, False), 49 'KmsKeyId': (basestring, False), 50 'SnapshotId': (basestring, False), 51 'VolumeSize': (integer, False), 52 'VolumeType': (ebsinstanceblockdevicespecification_volume_type, False), 53 } 54 55 56 class InstanceBlockDeviceMapping(AWSProperty): 57 props = { 58 'DeviceName': (basestring, False), 59 'Ebs': (EbsInstanceBlockDeviceSpecification, False), 60 'NoDevice': (boolean, False), 61 'VirtualName': (basestring, False), 62 } 63 64 65 class ComponentConfiguration(AWSProperty): 66 props = { 67 'ComponentArn': (basestring, False), 68 } 69 70 71 class ImageRecipe(AWSObject): 72 resource_type = "AWS::ImageBuilder::ImageRecipe" 73 74 props = { 75 'BlockDeviceMappings': ([InstanceBlockDeviceMapping], False), 76 'Components': ([ComponentConfiguration], True), 77 'Description': (basestring, False), 78 'Name': (basestring, True), 79 'ParentImage': (basestring, True), 80 'Tags': (json_checker, False), 81 'Version': (basestring, True) 82 } 83 84 85 class ImageTestsConfiguration(AWSProperty): 86 props = { 87 'ImageTestsEnabled': (boolean, False), 88 'TimeoutMinutes': (integer, False), 89 } 90 91 92 class Schedule(AWSProperty): 93 props = { 94 
'PipelineExecutionStartCondition': (schedule_pipelineexecutionstartcondition, False), # NOQA 95 'ScheduleExpression': (basestring, False), 96 } 97 98 99 class ImagePipeline(AWSObject): 100 resource_type = "AWS::ImageBuilder::ImagePipeline" 101 102 props = { 103 'Description': (basestring, False), 104 'DistributionConfigurationArn': (basestring, False), 105 'ImageRecipeArn': (basestring, True), 106 'ImageTestsConfiguration': (ImageTestsConfiguration, False), 107 'InfrastructureConfigurationArn': (basestring, True), 108 'Name': (basestring, True), 109 'Schedule': (Schedule, False), 110 'Status': (imagepipeline_status, False), 111 'Tags': (json_checker, False), 112 } 113 114 115 class Distribution(AWSProperty): 116 props = { 117 'AmiDistributionConfiguration': (json_checker, False), 118 'LicenseConfigurationArns': ([basestring], False), 119 'Region': (basestring, False), 120 } 121 122 123 class DistributionConfiguration(AWSObject): 124 resource_type = "AWS::ImageBuilder::DistributionConfiguration" 125 126 props = { 127 'Description': (basestring, False), 128 'Distributions': ([Distribution], True), 129 'Name': (basestring, True), 130 'Tags': (json_checker, False), 131 } 132 133 134 class Component(AWSObject): 135 resource_type = "AWS::ImageBuilder::Component" 136 137 props = { 138 'ChangeDescription': (basestring, False), 139 'Description': (basestring, False), 140 'KmsKeyId': (basestring, False), 141 'Name': (basestring, True), 142 'Platform': (component_platforms, True), 143 'Tags': (json_checker, False), 144 'Uri': (basestring, False), 145 'Version': (basestring, True), 146 } 147 148 149 class Image(AWSObject): 150 resource_type = "AWS::ImageBuilder::Image" 151 152 props = { 153 'DistributionConfigurationArn': (basestring, False), 154 'ImageRecipeArn': (basestring, True), 155 'ImageTestsConfiguration': (ImageTestsConfiguration, True), 156 'InfrastructureConfigurationArn': (basestring, True), 157 'Tags': (json_checker, False), 158 } 159 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/troposphere/imagebuilder.py b/troposphere/imagebuilder.py --- a/troposphere/imagebuilder.py +++ b/troposphere/imagebuilder.py @@ -4,8 +4,8 @@ # See LICENSE file for full license. from . import AWSObject, AWSProperty -from .validators import (integer, boolean, json_checker, - component_platforms, imagepipeline_status, +from .validators import (integer, boolean, component_platforms, + imagepipeline_status, schedule_pipelineexecutionstartcondition, ebsinstanceblockdevicespecification_volume_type) @@ -36,7 +36,7 @@ 'SecurityGroupIds': ([basestring], False), 'SnsTopicArn': (basestring, False), 'SubnetId': (basestring, False), - 'Tags': (json_checker, False), + 'Tags': (dict, False), 'TerminateInstanceOnFailure': (boolean, False) } @@ -57,7 +57,7 @@ props = { 'DeviceName': (basestring, False), 'Ebs': (EbsInstanceBlockDeviceSpecification, False), - 'NoDevice': (boolean, False), + 'NoDevice': (basestring, False), 'VirtualName': (basestring, False), } @@ -77,7 +77,7 @@ 'Description': (basestring, False), 'Name': (basestring, True), 'ParentImage': (basestring, True), - 'Tags': (json_checker, False), + 'Tags': (dict, False), 'Version': (basestring, True) } @@ -108,13 +108,13 @@ 'Name': (basestring, True), 'Schedule': (Schedule, False), 'Status': (imagepipeline_status, False), - 'Tags': (json_checker, False), + 'Tags': (dict, False), } class Distribution(AWSProperty): props = { - 'AmiDistributionConfiguration': (json_checker, False), + 'AmiDistributionConfiguration': (dict, False), 'LicenseConfigurationArns': ([basestring], False), 'Region': (basestring, False), } @@ -127,7 +127,7 @@ 'Description': (basestring, False), 'Distributions': ([Distribution], True), 'Name': (basestring, True), - 'Tags': (json_checker, False), + 'Tags': (dict, False), } @@ -136,11 +136,12 @@ props = { 'ChangeDescription': (basestring, False), + 'Data': (basestring, False), 'Description': (basestring, False), 'KmsKeyId': (basestring, False), 'Name': (basestring, True), 'Platform': (component_platforms, True), - 'Tags': (json_checker, False), + 'Tags': (dict, False), 'Uri': (basestring, False), 'Version': (basestring, True), } @@ -154,5 +155,5 @@ 'ImageRecipeArn': (basestring, True), 'ImageTestsConfiguration': (ImageTestsConfiguration, True), 'InfrastructureConfigurationArn': (basestring, True), - 'Tags': (json_checker, False), + 'Tags': (dict, False), }
{"golden_diff": "diff --git a/troposphere/imagebuilder.py b/troposphere/imagebuilder.py\n--- a/troposphere/imagebuilder.py\n+++ b/troposphere/imagebuilder.py\n@@ -4,8 +4,8 @@\n # See LICENSE file for full license.\n \n from . import AWSObject, AWSProperty\n-from .validators import (integer, boolean, json_checker,\n- component_platforms, imagepipeline_status,\n+from .validators import (integer, boolean, component_platforms,\n+ imagepipeline_status,\n schedule_pipelineexecutionstartcondition,\n ebsinstanceblockdevicespecification_volume_type)\n \n@@ -36,7 +36,7 @@\n 'SecurityGroupIds': ([basestring], False),\n 'SnsTopicArn': (basestring, False),\n 'SubnetId': (basestring, False),\n- 'Tags': (json_checker, False),\n+ 'Tags': (dict, False),\n 'TerminateInstanceOnFailure': (boolean, False)\n }\n \n@@ -57,7 +57,7 @@\n props = {\n 'DeviceName': (basestring, False),\n 'Ebs': (EbsInstanceBlockDeviceSpecification, False),\n- 'NoDevice': (boolean, False),\n+ 'NoDevice': (basestring, False),\n 'VirtualName': (basestring, False),\n }\n \n@@ -77,7 +77,7 @@\n 'Description': (basestring, False),\n 'Name': (basestring, True),\n 'ParentImage': (basestring, True),\n- 'Tags': (json_checker, False),\n+ 'Tags': (dict, False),\n 'Version': (basestring, True)\n }\n \n@@ -108,13 +108,13 @@\n 'Name': (basestring, True),\n 'Schedule': (Schedule, False),\n 'Status': (imagepipeline_status, False),\n- 'Tags': (json_checker, False),\n+ 'Tags': (dict, False),\n }\n \n \n class Distribution(AWSProperty):\n props = {\n- 'AmiDistributionConfiguration': (json_checker, False),\n+ 'AmiDistributionConfiguration': (dict, False),\n 'LicenseConfigurationArns': ([basestring], False),\n 'Region': (basestring, False),\n }\n@@ -127,7 +127,7 @@\n 'Description': (basestring, False),\n 'Distributions': ([Distribution], True),\n 'Name': (basestring, True),\n- 'Tags': (json_checker, False),\n+ 'Tags': (dict, False),\n }\n \n \n@@ -136,11 +136,12 @@\n \n props = {\n 'ChangeDescription': (basestring, False),\n+ 'Data': (basestring, False),\n 'Description': (basestring, False),\n 'KmsKeyId': (basestring, False),\n 'Name': (basestring, True),\n 'Platform': (component_platforms, True),\n- 'Tags': (json_checker, False),\n+ 'Tags': (dict, False),\n 'Uri': (basestring, False),\n 'Version': (basestring, True),\n }\n@@ -154,5 +155,5 @@\n 'ImageRecipeArn': (basestring, True),\n 'ImageTestsConfiguration': (ImageTestsConfiguration, True),\n 'InfrastructureConfigurationArn': (basestring, True),\n- 'Tags': (json_checker, False),\n+ 'Tags': (dict, False),\n }\n", "issue": "AWS::ImageBuilder::* some wrong data types and attribut missing\nIn imagebuilder.py (2.6.1 release).\r\n\r\n* In AWS::ImageBuilder::*\r\n\"Tags\" are \"json_checker\" but should be dict\r\nWhen encoded in the structure it becomes a string (validator.py, ligne 258,` json.dumps(prop)`) which is creating an issue with CloudFormation that expect a struct like : `\"Tags\" : {Key : Value, ...}`\r\n* AWS::ImageBuilder::DistributionConfiguration::Distribution\r\n\"AmiDistributionConfiguration\" is \"json_checker\" but should be dict.\r\nFor the same as above \"Tags\" \r\n* In AWS::ImageBuilder::Component\r\n\"Data\" is missing. And should be \"json_checker\" in that case.\r\n* In AWS::ImageBuilder::ImageRecipe::InstanceBlockDeviceMapping\r\n\"NoDevice\" is boolean but should be a string\r\n\n", "before_files": [{"content": "# Copyright (c) 2020, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . 
import AWSObject, AWSProperty\nfrom .validators import (integer, boolean, json_checker,\n component_platforms, imagepipeline_status,\n schedule_pipelineexecutionstartcondition,\n ebsinstanceblockdevicespecification_volume_type)\n\n\nclass S3Logs(AWSProperty):\n props = {\n \"S3BucketName\": (basestring, False),\n \"S3KeyPrefix\": (basestring, False),\n }\n\n\nclass Logging(AWSProperty):\n props = {\n 'S3Logs': (S3Logs, False),\n }\n\n\nclass InfrastructureConfiguration(AWSObject):\n resource_type = \"AWS::ImageBuilder::InfrastructureConfiguration\"\n\n props = {\n 'Description': (basestring, False),\n 'InstanceProfileName': (basestring, True),\n 'InstanceTypes': ([basestring], False),\n 'KeyPair': (basestring, False),\n 'Logging': (Logging, False),\n 'Name': (basestring, True),\n 'SecurityGroupIds': ([basestring], False),\n 'SnsTopicArn': (basestring, False),\n 'SubnetId': (basestring, False),\n 'Tags': (json_checker, False),\n 'TerminateInstanceOnFailure': (boolean, False)\n }\n\n\nclass EbsInstanceBlockDeviceSpecification(AWSProperty):\n props = {\n 'DeleteOnTermination': (boolean, False),\n 'Encrypted': (boolean, False),\n 'Iops': (integer, False),\n 'KmsKeyId': (basestring, False),\n 'SnapshotId': (basestring, False),\n 'VolumeSize': (integer, False),\n 'VolumeType': (ebsinstanceblockdevicespecification_volume_type, False),\n }\n\n\nclass InstanceBlockDeviceMapping(AWSProperty):\n props = {\n 'DeviceName': (basestring, False),\n 'Ebs': (EbsInstanceBlockDeviceSpecification, False),\n 'NoDevice': (boolean, False),\n 'VirtualName': (basestring, False),\n }\n\n\nclass ComponentConfiguration(AWSProperty):\n props = {\n 'ComponentArn': (basestring, False),\n }\n\n\nclass ImageRecipe(AWSObject):\n resource_type = \"AWS::ImageBuilder::ImageRecipe\"\n\n props = {\n 'BlockDeviceMappings': ([InstanceBlockDeviceMapping], False),\n 'Components': ([ComponentConfiguration], True),\n 'Description': (basestring, False),\n 'Name': (basestring, True),\n 'ParentImage': (basestring, True),\n 'Tags': (json_checker, False),\n 'Version': (basestring, True)\n }\n\n\nclass ImageTestsConfiguration(AWSProperty):\n props = {\n 'ImageTestsEnabled': (boolean, False),\n 'TimeoutMinutes': (integer, False),\n }\n\n\nclass Schedule(AWSProperty):\n props = {\n 'PipelineExecutionStartCondition': (schedule_pipelineexecutionstartcondition, False), # NOQA\n 'ScheduleExpression': (basestring, False),\n }\n\n\nclass ImagePipeline(AWSObject):\n resource_type = \"AWS::ImageBuilder::ImagePipeline\"\n\n props = {\n 'Description': (basestring, False),\n 'DistributionConfigurationArn': (basestring, False),\n 'ImageRecipeArn': (basestring, True),\n 'ImageTestsConfiguration': (ImageTestsConfiguration, False),\n 'InfrastructureConfigurationArn': (basestring, True),\n 'Name': (basestring, True),\n 'Schedule': (Schedule, False),\n 'Status': (imagepipeline_status, False),\n 'Tags': (json_checker, False),\n }\n\n\nclass Distribution(AWSProperty):\n props = {\n 'AmiDistributionConfiguration': (json_checker, False),\n 'LicenseConfigurationArns': ([basestring], False),\n 'Region': (basestring, False),\n }\n\n\nclass DistributionConfiguration(AWSObject):\n resource_type = \"AWS::ImageBuilder::DistributionConfiguration\"\n\n props = {\n 'Description': (basestring, False),\n 'Distributions': ([Distribution], True),\n 'Name': (basestring, True),\n 'Tags': (json_checker, False),\n }\n\n\nclass Component(AWSObject):\n resource_type = \"AWS::ImageBuilder::Component\"\n\n props = {\n 'ChangeDescription': (basestring, False),\n 'Description': 
(basestring, False),\n 'KmsKeyId': (basestring, False),\n 'Name': (basestring, True),\n 'Platform': (component_platforms, True),\n 'Tags': (json_checker, False),\n 'Uri': (basestring, False),\n 'Version': (basestring, True),\n }\n\n\nclass Image(AWSObject):\n resource_type = \"AWS::ImageBuilder::Image\"\n\n props = {\n 'DistributionConfigurationArn': (basestring, False),\n 'ImageRecipeArn': (basestring, True),\n 'ImageTestsConfiguration': (ImageTestsConfiguration, True),\n 'InfrastructureConfigurationArn': (basestring, True),\n 'Tags': (json_checker, False),\n }\n", "path": "troposphere/imagebuilder.py"}], "after_files": [{"content": "# Copyright (c) 2020, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSObject, AWSProperty\nfrom .validators import (integer, boolean, component_platforms,\n imagepipeline_status,\n schedule_pipelineexecutionstartcondition,\n ebsinstanceblockdevicespecification_volume_type)\n\n\nclass S3Logs(AWSProperty):\n props = {\n \"S3BucketName\": (basestring, False),\n \"S3KeyPrefix\": (basestring, False),\n }\n\n\nclass Logging(AWSProperty):\n props = {\n 'S3Logs': (S3Logs, False),\n }\n\n\nclass InfrastructureConfiguration(AWSObject):\n resource_type = \"AWS::ImageBuilder::InfrastructureConfiguration\"\n\n props = {\n 'Description': (basestring, False),\n 'InstanceProfileName': (basestring, True),\n 'InstanceTypes': ([basestring], False),\n 'KeyPair': (basestring, False),\n 'Logging': (Logging, False),\n 'Name': (basestring, True),\n 'SecurityGroupIds': ([basestring], False),\n 'SnsTopicArn': (basestring, False),\n 'SubnetId': (basestring, False),\n 'Tags': (dict, False),\n 'TerminateInstanceOnFailure': (boolean, False)\n }\n\n\nclass EbsInstanceBlockDeviceSpecification(AWSProperty):\n props = {\n 'DeleteOnTermination': (boolean, False),\n 'Encrypted': (boolean, False),\n 'Iops': (integer, False),\n 'KmsKeyId': (basestring, False),\n 'SnapshotId': (basestring, False),\n 'VolumeSize': (integer, False),\n 'VolumeType': (ebsinstanceblockdevicespecification_volume_type, False),\n }\n\n\nclass InstanceBlockDeviceMapping(AWSProperty):\n props = {\n 'DeviceName': (basestring, False),\n 'Ebs': (EbsInstanceBlockDeviceSpecification, False),\n 'NoDevice': (basestring, False),\n 'VirtualName': (basestring, False),\n }\n\n\nclass ComponentConfiguration(AWSProperty):\n props = {\n 'ComponentArn': (basestring, False),\n }\n\n\nclass ImageRecipe(AWSObject):\n resource_type = \"AWS::ImageBuilder::ImageRecipe\"\n\n props = {\n 'BlockDeviceMappings': ([InstanceBlockDeviceMapping], False),\n 'Components': ([ComponentConfiguration], True),\n 'Description': (basestring, False),\n 'Name': (basestring, True),\n 'ParentImage': (basestring, True),\n 'Tags': (dict, False),\n 'Version': (basestring, True)\n }\n\n\nclass ImageTestsConfiguration(AWSProperty):\n props = {\n 'ImageTestsEnabled': (boolean, False),\n 'TimeoutMinutes': (integer, False),\n }\n\n\nclass Schedule(AWSProperty):\n props = {\n 'PipelineExecutionStartCondition': (schedule_pipelineexecutionstartcondition, False), # NOQA\n 'ScheduleExpression': (basestring, False),\n }\n\n\nclass ImagePipeline(AWSObject):\n resource_type = \"AWS::ImageBuilder::ImagePipeline\"\n\n props = {\n 'Description': (basestring, False),\n 'DistributionConfigurationArn': (basestring, False),\n 'ImageRecipeArn': (basestring, True),\n 'ImageTestsConfiguration': (ImageTestsConfiguration, False),\n 'InfrastructureConfigurationArn': (basestring, True),\n 'Name': (basestring, True),\n 
'Schedule': (Schedule, False),\n 'Status': (imagepipeline_status, False),\n 'Tags': (dict, False),\n }\n\n\nclass Distribution(AWSProperty):\n props = {\n 'AmiDistributionConfiguration': (dict, False),\n 'LicenseConfigurationArns': ([basestring], False),\n 'Region': (basestring, False),\n }\n\n\nclass DistributionConfiguration(AWSObject):\n resource_type = \"AWS::ImageBuilder::DistributionConfiguration\"\n\n props = {\n 'Description': (basestring, False),\n 'Distributions': ([Distribution], True),\n 'Name': (basestring, True),\n 'Tags': (dict, False),\n }\n\n\nclass Component(AWSObject):\n resource_type = \"AWS::ImageBuilder::Component\"\n\n props = {\n 'ChangeDescription': (basestring, False),\n 'Data': (basestring, False),\n 'Description': (basestring, False),\n 'KmsKeyId': (basestring, False),\n 'Name': (basestring, True),\n 'Platform': (component_platforms, True),\n 'Tags': (dict, False),\n 'Uri': (basestring, False),\n 'Version': (basestring, True),\n }\n\n\nclass Image(AWSObject):\n resource_type = \"AWS::ImageBuilder::Image\"\n\n props = {\n 'DistributionConfigurationArn': (basestring, False),\n 'ImageRecipeArn': (basestring, True),\n 'ImageTestsConfiguration': (ImageTestsConfiguration, True),\n 'InfrastructureConfigurationArn': (basestring, True),\n 'Tags': (dict, False),\n }\n", "path": "troposphere/imagebuilder.py"}]}
1,942
769
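The golden diff above swaps `json_checker` for `dict` on each `Tags` property, widens `AmiDistributionConfiguration` to a plain dict, changes `NoDevice` to a string, and adds the missing `Data` prop on `Component`. A minimal sketch of how the patched property types might be exercised — the resource name, tag values, and inline component document are illustrative, and it assumes troposphere ~2.6.x with the fix applied:

```python
# Illustrative only: exercising the corrected ImageBuilder property types
# (Tags as a plain dict, Data as an inline string) after the patch.
from troposphere import Template
from troposphere.imagebuilder import Component

template = Template()
template.add_resource(
    Component(
        "DemoComponent",
        Name="demo-component",
        Platform="Linux",
        Version="1.0.0",
        # 'Data' is the inline component document; the prop exists only once
        # the patch adds it to Component.props.
        Data="name: demo\nschemaVersion: 1.0\nphases: []\n",
        # With the fix, Tags is validated as a dict, matching CloudFormation's
        # expected shape {"Key": "Value", ...} instead of a JSON string.
        Tags={"team": "platform", "env": "dev"},
    )
)
print(template.to_json())
```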
gh_patches_debug_14052
rasdani/github-patches
git_diff
openai__gym-1149
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Why is there a restiction on shape in multi discrete? https://github.com/openai/gym/blob/422c9c7bb3c3c5a756c3b12dfe99733bfbfe3920/gym/spaces/multi_discrete.py#L10 Why is this imposed? Say one may need a high dimensional space like a (3,3,3) grid with discretised values for each element in the grid. It can be very easily circumvented by using np.random.random_sample and passing the shape Is there some specific reason for doing this? Also note this example provided in dict_space doesn't work currently. https://github.com/openai/gym/blob/422c9c7bb3c3c5a756c3b12dfe99733bfbfe3920/gym/spaces/dict_space.py#L22 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `gym/spaces/multi_discrete.py` Content: ``` 1 import gym 2 import numpy as np 3 4 class MultiDiscrete(gym.Space): 5 def __init__(self, nvec): 6 """ 7 nvec: vector of counts of each categorical variable 8 """ 9 self.nvec = np.asarray(nvec, dtype=np.int32) 10 assert self.nvec.ndim == 1, 'nvec should be a 1d array (or list) of ints' 11 gym.Space.__init__(self, (self.nvec.size,), np.int8) 12 def sample(self): 13 return (gym.spaces.np_random.rand(self.nvec.size) * self.nvec).astype(self.dtype) 14 def contains(self, x): 15 return (0 <= x).all() and (x < self.nvec).all() and x.dtype.kind in 'ui' 16 17 __contains__ = contains 18 19 def to_jsonable(self, sample_n): 20 return [sample.tolist() for sample in sample_n] 21 def from_jsonable(self, sample_n): 22 return np.array(sample_n) 23 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/gym/spaces/multi_discrete.py b/gym/spaces/multi_discrete.py --- a/gym/spaces/multi_discrete.py +++ b/gym/spaces/multi_discrete.py @@ -7,10 +7,9 @@ nvec: vector of counts of each categorical variable """ self.nvec = np.asarray(nvec, dtype=np.int32) - assert self.nvec.ndim == 1, 'nvec should be a 1d array (or list) of ints' - gym.Space.__init__(self, (self.nvec.size,), np.int8) + gym.Space.__init__(self, (self.nvec.shape,), np.int8) def sample(self): - return (gym.spaces.np_random.rand(self.nvec.size) * self.nvec).astype(self.dtype) + return (gym.spaces.np_random.random_sample(self.nvec.shape) * self.nvec).astype(self.dtype) def contains(self, x): return (0 <= x).all() and (x < self.nvec).all() and x.dtype.kind in 'ui'
{"golden_diff": "diff --git a/gym/spaces/multi_discrete.py b/gym/spaces/multi_discrete.py\n--- a/gym/spaces/multi_discrete.py\n+++ b/gym/spaces/multi_discrete.py\n@@ -7,10 +7,9 @@\n nvec: vector of counts of each categorical variable\n \"\"\"\n self.nvec = np.asarray(nvec, dtype=np.int32)\n- assert self.nvec.ndim == 1, 'nvec should be a 1d array (or list) of ints'\n- gym.Space.__init__(self, (self.nvec.size,), np.int8)\n+ gym.Space.__init__(self, (self.nvec.shape,), np.int8)\n def sample(self):\n- return (gym.spaces.np_random.rand(self.nvec.size) * self.nvec).astype(self.dtype)\n+ return (gym.spaces.np_random.random_sample(self.nvec.shape) * self.nvec).astype(self.dtype)\n def contains(self, x):\n return (0 <= x).all() and (x < self.nvec).all() and x.dtype.kind in 'ui'\n", "issue": "Why is there a restiction on shape in multi discrete?\nhttps://github.com/openai/gym/blob/422c9c7bb3c3c5a756c3b12dfe99733bfbfe3920/gym/spaces/multi_discrete.py#L10\r\n\r\nWhy is this imposed?\r\nSay one may need a high dimensional space like a (3,3,3) grid with discretised values for each element in the grid. \r\nIt can be very easily circumvented by using np.random.random_sample and passing the shape\r\nIs there some specific reason for doing this?\r\n\r\nAlso note this example provided in dict_space doesn't work currently.\r\nhttps://github.com/openai/gym/blob/422c9c7bb3c3c5a756c3b12dfe99733bfbfe3920/gym/spaces/dict_space.py#L22\r\n\r\n\n", "before_files": [{"content": "import gym\nimport numpy as np\n\nclass MultiDiscrete(gym.Space):\n def __init__(self, nvec):\n \"\"\"\n nvec: vector of counts of each categorical variable\n \"\"\"\n self.nvec = np.asarray(nvec, dtype=np.int32)\n assert self.nvec.ndim == 1, 'nvec should be a 1d array (or list) of ints'\n gym.Space.__init__(self, (self.nvec.size,), np.int8)\n def sample(self):\n return (gym.spaces.np_random.rand(self.nvec.size) * self.nvec).astype(self.dtype)\n def contains(self, x):\n return (0 <= x).all() and (x < self.nvec).all() and x.dtype.kind in 'ui'\n \n __contains__ = contains\n \n def to_jsonable(self, sample_n):\n return [sample.tolist() for sample in sample_n]\n def from_jsonable(self, sample_n):\n return np.array(sample_n)\n", "path": "gym/spaces/multi_discrete.py"}], "after_files": [{"content": "import gym\nimport numpy as np\n\nclass MultiDiscrete(gym.Space):\n def __init__(self, nvec):\n \"\"\"\n nvec: vector of counts of each categorical variable\n \"\"\"\n self.nvec = np.asarray(nvec, dtype=np.int32)\n gym.Space.__init__(self, (self.nvec.shape,), np.int8)\n def sample(self):\n return (gym.spaces.np_random.random_sample(self.nvec.shape) * self.nvec).astype(self.dtype)\n def contains(self, x):\n return (0 <= x).all() and (x < self.nvec).all() and x.dtype.kind in 'ui'\n \n __contains__ = contains\n \n def to_jsonable(self, sample_n):\n return [sample.tolist() for sample in sample_n]\n def from_jsonable(self, sample_n):\n return np.array(sample_n)\n", "path": "gym/spaces/multi_discrete.py"}]}
725
247
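The fix drops the 1-D assertion and samples with `nvec.shape`, so a grid-shaped space such as (3, 3, 3) becomes legal. The sampling and containment logic can be checked in isolation with plain NumPy; this sketch mirrors the patched `sample`/`contains` behaviour without importing gym, and the grid shape and per-cell count are arbitrary examples:

```python
# Standalone sketch of the patched sampling logic: nvec may now have any
# shape, e.g. a (3, 3, 3) grid with 4 discrete choices per cell.
import numpy as np

nvec = np.full((3, 3, 3), 4, dtype=np.int32)
rng = np.random.RandomState(0)

# Mirrors: (np_random.random_sample(self.nvec.shape) * self.nvec).astype(dtype)
sample = (rng.random_sample(nvec.shape) * nvec).astype(np.int32)

# Mirrors the contains() check: every entry lies in [0, nvec).
assert sample.shape == nvec.shape
assert (0 <= sample).all() and (sample < nvec).all()
print(sample)
```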
gh_patches_debug_22062
rasdani/github-patches
git_diff
getsentry__sentry-24250
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- sentry django send_fake_data errors out ## Important Details How are you running Sentry? * [ ] On-Premise docker [Version xyz] * [ ] Saas (sentry.io) * [x] Other [briefly describe your environment] Development environment on macOS while working on sentry-plugins, with `sentry devserver` ## Description When running `sentry django send_fake_data` a stacktrace is raised: ``` Traceback (most recent call last): File "MYPYTHONPATH/bin/sentry", line 11, in <module> load_entry_point('sentry', 'console_scripts', 'sentry')() File "MYPYTHONPATH/src/sentry/src/sentry/runner/__init__.py", line 161, in main cli(prog_name=get_prog(), obj={}, max_content_width=100) File "MYPYTHONPATH/lib/python2.7/site-packages/click/core.py", line 722, in __call__ return self.main(*args, **kwargs) File "MYPYTHONPATH/lib/python2.7/site-packages/click/core.py", line 697, in main rv = self.invoke(ctx) File "MYPYTHONPATH/lib/python2.7/site-packages/click/core.py", line 1066, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "MYPYTHONPATH/lib/python2.7/site-packages/click/core.py", line 895, in invoke return ctx.invoke(self.callback, **ctx.params) File "MYPYTHONPATH/lib/python2.7/site-packages/click/core.py", line 535, in invoke return callback(*args, **kwargs) File "MYPYTHONPATH/lib/python2.7/site-packages/click/decorators.py", line 17, in new_func return f(get_current_context(), *args, **kwargs) File "MYPYTHONPATH/src/sentry/src/sentry/runner/decorators.py", line 36, in inner return ctx.invoke(f, *args, **kwargs) File "MYPYTHONPATH/lib/python2.7/site-packages/click/core.py", line 535, in invoke return callback(*args, **kwargs) File "MYPYTHONPATH/lib/python2.7/site-packages/click/decorators.py", line 17, in new_func return f(get_current_context(), *args, **kwargs) File "MYPYTHONPATH/src/sentry/src/sentry/runner/commands/django.py", line 25, in django execute_from_command_line(argv=[ctx.command_path] + list(management_args)) File "MYPYTHONPATH/lib/python2.7/site-packages/django/core/management/__init__.py", line 399, in execute_from_command_line utility.execute() File "MYPYTHONPATH/lib/python2.7/site-packages/django/core/management/__init__.py", line 392, in execute self.fetch_command(subcommand).run_from_argv(self.argv) File "MYPYTHONPATH/lib/python2.7/site-packages/django/core/management/base.py", line 242, in run_from_argv self.execute(*args, **options.__dict__) File "MYPYTHONPATH/lib/python2.7/site-packages/django/core/management/base.py", line 285, in execute output = self.handle(*args, **options) File "MYPYTHONPATH/src/sentry/src/sentry/management/commands/send_fake_data.py", line 75, in handle from raven.contrib.django.models import client ImportError: No module named raven.contrib.django.models ``` ## Steps to Reproduce See above. ### What you expected to happen Some fake data should magically appear! ### Possible Solution Appears to be coming from this import: https://github.com/getsentry/sentry/blob/317c65fbcd0f0b31a4e97c415bcf35256900a286/src/sentry/management/commands/send_fake_data.py#L75 Manually running `pip install raven` appears to solve it - no traceback appears, so I guess having raven installed, that solves the immediate issue. However, I think that there's no events being sent anyhow - as perceived by this log: ``` [INFO] raven.contrib.django.client.DjangoClient: Raven is not configured (logging is disabled). Please see the documentation for more information. 
(message=u'Raven is not configured (logging is disabled). Please see the documentation for more information.') Preparing to send events. Ctrl-C to exit. ``` I saw there's a push to go towards the unified python sdk - should this fix be overall done with that approach? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/sentry/management/commands/send_fake_data.py` Content: ``` 1 import datetime 2 import itertools 3 import random 4 import time 5 6 from django.core.management.base import BaseCommand, CommandError 7 8 9 def funcs(): 10 exceptions = itertools.cycle( 11 [ 12 SyntaxError("foo must come before bar"), 13 ValueError("baz is not a valid choice"), 14 TypeError("NoneType cannot be coerced to bar"), 15 NotImplementedError("This feature is not implemented"), 16 ZeroDivisionError("Your math doesn't work"), 17 Exception("An unknown exception"), 18 KeyError("index does not exist"), 19 ] 20 ) 21 loggers = itertools.cycle(["root", "foo", "foo.bar"]) 22 emails = itertools.cycle(["[email protected]", "[email protected]", "[email protected]"]) 23 timestamp_max = int(datetime.timedelta(days=1).total_seconds()) 24 25 def exception(client): 26 timestamp = datetime.datetime.utcnow() - datetime.timedelta( 27 seconds=random.randint(0, timestamp_max) 28 ) 29 try: 30 raise next(exceptions) 31 except Exception: 32 email = next(emails) 33 return client.captureException( 34 data={ 35 "logger": next(loggers), 36 "site": "web", 37 "user": {"id": email, "email": email}, 38 }, 39 date=timestamp, 40 ) 41 42 return [exception] 43 44 45 class Command(BaseCommand): 46 help = "Sends fake data to the internal Sentry project" 47 48 def add_arguments(self, parser): 49 parser.add_argument( 50 "--project", dest="project", help="project ID or organization-slug/project-slug" 51 ) 52 parser.add_argument("--num", dest="num_events", type=int), 53 54 def handle(self, **options): 55 from django.conf import settings 56 from raven.contrib.django.models import client 57 from sentry.models import Project 58 59 if not options["project"]: 60 project = Project.objects.get(id=settings.SENTRY_PROJECT) 61 else: 62 if options["project"].isdigit(): 63 project = Project.objects.get(id=options["project"]) 64 elif "/" in options["project"]: 65 o_slug, p_slug = options["project"].split("/", 1) 66 project = Project.objects.get(slug=p_slug, organization__slug=o_slug) 67 else: 68 raise CommandError( 69 "Project must be specified as organization-slug/project-slug or a project id" 70 ) 71 72 client.project = project.id 73 74 self.stdout.write("Preparing to send events. Ctrl-C to exit.") 75 76 time.sleep(2) 77 78 functions = funcs() 79 80 if options["num_events"]: 81 max_events = options["num_events"] 82 else: 83 max_events = -1 84 85 s = time.time() 86 r = 0 87 try: 88 while True: 89 if r == max_events: 90 break 91 if options["verbosity"] > 1: 92 self.stdout.write("Sending event..\n") 93 random.choice(functions)(client) 94 r += 1 95 except KeyboardInterrupt: 96 pass 97 finally: 98 total_time = time.time() - s 99 self.stdout.write("%d requests serviced in %.3fs\n" % (r, total_time)) 100 if r: 101 avg = total_time / r 102 ravg = 1 / avg 103 else: 104 avg = ravg = 0 105 self.stdout.write("avg of %.3fs/req, %d req/s\n" % (avg, ravg)) 106 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/sentry/management/commands/send_fake_data.py b/src/sentry/management/commands/send_fake_data.py --- a/src/sentry/management/commands/send_fake_data.py +++ b/src/sentry/management/commands/send_fake_data.py @@ -28,16 +28,14 @@ ) try: raise next(exceptions) - except Exception: + except Exception as exc: email = next(emails) - return client.captureException( - data={ - "logger": next(loggers), - "site": "web", - "user": {"id": email, "email": email}, - }, - date=timestamp, - ) + with client.configure_scope() as scope: + scope.user = {"id": email, "email": email} + scope.logger = next(loggers) + scope.site = "web" + scope.date = timestamp + return client.captureException(exc) return [exception] @@ -53,7 +51,7 @@ def handle(self, **options): from django.conf import settings - from raven.contrib.django.models import client + from sentry.app import client from sentry.models import Project if not options["project"]:
{"golden_diff": "diff --git a/src/sentry/management/commands/send_fake_data.py b/src/sentry/management/commands/send_fake_data.py\n--- a/src/sentry/management/commands/send_fake_data.py\n+++ b/src/sentry/management/commands/send_fake_data.py\n@@ -28,16 +28,14 @@\n )\n try:\n raise next(exceptions)\n- except Exception:\n+ except Exception as exc:\n email = next(emails)\n- return client.captureException(\n- data={\n- \"logger\": next(loggers),\n- \"site\": \"web\",\n- \"user\": {\"id\": email, \"email\": email},\n- },\n- date=timestamp,\n- )\n+ with client.configure_scope() as scope:\n+ scope.user = {\"id\": email, \"email\": email}\n+ scope.logger = next(loggers)\n+ scope.site = \"web\"\n+ scope.date = timestamp\n+ return client.captureException(exc)\n \n return [exception]\n \n@@ -53,7 +51,7 @@\n \n def handle(self, **options):\n from django.conf import settings\n- from raven.contrib.django.models import client\n+ from sentry.app import client\n from sentry.models import Project\n \n if not options[\"project\"]:\n", "issue": "sentry django send_fake_data errors out\n## Important Details\r\n\r\nHow are you running Sentry?\r\n\r\n* [ ] On-Premise docker [Version xyz]\r\n* [ ] Saas (sentry.io)\r\n* [x] Other [briefly describe your environment]\r\nDevelopment environment on macOS while working on sentry-plugins, with `sentry devserver`\r\n\r\n## Description\r\n\r\nWhen running `sentry django send_fake_data` a stacktrace is raised:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"MYPYTHONPATH/bin/sentry\", line 11, in <module>\r\n load_entry_point('sentry', 'console_scripts', 'sentry')()\r\n File \"MYPYTHONPATH/src/sentry/src/sentry/runner/__init__.py\", line 161, in main\r\n cli(prog_name=get_prog(), obj={}, max_content_width=100)\r\n File \"MYPYTHONPATH/lib/python2.7/site-packages/click/core.py\", line 722, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"MYPYTHONPATH/lib/python2.7/site-packages/click/core.py\", line 697, in main\r\n rv = self.invoke(ctx)\r\n File \"MYPYTHONPATH/lib/python2.7/site-packages/click/core.py\", line 1066, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"MYPYTHONPATH/lib/python2.7/site-packages/click/core.py\", line 895, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"MYPYTHONPATH/lib/python2.7/site-packages/click/core.py\", line 535, in invoke\r\n return callback(*args, **kwargs)\r\n File \"MYPYTHONPATH/lib/python2.7/site-packages/click/decorators.py\", line 17, in new_func\r\n return f(get_current_context(), *args, **kwargs)\r\n File \"MYPYTHONPATH/src/sentry/src/sentry/runner/decorators.py\", line 36, in inner\r\n return ctx.invoke(f, *args, **kwargs)\r\n File \"MYPYTHONPATH/lib/python2.7/site-packages/click/core.py\", line 535, in invoke\r\n return callback(*args, **kwargs)\r\n File \"MYPYTHONPATH/lib/python2.7/site-packages/click/decorators.py\", line 17, in new_func\r\n return f(get_current_context(), *args, **kwargs)\r\n File \"MYPYTHONPATH/src/sentry/src/sentry/runner/commands/django.py\", line 25, in django\r\n execute_from_command_line(argv=[ctx.command_path] + list(management_args))\r\n File \"MYPYTHONPATH/lib/python2.7/site-packages/django/core/management/__init__.py\", line 399, in execute_from_command_line\r\n utility.execute()\r\n File \"MYPYTHONPATH/lib/python2.7/site-packages/django/core/management/__init__.py\", line 392, in execute\r\n self.fetch_command(subcommand).run_from_argv(self.argv)\r\n File 
\"MYPYTHONPATH/lib/python2.7/site-packages/django/core/management/base.py\", line 242, in run_from_argv\r\n self.execute(*args, **options.__dict__)\r\n File \"MYPYTHONPATH/lib/python2.7/site-packages/django/core/management/base.py\", line 285, in execute\r\n output = self.handle(*args, **options)\r\n File \"MYPYTHONPATH/src/sentry/src/sentry/management/commands/send_fake_data.py\", line 75, in handle\r\n from raven.contrib.django.models import client\r\nImportError: No module named raven.contrib.django.models\r\n```\r\n\r\n## Steps to Reproduce\r\n\r\nSee above.\r\n\r\n### What you expected to happen\r\n\r\nSome fake data should magically appear!\r\n\r\n### Possible Solution\r\n\r\nAppears to be coming from this import:\r\nhttps://github.com/getsentry/sentry/blob/317c65fbcd0f0b31a4e97c415bcf35256900a286/src/sentry/management/commands/send_fake_data.py#L75\r\n\r\nManually running `pip install raven` appears to solve it - no traceback appears, so I guess having raven installed, that solves the immediate issue.\r\n\r\nHowever, I think that there's no events being sent anyhow - as perceived by this log:\r\n```\r\n[INFO] raven.contrib.django.client.DjangoClient: Raven is not configured (logging is disabled). Please see the documentation for more information. (message=u'Raven is not configured (logging is disabled). Please see the documentation for more information.')\r\nPreparing to send events. Ctrl-C to exit.\r\n```\r\n\r\nI saw there's a push to go towards the unified python sdk - should this fix be overall done with that approach?\r\n\n", "before_files": [{"content": "import datetime\nimport itertools\nimport random\nimport time\n\nfrom django.core.management.base import BaseCommand, CommandError\n\n\ndef funcs():\n exceptions = itertools.cycle(\n [\n SyntaxError(\"foo must come before bar\"),\n ValueError(\"baz is not a valid choice\"),\n TypeError(\"NoneType cannot be coerced to bar\"),\n NotImplementedError(\"This feature is not implemented\"),\n ZeroDivisionError(\"Your math doesn't work\"),\n Exception(\"An unknown exception\"),\n KeyError(\"index does not exist\"),\n ]\n )\n loggers = itertools.cycle([\"root\", \"foo\", \"foo.bar\"])\n emails = itertools.cycle([\"[email protected]\", \"[email protected]\", \"[email protected]\"])\n timestamp_max = int(datetime.timedelta(days=1).total_seconds())\n\n def exception(client):\n timestamp = datetime.datetime.utcnow() - datetime.timedelta(\n seconds=random.randint(0, timestamp_max)\n )\n try:\n raise next(exceptions)\n except Exception:\n email = next(emails)\n return client.captureException(\n data={\n \"logger\": next(loggers),\n \"site\": \"web\",\n \"user\": {\"id\": email, \"email\": email},\n },\n date=timestamp,\n )\n\n return [exception]\n\n\nclass Command(BaseCommand):\n help = \"Sends fake data to the internal Sentry project\"\n\n def add_arguments(self, parser):\n parser.add_argument(\n \"--project\", dest=\"project\", help=\"project ID or organization-slug/project-slug\"\n )\n parser.add_argument(\"--num\", dest=\"num_events\", type=int),\n\n def handle(self, **options):\n from django.conf import settings\n from raven.contrib.django.models import client\n from sentry.models import Project\n\n if not options[\"project\"]:\n project = Project.objects.get(id=settings.SENTRY_PROJECT)\n else:\n if options[\"project\"].isdigit():\n project = Project.objects.get(id=options[\"project\"])\n elif \"/\" in options[\"project\"]:\n o_slug, p_slug = options[\"project\"].split(\"/\", 1)\n project = Project.objects.get(slug=p_slug, 
organization__slug=o_slug)\n else:\n raise CommandError(\n \"Project must be specified as organization-slug/project-slug or a project id\"\n )\n\n client.project = project.id\n\n self.stdout.write(\"Preparing to send events. Ctrl-C to exit.\")\n\n time.sleep(2)\n\n functions = funcs()\n\n if options[\"num_events\"]:\n max_events = options[\"num_events\"]\n else:\n max_events = -1\n\n s = time.time()\n r = 0\n try:\n while True:\n if r == max_events:\n break\n if options[\"verbosity\"] > 1:\n self.stdout.write(\"Sending event..\\n\")\n random.choice(functions)(client)\n r += 1\n except KeyboardInterrupt:\n pass\n finally:\n total_time = time.time() - s\n self.stdout.write(\"%d requests serviced in %.3fs\\n\" % (r, total_time))\n if r:\n avg = total_time / r\n ravg = 1 / avg\n else:\n avg = ravg = 0\n self.stdout.write(\"avg of %.3fs/req, %d req/s\\n\" % (avg, ravg))\n", "path": "src/sentry/management/commands/send_fake_data.py"}], "after_files": [{"content": "import datetime\nimport itertools\nimport random\nimport time\n\nfrom django.core.management.base import BaseCommand, CommandError\n\n\ndef funcs():\n exceptions = itertools.cycle(\n [\n SyntaxError(\"foo must come before bar\"),\n ValueError(\"baz is not a valid choice\"),\n TypeError(\"NoneType cannot be coerced to bar\"),\n NotImplementedError(\"This feature is not implemented\"),\n ZeroDivisionError(\"Your math doesn't work\"),\n Exception(\"An unknown exception\"),\n KeyError(\"index does not exist\"),\n ]\n )\n loggers = itertools.cycle([\"root\", \"foo\", \"foo.bar\"])\n emails = itertools.cycle([\"[email protected]\", \"[email protected]\", \"[email protected]\"])\n timestamp_max = int(datetime.timedelta(days=1).total_seconds())\n\n def exception(client):\n timestamp = datetime.datetime.utcnow() - datetime.timedelta(\n seconds=random.randint(0, timestamp_max)\n )\n try:\n raise next(exceptions)\n except Exception as exc:\n email = next(emails)\n with client.configure_scope() as scope:\n scope.user = {\"id\": email, \"email\": email}\n scope.logger = next(loggers)\n scope.site = \"web\"\n scope.date = timestamp\n return client.captureException(exc)\n\n return [exception]\n\n\nclass Command(BaseCommand):\n help = \"Sends fake data to the internal Sentry project\"\n\n def add_arguments(self, parser):\n parser.add_argument(\n \"--project\", dest=\"project\", help=\"project ID or organization-slug/project-slug\"\n )\n parser.add_argument(\"--num\", dest=\"num_events\", type=int),\n\n def handle(self, **options):\n from django.conf import settings\n from sentry.app import client\n from sentry.models import Project\n\n if not options[\"project\"]:\n project = Project.objects.get(id=settings.SENTRY_PROJECT)\n else:\n if options[\"project\"].isdigit():\n project = Project.objects.get(id=options[\"project\"])\n elif \"/\" in options[\"project\"]:\n o_slug, p_slug = options[\"project\"].split(\"/\", 1)\n project = Project.objects.get(slug=p_slug, organization__slug=o_slug)\n else:\n raise CommandError(\n \"Project must be specified as organization-slug/project-slug or a project id\"\n )\n\n client.project = project.id\n\n self.stdout.write(\"Preparing to send events. 
Ctrl-C to exit.\")\n\n time.sleep(2)\n\n functions = funcs()\n\n if options[\"num_events\"]:\n max_events = options[\"num_events\"]\n else:\n max_events = -1\n\n s = time.time()\n r = 0\n try:\n while True:\n if r == max_events:\n break\n if options[\"verbosity\"] > 1:\n self.stdout.write(\"Sending event..\\n\")\n random.choice(functions)(client)\n r += 1\n except KeyboardInterrupt:\n pass\n finally:\n total_time = time.time() - s\n self.stdout.write(\"%d requests serviced in %.3fs\\n\" % (r, total_time))\n if r:\n avg = total_time / r\n ravg = 1 / avg\n else:\n avg = ravg = 0\n self.stdout.write(\"avg of %.3fs/req, %d req/s\\n\" % (avg, ravg))\n", "path": "src/sentry/management/commands/send_fake_data.py"}]}
2,256
283
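The patch drops the `raven.contrib.django` import in favour of Sentry's internal SDK-backed client (`sentry.app.client`) and moves the per-event metadata onto a scope before capturing. Outside the Sentry codebase, roughly the same pattern can be expressed with the public `sentry_sdk` package — the DSN, tags, and user values below are placeholders rather than part of the actual fix:

```python
# Hedged sketch of the scope-then-capture pattern using the public SDK.
import sentry_sdk

sentry_sdk.init(dsn="https://examplePublicKey@o0.ingest.sentry.io/0")  # placeholder DSN

try:
    raise ValueError("baz is not a valid choice")
except Exception as exc:
    with sentry_sdk.configure_scope() as scope:
        # Rough equivalent of scope.user / scope.site / scope.logger in the patch.
        scope.user = {"id": "[email protected]", "email": "[email protected]"}
        scope.set_tag("site", "web")
        scope.set_tag("logger", "foo.bar")
    sentry_sdk.capture_exception(exc)
```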
gh_patches_debug_28494
rasdani/github-patches
git_diff
goauthentik__authentik-8147
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Federation & Social Logins // OAuth - Azure AD // Unable to save settings **Describe the bug** See https://github.com/goauthentik/authentik/issues/7744 Problem still exists with release 2023.10.5 **To Reproduce** Steps to reproduce the behavior: See https://github.com/goauthentik/authentik/issues/7744 Neither on initial save or update afterwards 'Authorization URL', 'Access token URL ' and 'Profile URL' are changed **Expected behavior** A clear and concise description of what you expected to happen. **Screenshots** If applicable, add screenshots to help explain your problem. **Logs** Seem fine server: 2023-12-22 11:30:41 {"action": "model_updated", "auth_via": "session", "client_ip": "172.18.0.1", "context": {"http_request": {"args": {}, "method": "PATCH", "path": "/api/v3/sources/oauth/azure-ad/", "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:120.0) Gecko/20100101 Firefox/120.0"}, "model": {"app": "authentik_sources_oauth", "model_name": "oauthsource", "name": "azure ad", "pk": "bda8c9beeac54e5495c8270838e7a797"}}, "event": "Created Event", "host": "localhost:8044", "level": "info", "logger": "authentik.events.models", "pid": 40, "request_id": "ff436debac13400e94274999b5f5d06d", "timestamp": "2023-12-22T10:30:41.555035", "user": {"email": "[email protected]", "pk": 3, "username": "Admin"}} 2023-12-22 11:30:41 {"auth_via": "session", "event": "Task published", "host": "localhost:8044", "level": "info", "logger": "authentik.root.celery", "pid": 40, "request_id": "ff436debac13400e94274999b5f5d06d", "task_id": "4ebe0d05335f4700b8c0871d6f10235c", "task_name": "authentik.events.tasks.event_notification_handler", "timestamp": "2023-12-22T10:30:41.568890"} 2023-12-22 11:30:41 {"auth_via": "session", "event": "/api/v3/sources/oauth/azure-ad/", "host": "localhost:8044", "level": "info", "logger": "authentik.asgi", "method": "PATCH", "pid": 40, "remote": "172.18.0.1", "request_id": "ff436debac13400e94274999b5f5d06d", "runtime": 396, "scheme": "http", "status": 200, "timestamp": "2023-12-22T10:30:41.572086", "user": "Admin", "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:120.0) Gecko/20100101 Firefox/120.0"} **Version and Deployment (please complete the following information):** - authentik version: 2021.10.5 - Deployment: docker-compose, helm **Additional context** --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `authentik/sources/oauth/api/source.py` Content: ``` 1 """OAuth Source Serializer""" 2 from django.urls.base import reverse_lazy 3 from django_filters.filters import BooleanFilter 4 from django_filters.filterset import FilterSet 5 from drf_spectacular.types import OpenApiTypes 6 from drf_spectacular.utils import OpenApiParameter, extend_schema, extend_schema_field 7 from requests import RequestException 8 from rest_framework.decorators import action 9 from rest_framework.fields import BooleanField, CharField, ChoiceField, SerializerMethodField 10 from rest_framework.request import Request 11 from rest_framework.response import Response 12 from rest_framework.serializers import ValidationError 13 from rest_framework.viewsets import ModelViewSet 14 15 from authentik.core.api.sources import SourceSerializer 16 from authentik.core.api.used_by import UsedByMixin 17 from authentik.core.api.utils import PassiveSerializer 18 from authentik.lib.utils.http import get_http_session 19 from authentik.sources.oauth.models import OAuthSource 20 from authentik.sources.oauth.types.registry import SourceType, registry 21 22 23 class SourceTypeSerializer(PassiveSerializer): 24 """Serializer for SourceType""" 25 26 name = CharField(required=True) 27 verbose_name = CharField(required=True) 28 urls_customizable = BooleanField() 29 request_token_url = CharField(read_only=True, allow_null=True) 30 authorization_url = CharField(read_only=True, allow_null=True) 31 access_token_url = CharField(read_only=True, allow_null=True) 32 profile_url = CharField(read_only=True, allow_null=True) 33 oidc_well_known_url = CharField(read_only=True, allow_null=True) 34 oidc_jwks_url = CharField(read_only=True, allow_null=True) 35 36 37 class OAuthSourceSerializer(SourceSerializer): 38 """OAuth Source Serializer""" 39 40 provider_type = ChoiceField(choices=registry.get_name_tuple()) 41 callback_url = SerializerMethodField() 42 type = SerializerMethodField() 43 44 def get_callback_url(self, instance: OAuthSource) -> str: 45 """Get OAuth Callback URL""" 46 relative_url = reverse_lazy( 47 "authentik_sources_oauth:oauth-client-callback", 48 kwargs={"source_slug": instance.slug}, 49 ) 50 if "request" not in self.context: 51 return relative_url 52 return self.context["request"].build_absolute_uri(relative_url) 53 54 @extend_schema_field(SourceTypeSerializer) 55 def get_type(self, instance: OAuthSource) -> SourceTypeSerializer: 56 """Get source's type configuration""" 57 return SourceTypeSerializer(instance.source_type).data 58 59 def validate(self, attrs: dict) -> dict: 60 session = get_http_session() 61 source_type = registry.find_type(attrs["provider_type"]) 62 63 well_known = attrs.get("oidc_well_known_url") or source_type.oidc_well_known_url 64 inferred_oidc_jwks_url = None 65 66 if well_known and well_known != "": 67 try: 68 well_known_config = session.get(well_known) 69 well_known_config.raise_for_status() 70 except RequestException as exc: 71 text = exc.response.text if exc.response else str(exc) 72 raise ValidationError({"oidc_well_known_url": text}) 73 config = well_known_config.json() 74 if "issuer" not in config: 75 raise ValidationError({"oidc_well_known_url": "Invalid well-known configuration"}) 76 attrs["authorization_url"] = config.get("authorization_endpoint", "") 77 attrs["access_token_url"] = config.get("token_endpoint", "") 78 attrs["profile_url"] = config.get("userinfo_endpoint", "") 79 inferred_oidc_jwks_url = config.get("jwks_uri", "") 80 81 # Prefer user-entered URL to inferred URL to 
default URL 82 jwks_url = attrs.get("oidc_jwks_url") or inferred_oidc_jwks_url or source_type.oidc_jwks_url 83 if jwks_url and jwks_url != "": 84 attrs["oidc_jwks_url"] = jwks_url 85 try: 86 jwks_config = session.get(jwks_url) 87 jwks_config.raise_for_status() 88 except RequestException as exc: 89 text = exc.response.text if exc.response else str(exc) 90 raise ValidationError({"oidc_jwks_url": text}) 91 config = jwks_config.json() 92 attrs["oidc_jwks"] = config 93 94 provider_type = registry.find_type(attrs.get("provider_type", "")) 95 for url in [ 96 "authorization_url", 97 "access_token_url", 98 "profile_url", 99 ]: 100 if getattr(provider_type, url, None) is None: 101 if url not in attrs: 102 raise ValidationError( 103 f"{url} is required for provider {provider_type.verbose_name}" 104 ) 105 return attrs 106 107 class Meta: 108 model = OAuthSource 109 fields = SourceSerializer.Meta.fields + [ 110 "provider_type", 111 "request_token_url", 112 "authorization_url", 113 "access_token_url", 114 "profile_url", 115 "consumer_key", 116 "consumer_secret", 117 "callback_url", 118 "additional_scopes", 119 "type", 120 "oidc_well_known_url", 121 "oidc_jwks_url", 122 "oidc_jwks", 123 ] 124 extra_kwargs = {"consumer_secret": {"write_only": True}} 125 126 127 class OAuthSourceFilter(FilterSet): 128 """OAuth Source filter set""" 129 130 has_jwks = BooleanFilter(label="Only return sources with JWKS data", method="filter_has_jwks") 131 132 def filter_has_jwks(self, queryset, name, value): # pragma: no cover 133 """Only return sources with JWKS data""" 134 return queryset.exclude(oidc_jwks__iexact="{}") 135 136 class Meta: 137 model = OAuthSource 138 fields = [ 139 "name", 140 "slug", 141 "enabled", 142 "authentication_flow", 143 "enrollment_flow", 144 "policy_engine_mode", 145 "user_matching_mode", 146 "provider_type", 147 "request_token_url", 148 "authorization_url", 149 "access_token_url", 150 "profile_url", 151 "consumer_key", 152 "additional_scopes", 153 ] 154 155 156 class OAuthSourceViewSet(UsedByMixin, ModelViewSet): 157 """Source Viewset""" 158 159 queryset = OAuthSource.objects.all() 160 serializer_class = OAuthSourceSerializer 161 lookup_field = "slug" 162 filterset_class = OAuthSourceFilter 163 search_fields = ["name", "slug"] 164 ordering = ["name"] 165 166 @extend_schema( 167 responses={200: SourceTypeSerializer(many=True)}, 168 parameters=[ 169 OpenApiParameter( 170 name="name", 171 location=OpenApiParameter.QUERY, 172 type=OpenApiTypes.STR, 173 ) 174 ], 175 ) 176 @action(detail=False, pagination_class=None, filter_backends=[]) 177 def source_types(self, request: Request) -> Response: 178 """Get all creatable source types. If ?name is set, only returns the type for <name>. 179 If <name> isn't found, returns the default type.""" 180 data = [] 181 if "name" in request.query_params: 182 source_type = registry.find_type(request.query_params.get("name")) 183 if source_type.__class__ != SourceType: 184 data.append(SourceTypeSerializer(source_type).data) 185 else: 186 for source_type in registry.get(): 187 data.append(SourceTypeSerializer(source_type).data) 188 return Response(data) 189 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/authentik/sources/oauth/api/source.py b/authentik/sources/oauth/api/source.py --- a/authentik/sources/oauth/api/source.py +++ b/authentik/sources/oauth/api/source.py @@ -56,6 +56,7 @@ """Get source's type configuration""" return SourceTypeSerializer(instance.source_type).data + # pylint: disable=too-many-locals def validate(self, attrs: dict) -> dict: session = get_http_session() source_type = registry.find_type(attrs["provider_type"]) @@ -73,9 +74,17 @@ config = well_known_config.json() if "issuer" not in config: raise ValidationError({"oidc_well_known_url": "Invalid well-known configuration"}) - attrs["authorization_url"] = config.get("authorization_endpoint", "") - attrs["access_token_url"] = config.get("token_endpoint", "") - attrs["profile_url"] = config.get("userinfo_endpoint", "") + field_map = { + # authentik field to oidc field + "authorization_url": "authorization_endpoint", + "access_token_url": "token_endpoint", + "profile_url": "userinfo_endpoint", + } + for ak_key, oidc_key in field_map.items(): + # Don't overwrite user-set values + if ak_key in attrs and attrs[ak_key]: + continue + attrs[ak_key] = config.get(oidc_key, "") inferred_oidc_jwks_url = config.get("jwks_uri", "") # Prefer user-entered URL to inferred URL to default URL
{"golden_diff": "diff --git a/authentik/sources/oauth/api/source.py b/authentik/sources/oauth/api/source.py\n--- a/authentik/sources/oauth/api/source.py\n+++ b/authentik/sources/oauth/api/source.py\n@@ -56,6 +56,7 @@\n \"\"\"Get source's type configuration\"\"\"\n return SourceTypeSerializer(instance.source_type).data\n \n+ # pylint: disable=too-many-locals\n def validate(self, attrs: dict) -> dict:\n session = get_http_session()\n source_type = registry.find_type(attrs[\"provider_type\"])\n@@ -73,9 +74,17 @@\n config = well_known_config.json()\n if \"issuer\" not in config:\n raise ValidationError({\"oidc_well_known_url\": \"Invalid well-known configuration\"})\n- attrs[\"authorization_url\"] = config.get(\"authorization_endpoint\", \"\")\n- attrs[\"access_token_url\"] = config.get(\"token_endpoint\", \"\")\n- attrs[\"profile_url\"] = config.get(\"userinfo_endpoint\", \"\")\n+ field_map = {\n+ # authentik field to oidc field\n+ \"authorization_url\": \"authorization_endpoint\",\n+ \"access_token_url\": \"token_endpoint\",\n+ \"profile_url\": \"userinfo_endpoint\",\n+ }\n+ for ak_key, oidc_key in field_map.items():\n+ # Don't overwrite user-set values\n+ if ak_key in attrs and attrs[ak_key]:\n+ continue\n+ attrs[ak_key] = config.get(oidc_key, \"\")\n inferred_oidc_jwks_url = config.get(\"jwks_uri\", \"\")\n \n # Prefer user-entered URL to inferred URL to default URL\n", "issue": "Federation & Social Logins // OAuth - Azure AD // Unable to save settings\n**Describe the bug**\r\nSee https://github.com/goauthentik/authentik/issues/7744\r\nProblem still exists with release 2023.10.5\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\nSee https://github.com/goauthentik/authentik/issues/7744\r\n\r\nNeither on initial save or update afterwards 'Authorization URL', 'Access token URL ' and 'Profile URL' are changed\r\n\r\n**Expected behavior**\r\nA clear and concise description of what you expected to happen.\r\n\r\n**Screenshots**\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n**Logs**\r\nSeem fine\r\nserver:\r\n2023-12-22 11:30:41 {\"action\": \"model_updated\", \"auth_via\": \"session\", \"client_ip\": \"172.18.0.1\", \"context\": {\"http_request\": {\"args\": {}, \"method\": \"PATCH\", \"path\": \"/api/v3/sources/oauth/azure-ad/\", \"user_agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:120.0) Gecko/20100101 Firefox/120.0\"}, \"model\": {\"app\": \"authentik_sources_oauth\", \"model_name\": \"oauthsource\", \"name\": \"azure ad\", \"pk\": \"bda8c9beeac54e5495c8270838e7a797\"}}, \"event\": \"Created Event\", \"host\": \"localhost:8044\", \"level\": \"info\", \"logger\": \"authentik.events.models\", \"pid\": 40, \"request_id\": \"ff436debac13400e94274999b5f5d06d\", \"timestamp\": \"2023-12-22T10:30:41.555035\", \"user\": {\"email\": \"[email protected]\", \"pk\": 3, \"username\": \"Admin\"}}\r\n2023-12-22 11:30:41 {\"auth_via\": \"session\", \"event\": \"Task published\", \"host\": \"localhost:8044\", \"level\": \"info\", \"logger\": \"authentik.root.celery\", \"pid\": 40, \"request_id\": \"ff436debac13400e94274999b5f5d06d\", \"task_id\": \"4ebe0d05335f4700b8c0871d6f10235c\", \"task_name\": \"authentik.events.tasks.event_notification_handler\", \"timestamp\": \"2023-12-22T10:30:41.568890\"}\r\n2023-12-22 11:30:41 {\"auth_via\": \"session\", \"event\": \"/api/v3/sources/oauth/azure-ad/\", \"host\": \"localhost:8044\", \"level\": \"info\", \"logger\": \"authentik.asgi\", \"method\": \"PATCH\", \"pid\": 40, \"remote\": \"172.18.0.1\", \"request_id\": 
\"ff436debac13400e94274999b5f5d06d\", \"runtime\": 396, \"scheme\": \"http\", \"status\": 200, \"timestamp\": \"2023-12-22T10:30:41.572086\", \"user\": \"Admin\", \"user_agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:120.0) Gecko/20100101 Firefox/120.0\"}\r\n\r\n**Version and Deployment (please complete the following information):**\r\n\r\n- authentik version: 2021.10.5\r\n- Deployment: docker-compose, helm\r\n\r\n**Additional context**\r\n\r\n\n", "before_files": [{"content": "\"\"\"OAuth Source Serializer\"\"\"\nfrom django.urls.base import reverse_lazy\nfrom django_filters.filters import BooleanFilter\nfrom django_filters.filterset import FilterSet\nfrom drf_spectacular.types import OpenApiTypes\nfrom drf_spectacular.utils import OpenApiParameter, extend_schema, extend_schema_field\nfrom requests import RequestException\nfrom rest_framework.decorators import action\nfrom rest_framework.fields import BooleanField, CharField, ChoiceField, SerializerMethodField\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\nfrom rest_framework.serializers import ValidationError\nfrom rest_framework.viewsets import ModelViewSet\n\nfrom authentik.core.api.sources import SourceSerializer\nfrom authentik.core.api.used_by import UsedByMixin\nfrom authentik.core.api.utils import PassiveSerializer\nfrom authentik.lib.utils.http import get_http_session\nfrom authentik.sources.oauth.models import OAuthSource\nfrom authentik.sources.oauth.types.registry import SourceType, registry\n\n\nclass SourceTypeSerializer(PassiveSerializer):\n \"\"\"Serializer for SourceType\"\"\"\n\n name = CharField(required=True)\n verbose_name = CharField(required=True)\n urls_customizable = BooleanField()\n request_token_url = CharField(read_only=True, allow_null=True)\n authorization_url = CharField(read_only=True, allow_null=True)\n access_token_url = CharField(read_only=True, allow_null=True)\n profile_url = CharField(read_only=True, allow_null=True)\n oidc_well_known_url = CharField(read_only=True, allow_null=True)\n oidc_jwks_url = CharField(read_only=True, allow_null=True)\n\n\nclass OAuthSourceSerializer(SourceSerializer):\n \"\"\"OAuth Source Serializer\"\"\"\n\n provider_type = ChoiceField(choices=registry.get_name_tuple())\n callback_url = SerializerMethodField()\n type = SerializerMethodField()\n\n def get_callback_url(self, instance: OAuthSource) -> str:\n \"\"\"Get OAuth Callback URL\"\"\"\n relative_url = reverse_lazy(\n \"authentik_sources_oauth:oauth-client-callback\",\n kwargs={\"source_slug\": instance.slug},\n )\n if \"request\" not in self.context:\n return relative_url\n return self.context[\"request\"].build_absolute_uri(relative_url)\n\n @extend_schema_field(SourceTypeSerializer)\n def get_type(self, instance: OAuthSource) -> SourceTypeSerializer:\n \"\"\"Get source's type configuration\"\"\"\n return SourceTypeSerializer(instance.source_type).data\n\n def validate(self, attrs: dict) -> dict:\n session = get_http_session()\n source_type = registry.find_type(attrs[\"provider_type\"])\n\n well_known = attrs.get(\"oidc_well_known_url\") or source_type.oidc_well_known_url\n inferred_oidc_jwks_url = None\n\n if well_known and well_known != \"\":\n try:\n well_known_config = session.get(well_known)\n well_known_config.raise_for_status()\n except RequestException as exc:\n text = exc.response.text if exc.response else str(exc)\n raise ValidationError({\"oidc_well_known_url\": text})\n config = well_known_config.json()\n if \"issuer\" not in config:\n raise 
ValidationError({\"oidc_well_known_url\": \"Invalid well-known configuration\"})\n attrs[\"authorization_url\"] = config.get(\"authorization_endpoint\", \"\")\n attrs[\"access_token_url\"] = config.get(\"token_endpoint\", \"\")\n attrs[\"profile_url\"] = config.get(\"userinfo_endpoint\", \"\")\n inferred_oidc_jwks_url = config.get(\"jwks_uri\", \"\")\n\n # Prefer user-entered URL to inferred URL to default URL\n jwks_url = attrs.get(\"oidc_jwks_url\") or inferred_oidc_jwks_url or source_type.oidc_jwks_url\n if jwks_url and jwks_url != \"\":\n attrs[\"oidc_jwks_url\"] = jwks_url\n try:\n jwks_config = session.get(jwks_url)\n jwks_config.raise_for_status()\n except RequestException as exc:\n text = exc.response.text if exc.response else str(exc)\n raise ValidationError({\"oidc_jwks_url\": text})\n config = jwks_config.json()\n attrs[\"oidc_jwks\"] = config\n\n provider_type = registry.find_type(attrs.get(\"provider_type\", \"\"))\n for url in [\n \"authorization_url\",\n \"access_token_url\",\n \"profile_url\",\n ]:\n if getattr(provider_type, url, None) is None:\n if url not in attrs:\n raise ValidationError(\n f\"{url} is required for provider {provider_type.verbose_name}\"\n )\n return attrs\n\n class Meta:\n model = OAuthSource\n fields = SourceSerializer.Meta.fields + [\n \"provider_type\",\n \"request_token_url\",\n \"authorization_url\",\n \"access_token_url\",\n \"profile_url\",\n \"consumer_key\",\n \"consumer_secret\",\n \"callback_url\",\n \"additional_scopes\",\n \"type\",\n \"oidc_well_known_url\",\n \"oidc_jwks_url\",\n \"oidc_jwks\",\n ]\n extra_kwargs = {\"consumer_secret\": {\"write_only\": True}}\n\n\nclass OAuthSourceFilter(FilterSet):\n \"\"\"OAuth Source filter set\"\"\"\n\n has_jwks = BooleanFilter(label=\"Only return sources with JWKS data\", method=\"filter_has_jwks\")\n\n def filter_has_jwks(self, queryset, name, value): # pragma: no cover\n \"\"\"Only return sources with JWKS data\"\"\"\n return queryset.exclude(oidc_jwks__iexact=\"{}\")\n\n class Meta:\n model = OAuthSource\n fields = [\n \"name\",\n \"slug\",\n \"enabled\",\n \"authentication_flow\",\n \"enrollment_flow\",\n \"policy_engine_mode\",\n \"user_matching_mode\",\n \"provider_type\",\n \"request_token_url\",\n \"authorization_url\",\n \"access_token_url\",\n \"profile_url\",\n \"consumer_key\",\n \"additional_scopes\",\n ]\n\n\nclass OAuthSourceViewSet(UsedByMixin, ModelViewSet):\n \"\"\"Source Viewset\"\"\"\n\n queryset = OAuthSource.objects.all()\n serializer_class = OAuthSourceSerializer\n lookup_field = \"slug\"\n filterset_class = OAuthSourceFilter\n search_fields = [\"name\", \"slug\"]\n ordering = [\"name\"]\n\n @extend_schema(\n responses={200: SourceTypeSerializer(many=True)},\n parameters=[\n OpenApiParameter(\n name=\"name\",\n location=OpenApiParameter.QUERY,\n type=OpenApiTypes.STR,\n )\n ],\n )\n @action(detail=False, pagination_class=None, filter_backends=[])\n def source_types(self, request: Request) -> Response:\n \"\"\"Get all creatable source types. 
If ?name is set, only returns the type for <name>.\n If <name> isn't found, returns the default type.\"\"\"\n data = []\n if \"name\" in request.query_params:\n source_type = registry.find_type(request.query_params.get(\"name\"))\n if source_type.__class__ != SourceType:\n data.append(SourceTypeSerializer(source_type).data)\n else:\n for source_type in registry.get():\n data.append(SourceTypeSerializer(source_type).data)\n return Response(data)\n", "path": "authentik/sources/oauth/api/source.py"}], "after_files": [{"content": "\"\"\"OAuth Source Serializer\"\"\"\nfrom django.urls.base import reverse_lazy\nfrom django_filters.filters import BooleanFilter\nfrom django_filters.filterset import FilterSet\nfrom drf_spectacular.types import OpenApiTypes\nfrom drf_spectacular.utils import OpenApiParameter, extend_schema, extend_schema_field\nfrom requests import RequestException\nfrom rest_framework.decorators import action\nfrom rest_framework.fields import BooleanField, CharField, ChoiceField, SerializerMethodField\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\nfrom rest_framework.serializers import ValidationError\nfrom rest_framework.viewsets import ModelViewSet\n\nfrom authentik.core.api.sources import SourceSerializer\nfrom authentik.core.api.used_by import UsedByMixin\nfrom authentik.core.api.utils import PassiveSerializer\nfrom authentik.lib.utils.http import get_http_session\nfrom authentik.sources.oauth.models import OAuthSource\nfrom authentik.sources.oauth.types.registry import SourceType, registry\n\n\nclass SourceTypeSerializer(PassiveSerializer):\n \"\"\"Serializer for SourceType\"\"\"\n\n name = CharField(required=True)\n verbose_name = CharField(required=True)\n urls_customizable = BooleanField()\n request_token_url = CharField(read_only=True, allow_null=True)\n authorization_url = CharField(read_only=True, allow_null=True)\n access_token_url = CharField(read_only=True, allow_null=True)\n profile_url = CharField(read_only=True, allow_null=True)\n oidc_well_known_url = CharField(read_only=True, allow_null=True)\n oidc_jwks_url = CharField(read_only=True, allow_null=True)\n\n\nclass OAuthSourceSerializer(SourceSerializer):\n \"\"\"OAuth Source Serializer\"\"\"\n\n provider_type = ChoiceField(choices=registry.get_name_tuple())\n callback_url = SerializerMethodField()\n type = SerializerMethodField()\n\n def get_callback_url(self, instance: OAuthSource) -> str:\n \"\"\"Get OAuth Callback URL\"\"\"\n relative_url = reverse_lazy(\n \"authentik_sources_oauth:oauth-client-callback\",\n kwargs={\"source_slug\": instance.slug},\n )\n if \"request\" not in self.context:\n return relative_url\n return self.context[\"request\"].build_absolute_uri(relative_url)\n\n @extend_schema_field(SourceTypeSerializer)\n def get_type(self, instance: OAuthSource) -> SourceTypeSerializer:\n \"\"\"Get source's type configuration\"\"\"\n return SourceTypeSerializer(instance.source_type).data\n\n # pylint: disable=too-many-locals\n def validate(self, attrs: dict) -> dict:\n session = get_http_session()\n source_type = registry.find_type(attrs[\"provider_type\"])\n\n well_known = attrs.get(\"oidc_well_known_url\") or source_type.oidc_well_known_url\n inferred_oidc_jwks_url = None\n\n if well_known and well_known != \"\":\n try:\n well_known_config = session.get(well_known)\n well_known_config.raise_for_status()\n except RequestException as exc:\n text = exc.response.text if exc.response else str(exc)\n raise ValidationError({\"oidc_well_known_url\": text})\n config = 
well_known_config.json()\n if \"issuer\" not in config:\n raise ValidationError({\"oidc_well_known_url\": \"Invalid well-known configuration\"})\n field_map = {\n # authentik field to oidc field\n \"authorization_url\": \"authorization_endpoint\",\n \"access_token_url\": \"token_endpoint\",\n \"profile_url\": \"userinfo_endpoint\",\n }\n for ak_key, oidc_key in field_map.items():\n # Don't overwrite user-set values\n if ak_key in attrs and attrs[ak_key]:\n continue\n attrs[ak_key] = config.get(oidc_key, \"\")\n inferred_oidc_jwks_url = config.get(\"jwks_uri\", \"\")\n\n # Prefer user-entered URL to inferred URL to default URL\n jwks_url = attrs.get(\"oidc_jwks_url\") or inferred_oidc_jwks_url or source_type.oidc_jwks_url\n if jwks_url and jwks_url != \"\":\n attrs[\"oidc_jwks_url\"] = jwks_url\n try:\n jwks_config = session.get(jwks_url)\n jwks_config.raise_for_status()\n except RequestException as exc:\n text = exc.response.text if exc.response else str(exc)\n raise ValidationError({\"oidc_jwks_url\": text})\n config = jwks_config.json()\n attrs[\"oidc_jwks\"] = config\n\n provider_type = registry.find_type(attrs.get(\"provider_type\", \"\"))\n for url in [\n \"authorization_url\",\n \"access_token_url\",\n \"profile_url\",\n ]:\n if getattr(provider_type, url, None) is None:\n if url not in attrs:\n raise ValidationError(\n f\"{url} is required for provider {provider_type.verbose_name}\"\n )\n return attrs\n\n class Meta:\n model = OAuthSource\n fields = SourceSerializer.Meta.fields + [\n \"provider_type\",\n \"request_token_url\",\n \"authorization_url\",\n \"access_token_url\",\n \"profile_url\",\n \"consumer_key\",\n \"consumer_secret\",\n \"callback_url\",\n \"additional_scopes\",\n \"type\",\n \"oidc_well_known_url\",\n \"oidc_jwks_url\",\n \"oidc_jwks\",\n ]\n extra_kwargs = {\"consumer_secret\": {\"write_only\": True}}\n\n\nclass OAuthSourceFilter(FilterSet):\n \"\"\"OAuth Source filter set\"\"\"\n\n has_jwks = BooleanFilter(label=\"Only return sources with JWKS data\", method=\"filter_has_jwks\")\n\n def filter_has_jwks(self, queryset, name, value): # pragma: no cover\n \"\"\"Only return sources with JWKS data\"\"\"\n return queryset.exclude(oidc_jwks__iexact=\"{}\")\n\n class Meta:\n model = OAuthSource\n fields = [\n \"name\",\n \"slug\",\n \"enabled\",\n \"authentication_flow\",\n \"enrollment_flow\",\n \"policy_engine_mode\",\n \"user_matching_mode\",\n \"provider_type\",\n \"request_token_url\",\n \"authorization_url\",\n \"access_token_url\",\n \"profile_url\",\n \"consumer_key\",\n \"additional_scopes\",\n ]\n\n\nclass OAuthSourceViewSet(UsedByMixin, ModelViewSet):\n \"\"\"Source Viewset\"\"\"\n\n queryset = OAuthSource.objects.all()\n serializer_class = OAuthSourceSerializer\n lookup_field = \"slug\"\n filterset_class = OAuthSourceFilter\n search_fields = [\"name\", \"slug\"]\n ordering = [\"name\"]\n\n @extend_schema(\n responses={200: SourceTypeSerializer(many=True)},\n parameters=[\n OpenApiParameter(\n name=\"name\",\n location=OpenApiParameter.QUERY,\n type=OpenApiTypes.STR,\n )\n ],\n )\n @action(detail=False, pagination_class=None, filter_backends=[])\n def source_types(self, request: Request) -> Response:\n \"\"\"Get all creatable source types. 
If ?name is set, only returns the type for <name>.\n If <name> isn't found, returns the default type.\"\"\"\n data = []\n if \"name\" in request.query_params:\n source_type = registry.find_type(request.query_params.get(\"name\"))\n if source_type.__class__ != SourceType:\n data.append(SourceTypeSerializer(source_type).data)\n else:\n for source_type in registry.get():\n data.append(SourceTypeSerializer(source_type).data)\n return Response(data)\n", "path": "authentik/sources/oauth/api/source.py"}]}
3,215
353
gh_patches_debug_26300
rasdani/github-patches
git_diff
City-of-Helsinki__linkedevents-155
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Use Helsinki servicemap in adding Place in LE admin --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `linkedevents/settings.py` Content: ``` 1 """ 2 Django base settings for linkedevents project. 3 4 For more information on this file, see 5 https://docs.djangoproject.com/en/1.6/topics/settings/ 6 7 For the full list of settings and their values, see 8 https://docs.djangoproject.com/en/1.6/ref/settings/ 9 """ 10 11 # Build paths inside the project like this: os.path.join(BASE_DIR, ...) 12 import os 13 BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) 14 15 # Quick-start development settings - unsuitable for production 16 # See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/ 17 18 DEBUG = False 19 20 TEMPLATE_DEBUG = False 21 22 ALLOWED_HOSTS = [] 23 24 SITE_ID = 1 25 26 # Application definition 27 28 INSTALLED_APPS = ( 29 'helusers', 30 'django.contrib.sites', 31 'modeltranslation', 32 'django.contrib.admin', 33 'django.contrib.auth', 34 'django.contrib.contenttypes', 35 'django.contrib.sessions', 36 'django.contrib.messages', 37 'django.contrib.staticfiles', 38 'django.contrib.gis', 39 'django.contrib.postgres', 40 'django_extensions', 41 'events', 42 'corsheaders', 43 'rest_framework', 44 'rest_framework_jwt', 45 'mptt', 46 'reversion', 47 'haystack', 48 'raven.contrib.django.raven_compat', 49 'django_cleanup', 50 51 'allauth', 52 'allauth.account', 53 'allauth.socialaccount', 54 'helusers.providers.helsinki', 55 56 'helevents', 57 'munigeo', 58 ) 59 60 MIDDLEWARE_CLASSES = ( 61 'django.contrib.sessions.middleware.SessionMiddleware', 62 'django.middleware.common.CommonMiddleware', 63 'django.middleware.csrf.CsrfViewMiddleware', 64 'django.contrib.auth.middleware.AuthenticationMiddleware', 65 'corsheaders.middleware.CorsMiddleware', 66 'reversion.middleware.RevisionMiddleware', 67 'django.contrib.messages.middleware.MessageMiddleware', 68 'django.middleware.clickjacking.XFrameOptionsMiddleware', 69 ) 70 71 ROOT_URLCONF = 'linkedevents.urls' 72 73 WSGI_APPLICATION = 'linkedevents.wsgi.application' 74 75 # Database 76 # https://docs.djangoproject.com/en/1.6/ref/settings/#databases 77 78 DATABASES = { 79 'default': { 80 'ENGINE': 'django.contrib.gis.db.backends.postgis', 81 'NAME': 'linkedevents', 82 } 83 } 84 85 # Internationalization 86 # https://docs.djangoproject.com/en/1.6/topics/i18n/ 87 88 LANGUAGES = ( 89 ('fi', 'Finnish'), 90 ('sv', 'Swedish'), 91 ('en', 'English'), 92 ) 93 94 LANGUAGE_CODE = 'fi' 95 96 TIME_ZONE = 'Europe/Helsinki' 97 98 SYSTEM_DATA_SOURCE_ID = 'system' 99 100 101 USE_I18N = True 102 USE_L10N = True 103 USE_TZ = True 104 105 LOCALE_PATHS = ( 106 os.path.join(BASE_DIR, 'locale'), 107 ) 108 109 IMPORT_FILE_PATH = os.path.join(BASE_DIR, 'data') 110 111 # Static files (CSS, JavaScript, Images) 112 # https://docs.djangoproject.com/en/1.6/howto/static-files/ 113 114 STATIC_URL = '/static/' 115 116 MEDIA_ROOT = os.path.join(BASE_DIR, 'media') 117 MEDIA_URL = '/media/' 118 119 # 120 # Authentication 121 # 122 AUTH_USER_MODEL = 'helevents.User' 123 AUTHENTICATION_BACKENDS = ( 124 'django.contrib.auth.backends.ModelBackend', 125 'allauth.account.auth_backends.AuthenticationBackend', 126 ) 127 SOCIALACCOUNT_PROVIDERS = { 128 'helsinki': { 129 'VERIFIED_EMAIL': True 130 } 131 } 132 LOGIN_REDIRECT_URL = '/' 133 ACCOUNT_LOGOUT_ON_GET 
= True 134 SOCIALACCOUNT_ADAPTER = 'helusers.providers.helsinki.provider.SocialAccountAdapter' 135 136 # 137 # REST Framework 138 # 139 REST_FRAMEWORK = { 140 'PAGE_SIZE': 20, 141 'ORDERING_PARAM': 'sort', 142 'DEFAULT_RENDERER_CLASSES': ( 143 'events.renderers.JSONRenderer', 144 'events.renderers.JSONLDRenderer', 145 'rest_framework.renderers.BrowsableAPIRenderer', 146 ), 147 'DEFAULT_PARSER_CLASSES': ( 148 'events.parsers.CamelCaseJSONParser', 149 'events.parsers.JSONLDParser', 150 'rest_framework.parsers.FormParser', 151 'rest_framework.parsers.MultiPartParser', 152 ), 153 'DEFAULT_FILTER_BACKENDS': ( 154 'rest_framework.filters.DjangoFilterBackend', 155 ), 156 'DEFAULT_PAGINATION_CLASS': 'events.api_pagination.CustomPagination', 157 'DEFAULT_PERMISSION_CLASSES': ( 158 'rest_framework.permissions.IsAuthenticatedOrReadOnly', 159 ), 160 'DEFAULT_AUTHENTICATION_CLASSES': ( 161 'helusers.jwt.JWTAuthentication', 162 ), 163 'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.URLPathVersioning', 164 'VIEW_NAME_FUNCTION': 'events.api.get_view_name', 165 } 166 JWT_AUTH = { 167 'JWT_PAYLOAD_GET_USER_ID_HANDLER': 'helusers.jwt.get_user_id_from_payload_handler', 168 # JWT_AUDIENCE and JWT_SECRET_KEY must be set in local_settings.py 169 } 170 171 172 CORS_ORIGIN_ALLOW_ALL = True 173 CSRF_COOKIE_NAME = 'linkedevents-csrftoken' 174 SESSION_COOKIE_NAME = 'linkedevents-sessionid' 175 176 TEMPLATES = [ 177 { 178 'BACKEND': 'django.template.backends.django.DjangoTemplates', 179 'DIRS': [ 180 os.path.join(BASE_DIR, 'templates'), 181 ], 182 'APP_DIRS': True, 183 'OPTIONS': { 184 'context_processors': [ 185 'django.contrib.auth.context_processors.auth', 186 'django.template.context_processors.debug', 187 'django.template.context_processors.i18n', 188 'django.template.context_processors.media', 189 'django.template.context_processors.static', 190 'django.template.context_processors.tz', 191 'django.contrib.messages.context_processors.messages', 192 ], 193 }, 194 }, 195 ] 196 197 198 POSTGIS_VERSION = (2, 1, 1) 199 200 # Use ETRS-TM35FIN projection by default 201 PROJECTION_SRID=3067 202 # Bounding box of Finland and then some 203 204 BOUNDING_BOX = [-548576, 6291456, 1548576, 8388608] 205 206 CITYSDK_API_SETTINGS = { 207 'CITYSDK_URL': "http://api.tourism.helsinki.citysdk.eu/CitySDK/", 208 'USERNAME': 'admin', 209 'PASSWORD': 'defaultCitySDKPassword', 210 'SRS_URL': 'http://www.opengis.net/def/crs/EPSG/0/%d' % PROJECTION_SRID, 211 'DEFAULT_POI_CATEGORY': '53562f3238653c0a842a3bf7' 212 } 213 214 HAYSTACK_CONNECTIONS = { 215 'default': { 216 'ENGINE': 'haystack.backends.simple_backend.SimpleEngine' 217 }, 218 'default-fi': { 219 'ENGINE': 'haystack.backends.simple_backend.SimpleEngine' 220 }, 221 'default-en': { 222 'ENGINE': 'haystack.backends.simple_backend.SimpleEngine' 223 }, 224 'default-sv': { 225 'ENGINE': 'haystack.backends.simple_backend.SimpleEngine' 226 } 227 } 228 229 import bleach 230 BLEACH_ALLOWED_TAGS = bleach.ALLOWED_TAGS + ["p", "div"] 231 232 from easy_thumbnails.conf import Settings as thumbnail_settings 233 THUMBNAIL_PROCESSORS = ( 234 'image_cropping.thumbnail_processors.crop_corners', 235 ) + thumbnail_settings.THUMBNAIL_PROCESSORS 236 237 # local_settings.py can be used to override environment-specific settings 238 # like database and email that differ between development and production. 
239 f = os.path.join(BASE_DIR, "local_settings.py") 240 if os.path.exists(f): 241 import sys 242 import imp 243 module_name = "%s.local_settings" % ROOT_URLCONF.split('.')[0] 244 module = imp.new_module(module_name) 245 module.__file__ = f 246 sys.modules[module_name] = module 247 exec(open(f, "rb").read()) 248 249 if 'SECRET_KEY' not in locals(): 250 secret_file = os.path.join(BASE_DIR, '.django_secret') 251 try: 252 SECRET_KEY = open(secret_file).read().strip() 253 except IOError: 254 import random 255 system_random = random.SystemRandom() 256 try: 257 SECRET_KEY = ''.join([system_random.choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in range(64)]) 258 secret = open(secret_file, 'w') 259 import os 260 os.chmod(secret_file, 0o0600) 261 secret.write(SECRET_KEY) 262 secret.close() 263 except IOError: 264 Exception('Please create a %s file with random characters to generate your secret key!' % secret_file) 265 ``` Path: `events/admin.py` Content: ``` 1 from django.conf import settings 2 from django.contrib import admin 3 from django.contrib.admin.widgets import FilteredSelectMultiple 4 from django.contrib.gis import admin as geoadmin 5 from django.contrib.gis.db import models 6 from django.utils.translation import ugettext as _ 7 from modeltranslation.admin import TranslationAdmin 8 from reversion.admin import VersionAdmin 9 from events.api import generate_id 10 from events.models import Event, Keyword, Place, Language, \ 11 OpeningHoursSpecification, KeywordLabel, Organization, License 12 13 14 class BaseAdmin(admin.ModelAdmin): 15 exclude = ("created_by", "modified_by",) 16 17 def save_model(self, request, obj, form, change): 18 if obj.pk is None: 19 obj.created_by = request.user 20 else: 21 obj.modified_by = request.user 22 obj.save() 23 24 25 class EventModelAdmin(BaseAdmin, TranslationAdmin, VersionAdmin): 26 pass 27 28 29 class KeywordAdmin(BaseAdmin, TranslationAdmin, VersionAdmin): 30 pass 31 32 33 class PlaceAdmin(geoadmin.GeoModelAdmin, BaseAdmin, TranslationAdmin, 34 VersionAdmin): 35 fieldsets = ( 36 (None, { 37 'fields': ('publisher', 'name', 'description', 'info_url', 'position', 'parent') 38 39 }), 40 (_('Contact info'), { 41 'fields': ('email', 'telephone', 'contact_type', 'street_address', 'address_locality', 'address_region', 42 'postal_code', 'post_office_box_num') 43 }), 44 ) 45 46 def __init__(self, model, admin_site): 47 super().__init__(model, admin_site) 48 # use https CDN instead 49 self.openlayers_url = 'https://cdnjs.cloudflare.com/ajax/libs/openlayers/2.13.1/OpenLayers.js' 50 51 def save_model(self, request, obj, form, change): 52 system_id = settings.SYSTEM_DATA_SOURCE_ID 53 obj.data_source_id = system_id 54 if not obj.id: 55 obj.id = generate_id(system_id) 56 obj.origin_id = obj.id.split(':')[1] 57 58 super().save_model(request, obj, form, change) 59 60 admin.site.register(Place, PlaceAdmin) 61 62 63 class OrganizationAdmin(BaseAdmin): 64 list_display = ('name', 'nr_org_admins') 65 66 formfield_overrides = { 67 models.ManyToManyField: {'widget': FilteredSelectMultiple("ylläpitäjät", is_stacked=False)}, 68 } 69 fields = ('admin_users',) 70 71 def nr_org_admins(self, obj): 72 return obj.admin_users.count() 73 nr_org_admins.short_description = _('Admins') 74 75 admin.site.register(Organization, OrganizationAdmin) 76 77 78 class LanguageAdmin(BaseAdmin, VersionAdmin): 79 pass 80 81 82 class PersonAdmin(BaseAdmin, VersionAdmin): 83 pass 84 85 86 class LicenseAdmin(BaseAdmin, TranslationAdmin, VersionAdmin): 87 def get_readonly_fields(self, 
request, obj=None): 88 if obj: 89 return ['id'] 90 else: 91 return [] 92 93 admin.site.register(License, LicenseAdmin) 94 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/events/admin.py b/events/admin.py --- a/events/admin.py +++ b/events/admin.py @@ -1,9 +1,9 @@ from django.conf import settings from django.contrib import admin from django.contrib.admin.widgets import FilteredSelectMultiple -from django.contrib.gis import admin as geoadmin from django.contrib.gis.db import models from django.utils.translation import ugettext as _ +from leaflet.admin import LeafletGeoAdmin from modeltranslation.admin import TranslationAdmin from reversion.admin import VersionAdmin from events.api import generate_id @@ -30,8 +30,16 @@ pass -class PlaceAdmin(geoadmin.GeoModelAdmin, BaseAdmin, TranslationAdmin, - VersionAdmin): +class HelsinkiGeoAdmin(LeafletGeoAdmin): + settings_overrides = { + 'DEFAULT_CENTER': (60.171944, 24.941389), + 'DEFAULT_ZOOM': 11, + 'MIN_ZOOM': 3, + 'MAX_ZOOM': 19, + } + + +class PlaceAdmin(HelsinkiGeoAdmin, BaseAdmin, TranslationAdmin, VersionAdmin): fieldsets = ( (None, { 'fields': ('publisher', 'name', 'description', 'info_url', 'position', 'parent') diff --git a/linkedevents/settings.py b/linkedevents/settings.py --- a/linkedevents/settings.py +++ b/linkedevents/settings.py @@ -55,6 +55,7 @@ 'helevents', 'munigeo', + 'leaflet', ) MIDDLEWARE_CLASSES = (
{"golden_diff": "diff --git a/events/admin.py b/events/admin.py\n--- a/events/admin.py\n+++ b/events/admin.py\n@@ -1,9 +1,9 @@\n from django.conf import settings\n from django.contrib import admin\n from django.contrib.admin.widgets import FilteredSelectMultiple\n-from django.contrib.gis import admin as geoadmin\n from django.contrib.gis.db import models\n from django.utils.translation import ugettext as _\n+from leaflet.admin import LeafletGeoAdmin\n from modeltranslation.admin import TranslationAdmin\n from reversion.admin import VersionAdmin\n from events.api import generate_id\n@@ -30,8 +30,16 @@\n pass\n \n \n-class PlaceAdmin(geoadmin.GeoModelAdmin, BaseAdmin, TranslationAdmin,\n- VersionAdmin):\n+class HelsinkiGeoAdmin(LeafletGeoAdmin):\n+ settings_overrides = {\n+ 'DEFAULT_CENTER': (60.171944, 24.941389),\n+ 'DEFAULT_ZOOM': 11,\n+ 'MIN_ZOOM': 3,\n+ 'MAX_ZOOM': 19,\n+ }\n+\n+\n+class PlaceAdmin(HelsinkiGeoAdmin, BaseAdmin, TranslationAdmin, VersionAdmin):\n fieldsets = (\n (None, {\n 'fields': ('publisher', 'name', 'description', 'info_url', 'position', 'parent')\ndiff --git a/linkedevents/settings.py b/linkedevents/settings.py\n--- a/linkedevents/settings.py\n+++ b/linkedevents/settings.py\n@@ -55,6 +55,7 @@\n \n 'helevents',\n 'munigeo',\n+ 'leaflet',\n )\n \n MIDDLEWARE_CLASSES = (\n", "issue": "Use Helsinki servicemap in adding Place in LE admin\n\n", "before_files": [{"content": "\"\"\"\nDjango base settings for linkedevents project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.6/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.6/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/\n\nDEBUG = False\n\nTEMPLATE_DEBUG = False\n\nALLOWED_HOSTS = []\n\nSITE_ID = 1\n\n# Application definition\n\nINSTALLED_APPS = (\n 'helusers',\n 'django.contrib.sites',\n 'modeltranslation',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.gis',\n 'django.contrib.postgres',\n 'django_extensions',\n 'events',\n 'corsheaders',\n 'rest_framework',\n 'rest_framework_jwt',\n 'mptt',\n 'reversion',\n 'haystack',\n 'raven.contrib.django.raven_compat',\n 'django_cleanup',\n\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n 'helusers.providers.helsinki',\n\n 'helevents',\n 'munigeo',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n 'reversion.middleware.RevisionMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'linkedevents.urls'\n\nWSGI_APPLICATION = 'linkedevents.wsgi.application'\n\n# Database\n# https://docs.djangoproject.com/en/1.6/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.contrib.gis.db.backends.postgis',\n 'NAME': 'linkedevents',\n }\n}\n\n# Internationalization\n# 
https://docs.djangoproject.com/en/1.6/topics/i18n/\n\nLANGUAGES = (\n ('fi', 'Finnish'),\n ('sv', 'Swedish'),\n ('en', 'English'),\n)\n\nLANGUAGE_CODE = 'fi'\n\nTIME_ZONE = 'Europe/Helsinki'\n\nSYSTEM_DATA_SOURCE_ID = 'system'\n\n\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nLOCALE_PATHS = (\n os.path.join(BASE_DIR, 'locale'),\n)\n\nIMPORT_FILE_PATH = os.path.join(BASE_DIR, 'data')\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.6/howto/static-files/\n\nSTATIC_URL = '/static/'\n\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nMEDIA_URL = '/media/'\n\n#\n# Authentication\n#\nAUTH_USER_MODEL = 'helevents.User'\nAUTHENTICATION_BACKENDS = (\n 'django.contrib.auth.backends.ModelBackend',\n 'allauth.account.auth_backends.AuthenticationBackend',\n)\nSOCIALACCOUNT_PROVIDERS = {\n 'helsinki': {\n 'VERIFIED_EMAIL': True\n }\n}\nLOGIN_REDIRECT_URL = '/'\nACCOUNT_LOGOUT_ON_GET = True\nSOCIALACCOUNT_ADAPTER = 'helusers.providers.helsinki.provider.SocialAccountAdapter'\n\n#\n# REST Framework\n#\nREST_FRAMEWORK = {\n 'PAGE_SIZE': 20,\n 'ORDERING_PARAM': 'sort',\n 'DEFAULT_RENDERER_CLASSES': (\n 'events.renderers.JSONRenderer',\n 'events.renderers.JSONLDRenderer',\n 'rest_framework.renderers.BrowsableAPIRenderer',\n ),\n 'DEFAULT_PARSER_CLASSES': (\n 'events.parsers.CamelCaseJSONParser',\n 'events.parsers.JSONLDParser',\n 'rest_framework.parsers.FormParser',\n 'rest_framework.parsers.MultiPartParser',\n ),\n 'DEFAULT_FILTER_BACKENDS': (\n 'rest_framework.filters.DjangoFilterBackend',\n ),\n 'DEFAULT_PAGINATION_CLASS': 'events.api_pagination.CustomPagination',\n 'DEFAULT_PERMISSION_CLASSES': (\n 'rest_framework.permissions.IsAuthenticatedOrReadOnly',\n ),\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'helusers.jwt.JWTAuthentication',\n ),\n 'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.URLPathVersioning',\n 'VIEW_NAME_FUNCTION': 'events.api.get_view_name',\n}\nJWT_AUTH = {\n 'JWT_PAYLOAD_GET_USER_ID_HANDLER': 'helusers.jwt.get_user_id_from_payload_handler',\n # JWT_AUDIENCE and JWT_SECRET_KEY must be set in local_settings.py\n}\n\n\nCORS_ORIGIN_ALLOW_ALL = True\nCSRF_COOKIE_NAME = 'linkedevents-csrftoken'\nSESSION_COOKIE_NAME = 'linkedevents-sessionid'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(BASE_DIR, 'templates'),\n ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.contrib.auth.context_processors.auth',\n 'django.template.context_processors.debug',\n 'django.template.context_processors.i18n',\n 'django.template.context_processors.media',\n 'django.template.context_processors.static',\n 'django.template.context_processors.tz',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\n\nPOSTGIS_VERSION = (2, 1, 1)\n\n# Use ETRS-TM35FIN projection by default\nPROJECTION_SRID=3067\n# Bounding box of Finland and then some\n\nBOUNDING_BOX = [-548576, 6291456, 1548576, 8388608]\n\nCITYSDK_API_SETTINGS = {\n 'CITYSDK_URL': \"http://api.tourism.helsinki.citysdk.eu/CitySDK/\",\n 'USERNAME': 'admin',\n 'PASSWORD': 'defaultCitySDKPassword',\n 'SRS_URL': 'http://www.opengis.net/def/crs/EPSG/0/%d' % PROJECTION_SRID,\n 'DEFAULT_POI_CATEGORY': '53562f3238653c0a842a3bf7'\n}\n\nHAYSTACK_CONNECTIONS = {\n 'default': {\n 'ENGINE': 'haystack.backends.simple_backend.SimpleEngine'\n },\n 'default-fi': {\n 'ENGINE': 'haystack.backends.simple_backend.SimpleEngine'\n },\n 'default-en': {\n 'ENGINE': 'haystack.backends.simple_backend.SimpleEngine'\n },\n 'default-sv': {\n 
'ENGINE': 'haystack.backends.simple_backend.SimpleEngine'\n }\n}\n\nimport bleach\nBLEACH_ALLOWED_TAGS = bleach.ALLOWED_TAGS + [\"p\", \"div\"]\n\nfrom easy_thumbnails.conf import Settings as thumbnail_settings\nTHUMBNAIL_PROCESSORS = (\n 'image_cropping.thumbnail_processors.crop_corners',\n) + thumbnail_settings.THUMBNAIL_PROCESSORS\n\n# local_settings.py can be used to override environment-specific settings\n# like database and email that differ between development and production.\nf = os.path.join(BASE_DIR, \"local_settings.py\")\nif os.path.exists(f):\n import sys\n import imp\n module_name = \"%s.local_settings\" % ROOT_URLCONF.split('.')[0]\n module = imp.new_module(module_name)\n module.__file__ = f\n sys.modules[module_name] = module\n exec(open(f, \"rb\").read())\n\nif 'SECRET_KEY' not in locals():\n secret_file = os.path.join(BASE_DIR, '.django_secret')\n try:\n SECRET_KEY = open(secret_file).read().strip()\n except IOError:\n import random\n system_random = random.SystemRandom()\n try:\n SECRET_KEY = ''.join([system_random.choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in range(64)])\n secret = open(secret_file, 'w')\n import os\n os.chmod(secret_file, 0o0600)\n secret.write(SECRET_KEY)\n secret.close()\n except IOError:\n Exception('Please create a %s file with random characters to generate your secret key!' % secret_file)\n", "path": "linkedevents/settings.py"}, {"content": "from django.conf import settings\nfrom django.contrib import admin\nfrom django.contrib.admin.widgets import FilteredSelectMultiple\nfrom django.contrib.gis import admin as geoadmin\nfrom django.contrib.gis.db import models\nfrom django.utils.translation import ugettext as _\nfrom modeltranslation.admin import TranslationAdmin\nfrom reversion.admin import VersionAdmin\nfrom events.api import generate_id\nfrom events.models import Event, Keyword, Place, Language, \\\n OpeningHoursSpecification, KeywordLabel, Organization, License\n\n\nclass BaseAdmin(admin.ModelAdmin):\n exclude = (\"created_by\", \"modified_by\",)\n\n def save_model(self, request, obj, form, change):\n if obj.pk is None:\n obj.created_by = request.user\n else:\n obj.modified_by = request.user\n obj.save()\n\n\nclass EventModelAdmin(BaseAdmin, TranslationAdmin, VersionAdmin):\n pass\n\n\nclass KeywordAdmin(BaseAdmin, TranslationAdmin, VersionAdmin):\n pass\n\n\nclass PlaceAdmin(geoadmin.GeoModelAdmin, BaseAdmin, TranslationAdmin,\n VersionAdmin):\n fieldsets = (\n (None, {\n 'fields': ('publisher', 'name', 'description', 'info_url', 'position', 'parent')\n\n }),\n (_('Contact info'), {\n 'fields': ('email', 'telephone', 'contact_type', 'street_address', 'address_locality', 'address_region',\n 'postal_code', 'post_office_box_num')\n }),\n )\n\n def __init__(self, model, admin_site):\n super().__init__(model, admin_site)\n # use https CDN instead\n self.openlayers_url = 'https://cdnjs.cloudflare.com/ajax/libs/openlayers/2.13.1/OpenLayers.js'\n\n def save_model(self, request, obj, form, change):\n system_id = settings.SYSTEM_DATA_SOURCE_ID\n obj.data_source_id = system_id\n if not obj.id:\n obj.id = generate_id(system_id)\n obj.origin_id = obj.id.split(':')[1]\n\n super().save_model(request, obj, form, change)\n\nadmin.site.register(Place, PlaceAdmin)\n\n\nclass OrganizationAdmin(BaseAdmin):\n list_display = ('name', 'nr_org_admins')\n\n formfield_overrides = {\n models.ManyToManyField: {'widget': FilteredSelectMultiple(\"yll\u00e4pit\u00e4j\u00e4t\", is_stacked=False)},\n }\n fields = ('admin_users',)\n\n def 
nr_org_admins(self, obj):\n return obj.admin_users.count()\n nr_org_admins.short_description = _('Admins')\n\nadmin.site.register(Organization, OrganizationAdmin)\n\n\nclass LanguageAdmin(BaseAdmin, VersionAdmin):\n pass\n\n\nclass PersonAdmin(BaseAdmin, VersionAdmin):\n pass\n\n\nclass LicenseAdmin(BaseAdmin, TranslationAdmin, VersionAdmin):\n def get_readonly_fields(self, request, obj=None):\n if obj:\n return ['id']\n else:\n return []\n\nadmin.site.register(License, LicenseAdmin)\n", "path": "events/admin.py"}], "after_files": [{"content": "\"\"\"\nDjango base settings for linkedevents project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.6/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.6/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/\n\nDEBUG = False\n\nTEMPLATE_DEBUG = False\n\nALLOWED_HOSTS = []\n\nSITE_ID = 1\n\n# Application definition\n\nINSTALLED_APPS = (\n 'helusers',\n 'django.contrib.sites',\n 'modeltranslation',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.gis',\n 'django.contrib.postgres',\n 'django_extensions',\n 'events',\n 'corsheaders',\n 'rest_framework',\n 'rest_framework_jwt',\n 'mptt',\n 'reversion',\n 'haystack',\n 'raven.contrib.django.raven_compat',\n 'django_cleanup',\n\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n 'helusers.providers.helsinki',\n\n 'helevents',\n 'munigeo',\n 'leaflet',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n 'reversion.middleware.RevisionMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'linkedevents.urls'\n\nWSGI_APPLICATION = 'linkedevents.wsgi.application'\n\n# Database\n# https://docs.djangoproject.com/en/1.6/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.contrib.gis.db.backends.postgis',\n 'NAME': 'linkedevents',\n }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.6/topics/i18n/\n\nLANGUAGES = (\n ('fi', 'Finnish'),\n ('sv', 'Swedish'),\n ('en', 'English'),\n)\n\nLANGUAGE_CODE = 'fi'\n\nTIME_ZONE = 'Europe/Helsinki'\n\nSYSTEM_DATA_SOURCE_ID = 'system'\n\n\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nLOCALE_PATHS = (\n os.path.join(BASE_DIR, 'locale'),\n)\n\nIMPORT_FILE_PATH = os.path.join(BASE_DIR, 'data')\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.6/howto/static-files/\n\nSTATIC_URL = '/static/'\n\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nMEDIA_URL = '/media/'\n\n#\n# Authentication\n#\nAUTH_USER_MODEL = 'helevents.User'\nAUTHENTICATION_BACKENDS = (\n 'django.contrib.auth.backends.ModelBackend',\n 'allauth.account.auth_backends.AuthenticationBackend',\n)\nSOCIALACCOUNT_PROVIDERS = {\n 'helsinki': {\n 'VERIFIED_EMAIL': True\n }\n}\nLOGIN_REDIRECT_URL = 
'/'\nACCOUNT_LOGOUT_ON_GET = True\nSOCIALACCOUNT_ADAPTER = 'helusers.providers.helsinki.provider.SocialAccountAdapter'\n\n#\n# REST Framework\n#\nREST_FRAMEWORK = {\n 'PAGE_SIZE': 20,\n 'ORDERING_PARAM': 'sort',\n 'DEFAULT_RENDERER_CLASSES': (\n 'events.renderers.JSONRenderer',\n 'events.renderers.JSONLDRenderer',\n 'rest_framework.renderers.BrowsableAPIRenderer',\n ),\n 'DEFAULT_PARSER_CLASSES': (\n 'events.parsers.CamelCaseJSONParser',\n 'events.parsers.JSONLDParser',\n 'rest_framework.parsers.FormParser',\n 'rest_framework.parsers.MultiPartParser',\n ),\n 'DEFAULT_FILTER_BACKENDS': (\n 'rest_framework.filters.DjangoFilterBackend',\n ),\n 'DEFAULT_PAGINATION_CLASS': 'events.api_pagination.CustomPagination',\n 'DEFAULT_PERMISSION_CLASSES': (\n 'rest_framework.permissions.IsAuthenticatedOrReadOnly',\n ),\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'helusers.jwt.JWTAuthentication',\n ),\n 'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.URLPathVersioning',\n 'VIEW_NAME_FUNCTION': 'events.api.get_view_name',\n}\nJWT_AUTH = {\n 'JWT_PAYLOAD_GET_USER_ID_HANDLER': 'helusers.jwt.get_user_id_from_payload_handler',\n # JWT_AUDIENCE and JWT_SECRET_KEY must be set in local_settings.py\n}\n\n\nCORS_ORIGIN_ALLOW_ALL = True\nCSRF_COOKIE_NAME = 'linkedevents-csrftoken'\nSESSION_COOKIE_NAME = 'linkedevents-sessionid'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(BASE_DIR, 'templates'),\n ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.contrib.auth.context_processors.auth',\n 'django.template.context_processors.debug',\n 'django.template.context_processors.i18n',\n 'django.template.context_processors.media',\n 'django.template.context_processors.static',\n 'django.template.context_processors.tz',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\n\nPOSTGIS_VERSION = (2, 1, 1)\n\n# Use ETRS-TM35FIN projection by default\nPROJECTION_SRID=3067\n# Bounding box of Finland and then some\n\nBOUNDING_BOX = [-548576, 6291456, 1548576, 8388608]\n\nCITYSDK_API_SETTINGS = {\n 'CITYSDK_URL': \"http://api.tourism.helsinki.citysdk.eu/CitySDK/\",\n 'USERNAME': 'admin',\n 'PASSWORD': 'defaultCitySDKPassword',\n 'SRS_URL': 'http://www.opengis.net/def/crs/EPSG/0/%d' % PROJECTION_SRID,\n 'DEFAULT_POI_CATEGORY': '53562f3238653c0a842a3bf7'\n}\n\nHAYSTACK_CONNECTIONS = {\n 'default': {\n 'ENGINE': 'haystack.backends.simple_backend.SimpleEngine'\n },\n 'default-fi': {\n 'ENGINE': 'haystack.backends.simple_backend.SimpleEngine'\n },\n 'default-en': {\n 'ENGINE': 'haystack.backends.simple_backend.SimpleEngine'\n },\n 'default-sv': {\n 'ENGINE': 'haystack.backends.simple_backend.SimpleEngine'\n }\n}\n\nimport bleach\nBLEACH_ALLOWED_TAGS = bleach.ALLOWED_TAGS + [\"p\", \"div\"]\n\nfrom easy_thumbnails.conf import Settings as thumbnail_settings\nTHUMBNAIL_PROCESSORS = (\n 'image_cropping.thumbnail_processors.crop_corners',\n) + thumbnail_settings.THUMBNAIL_PROCESSORS\n\n# local_settings.py can be used to override environment-specific settings\n# like database and email that differ between development and production.\nf = os.path.join(BASE_DIR, \"local_settings.py\")\nif os.path.exists(f):\n import sys\n import imp\n module_name = \"%s.local_settings\" % ROOT_URLCONF.split('.')[0]\n module = imp.new_module(module_name)\n module.__file__ = f\n sys.modules[module_name] = module\n exec(open(f, \"rb\").read())\n\nif 'SECRET_KEY' not in locals():\n secret_file = os.path.join(BASE_DIR, '.django_secret')\n try:\n 
SECRET_KEY = open(secret_file).read().strip()\n except IOError:\n import random\n system_random = random.SystemRandom()\n try:\n SECRET_KEY = ''.join([system_random.choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in range(64)])\n secret = open(secret_file, 'w')\n import os\n os.chmod(secret_file, 0o0600)\n secret.write(SECRET_KEY)\n secret.close()\n except IOError:\n Exception('Please create a %s file with random characters to generate your secret key!' % secret_file)\n", "path": "linkedevents/settings.py"}, {"content": "from django.conf import settings\nfrom django.contrib import admin\nfrom django.contrib.admin.widgets import FilteredSelectMultiple\nfrom django.contrib.gis.db import models\nfrom django.utils.translation import ugettext as _\nfrom leaflet.admin import LeafletGeoAdmin\nfrom modeltranslation.admin import TranslationAdmin\nfrom reversion.admin import VersionAdmin\nfrom events.api import generate_id\nfrom events.models import Event, Keyword, Place, Language, \\\n OpeningHoursSpecification, KeywordLabel, Organization, License\n\n\nclass BaseAdmin(admin.ModelAdmin):\n exclude = (\"created_by\", \"modified_by\",)\n\n def save_model(self, request, obj, form, change):\n if obj.pk is None:\n obj.created_by = request.user\n else:\n obj.modified_by = request.user\n obj.save()\n\n\nclass EventModelAdmin(BaseAdmin, TranslationAdmin, VersionAdmin):\n pass\n\n\nclass KeywordAdmin(BaseAdmin, TranslationAdmin, VersionAdmin):\n pass\n\n\nclass HelsinkiGeoAdmin(LeafletGeoAdmin):\n settings_overrides = {\n 'DEFAULT_CENTER': (60.171944, 24.941389),\n 'DEFAULT_ZOOM': 11,\n 'MIN_ZOOM': 3,\n 'MAX_ZOOM': 19,\n }\n\n\nclass PlaceAdmin(HelsinkiGeoAdmin, BaseAdmin, TranslationAdmin, VersionAdmin):\n fieldsets = (\n (None, {\n 'fields': ('publisher', 'name', 'description', 'info_url', 'position', 'parent')\n\n }),\n (_('Contact info'), {\n 'fields': ('email', 'telephone', 'contact_type', 'street_address', 'address_locality', 'address_region',\n 'postal_code', 'post_office_box_num')\n }),\n )\n\n def __init__(self, model, admin_site):\n super().__init__(model, admin_site)\n # use https CDN instead\n self.openlayers_url = 'https://cdnjs.cloudflare.com/ajax/libs/openlayers/2.13.1/OpenLayers.js'\n\n def save_model(self, request, obj, form, change):\n system_id = settings.SYSTEM_DATA_SOURCE_ID\n obj.data_source_id = system_id\n if not obj.id:\n obj.id = generate_id(system_id)\n obj.origin_id = obj.id.split(':')[1]\n\n super().save_model(request, obj, form, change)\n\nadmin.site.register(Place, PlaceAdmin)\n\n\nclass OrganizationAdmin(BaseAdmin):\n list_display = ('name', 'nr_org_admins')\n\n formfield_overrides = {\n models.ManyToManyField: {'widget': FilteredSelectMultiple(\"yll\u00e4pit\u00e4j\u00e4t\", is_stacked=False)},\n }\n fields = ('admin_users',)\n\n def nr_org_admins(self, obj):\n return obj.admin_users.count()\n nr_org_admins.short_description = _('Admins')\n\nadmin.site.register(Organization, OrganizationAdmin)\n\n\nclass LanguageAdmin(BaseAdmin, VersionAdmin):\n pass\n\n\nclass PersonAdmin(BaseAdmin, VersionAdmin):\n pass\n\n\nclass LicenseAdmin(BaseAdmin, TranslationAdmin, VersionAdmin):\n def get_readonly_fields(self, request, obj=None):\n if obj:\n return ['id']\n else:\n return []\n\nadmin.site.register(License, LicenseAdmin)\n", "path": "events/admin.py"}]}
3,599
365
gh_patches_debug_29594
rasdani/github-patches
git_diff
fossasia__open-event-server-6739
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Remove version model **Describe the bug** The version model is not used currently and should be removed https://github.com/fossasia/open-event-server/blob/development/app/models/version.py **Additional context** @iamareebjamal Taking this --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `app/models/version.py` Content: ``` 1 from sqlalchemy.orm import backref 2 3 from app.models import db 4 5 6 class Version(db.Model): 7 """Version model class""" 8 __tablename__ = 'versions' 9 id = db.Column(db.Integer, primary_key=True) 10 event_id = db.Column(db.Integer, db.ForeignKey('events.id', ondelete='CASCADE')) 11 events = db.relationship("Event", backref=backref('version', uselist=False)) 12 13 event_ver = db.Column(db.Integer, nullable=False, default=0) 14 sessions_ver = db.Column(db.Integer, nullable=False, default=0) 15 speakers_ver = db.Column(db.Integer, nullable=False, default=0) 16 tracks_ver = db.Column(db.Integer, nullable=False, default=0) 17 sponsors_ver = db.Column(db.Integer, nullable=False, default=0) 18 microlocations_ver = db.Column(db.Integer, nullable=False, default=0) 19 20 def __init__(self, 21 event_id=None, 22 event_ver=None, 23 sessions_ver=None, 24 speakers_ver=None, 25 tracks_ver=None, 26 sponsors_ver=None, 27 microlocations_ver=None): 28 self.event_id = event_id 29 self.event_ver = event_ver 30 self.sessions_ver = sessions_ver 31 self.speakers_ver = speakers_ver 32 self.tracks_ver = tracks_ver 33 self.sponsors_ver = sponsors_ver 34 self.microlocations_ver = microlocations_ver 35 36 def __repr__(self): 37 return '<Version %r>' % self.id 38 39 def __str__(self): 40 return self.__repr__() 41 42 @property 43 def serialize(self): 44 """Return object data in easily serializable format""" 45 return { 46 'version': [ 47 {'id': self.id, 48 'event_id': self.event_id, 49 'event_ver': self.event_ver, 50 'sessions_ver': self.sessions_ver, 51 'speakers_ver': self.speakers_ver, 52 'tracks_ver': self.tracks_ver, 53 'sponsors_ver': self.sponsors_ver, 54 'microlocations_ver': self.microlocations_ver} 55 ] 56 } 57 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/app/models/version.py b/app/models/version.py deleted file mode 100644 --- a/app/models/version.py +++ /dev/null @@ -1,56 +0,0 @@ -from sqlalchemy.orm import backref - -from app.models import db - - -class Version(db.Model): - """Version model class""" - __tablename__ = 'versions' - id = db.Column(db.Integer, primary_key=True) - event_id = db.Column(db.Integer, db.ForeignKey('events.id', ondelete='CASCADE')) - events = db.relationship("Event", backref=backref('version', uselist=False)) - - event_ver = db.Column(db.Integer, nullable=False, default=0) - sessions_ver = db.Column(db.Integer, nullable=False, default=0) - speakers_ver = db.Column(db.Integer, nullable=False, default=0) - tracks_ver = db.Column(db.Integer, nullable=False, default=0) - sponsors_ver = db.Column(db.Integer, nullable=False, default=0) - microlocations_ver = db.Column(db.Integer, nullable=False, default=0) - - def __init__(self, - event_id=None, - event_ver=None, - sessions_ver=None, - speakers_ver=None, - tracks_ver=None, - sponsors_ver=None, - microlocations_ver=None): - self.event_id = event_id - self.event_ver = event_ver - self.sessions_ver = sessions_ver - self.speakers_ver = speakers_ver - self.tracks_ver = tracks_ver - self.sponsors_ver = sponsors_ver - self.microlocations_ver = microlocations_ver - - def __repr__(self): - return '<Version %r>' % self.id - - def __str__(self): - return self.__repr__() - - @property - def serialize(self): - """Return object data in easily serializable format""" - return { - 'version': [ - {'id': self.id, - 'event_id': self.event_id, - 'event_ver': self.event_ver, - 'sessions_ver': self.sessions_ver, - 'speakers_ver': self.speakers_ver, - 'tracks_ver': self.tracks_ver, - 'sponsors_ver': self.sponsors_ver, - 'microlocations_ver': self.microlocations_ver} - ] - }
{"golden_diff": "diff --git a/app/models/version.py b/app/models/version.py\ndeleted file mode 100644\n--- a/app/models/version.py\n+++ /dev/null\n@@ -1,56 +0,0 @@\n-from sqlalchemy.orm import backref\n-\n-from app.models import db\n-\n-\n-class Version(db.Model):\n- \"\"\"Version model class\"\"\"\n- __tablename__ = 'versions'\n- id = db.Column(db.Integer, primary_key=True)\n- event_id = db.Column(db.Integer, db.ForeignKey('events.id', ondelete='CASCADE'))\n- events = db.relationship(\"Event\", backref=backref('version', uselist=False))\n-\n- event_ver = db.Column(db.Integer, nullable=False, default=0)\n- sessions_ver = db.Column(db.Integer, nullable=False, default=0)\n- speakers_ver = db.Column(db.Integer, nullable=False, default=0)\n- tracks_ver = db.Column(db.Integer, nullable=False, default=0)\n- sponsors_ver = db.Column(db.Integer, nullable=False, default=0)\n- microlocations_ver = db.Column(db.Integer, nullable=False, default=0)\n-\n- def __init__(self,\n- event_id=None,\n- event_ver=None,\n- sessions_ver=None,\n- speakers_ver=None,\n- tracks_ver=None,\n- sponsors_ver=None,\n- microlocations_ver=None):\n- self.event_id = event_id\n- self.event_ver = event_ver\n- self.sessions_ver = sessions_ver\n- self.speakers_ver = speakers_ver\n- self.tracks_ver = tracks_ver\n- self.sponsors_ver = sponsors_ver\n- self.microlocations_ver = microlocations_ver\n-\n- def __repr__(self):\n- return '<Version %r>' % self.id\n-\n- def __str__(self):\n- return self.__repr__()\n-\n- @property\n- def serialize(self):\n- \"\"\"Return object data in easily serializable format\"\"\"\n- return {\n- 'version': [\n- {'id': self.id,\n- 'event_id': self.event_id,\n- 'event_ver': self.event_ver,\n- 'sessions_ver': self.sessions_ver,\n- 'speakers_ver': self.speakers_ver,\n- 'tracks_ver': self.tracks_ver,\n- 'sponsors_ver': self.sponsors_ver,\n- 'microlocations_ver': self.microlocations_ver}\n- ]\n- }\n", "issue": "Remove version model\n**Describe the bug**\r\nThe version model is not used currently and should be removed\r\n\r\nhttps://github.com/fossasia/open-event-server/blob/development/app/models/version.py\r\n\r\n\r\n**Additional context**\r\n@iamareebjamal Taking this\r\n\n", "before_files": [{"content": "from sqlalchemy.orm import backref\n\nfrom app.models import db\n\n\nclass Version(db.Model):\n \"\"\"Version model class\"\"\"\n __tablename__ = 'versions'\n id = db.Column(db.Integer, primary_key=True)\n event_id = db.Column(db.Integer, db.ForeignKey('events.id', ondelete='CASCADE'))\n events = db.relationship(\"Event\", backref=backref('version', uselist=False))\n\n event_ver = db.Column(db.Integer, nullable=False, default=0)\n sessions_ver = db.Column(db.Integer, nullable=False, default=0)\n speakers_ver = db.Column(db.Integer, nullable=False, default=0)\n tracks_ver = db.Column(db.Integer, nullable=False, default=0)\n sponsors_ver = db.Column(db.Integer, nullable=False, default=0)\n microlocations_ver = db.Column(db.Integer, nullable=False, default=0)\n\n def __init__(self,\n event_id=None,\n event_ver=None,\n sessions_ver=None,\n speakers_ver=None,\n tracks_ver=None,\n sponsors_ver=None,\n microlocations_ver=None):\n self.event_id = event_id\n self.event_ver = event_ver\n self.sessions_ver = sessions_ver\n self.speakers_ver = speakers_ver\n self.tracks_ver = tracks_ver\n self.sponsors_ver = sponsors_ver\n self.microlocations_ver = microlocations_ver\n\n def __repr__(self):\n return '<Version %r>' % self.id\n\n def __str__(self):\n return self.__repr__()\n\n @property\n def serialize(self):\n \"\"\"Return 
object data in easily serializable format\"\"\"\n return {\n 'version': [\n {'id': self.id,\n 'event_id': self.event_id,\n 'event_ver': self.event_ver,\n 'sessions_ver': self.sessions_ver,\n 'speakers_ver': self.speakers_ver,\n 'tracks_ver': self.tracks_ver,\n 'sponsors_ver': self.sponsors_ver,\n 'microlocations_ver': self.microlocations_ver}\n ]\n }\n", "path": "app/models/version.py"}], "after_files": [{"content": null, "path": "app/models/version.py"}]}
848
525
gh_patches_debug_64623
rasdani/github-patches
git_diff
quantumlib__Cirq-423
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- MergeInteractions optimization is creating bad circuits Not sure how this made it past the tests yet. ```python def main(): circuit = make_inefficient_circuit() print("BEFORE:") print(circuit.to_unitary_matrix().round(3)) print() cirq.google.MergeInteractions().optimize_circuit(circuit) print("AFTER:") print(circuit.to_unitary_matrix().round(3)) def make_inefficient_circuit(t=0.1, v=0.11): from cirq import H, CNOT, Z a = cirq.NamedQubit('a') b = cirq.NamedQubit('b') return cirq.Circuit.from_ops( H(b), CNOT(a, b), H(b), CNOT(a, b), CNOT(b, a), H(a), CNOT(a, b), Z(a)**t, Z(b)**-t, CNOT(a, b), H(a), Z(b)**v, CNOT(a, b), Z(a)**-v, Z(b)**-v) ``` ``` BEFORE: [[ 1. -0.j 0. -0.j 0. +0.j 0. -0.j ] [-0. -0.j -0. -0.309j 0.951-0.j -0. +0.j ] [ 0. -0.j 0.951-0.j -0. -0.309j 0. +0.j ] [-0. +0.j 0. +0.j 0. -0.j -0.771+0.637j]] AFTER: [[-0.354-0.354j -0.354-0.354j -0.227-0.446j 0.227+0.446j] [-0.452-0.213j -0.452-0.213j 0.364+0.342j -0.364-0.342j] [-0.354-0.354j 0.354+0.354j -0.446-0.227j -0.446-0.227j] [-0.452-0.213j 0.452+0.213j 0.496+0.063j 0.496+0.063j]] ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cirq/google/merge_interactions.py` Content: ``` 1 # Copyright 2018 The Cirq Developers 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # https://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """An optimization pass that combines adjacent single-qubit rotations.""" 16 17 from typing import List, Tuple, Optional, cast 18 19 import numpy as np 20 21 from cirq import ops 22 from cirq.circuits import ( 23 Circuit, 24 PointOptimizer, 25 PointOptimizationSummary, 26 ) 27 from cirq.extension import Extensions 28 from cirq.google.decompositions import two_qubit_matrix_to_native_gates 29 30 31 class MergeInteractions(PointOptimizer): 32 """Combines adjacent constant single-qubit rotations.""" 33 34 def __init__(self, 35 tolerance: float = 1e-8, 36 allow_partial_czs: bool = True, 37 extensions: Extensions = None) -> None: 38 self.tolerance = tolerance 39 self.allow_partial_czs = allow_partial_czs 40 self.extensions = extensions or Extensions() 41 42 def optimization_at(self, circuit, index, op): 43 if len(op.qubits) != 2: 44 return None 45 46 interaction_count, indices, matrix = ( 47 self._scan_two_qubit_ops_into_matrix(circuit, index, op.qubits)) 48 if interaction_count <= 1: 49 return None 50 51 # Find a max-3-cz construction. 52 operations = two_qubit_matrix_to_native_gates( 53 op.qubits[0], 54 op.qubits[1], 55 matrix, 56 self.allow_partial_czs, 57 self.tolerance) 58 59 # TODO: don't replace if there's no benefit in CZ depth. 60 61 return PointOptimizationSummary( 62 clear_span=max(indices) + 1 - index, 63 clear_qubits=op.qubits, 64 new_operations=operations) 65 66 def _op_to_matrix(self, 67 op: ops.Operation, 68 qubits: Tuple[ops.QubitId, ...] 
69 ) -> Optional[Tuple[np.ndarray, bool]]: 70 """Determines the effect of an operation on the given qubits. 71 72 The operation must be a 1-qubit operation on one of the given qubits, 73 or a 2-qubit operation on both of the given qubits. Also, the operation 74 must have a known matrix. Otherwise None is returned. 75 76 Args: 77 op: The operation to understand. 78 qubits: The qubits we care about. Order determines matrix tensor 79 order. 80 81 Returns: 82 None, or else a tuple containing a matrix equivalent to the effect 83 of the operation and a boolean indicating if the operation is a 84 2-qubit interaction. 85 """ 86 q1, q2 = qubits 87 88 known = self.extensions.try_cast(op.gate, ops.KnownMatrixGate) 89 if known is None: 90 return None 91 m = known.matrix() 92 93 if op.qubits == qubits: 94 return m, True 95 if op.qubits == (q2, q1): 96 return MergeInteractions._flip_kron_order(m), True 97 if op.qubits == (q1,): 98 return np.kron(np.eye(2), m), False 99 if op.qubits == (q2,): 100 return np.kron(m, np.eye(2)), False 101 102 return None 103 104 def _scan_two_qubit_ops_into_matrix( 105 self, 106 circuit: Circuit, 107 index: Optional[int], 108 qubits: Tuple[ops.QubitId, ...] 109 ) -> Tuple[int, List[int], np.ndarray]: 110 """Accumulates operations affecting the given pair of qubits. 111 112 The scan terminates when it hits the end of the circuit, finds an 113 operation without a known matrix, or finds an operation that interacts 114 the given qubits with other qubits. 115 116 Args: 117 circuit: The circuit to scan for operations. 118 index: The index to start scanning forward from. 119 qubits: The pair of qubits we care about. 120 121 Returns: 122 A tuple containing: 123 0. The number of 2-qubit operations that were scanned. 124 1. The moment indices those operations were on. 125 2. A matrix equivalent to the effect of the scanned operations. 126 """ 127 128 product = np.eye(4, dtype=np.complex128) 129 interaction_count = 0 130 touched_indices = [] 131 132 while index is not None: 133 operations = {circuit.operation_at(q, index) for q in qubits} 134 op_data = [ 135 self._op_to_matrix(op, qubits) 136 for op in operations 137 if op 138 ] 139 140 # Stop at any non-constant or non-local interaction. 141 if any(e is None for e in op_data): 142 break 143 present_op_data = cast(List[Tuple[np.ndarray, bool]], op_data) 144 145 for op_mat, interacts in present_op_data: 146 product = np.dot(op_mat, product) 147 if interacts: 148 interaction_count += 1 149 150 touched_indices.append(index) 151 index = circuit.next_moment_operating_on(qubits, index + 1) 152 153 return interaction_count, touched_indices, product 154 155 @staticmethod 156 def _flip_kron_order(mat4x4: np.ndarray) -> np.ndarray: 157 """Given M = sum(kron(a_i, b_i)), returns M' = sum(kron(b_i, a_i)).""" 158 result = np.array([[0] * 4] * 4, dtype=np.complex128) 159 order = [0, 2, 1, 3] 160 for i in range(4): 161 for j in range(4): 162 result[order[i], order[j]] = mat4x4[i, j] 163 return result 164 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cirq/google/merge_interactions.py b/cirq/google/merge_interactions.py --- a/cirq/google/merge_interactions.py +++ b/cirq/google/merge_interactions.py @@ -95,9 +95,9 @@ if op.qubits == (q2, q1): return MergeInteractions._flip_kron_order(m), True if op.qubits == (q1,): - return np.kron(np.eye(2), m), False - if op.qubits == (q2,): return np.kron(m, np.eye(2)), False + if op.qubits == (q2,): + return np.kron(np.eye(2), m), False return None
{"golden_diff": "diff --git a/cirq/google/merge_interactions.py b/cirq/google/merge_interactions.py\n--- a/cirq/google/merge_interactions.py\n+++ b/cirq/google/merge_interactions.py\n@@ -95,9 +95,9 @@\n if op.qubits == (q2, q1):\n return MergeInteractions._flip_kron_order(m), True\n if op.qubits == (q1,):\n- return np.kron(np.eye(2), m), False\n- if op.qubits == (q2,):\n return np.kron(m, np.eye(2)), False\n+ if op.qubits == (q2,):\n+ return np.kron(np.eye(2), m), False\n \n return None\n", "issue": "MergeInteractions optimization is creating bad circuits\nNot sure how this made it past the tests yet.\r\n\r\n```python\r\ndef main():\r\n circuit = make_inefficient_circuit()\r\n print(\"BEFORE:\")\r\n print(circuit.to_unitary_matrix().round(3))\r\n print()\r\n cirq.google.MergeInteractions().optimize_circuit(circuit)\r\n print(\"AFTER:\")\r\n print(circuit.to_unitary_matrix().round(3))\r\n\r\ndef make_inefficient_circuit(t=0.1, v=0.11):\r\n from cirq import H, CNOT, Z\r\n a = cirq.NamedQubit('a')\r\n b = cirq.NamedQubit('b')\r\n\r\n return cirq.Circuit.from_ops(\r\n H(b),\r\n CNOT(a, b),\r\n H(b),\r\n CNOT(a, b),\r\n CNOT(b, a),\r\n H(a),\r\n CNOT(a, b),\r\n Z(a)**t, Z(b)**-t,\r\n CNOT(a, b),\r\n H(a), Z(b)**v,\r\n CNOT(a, b),\r\n Z(a)**-v, Z(b)**-v)\r\n```\r\n\r\n```\r\nBEFORE:\r\n[[ 1. -0.j 0. -0.j 0. +0.j 0. -0.j ]\r\n [-0. -0.j -0. -0.309j 0.951-0.j -0. +0.j ]\r\n [ 0. -0.j 0.951-0.j -0. -0.309j 0. +0.j ]\r\n [-0. +0.j 0. +0.j 0. -0.j -0.771+0.637j]]\r\n\r\nAFTER:\r\n[[-0.354-0.354j -0.354-0.354j -0.227-0.446j 0.227+0.446j]\r\n [-0.452-0.213j -0.452-0.213j 0.364+0.342j -0.364-0.342j]\r\n [-0.354-0.354j 0.354+0.354j -0.446-0.227j -0.446-0.227j]\r\n [-0.452-0.213j 0.452+0.213j 0.496+0.063j 0.496+0.063j]]\r\n```\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"An optimization pass that combines adjacent single-qubit rotations.\"\"\"\n\nfrom typing import List, Tuple, Optional, cast\n\nimport numpy as np\n\nfrom cirq import ops\nfrom cirq.circuits import (\n Circuit,\n PointOptimizer,\n PointOptimizationSummary,\n)\nfrom cirq.extension import Extensions\nfrom cirq.google.decompositions import two_qubit_matrix_to_native_gates\n\n\nclass MergeInteractions(PointOptimizer):\n \"\"\"Combines adjacent constant single-qubit rotations.\"\"\"\n\n def __init__(self,\n tolerance: float = 1e-8,\n allow_partial_czs: bool = True,\n extensions: Extensions = None) -> None:\n self.tolerance = tolerance\n self.allow_partial_czs = allow_partial_czs\n self.extensions = extensions or Extensions()\n\n def optimization_at(self, circuit, index, op):\n if len(op.qubits) != 2:\n return None\n\n interaction_count, indices, matrix = (\n self._scan_two_qubit_ops_into_matrix(circuit, index, op.qubits))\n if interaction_count <= 1:\n return None\n\n # Find a max-3-cz construction.\n operations = two_qubit_matrix_to_native_gates(\n op.qubits[0],\n op.qubits[1],\n matrix,\n self.allow_partial_czs,\n self.tolerance)\n\n # TODO: don't replace if 
there's no benefit in CZ depth.\n\n return PointOptimizationSummary(\n clear_span=max(indices) + 1 - index,\n clear_qubits=op.qubits,\n new_operations=operations)\n\n def _op_to_matrix(self,\n op: ops.Operation,\n qubits: Tuple[ops.QubitId, ...]\n ) -> Optional[Tuple[np.ndarray, bool]]:\n \"\"\"Determines the effect of an operation on the given qubits.\n\n The operation must be a 1-qubit operation on one of the given qubits,\n or a 2-qubit operation on both of the given qubits. Also, the operation\n must have a known matrix. Otherwise None is returned.\n\n Args:\n op: The operation to understand.\n qubits: The qubits we care about. Order determines matrix tensor\n order.\n\n Returns:\n None, or else a tuple containing a matrix equivalent to the effect\n of the operation and a boolean indicating if the operation is a\n 2-qubit interaction.\n \"\"\"\n q1, q2 = qubits\n\n known = self.extensions.try_cast(op.gate, ops.KnownMatrixGate)\n if known is None:\n return None\n m = known.matrix()\n\n if op.qubits == qubits:\n return m, True\n if op.qubits == (q2, q1):\n return MergeInteractions._flip_kron_order(m), True\n if op.qubits == (q1,):\n return np.kron(np.eye(2), m), False\n if op.qubits == (q2,):\n return np.kron(m, np.eye(2)), False\n\n return None\n\n def _scan_two_qubit_ops_into_matrix(\n self,\n circuit: Circuit,\n index: Optional[int],\n qubits: Tuple[ops.QubitId, ...]\n ) -> Tuple[int, List[int], np.ndarray]:\n \"\"\"Accumulates operations affecting the given pair of qubits.\n\n The scan terminates when it hits the end of the circuit, finds an\n operation without a known matrix, or finds an operation that interacts\n the given qubits with other qubits.\n\n Args:\n circuit: The circuit to scan for operations.\n index: The index to start scanning forward from.\n qubits: The pair of qubits we care about.\n\n Returns:\n A tuple containing:\n 0. The number of 2-qubit operations that were scanned.\n 1. The moment indices those operations were on.\n 2. 
A matrix equivalent to the effect of the scanned operations.\n \"\"\"\n\n product = np.eye(4, dtype=np.complex128)\n interaction_count = 0\n touched_indices = []\n\n while index is not None:\n operations = {circuit.operation_at(q, index) for q in qubits}\n op_data = [\n self._op_to_matrix(op, qubits)\n for op in operations\n if op\n ]\n\n # Stop at any non-constant or non-local interaction.\n if any(e is None for e in op_data):\n break\n present_op_data = cast(List[Tuple[np.ndarray, bool]], op_data)\n\n for op_mat, interacts in present_op_data:\n product = np.dot(op_mat, product)\n if interacts:\n interaction_count += 1\n\n touched_indices.append(index)\n index = circuit.next_moment_operating_on(qubits, index + 1)\n\n return interaction_count, touched_indices, product\n\n @staticmethod\n def _flip_kron_order(mat4x4: np.ndarray) -> np.ndarray:\n \"\"\"Given M = sum(kron(a_i, b_i)), returns M' = sum(kron(b_i, a_i)).\"\"\"\n result = np.array([[0] * 4] * 4, dtype=np.complex128)\n order = [0, 2, 1, 3]\n for i in range(4):\n for j in range(4):\n result[order[i], order[j]] = mat4x4[i, j]\n return result\n", "path": "cirq/google/merge_interactions.py"}], "after_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"An optimization pass that combines adjacent single-qubit rotations.\"\"\"\n\nfrom typing import List, Tuple, Optional, cast\n\nimport numpy as np\n\nfrom cirq import ops\nfrom cirq.circuits import (\n Circuit,\n PointOptimizer,\n PointOptimizationSummary,\n)\nfrom cirq.extension import Extensions\nfrom cirq.google.decompositions import two_qubit_matrix_to_native_gates\n\n\nclass MergeInteractions(PointOptimizer):\n \"\"\"Combines adjacent constant single-qubit rotations.\"\"\"\n\n def __init__(self,\n tolerance: float = 1e-8,\n allow_partial_czs: bool = True,\n extensions: Extensions = None) -> None:\n self.tolerance = tolerance\n self.allow_partial_czs = allow_partial_czs\n self.extensions = extensions or Extensions()\n\n def optimization_at(self, circuit, index, op):\n if len(op.qubits) != 2:\n return None\n\n interaction_count, indices, matrix = (\n self._scan_two_qubit_ops_into_matrix(circuit, index, op.qubits))\n if interaction_count <= 1:\n return None\n\n # Find a max-3-cz construction.\n operations = two_qubit_matrix_to_native_gates(\n op.qubits[0],\n op.qubits[1],\n matrix,\n self.allow_partial_czs,\n self.tolerance)\n\n # TODO: don't replace if there's no benefit in CZ depth.\n\n return PointOptimizationSummary(\n clear_span=max(indices) + 1 - index,\n clear_qubits=op.qubits,\n new_operations=operations)\n\n def _op_to_matrix(self,\n op: ops.Operation,\n qubits: Tuple[ops.QubitId, ...]\n ) -> Optional[Tuple[np.ndarray, bool]]:\n \"\"\"Determines the effect of an operation on the given qubits.\n\n The operation must be a 1-qubit operation on one of the given qubits,\n or a 2-qubit operation on both of the given qubits. Also, the operation\n must have a known matrix. 
Otherwise None is returned.\n\n Args:\n op: The operation to understand.\n qubits: The qubits we care about. Order determines matrix tensor\n order.\n\n Returns:\n None, or else a tuple containing a matrix equivalent to the effect\n of the operation and a boolean indicating if the operation is a\n 2-qubit interaction.\n \"\"\"\n q1, q2 = qubits\n\n known = self.extensions.try_cast(op.gate, ops.KnownMatrixGate)\n if known is None:\n return None\n m = known.matrix()\n\n if op.qubits == qubits:\n return m, True\n if op.qubits == (q2, q1):\n return MergeInteractions._flip_kron_order(m), True\n if op.qubits == (q1,):\n return np.kron(m, np.eye(2)), False\n if op.qubits == (q2,):\n return np.kron(np.eye(2), m), False\n\n return None\n\n def _scan_two_qubit_ops_into_matrix(\n self,\n circuit: Circuit,\n index: Optional[int],\n qubits: Tuple[ops.QubitId, ...]\n ) -> Tuple[int, List[int], np.ndarray]:\n \"\"\"Accumulates operations affecting the given pair of qubits.\n\n The scan terminates when it hits the end of the circuit, finds an\n operation without a known matrix, or finds an operation that interacts\n the given qubits with other qubits.\n\n Args:\n circuit: The circuit to scan for operations.\n index: The index to start scanning forward from.\n qubits: The pair of qubits we care about.\n\n Returns:\n A tuple containing:\n 0. The number of 2-qubit operations that were scanned.\n 1. The moment indices those operations were on.\n 2. A matrix equivalent to the effect of the scanned operations.\n \"\"\"\n\n product = np.eye(4, dtype=np.complex128)\n interaction_count = 0\n touched_indices = []\n\n while index is not None:\n operations = {circuit.operation_at(q, index) for q in qubits}\n op_data = [\n self._op_to_matrix(op, qubits)\n for op in operations\n if op\n ]\n\n # Stop at any non-constant or non-local interaction.\n if any(e is None for e in op_data):\n break\n present_op_data = cast(List[Tuple[np.ndarray, bool]], op_data)\n\n for op_mat, interacts in present_op_data:\n product = np.dot(op_mat, product)\n if interacts:\n interaction_count += 1\n\n touched_indices.append(index)\n index = circuit.next_moment_operating_on(qubits, index + 1)\n\n return interaction_count, touched_indices, product\n\n @staticmethod\n def _flip_kron_order(mat4x4: np.ndarray) -> np.ndarray:\n \"\"\"Given M = sum(kron(a_i, b_i)), returns M' = sum(kron(b_i, a_i)).\"\"\"\n result = np.array([[0] * 4] * 4, dtype=np.complex128)\n order = [0, 2, 1, 3]\n for i in range(4):\n for j in range(4):\n result[order[i], order[j]] = mat4x4[i, j]\n return result\n", "path": "cirq/google/merge_interactions.py"}]}
2,598
166
gh_patches_debug_7629
rasdani/github-patches
git_diff
pre-commit__pre-commit-1443
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- hook using node doesn't work on FreeBSD We have a hook using doctoc, the node package. I cannot install the hook, this is the output: ``` desktop1% pre-commit install --install-hooks pre-commit installed at .git/hooks/pre-commit [INFO] Initializing environment for [email protected]:jockej/pre-commit-hooks.git. [INFO] Initializing environment for https://github.com/thlorenz/doctoc. [INFO] Initializing environment for https://gitlab.com/pycqa/flake8. [INFO] Initializing environment for https://github.com/jorisroovers/gitlint. [INFO] Installing environment for https://github.com/thlorenz/doctoc. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... An unexpected error has occurred: CalledProcessError: command: ('/home/joakim/.cache/pre-commit/repoizbdwm4g/node_env-system/bin/node', '/usr/local/bin/npm', 'install', '-g', '.') return code: 254 expected return code: 0 stdout: (none) stderr: npm ERR! code ENOENT npm ERR! syscall chmod npm ERR! path /home/joakim/.cache/pre-commit/repoizbdwm4g/node_env-system/lib/node_modules/doctoc/doctoc.js npm ERR! errno -2 npm ERR! enoent ENOENT: no such file or directory, chmod '/home/joakim/.cache/pre-commit/repoizbdwm4g/node_env-system/lib/node_modules/doctoc/doctoc.js' npm ERR! enoent This is related to npm not being able to find a file. npm ERR! enoent npm ERR! A complete log of this run can be found in: npm ERR! /home/joakim/.npm/_logs/2020-05-07T10_30_16_218Z-debug.log Check the log at /home/joakim/.cache/pre-commit/pre-commit.log ``` These hooks all install and run perfectly on GNU/Linux. I believe it happens because pre-commit tries to use the --prebuilt flag to nodeenv, however this doesn't work for FreeBSD since there are no prebuilt packages (the URL constructed gives a 404). I have node installed so it should be possible to use --system, but I haven't been able to make it work myself. Nodeenv itself seems to work fine with --system. Please let me know if I can help with trying things on FreeBSD. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pre_commit/store.py` Content: ``` 1 import contextlib 2 import logging 3 import os.path 4 import sqlite3 5 import tempfile 6 from typing import Callable 7 from typing import Generator 8 from typing import List 9 from typing import Optional 10 from typing import Sequence 11 from typing import Tuple 12 13 import pre_commit.constants as C 14 from pre_commit import file_lock 15 from pre_commit import git 16 from pre_commit.util import CalledProcessError 17 from pre_commit.util import clean_path_on_failure 18 from pre_commit.util import cmd_output_b 19 from pre_commit.util import resource_text 20 from pre_commit.util import rmtree 21 22 23 logger = logging.getLogger('pre_commit') 24 25 26 def _get_default_directory() -> str: 27 """Returns the default directory for the Store. This is intentionally 28 underscored to indicate that `Store.get_default_directory` is the intended 29 way to get this information. This is also done so 30 `Store.get_default_directory` can be mocked in tests and 31 `_get_default_directory` can be tested. 
32 """ 33 return os.environ.get('PRE_COMMIT_HOME') or os.path.join( 34 os.environ.get('XDG_CACHE_HOME') or os.path.expanduser('~/.cache'), 35 'pre-commit', 36 ) 37 38 39 class Store: 40 get_default_directory = staticmethod(_get_default_directory) 41 42 def __init__(self, directory: Optional[str] = None) -> None: 43 self.directory = directory or Store.get_default_directory() 44 self.db_path = os.path.join(self.directory, 'db.db') 45 46 if not os.path.exists(self.directory): 47 os.makedirs(self.directory, exist_ok=True) 48 with open(os.path.join(self.directory, 'README'), 'w') as f: 49 f.write( 50 'This directory is maintained by the pre-commit project.\n' 51 'Learn more: https://github.com/pre-commit/pre-commit\n', 52 ) 53 54 if os.path.exists(self.db_path): 55 return 56 with self.exclusive_lock(): 57 # Another process may have already completed this work 58 if os.path.exists(self.db_path): # pragma: no cover (race) 59 return 60 # To avoid a race where someone ^Cs between db creation and 61 # execution of the CREATE TABLE statement 62 fd, tmpfile = tempfile.mkstemp(dir=self.directory) 63 # We'll be managing this file ourselves 64 os.close(fd) 65 with self.connect(db_path=tmpfile) as db: 66 db.executescript( 67 'CREATE TABLE repos (' 68 ' repo TEXT NOT NULL,' 69 ' ref TEXT NOT NULL,' 70 ' path TEXT NOT NULL,' 71 ' PRIMARY KEY (repo, ref)' 72 ');', 73 ) 74 self._create_config_table(db) 75 76 # Atomic file move 77 os.rename(tmpfile, self.db_path) 78 79 @contextlib.contextmanager 80 def exclusive_lock(self) -> Generator[None, None, None]: 81 def blocked_cb() -> None: # pragma: no cover (tests are in-process) 82 logger.info('Locking pre-commit directory') 83 84 with file_lock.lock(os.path.join(self.directory, '.lock'), blocked_cb): 85 yield 86 87 @contextlib.contextmanager 88 def connect( 89 self, 90 db_path: Optional[str] = None, 91 ) -> Generator[sqlite3.Connection, None, None]: 92 db_path = db_path or self.db_path 93 # sqlite doesn't close its fd with its contextmanager >.< 94 # contextlib.closing fixes this. 95 # See: https://stackoverflow.com/a/28032829/812183 96 with contextlib.closing(sqlite3.connect(db_path)) as db: 97 # this creates a transaction 98 with db: 99 yield db 100 101 @classmethod 102 def db_repo_name(cls, repo: str, deps: Sequence[str]) -> str: 103 if deps: 104 return f'{repo}:{",".join(sorted(deps))}' 105 else: 106 return repo 107 108 def _new_repo( 109 self, 110 repo: str, 111 ref: str, 112 deps: Sequence[str], 113 make_strategy: Callable[[str], None], 114 ) -> str: 115 repo = self.db_repo_name(repo, deps) 116 117 def _get_result() -> Optional[str]: 118 # Check if we already exist 119 with self.connect() as db: 120 result = db.execute( 121 'SELECT path FROM repos WHERE repo = ? 
AND ref = ?', 122 (repo, ref), 123 ).fetchone() 124 return result[0] if result else None 125 126 result = _get_result() 127 if result: 128 return result 129 with self.exclusive_lock(): 130 # Another process may have already completed this work 131 result = _get_result() 132 if result: # pragma: no cover (race) 133 return result 134 135 logger.info(f'Initializing environment for {repo}.') 136 137 directory = tempfile.mkdtemp(prefix='repo', dir=self.directory) 138 with clean_path_on_failure(directory): 139 make_strategy(directory) 140 141 # Update our db with the created repo 142 with self.connect() as db: 143 db.execute( 144 'INSERT INTO repos (repo, ref, path) VALUES (?, ?, ?)', 145 [repo, ref, directory], 146 ) 147 return directory 148 149 def _complete_clone(self, ref: str, git_cmd: Callable[..., None]) -> None: 150 """Perform a complete clone of a repository and its submodules """ 151 152 git_cmd('fetch', 'origin', '--tags') 153 git_cmd('checkout', ref) 154 git_cmd('submodule', 'update', '--init', '--recursive') 155 156 def _shallow_clone(self, ref: str, git_cmd: Callable[..., None]) -> None: 157 """Perform a shallow clone of a repository and its submodules """ 158 159 git_config = 'protocol.version=2' 160 git_cmd('-c', git_config, 'fetch', 'origin', ref, '--depth=1') 161 git_cmd('checkout', 'FETCH_HEAD') 162 git_cmd( 163 '-c', git_config, 'submodule', 'update', '--init', '--recursive', 164 '--depth=1', 165 ) 166 167 def clone(self, repo: str, ref: str, deps: Sequence[str] = ()) -> str: 168 """Clone the given url and checkout the specific ref.""" 169 170 def clone_strategy(directory: str) -> None: 171 git.init_repo(directory, repo) 172 env = git.no_git_env() 173 174 def _git_cmd(*args: str) -> None: 175 cmd_output_b('git', *args, cwd=directory, env=env) 176 177 try: 178 self._shallow_clone(ref, _git_cmd) 179 except CalledProcessError: 180 self._complete_clone(ref, _git_cmd) 181 182 return self._new_repo(repo, ref, deps, clone_strategy) 183 184 LOCAL_RESOURCES = ( 185 'Cargo.toml', 'main.go', 'go.mod', 'main.rs', '.npmignore', 186 'package.json', 'pre_commit_dummy_package.gemspec', 'setup.py', 187 'environment.yml', 'Makefile.PL', 188 ) 189 190 def make_local(self, deps: Sequence[str]) -> str: 191 def make_local_strategy(directory: str) -> None: 192 for resource in self.LOCAL_RESOURCES: 193 contents = resource_text(f'empty_template_{resource}') 194 with open(os.path.join(directory, resource), 'w') as f: 195 f.write(contents) 196 197 env = git.no_git_env() 198 199 # initialize the git repository so it looks more like cloned repos 200 def _git_cmd(*args: str) -> None: 201 cmd_output_b('git', *args, cwd=directory, env=env) 202 203 git.init_repo(directory, '<<unknown>>') 204 _git_cmd('add', '.') 205 git.commit(repo=directory) 206 207 return self._new_repo( 208 'local', C.LOCAL_REPO_VERSION, deps, make_local_strategy, 209 ) 210 211 def _create_config_table(self, db: sqlite3.Connection) -> None: 212 db.executescript( 213 'CREATE TABLE IF NOT EXISTS configs (' 214 ' path TEXT NOT NULL,' 215 ' PRIMARY KEY (path)' 216 ');', 217 ) 218 219 def mark_config_used(self, path: str) -> None: 220 path = os.path.realpath(path) 221 # don't insert config files that do not exist 222 if not os.path.exists(path): 223 return 224 with self.connect() as db: 225 # TODO: eventually remove this and only create in _create 226 self._create_config_table(db) 227 db.execute('INSERT OR IGNORE INTO configs VALUES (?)', (path,)) 228 229 def select_all_configs(self) -> List[str]: 230 with self.connect() as db: 231 
self._create_config_table(db) 232 rows = db.execute('SELECT path FROM configs').fetchall() 233 return [path for path, in rows] 234 235 def delete_configs(self, configs: List[str]) -> None: 236 with self.connect() as db: 237 rows = [(path,) for path in configs] 238 db.executemany('DELETE FROM configs WHERE path = ?', rows) 239 240 def select_all_repos(self) -> List[Tuple[str, str, str]]: 241 with self.connect() as db: 242 return db.execute('SELECT repo, ref, path from repos').fetchall() 243 244 def delete_repo(self, db_repo_name: str, ref: str, path: str) -> None: 245 with self.connect() as db: 246 db.execute( 247 'DELETE FROM repos WHERE repo = ? and ref = ?', 248 (db_repo_name, ref), 249 ) 250 rmtree(path) 251 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pre_commit/store.py b/pre_commit/store.py --- a/pre_commit/store.py +++ b/pre_commit/store.py @@ -30,10 +30,11 @@ `Store.get_default_directory` can be mocked in tests and `_get_default_directory` can be tested. """ - return os.environ.get('PRE_COMMIT_HOME') or os.path.join( + ret = os.environ.get('PRE_COMMIT_HOME') or os.path.join( os.environ.get('XDG_CACHE_HOME') or os.path.expanduser('~/.cache'), 'pre-commit', ) + return os.path.realpath(ret) class Store:
{"golden_diff": "diff --git a/pre_commit/store.py b/pre_commit/store.py\n--- a/pre_commit/store.py\n+++ b/pre_commit/store.py\n@@ -30,10 +30,11 @@\n `Store.get_default_directory` can be mocked in tests and\n `_get_default_directory` can be tested.\n \"\"\"\n- return os.environ.get('PRE_COMMIT_HOME') or os.path.join(\n+ ret = os.environ.get('PRE_COMMIT_HOME') or os.path.join(\n os.environ.get('XDG_CACHE_HOME') or os.path.expanduser('~/.cache'),\n 'pre-commit',\n )\n+ return os.path.realpath(ret)\n \n \n class Store:\n", "issue": "hook using node doesn't work on FreeBSD\nWe have a hook using doctoc, the node package. I cannot install the hook, this is the output:\r\n\r\n```\r\ndesktop1% pre-commit install --install-hooks \r\npre-commit installed at .git/hooks/pre-commit\r\n[INFO] Initializing environment for [email protected]:jockej/pre-commit-hooks.git.\r\n[INFO] Initializing environment for https://github.com/thlorenz/doctoc.\r\n[INFO] Initializing environment for https://gitlab.com/pycqa/flake8.\r\n[INFO] Initializing environment for https://github.com/jorisroovers/gitlint.\r\n[INFO] Installing environment for https://github.com/thlorenz/doctoc.\r\n[INFO] Once installed this environment will be reused.\r\n[INFO] This may take a few minutes...\r\nAn unexpected error has occurred: CalledProcessError: command: ('/home/joakim/.cache/pre-commit/repoizbdwm4g/node_env-system/bin/node', '/usr/local/bin/npm', 'install', '-g', '.')\r\nreturn code: 254\r\nexpected return code: 0\r\nstdout: (none)\r\nstderr:\r\n npm ERR! code ENOENT\r\n npm ERR! syscall chmod\r\n npm ERR! path /home/joakim/.cache/pre-commit/repoizbdwm4g/node_env-system/lib/node_modules/doctoc/doctoc.js\r\n npm ERR! errno -2\r\n npm ERR! enoent ENOENT: no such file or directory, chmod '/home/joakim/.cache/pre-commit/repoizbdwm4g/node_env-system/lib/node_modules/doctoc/doctoc.js'\r\n npm ERR! enoent This is related to npm not being able to find a file.\r\n npm ERR! enoent \r\n \r\n npm ERR! A complete log of this run can be found in:\r\n npm ERR! /home/joakim/.npm/_logs/2020-05-07T10_30_16_218Z-debug.log\r\n \r\nCheck the log at /home/joakim/.cache/pre-commit/pre-commit.log\r\n```\r\nThese hooks all install and run perfectly on GNU/Linux.\r\n\r\nI believe it happens because pre-commit tries to use the --prebuilt flag to nodeenv, however this doesn't work for FreeBSD since there are no prebuilt packages (the URL constructed gives a 404). I have node installed so it should be possible to use --system, but I haven't been able to make it work myself.\r\n\r\nNodeenv itself seems to work fine with --system.\r\n\r\nPlease let me know if I can help with trying things on FreeBSD.\r\n\n", "before_files": [{"content": "import contextlib\nimport logging\nimport os.path\nimport sqlite3\nimport tempfile\nfrom typing import Callable\nfrom typing import Generator\nfrom typing import List\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit import file_lock\nfrom pre_commit import git\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output_b\nfrom pre_commit.util import resource_text\nfrom pre_commit.util import rmtree\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef _get_default_directory() -> str:\n \"\"\"Returns the default directory for the Store. This is intentionally\n underscored to indicate that `Store.get_default_directory` is the intended\n way to get this information. 
This is also done so\n `Store.get_default_directory` can be mocked in tests and\n `_get_default_directory` can be tested.\n \"\"\"\n return os.environ.get('PRE_COMMIT_HOME') or os.path.join(\n os.environ.get('XDG_CACHE_HOME') or os.path.expanduser('~/.cache'),\n 'pre-commit',\n )\n\n\nclass Store:\n get_default_directory = staticmethod(_get_default_directory)\n\n def __init__(self, directory: Optional[str] = None) -> None:\n self.directory = directory or Store.get_default_directory()\n self.db_path = os.path.join(self.directory, 'db.db')\n\n if not os.path.exists(self.directory):\n os.makedirs(self.directory, exist_ok=True)\n with open(os.path.join(self.directory, 'README'), 'w') as f:\n f.write(\n 'This directory is maintained by the pre-commit project.\\n'\n 'Learn more: https://github.com/pre-commit/pre-commit\\n',\n )\n\n if os.path.exists(self.db_path):\n return\n with self.exclusive_lock():\n # Another process may have already completed this work\n if os.path.exists(self.db_path): # pragma: no cover (race)\n return\n # To avoid a race where someone ^Cs between db creation and\n # execution of the CREATE TABLE statement\n fd, tmpfile = tempfile.mkstemp(dir=self.directory)\n # We'll be managing this file ourselves\n os.close(fd)\n with self.connect(db_path=tmpfile) as db:\n db.executescript(\n 'CREATE TABLE repos ('\n ' repo TEXT NOT NULL,'\n ' ref TEXT NOT NULL,'\n ' path TEXT NOT NULL,'\n ' PRIMARY KEY (repo, ref)'\n ');',\n )\n self._create_config_table(db)\n\n # Atomic file move\n os.rename(tmpfile, self.db_path)\n\n @contextlib.contextmanager\n def exclusive_lock(self) -> Generator[None, None, None]:\n def blocked_cb() -> None: # pragma: no cover (tests are in-process)\n logger.info('Locking pre-commit directory')\n\n with file_lock.lock(os.path.join(self.directory, '.lock'), blocked_cb):\n yield\n\n @contextlib.contextmanager\n def connect(\n self,\n db_path: Optional[str] = None,\n ) -> Generator[sqlite3.Connection, None, None]:\n db_path = db_path or self.db_path\n # sqlite doesn't close its fd with its contextmanager >.<\n # contextlib.closing fixes this.\n # See: https://stackoverflow.com/a/28032829/812183\n with contextlib.closing(sqlite3.connect(db_path)) as db:\n # this creates a transaction\n with db:\n yield db\n\n @classmethod\n def db_repo_name(cls, repo: str, deps: Sequence[str]) -> str:\n if deps:\n return f'{repo}:{\",\".join(sorted(deps))}'\n else:\n return repo\n\n def _new_repo(\n self,\n repo: str,\n ref: str,\n deps: Sequence[str],\n make_strategy: Callable[[str], None],\n ) -> str:\n repo = self.db_repo_name(repo, deps)\n\n def _get_result() -> Optional[str]:\n # Check if we already exist\n with self.connect() as db:\n result = db.execute(\n 'SELECT path FROM repos WHERE repo = ? 
AND ref = ?',\n (repo, ref),\n ).fetchone()\n return result[0] if result else None\n\n result = _get_result()\n if result:\n return result\n with self.exclusive_lock():\n # Another process may have already completed this work\n result = _get_result()\n if result: # pragma: no cover (race)\n return result\n\n logger.info(f'Initializing environment for {repo}.')\n\n directory = tempfile.mkdtemp(prefix='repo', dir=self.directory)\n with clean_path_on_failure(directory):\n make_strategy(directory)\n\n # Update our db with the created repo\n with self.connect() as db:\n db.execute(\n 'INSERT INTO repos (repo, ref, path) VALUES (?, ?, ?)',\n [repo, ref, directory],\n )\n return directory\n\n def _complete_clone(self, ref: str, git_cmd: Callable[..., None]) -> None:\n \"\"\"Perform a complete clone of a repository and its submodules \"\"\"\n\n git_cmd('fetch', 'origin', '--tags')\n git_cmd('checkout', ref)\n git_cmd('submodule', 'update', '--init', '--recursive')\n\n def _shallow_clone(self, ref: str, git_cmd: Callable[..., None]) -> None:\n \"\"\"Perform a shallow clone of a repository and its submodules \"\"\"\n\n git_config = 'protocol.version=2'\n git_cmd('-c', git_config, 'fetch', 'origin', ref, '--depth=1')\n git_cmd('checkout', 'FETCH_HEAD')\n git_cmd(\n '-c', git_config, 'submodule', 'update', '--init', '--recursive',\n '--depth=1',\n )\n\n def clone(self, repo: str, ref: str, deps: Sequence[str] = ()) -> str:\n \"\"\"Clone the given url and checkout the specific ref.\"\"\"\n\n def clone_strategy(directory: str) -> None:\n git.init_repo(directory, repo)\n env = git.no_git_env()\n\n def _git_cmd(*args: str) -> None:\n cmd_output_b('git', *args, cwd=directory, env=env)\n\n try:\n self._shallow_clone(ref, _git_cmd)\n except CalledProcessError:\n self._complete_clone(ref, _git_cmd)\n\n return self._new_repo(repo, ref, deps, clone_strategy)\n\n LOCAL_RESOURCES = (\n 'Cargo.toml', 'main.go', 'go.mod', 'main.rs', '.npmignore',\n 'package.json', 'pre_commit_dummy_package.gemspec', 'setup.py',\n 'environment.yml', 'Makefile.PL',\n )\n\n def make_local(self, deps: Sequence[str]) -> str:\n def make_local_strategy(directory: str) -> None:\n for resource in self.LOCAL_RESOURCES:\n contents = resource_text(f'empty_template_{resource}')\n with open(os.path.join(directory, resource), 'w') as f:\n f.write(contents)\n\n env = git.no_git_env()\n\n # initialize the git repository so it looks more like cloned repos\n def _git_cmd(*args: str) -> None:\n cmd_output_b('git', *args, cwd=directory, env=env)\n\n git.init_repo(directory, '<<unknown>>')\n _git_cmd('add', '.')\n git.commit(repo=directory)\n\n return self._new_repo(\n 'local', C.LOCAL_REPO_VERSION, deps, make_local_strategy,\n )\n\n def _create_config_table(self, db: sqlite3.Connection) -> None:\n db.executescript(\n 'CREATE TABLE IF NOT EXISTS configs ('\n ' path TEXT NOT NULL,'\n ' PRIMARY KEY (path)'\n ');',\n )\n\n def mark_config_used(self, path: str) -> None:\n path = os.path.realpath(path)\n # don't insert config files that do not exist\n if not os.path.exists(path):\n return\n with self.connect() as db:\n # TODO: eventually remove this and only create in _create\n self._create_config_table(db)\n db.execute('INSERT OR IGNORE INTO configs VALUES (?)', (path,))\n\n def select_all_configs(self) -> List[str]:\n with self.connect() as db:\n self._create_config_table(db)\n rows = db.execute('SELECT path FROM configs').fetchall()\n return [path for path, in rows]\n\n def delete_configs(self, configs: List[str]) -> None:\n with self.connect() as db:\n 
rows = [(path,) for path in configs]\n db.executemany('DELETE FROM configs WHERE path = ?', rows)\n\n def select_all_repos(self) -> List[Tuple[str, str, str]]:\n with self.connect() as db:\n return db.execute('SELECT repo, ref, path from repos').fetchall()\n\n def delete_repo(self, db_repo_name: str, ref: str, path: str) -> None:\n with self.connect() as db:\n db.execute(\n 'DELETE FROM repos WHERE repo = ? and ref = ?',\n (db_repo_name, ref),\n )\n rmtree(path)\n", "path": "pre_commit/store.py"}], "after_files": [{"content": "import contextlib\nimport logging\nimport os.path\nimport sqlite3\nimport tempfile\nfrom typing import Callable\nfrom typing import Generator\nfrom typing import List\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit import file_lock\nfrom pre_commit import git\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output_b\nfrom pre_commit.util import resource_text\nfrom pre_commit.util import rmtree\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef _get_default_directory() -> str:\n \"\"\"Returns the default directory for the Store. This is intentionally\n underscored to indicate that `Store.get_default_directory` is the intended\n way to get this information. This is also done so\n `Store.get_default_directory` can be mocked in tests and\n `_get_default_directory` can be tested.\n \"\"\"\n ret = os.environ.get('PRE_COMMIT_HOME') or os.path.join(\n os.environ.get('XDG_CACHE_HOME') or os.path.expanduser('~/.cache'),\n 'pre-commit',\n )\n return os.path.realpath(ret)\n\n\nclass Store:\n get_default_directory = staticmethod(_get_default_directory)\n\n def __init__(self, directory: Optional[str] = None) -> None:\n self.directory = directory or Store.get_default_directory()\n self.db_path = os.path.join(self.directory, 'db.db')\n\n if not os.path.exists(self.directory):\n os.makedirs(self.directory, exist_ok=True)\n with open(os.path.join(self.directory, 'README'), 'w') as f:\n f.write(\n 'This directory is maintained by the pre-commit project.\\n'\n 'Learn more: https://github.com/pre-commit/pre-commit\\n',\n )\n\n if os.path.exists(self.db_path):\n return\n with self.exclusive_lock():\n # Another process may have already completed this work\n if os.path.exists(self.db_path): # pragma: no cover (race)\n return\n # To avoid a race where someone ^Cs between db creation and\n # execution of the CREATE TABLE statement\n fd, tmpfile = tempfile.mkstemp(dir=self.directory)\n # We'll be managing this file ourselves\n os.close(fd)\n with self.connect(db_path=tmpfile) as db:\n db.executescript(\n 'CREATE TABLE repos ('\n ' repo TEXT NOT NULL,'\n ' ref TEXT NOT NULL,'\n ' path TEXT NOT NULL,'\n ' PRIMARY KEY (repo, ref)'\n ');',\n )\n self._create_config_table(db)\n\n # Atomic file move\n os.rename(tmpfile, self.db_path)\n\n @contextlib.contextmanager\n def exclusive_lock(self) -> Generator[None, None, None]:\n def blocked_cb() -> None: # pragma: no cover (tests are in-process)\n logger.info('Locking pre-commit directory')\n\n with file_lock.lock(os.path.join(self.directory, '.lock'), blocked_cb):\n yield\n\n @contextlib.contextmanager\n def connect(\n self,\n db_path: Optional[str] = None,\n ) -> Generator[sqlite3.Connection, None, None]:\n db_path = db_path or self.db_path\n # sqlite doesn't close its fd with its contextmanager >.<\n # contextlib.closing fixes this.\n # See: 
https://stackoverflow.com/a/28032829/812183\n with contextlib.closing(sqlite3.connect(db_path)) as db:\n # this creates a transaction\n with db:\n yield db\n\n @classmethod\n def db_repo_name(cls, repo: str, deps: Sequence[str]) -> str:\n if deps:\n return f'{repo}:{\",\".join(sorted(deps))}'\n else:\n return repo\n\n def _new_repo(\n self,\n repo: str,\n ref: str,\n deps: Sequence[str],\n make_strategy: Callable[[str], None],\n ) -> str:\n repo = self.db_repo_name(repo, deps)\n\n def _get_result() -> Optional[str]:\n # Check if we already exist\n with self.connect() as db:\n result = db.execute(\n 'SELECT path FROM repos WHERE repo = ? AND ref = ?',\n (repo, ref),\n ).fetchone()\n return result[0] if result else None\n\n result = _get_result()\n if result:\n return result\n with self.exclusive_lock():\n # Another process may have already completed this work\n result = _get_result()\n if result: # pragma: no cover (race)\n return result\n\n logger.info(f'Initializing environment for {repo}.')\n\n directory = tempfile.mkdtemp(prefix='repo', dir=self.directory)\n with clean_path_on_failure(directory):\n make_strategy(directory)\n\n # Update our db with the created repo\n with self.connect() as db:\n db.execute(\n 'INSERT INTO repos (repo, ref, path) VALUES (?, ?, ?)',\n [repo, ref, directory],\n )\n return directory\n\n def _complete_clone(self, ref: str, git_cmd: Callable[..., None]) -> None:\n \"\"\"Perform a complete clone of a repository and its submodules \"\"\"\n\n git_cmd('fetch', 'origin', '--tags')\n git_cmd('checkout', ref)\n git_cmd('submodule', 'update', '--init', '--recursive')\n\n def _shallow_clone(self, ref: str, git_cmd: Callable[..., None]) -> None:\n \"\"\"Perform a shallow clone of a repository and its submodules \"\"\"\n\n git_config = 'protocol.version=2'\n git_cmd('-c', git_config, 'fetch', 'origin', ref, '--depth=1')\n git_cmd('checkout', 'FETCH_HEAD')\n git_cmd(\n '-c', git_config, 'submodule', 'update', '--init', '--recursive',\n '--depth=1',\n )\n\n def clone(self, repo: str, ref: str, deps: Sequence[str] = ()) -> str:\n \"\"\"Clone the given url and checkout the specific ref.\"\"\"\n\n def clone_strategy(directory: str) -> None:\n git.init_repo(directory, repo)\n env = git.no_git_env()\n\n def _git_cmd(*args: str) -> None:\n cmd_output_b('git', *args, cwd=directory, env=env)\n\n try:\n self._shallow_clone(ref, _git_cmd)\n except CalledProcessError:\n self._complete_clone(ref, _git_cmd)\n\n return self._new_repo(repo, ref, deps, clone_strategy)\n\n LOCAL_RESOURCES = (\n 'Cargo.toml', 'main.go', 'go.mod', 'main.rs', '.npmignore',\n 'package.json', 'pre_commit_dummy_package.gemspec', 'setup.py',\n 'environment.yml', 'Makefile.PL',\n )\n\n def make_local(self, deps: Sequence[str]) -> str:\n def make_local_strategy(directory: str) -> None:\n for resource in self.LOCAL_RESOURCES:\n contents = resource_text(f'empty_template_{resource}')\n with open(os.path.join(directory, resource), 'w') as f:\n f.write(contents)\n\n env = git.no_git_env()\n\n # initialize the git repository so it looks more like cloned repos\n def _git_cmd(*args: str) -> None:\n cmd_output_b('git', *args, cwd=directory, env=env)\n\n git.init_repo(directory, '<<unknown>>')\n _git_cmd('add', '.')\n git.commit(repo=directory)\n\n return self._new_repo(\n 'local', C.LOCAL_REPO_VERSION, deps, make_local_strategy,\n )\n\n def _create_config_table(self, db: sqlite3.Connection) -> None:\n db.executescript(\n 'CREATE TABLE IF NOT EXISTS configs ('\n ' path TEXT NOT NULL,'\n ' PRIMARY KEY (path)'\n ');',\n )\n\n 
def mark_config_used(self, path: str) -> None:\n path = os.path.realpath(path)\n # don't insert config files that do not exist\n if not os.path.exists(path):\n return\n with self.connect() as db:\n # TODO: eventually remove this and only create in _create\n self._create_config_table(db)\n db.execute('INSERT OR IGNORE INTO configs VALUES (?)', (path,))\n\n def select_all_configs(self) -> List[str]:\n with self.connect() as db:\n self._create_config_table(db)\n rows = db.execute('SELECT path FROM configs').fetchall()\n return [path for path, in rows]\n\n def delete_configs(self, configs: List[str]) -> None:\n with self.connect() as db:\n rows = [(path,) for path in configs]\n db.executemany('DELETE FROM configs WHERE path = ?', rows)\n\n def select_all_repos(self) -> List[Tuple[str, str, str]]:\n with self.connect() as db:\n return db.execute('SELECT repo, ref, path from repos').fetchall()\n\n def delete_repo(self, db_repo_name: str, ref: str, path: str) -> None:\n with self.connect() as db:\n db.execute(\n 'DELETE FROM repos WHERE repo = ? and ref = ?',\n (db_repo_name, ref),\n )\n rmtree(path)\n", "path": "pre_commit/store.py"}]}
3,497
139
gh_patches_debug_5247
rasdani/github-patches
git_diff
pytorch__ignite-1978
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Improve Accumulation metric on tensors to work with Apex ## 🚀 Feature Request It seems that the ignite is not compatible with apex. When i am trying to use the (automatic mixed precision)amp from the Apex for the GPU Acceleration, I have encountered the following error. Look around the Ignite source code and I find that it uses the original torch.Tensor. Is there any timeline for Ignite to be compatible with Apex? ``` m.update(loss) -- File "/usr/local/lib/python3.6/site-packages/ignite/metrics/metric.py", line 575, in wrapper func(self, *args, **kwargs) File "/usr/local/lib/python3.6/site-packages/ignite/metrics/accumulation.py", line 73, in update self.accumulator = self._op(self.accumulator, output) File "/usr/local/lib/python3.6/site-packages/ignite/metrics/accumulation.py", line 130, in _mean_op return a + x File "/usr/local/lib64/python3.6/site-packages/apex/amp/wrap.py", line 62, in wrapper .format(types)) NotImplementedError: Do not know how to handle these types to promote: {'FloatTensor', 'DoubleTensor'} ``` Apex: https://github.com/NVIDIA/apex ## Environment - PyTorch Version (1.4.0): - Ignite Version (0.4.4): - OS (Linux): - Python version: python 3.6 - Apex Version (0.1) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ignite/metrics/accumulation.py` Content: ``` 1 import numbers 2 from typing import Callable, Tuple, Union 3 4 import torch 5 6 from ignite.exceptions import NotComputableError 7 from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce 8 9 __all__ = ["VariableAccumulation", "GeometricAverage", "Average"] 10 11 12 class VariableAccumulation(Metric): 13 """Single variable accumulator helper to compute (arithmetic, geometric, harmonic) average of a single variable. 14 15 - ``update`` must receive output of the form `x`. 16 - `x` can be a number or `torch.Tensor`. 17 18 Note: 19 20 The class stores input into two public variables: `accumulator` and `num_examples`. 21 Number of samples is updated following the rule: 22 23 - `+1` if input is a number 24 - `+1` if input is a 1D `torch.Tensor` 25 - `+batch_size` if input is a ND `torch.Tensor`. Batch size is the first dimension (`shape[0]`). 26 27 Args: 28 op: a callable to update accumulator. Method's signature is `(accumulator, output)`. 29 For example, to compute arithmetic mean value, `op = lambda a, x: a + x`. 30 output_transform: a callable that is used to transform the 31 :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the 32 form expected by the metric. This can be useful if, for example, you have a multi-output model and 33 you want to compute the metric with respect to one of the outputs. 34 device: specifies which device updates are accumulated on. Setting the metric's 35 device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By 36 default, CPU. 
37 38 """ 39 40 required_output_keys = None 41 42 def __init__( 43 self, 44 op: Callable, 45 output_transform: Callable = lambda x: x, 46 device: Union[str, torch.device] = torch.device("cpu"), 47 ): 48 if not callable(op): 49 raise TypeError(f"Argument op should be a callable, but given {type(op)}") 50 51 self._op = op 52 53 super(VariableAccumulation, self).__init__(output_transform=output_transform, device=device) 54 55 @reinit__is_reduced 56 def reset(self) -> None: 57 self.accumulator = torch.tensor(0.0, dtype=torch.float64, device=self._device) 58 self.num_examples = 0 59 60 def _check_output_type(self, output: Union[float, torch.Tensor]) -> None: 61 if not (isinstance(output, numbers.Number) or isinstance(output, torch.Tensor)): 62 raise TypeError(f"Output should be a number or torch.Tensor, but given {type(output)}") 63 64 @reinit__is_reduced 65 def update(self, output: Union[float, torch.Tensor]) -> None: 66 self._check_output_type(output) 67 68 if isinstance(output, torch.Tensor): 69 output = output.detach() 70 if output.device != self._device: 71 output = output.to(self._device) 72 73 self.accumulator = self._op(self.accumulator, output) 74 75 if isinstance(output, torch.Tensor): 76 self.num_examples += output.shape[0] if len(output.shape) > 1 else 1 77 else: 78 self.num_examples += 1 79 80 @sync_all_reduce("accumulator", "num_examples") 81 def compute(self) -> Tuple[torch.Tensor, int]: 82 return self.accumulator, self.num_examples 83 84 85 class Average(VariableAccumulation): 86 """Helper class to compute arithmetic average of a single variable. 87 88 - ``update`` must receive output of the form `x`. 89 - `x` can be a number or `torch.Tensor`. 90 91 Note: 92 93 Number of samples is updated following the rule: 94 95 - `+1` if input is a number 96 - `+1` if input is a 1D `torch.Tensor` 97 - `+batch_size` if input is an ND `torch.Tensor`. Batch size is the first dimension (`shape[0]`). 98 99 For input `x` being an ND `torch.Tensor` with N > 1, the first dimension is seen as the number of samples and 100 is summed up and added to the accumulator: `accumulator += x.sum(dim=0)` 101 102 Examples: 103 104 .. code-block:: python 105 106 evaluator = ... 107 108 custom_var_mean = Average(output_transform=lambda output: output['custom_var']) 109 custom_var_mean.attach(evaluator, 'mean_custom_var') 110 111 state = evaluator.run(dataset) 112 # state.metrics['mean_custom_var'] -> average of output['custom_var'] 113 114 Args: 115 output_transform: a callable that is used to transform the 116 :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the 117 form expected by the metric. This can be useful if, for example, you have a multi-output model and 118 you want to compute the metric with respect to one of the outputs. 119 device: specifies which device updates are accumulated on. Setting the metric's 120 device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By 121 default, CPU. 
122 """ 123 124 def __init__( 125 self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device("cpu") 126 ): 127 def _mean_op(a: Union[float, torch.Tensor], x: Union[float, torch.Tensor]) -> Union[float, torch.Tensor]: 128 if isinstance(x, torch.Tensor) and x.ndim > 1: 129 x = x.sum(dim=0) 130 return a + x 131 132 super(Average, self).__init__(op=_mean_op, output_transform=output_transform, device=device) 133 134 @sync_all_reduce("accumulator", "num_examples") 135 def compute(self) -> Union[float, torch.Tensor]: 136 if self.num_examples < 1: 137 raise NotComputableError( 138 f"{self.__class__.__name__} must have at least one example before it can be computed." 139 ) 140 141 return self.accumulator / self.num_examples 142 143 144 class GeometricAverage(VariableAccumulation): 145 """Helper class to compute geometric average of a single variable. 146 147 - ``update`` must receive output of the form `x`. 148 - `x` can be a positive number or a positive `torch.Tensor`, such that ``torch.log(x)`` is not `nan`. 149 150 Note: 151 152 Number of samples is updated following the rule: 153 154 - `+1` if input is a number 155 - `+1` if input is a 1D `torch.Tensor` 156 - `+batch_size` if input is a ND `torch.Tensor`. Batch size is the first dimension (`shape[0]`). 157 158 For input `x` being an ND `torch.Tensor` with N > 1, the first dimension is seen as the number of samples and 159 is aggregated and added to the accumulator: `accumulator *= prod(x, dim=0)` 160 161 Args: 162 output_transform: a callable that is used to transform the 163 :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the 164 form expected by the metric. This can be useful if, for example, you have a multi-output model and 165 you want to compute the metric with respect to one of the outputs. 166 device: specifies which device updates are accumulated on. Setting the metric's 167 device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By 168 default, CPU. 169 170 """ 171 172 def __init__( 173 self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device("cpu") 174 ): 175 def _geom_op(a: torch.Tensor, x: Union[float, torch.Tensor]) -> torch.Tensor: 176 if not isinstance(x, torch.Tensor): 177 x = torch.tensor(x) 178 x = torch.log(x) 179 if x.ndim > 1: 180 x = x.sum(dim=0) 181 return a + x 182 183 super(GeometricAverage, self).__init__(op=_geom_op, output_transform=output_transform, device=device) 184 185 @sync_all_reduce("accumulator", "num_examples") 186 def compute(self) -> Union[float, torch.Tensor]: 187 if self.num_examples < 1: 188 raise NotComputableError( 189 f"{self.__class__.__name__} must have at least one example before it can be computed." 190 ) 191 192 tensor = torch.exp(self.accumulator / self.num_examples) 193 194 if tensor.numel() == 1: 195 return tensor.item() 196 197 return tensor 198 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ignite/metrics/accumulation.py b/ignite/metrics/accumulation.py --- a/ignite/metrics/accumulation.py +++ b/ignite/metrics/accumulation.py @@ -67,8 +67,8 @@ if isinstance(output, torch.Tensor): output = output.detach() - if output.device != self._device: - output = output.to(self._device) + if not (output.device == self._device and output.dtype == self.accumulator.dtype): + output = output.to(self.accumulator) self.accumulator = self._op(self.accumulator, output)
{"golden_diff": "diff --git a/ignite/metrics/accumulation.py b/ignite/metrics/accumulation.py\n--- a/ignite/metrics/accumulation.py\n+++ b/ignite/metrics/accumulation.py\n@@ -67,8 +67,8 @@\n \n if isinstance(output, torch.Tensor):\n output = output.detach()\n- if output.device != self._device:\n- output = output.to(self._device)\n+ if not (output.device == self._device and output.dtype == self.accumulator.dtype):\n+ output = output.to(self.accumulator)\n \n self.accumulator = self._op(self.accumulator, output)\n", "issue": "Improve Accumulation metric on tensors to work with Apex\n## \ud83d\ude80 Feature Request\r\nIt seems that the ignite is not compatible with apex. When i am trying to use the (automatic mixed precision)amp from the Apex for the GPU Acceleration, I have encountered the following error. Look around the Ignite source code and I find that it uses the original torch.Tensor. Is there any timeline for Ignite to be compatible with Apex?\r\n```\r\nm.update(loss)\r\n--\r\nFile \"/usr/local/lib/python3.6/site-packages/ignite/metrics/metric.py\", line 575, in wrapper\r\nfunc(self, *args, **kwargs)\r\nFile \"/usr/local/lib/python3.6/site-packages/ignite/metrics/accumulation.py\", line 73, in update\r\nself.accumulator = self._op(self.accumulator, output)\r\nFile \"/usr/local/lib/python3.6/site-packages/ignite/metrics/accumulation.py\", line 130, in _mean_op\r\nreturn a + x\r\nFile \"/usr/local/lib64/python3.6/site-packages/apex/amp/wrap.py\", line 62, in wrapper\r\n.format(types))\r\nNotImplementedError: Do not know how to handle these types to promote: {'FloatTensor', 'DoubleTensor'}\r\n```\r\nApex: https://github.com/NVIDIA/apex\r\n\r\n## Environment\r\n\r\n- PyTorch Version (1.4.0):\r\n- Ignite Version (0.4.4):\r\n- OS (Linux):\r\n- Python version: python 3.6\r\n- Apex Version (0.1)\n", "before_files": [{"content": "import numbers\nfrom typing import Callable, Tuple, Union\n\nimport torch\n\nfrom ignite.exceptions import NotComputableError\nfrom ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce\n\n__all__ = [\"VariableAccumulation\", \"GeometricAverage\", \"Average\"]\n\n\nclass VariableAccumulation(Metric):\n \"\"\"Single variable accumulator helper to compute (arithmetic, geometric, harmonic) average of a single variable.\n\n - ``update`` must receive output of the form `x`.\n - `x` can be a number or `torch.Tensor`.\n\n Note:\n\n The class stores input into two public variables: `accumulator` and `num_examples`.\n Number of samples is updated following the rule:\n\n - `+1` if input is a number\n - `+1` if input is a 1D `torch.Tensor`\n - `+batch_size` if input is a ND `torch.Tensor`. Batch size is the first dimension (`shape[0]`).\n\n Args:\n op: a callable to update accumulator. Method's signature is `(accumulator, output)`.\n For example, to compute arithmetic mean value, `op = lambda a, x: a + x`.\n output_transform: a callable that is used to transform the\n :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the\n form expected by the metric. This can be useful if, for example, you have a multi-output model and\n you want to compute the metric with respect to one of the outputs.\n device: specifies which device updates are accumulated on. Setting the metric's\n device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. 
By\n default, CPU.\n\n \"\"\"\n\n required_output_keys = None\n\n def __init__(\n self,\n op: Callable,\n output_transform: Callable = lambda x: x,\n device: Union[str, torch.device] = torch.device(\"cpu\"),\n ):\n if not callable(op):\n raise TypeError(f\"Argument op should be a callable, but given {type(op)}\")\n\n self._op = op\n\n super(VariableAccumulation, self).__init__(output_transform=output_transform, device=device)\n\n @reinit__is_reduced\n def reset(self) -> None:\n self.accumulator = torch.tensor(0.0, dtype=torch.float64, device=self._device)\n self.num_examples = 0\n\n def _check_output_type(self, output: Union[float, torch.Tensor]) -> None:\n if not (isinstance(output, numbers.Number) or isinstance(output, torch.Tensor)):\n raise TypeError(f\"Output should be a number or torch.Tensor, but given {type(output)}\")\n\n @reinit__is_reduced\n def update(self, output: Union[float, torch.Tensor]) -> None:\n self._check_output_type(output)\n\n if isinstance(output, torch.Tensor):\n output = output.detach()\n if output.device != self._device:\n output = output.to(self._device)\n\n self.accumulator = self._op(self.accumulator, output)\n\n if isinstance(output, torch.Tensor):\n self.num_examples += output.shape[0] if len(output.shape) > 1 else 1\n else:\n self.num_examples += 1\n\n @sync_all_reduce(\"accumulator\", \"num_examples\")\n def compute(self) -> Tuple[torch.Tensor, int]:\n return self.accumulator, self.num_examples\n\n\nclass Average(VariableAccumulation):\n \"\"\"Helper class to compute arithmetic average of a single variable.\n\n - ``update`` must receive output of the form `x`.\n - `x` can be a number or `torch.Tensor`.\n\n Note:\n\n Number of samples is updated following the rule:\n\n - `+1` if input is a number\n - `+1` if input is a 1D `torch.Tensor`\n - `+batch_size` if input is an ND `torch.Tensor`. Batch size is the first dimension (`shape[0]`).\n\n For input `x` being an ND `torch.Tensor` with N > 1, the first dimension is seen as the number of samples and\n is summed up and added to the accumulator: `accumulator += x.sum(dim=0)`\n\n Examples:\n\n .. code-block:: python\n\n evaluator = ...\n\n custom_var_mean = Average(output_transform=lambda output: output['custom_var'])\n custom_var_mean.attach(evaluator, 'mean_custom_var')\n\n state = evaluator.run(dataset)\n # state.metrics['mean_custom_var'] -> average of output['custom_var']\n\n Args:\n output_transform: a callable that is used to transform the\n :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the\n form expected by the metric. This can be useful if, for example, you have a multi-output model and\n you want to compute the metric with respect to one of the outputs.\n device: specifies which device updates are accumulated on. Setting the metric's\n device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. 
By\n default, CPU.\n \"\"\"\n\n def __init__(\n self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device(\"cpu\")\n ):\n def _mean_op(a: Union[float, torch.Tensor], x: Union[float, torch.Tensor]) -> Union[float, torch.Tensor]:\n if isinstance(x, torch.Tensor) and x.ndim > 1:\n x = x.sum(dim=0)\n return a + x\n\n super(Average, self).__init__(op=_mean_op, output_transform=output_transform, device=device)\n\n @sync_all_reduce(\"accumulator\", \"num_examples\")\n def compute(self) -> Union[float, torch.Tensor]:\n if self.num_examples < 1:\n raise NotComputableError(\n f\"{self.__class__.__name__} must have at least one example before it can be computed.\"\n )\n\n return self.accumulator / self.num_examples\n\n\nclass GeometricAverage(VariableAccumulation):\n \"\"\"Helper class to compute geometric average of a single variable.\n\n - ``update`` must receive output of the form `x`.\n - `x` can be a positive number or a positive `torch.Tensor`, such that ``torch.log(x)`` is not `nan`.\n\n Note:\n\n Number of samples is updated following the rule:\n\n - `+1` if input is a number\n - `+1` if input is a 1D `torch.Tensor`\n - `+batch_size` if input is a ND `torch.Tensor`. Batch size is the first dimension (`shape[0]`).\n\n For input `x` being an ND `torch.Tensor` with N > 1, the first dimension is seen as the number of samples and\n is aggregated and added to the accumulator: `accumulator *= prod(x, dim=0)`\n\n Args:\n output_transform: a callable that is used to transform the\n :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the\n form expected by the metric. This can be useful if, for example, you have a multi-output model and\n you want to compute the metric with respect to one of the outputs.\n device: specifies which device updates are accumulated on. Setting the metric's\n device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. 
By\n default, CPU.\n\n \"\"\"\n\n def __init__(\n self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device(\"cpu\")\n ):\n def _geom_op(a: torch.Tensor, x: Union[float, torch.Tensor]) -> torch.Tensor:\n if not isinstance(x, torch.Tensor):\n x = torch.tensor(x)\n x = torch.log(x)\n if x.ndim > 1:\n x = x.sum(dim=0)\n return a + x\n\n super(GeometricAverage, self).__init__(op=_geom_op, output_transform=output_transform, device=device)\n\n @sync_all_reduce(\"accumulator\", \"num_examples\")\n def compute(self) -> Union[float, torch.Tensor]:\n if self.num_examples < 1:\n raise NotComputableError(\n f\"{self.__class__.__name__} must have at least one example before it can be computed.\"\n )\n\n tensor = torch.exp(self.accumulator / self.num_examples)\n\n if tensor.numel() == 1:\n return tensor.item()\n\n return tensor\n", "path": "ignite/metrics/accumulation.py"}], "after_files": [{"content": "import numbers\nfrom typing import Callable, Tuple, Union\n\nimport torch\n\nfrom ignite.exceptions import NotComputableError\nfrom ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce\n\n__all__ = [\"VariableAccumulation\", \"GeometricAverage\", \"Average\"]\n\n\nclass VariableAccumulation(Metric):\n \"\"\"Single variable accumulator helper to compute (arithmetic, geometric, harmonic) average of a single variable.\n\n - ``update`` must receive output of the form `x`.\n - `x` can be a number or `torch.Tensor`.\n\n Note:\n\n The class stores input into two public variables: `accumulator` and `num_examples`.\n Number of samples is updated following the rule:\n\n - `+1` if input is a number\n - `+1` if input is a 1D `torch.Tensor`\n - `+batch_size` if input is a ND `torch.Tensor`. Batch size is the first dimension (`shape[0]`).\n\n Args:\n op: a callable to update accumulator. Method's signature is `(accumulator, output)`.\n For example, to compute arithmetic mean value, `op = lambda a, x: a + x`.\n output_transform: a callable that is used to transform the\n :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the\n form expected by the metric. This can be useful if, for example, you have a multi-output model and\n you want to compute the metric with respect to one of the outputs.\n device: specifies which device updates are accumulated on. Setting the metric's\n device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. 
By\n default, CPU.\n\n \"\"\"\n\n required_output_keys = None\n\n def __init__(\n self,\n op: Callable,\n output_transform: Callable = lambda x: x,\n device: Union[str, torch.device] = torch.device(\"cpu\"),\n ):\n if not callable(op):\n raise TypeError(f\"Argument op should be a callable, but given {type(op)}\")\n\n self._op = op\n\n super(VariableAccumulation, self).__init__(output_transform=output_transform, device=device)\n\n @reinit__is_reduced\n def reset(self) -> None:\n self.accumulator = torch.tensor(0.0, dtype=torch.float64, device=self._device)\n self.num_examples = 0\n\n def _check_output_type(self, output: Union[float, torch.Tensor]) -> None:\n if not (isinstance(output, numbers.Number) or isinstance(output, torch.Tensor)):\n raise TypeError(f\"Output should be a number or torch.Tensor, but given {type(output)}\")\n\n @reinit__is_reduced\n def update(self, output: Union[float, torch.Tensor]) -> None:\n self._check_output_type(output)\n\n if isinstance(output, torch.Tensor):\n output = output.detach()\n if not (output.device == self._device and output.dtype == self.accumulator.dtype):\n output = output.to(self.accumulator)\n\n self.accumulator = self._op(self.accumulator, output)\n\n if isinstance(output, torch.Tensor):\n self.num_examples += output.shape[0] if len(output.shape) > 1 else 1\n else:\n self.num_examples += 1\n\n @sync_all_reduce(\"accumulator\", \"num_examples\")\n def compute(self) -> Tuple[torch.Tensor, int]:\n return self.accumulator, self.num_examples\n\n\nclass Average(VariableAccumulation):\n \"\"\"Helper class to compute arithmetic average of a single variable.\n\n - ``update`` must receive output of the form `x`.\n - `x` can be a number or `torch.Tensor`.\n\n Note:\n\n Number of samples is updated following the rule:\n\n - `+1` if input is a number\n - `+1` if input is a 1D `torch.Tensor`\n - `+batch_size` if input is an ND `torch.Tensor`. Batch size is the first dimension (`shape[0]`).\n\n For input `x` being an ND `torch.Tensor` with N > 1, the first dimension is seen as the number of samples and\n is summed up and added to the accumulator: `accumulator += x.sum(dim=0)`\n\n Examples:\n\n .. code-block:: python\n\n evaluator = ...\n\n custom_var_mean = Average(output_transform=lambda output: output['custom_var'])\n custom_var_mean.attach(evaluator, 'mean_custom_var')\n\n state = evaluator.run(dataset)\n # state.metrics['mean_custom_var'] -> average of output['custom_var']\n\n Args:\n output_transform: a callable that is used to transform the\n :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the\n form expected by the metric. This can be useful if, for example, you have a multi-output model and\n you want to compute the metric with respect to one of the outputs.\n device: specifies which device updates are accumulated on. Setting the metric's\n device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. 
By\n default, CPU.\n \"\"\"\n\n def __init__(\n self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device(\"cpu\")\n ):\n def _mean_op(a: Union[float, torch.Tensor], x: Union[float, torch.Tensor]) -> Union[float, torch.Tensor]:\n if isinstance(x, torch.Tensor) and x.ndim > 1:\n x = x.sum(dim=0)\n return a + x\n\n super(Average, self).__init__(op=_mean_op, output_transform=output_transform, device=device)\n\n @sync_all_reduce(\"accumulator\", \"num_examples\")\n def compute(self) -> Union[float, torch.Tensor]:\n if self.num_examples < 1:\n raise NotComputableError(\n f\"{self.__class__.__name__} must have at least one example before it can be computed.\"\n )\n\n return self.accumulator / self.num_examples\n\n\nclass GeometricAverage(VariableAccumulation):\n \"\"\"Helper class to compute geometric average of a single variable.\n\n - ``update`` must receive output of the form `x`.\n - `x` can be a positive number or a positive `torch.Tensor`, such that ``torch.log(x)`` is not `nan`.\n\n Note:\n\n Number of samples is updated following the rule:\n\n - `+1` if input is a number\n - `+1` if input is a 1D `torch.Tensor`\n - `+batch_size` if input is a ND `torch.Tensor`. Batch size is the first dimension (`shape[0]`).\n\n For input `x` being an ND `torch.Tensor` with N > 1, the first dimension is seen as the number of samples and\n is aggregated and added to the accumulator: `accumulator *= prod(x, dim=0)`\n\n Args:\n output_transform: a callable that is used to transform the\n :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the\n form expected by the metric. This can be useful if, for example, you have a multi-output model and\n you want to compute the metric with respect to one of the outputs.\n device: specifies which device updates are accumulated on. Setting the metric's\n device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By\n default, CPU.\n\n \"\"\"\n\n def __init__(\n self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device(\"cpu\")\n ):\n def _geom_op(a: torch.Tensor, x: Union[float, torch.Tensor]) -> torch.Tensor:\n if not isinstance(x, torch.Tensor):\n x = torch.tensor(x)\n x = torch.log(x)\n if x.ndim > 1:\n x = x.sum(dim=0)\n return a + x\n\n super(GeometricAverage, self).__init__(op=_geom_op, output_transform=output_transform, device=device)\n\n @sync_all_reduce(\"accumulator\", \"num_examples\")\n def compute(self) -> Union[float, torch.Tensor]:\n if self.num_examples < 1:\n raise NotComputableError(\n f\"{self.__class__.__name__} must have at least one example before it can be computed.\"\n )\n\n tensor = torch.exp(self.accumulator / self.num_examples)\n\n if tensor.numel() == 1:\n return tensor.item()\n\n return tensor\n", "path": "ignite/metrics/accumulation.py"}]}
num_tokens: 2,945
num_tokens_diff: 136
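
The record above patches `VariableAccumulation.update` so that incoming tensors are cast to the accumulator itself rather than only moved to the metric's device. The snippet below is a standalone sketch of that pattern, not the pytorch-ignite source: it keeps a float64 accumulator and aligns each output's device *and* dtype with it via `output.to(accumulator)`, which is what sidesteps the `FloatTensor`/`DoubleTensor` promotion error reported under Apex amp. Only `torch` is needed to run it.

```python
import torch


def make_accumulator(device: str = "cpu") -> torch.Tensor:
    # Mirrors the metric's reset(): a float64 scalar on the metric's device.
    return torch.tensor(0.0, dtype=torch.float64, device=device)


def update(accumulator: torch.Tensor, output, op=lambda a, x: a + x) -> torch.Tensor:
    """Accumulate `output` after aligning it with the accumulator.

    `output.to(accumulator)` converts to the accumulator's device *and* dtype
    in one call, so a float32 (or amp half-precision) output is promoted to
    float64 before `op` ever sees it.
    """
    if isinstance(output, torch.Tensor):
        output = output.detach()
        if not (output.device == accumulator.device
                and output.dtype == accumulator.dtype):
            output = output.to(accumulator)
    return op(accumulator, output)


acc = make_accumulator()
for loss in (torch.tensor(0.5, dtype=torch.float32), 1.25, torch.tensor(0.25)):
    acc = update(acc, loss)
print(acc.item())  # 2.0
```

Casting toward the accumulator rather than toward the device alone is the essential change: the accumulator keeps its float64 dtype, and mixed-precision outputs never reach the addition un-promoted.
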
problem_id: gh_patches_debug_33990
source: rasdani/github-patches
task_type: git_diff
in_source_id: netbox-community__netbox-16049
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Error on cable edit after B port was deleted / wrong status ### Deployment Type Self-hosted (Docker) ### NetBox Version v3.7.5 ### Python Version 3.11 ### Steps to Reproduce 1. Have a device with front- and rear-Ports (Patch Panel) 2. Have a device with interfaces (switches) 3. Connect switch interface (here gi43) with front-port (here 22) 4. Delete rear ports on patch panel device 5. Go To Connections > Cables 6. Click on edit of cable --> error message ### Expected Behavior - Edit Button works to connect cable again ### Observed Behavior - Error Message ![2024-04-19 19_20_00-Server Error and 5 more pages - Work - Microsoft​ Edge](https://github.com/netbox-community/netbox/assets/13959569/78d69dbd-7b02-4b1a-be0a-3523e51152a6) Cables/UI: Unable to change from front to rear while editing cable ### NetBox version v3.5.6 ### Feature type Change to existing functionality ### Proposed functionality Currently, if someone accidentally connects a cable to the rear port instead of the front port, the entire cable must be deleted and created again. It would be nice to be able to change not only the port number but also the location (front/rear) when editing the cable. This might just be an ui change as the api seems to allow to change it. ### Use case It is not necessary to delete the cable and enter all information (label, length etc) again. You can just reconnect it. ### Database changes _No response_ ### External dependencies _No response_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `netbox/dcim/forms/connections.py` Content: ``` 1 from django import forms 2 from django.contrib.contenttypes.models import ContentType 3 from django.utils.translation import gettext_lazy as _ 4 5 from circuits.models import Circuit, CircuitTermination 6 from dcim.models import * 7 from utilities.forms.fields import DynamicModelChoiceField, DynamicModelMultipleChoiceField 8 from .model_forms import CableForm 9 10 11 def get_cable_form(a_type, b_type): 12 13 class FormMetaclass(forms.models.ModelFormMetaclass): 14 15 def __new__(mcs, name, bases, attrs): 16 17 for cable_end, term_cls in (('a', a_type), ('b', b_type)): 18 19 # Device component 20 if hasattr(term_cls, 'device'): 21 22 attrs[f'termination_{cable_end}_device'] = DynamicModelChoiceField( 23 queryset=Device.objects.all(), 24 label=_('Device'), 25 required=False, 26 selector=True, 27 initial_params={ 28 f'{term_cls._meta.model_name}s__in': f'${cable_end}_terminations' 29 } 30 ) 31 attrs[f'{cable_end}_terminations'] = DynamicModelMultipleChoiceField( 32 queryset=term_cls.objects.all(), 33 label=term_cls._meta.verbose_name.title(), 34 context={ 35 'disabled': '_occupied', 36 }, 37 query_params={ 38 'device_id': f'$termination_{cable_end}_device', 39 'kind': 'physical', # Exclude virtual interfaces 40 } 41 ) 42 43 # PowerFeed 44 elif term_cls == PowerFeed: 45 46 attrs[f'termination_{cable_end}_powerpanel'] = DynamicModelChoiceField( 47 queryset=PowerPanel.objects.all(), 48 label=_('Power Panel'), 49 required=False, 50 selector=True, 51 initial_params={ 52 'powerfeeds__in': f'${cable_end}_terminations' 53 } 54 ) 55 attrs[f'{cable_end}_terminations'] = DynamicModelMultipleChoiceField( 56 queryset=term_cls.objects.all(), 57 label=_('Power Feed'), 58 context={ 59 'disabled': '_occupied', 60 }, 61 query_params={ 62 'power_panel_id': 
f'$termination_{cable_end}_powerpanel', 63 } 64 ) 65 66 # CircuitTermination 67 elif term_cls == CircuitTermination: 68 69 attrs[f'termination_{cable_end}_circuit'] = DynamicModelChoiceField( 70 queryset=Circuit.objects.all(), 71 label=_('Circuit'), 72 selector=True, 73 initial_params={ 74 'terminations__in': f'${cable_end}_terminations' 75 } 76 ) 77 attrs[f'{cable_end}_terminations'] = DynamicModelMultipleChoiceField( 78 queryset=term_cls.objects.all(), 79 label=_('Side'), 80 context={ 81 'disabled': '_occupied', 82 }, 83 query_params={ 84 'circuit_id': f'$termination_{cable_end}_circuit', 85 } 86 ) 87 88 return super().__new__(mcs, name, bases, attrs) 89 90 class _CableForm(CableForm, metaclass=FormMetaclass): 91 92 def __init__(self, *args, initial=None, **kwargs): 93 94 initial = initial or {} 95 if a_type: 96 ct = ContentType.objects.get_for_model(a_type) 97 initial['a_terminations_type'] = f'{ct.app_label}.{ct.model}' 98 if b_type: 99 ct = ContentType.objects.get_for_model(b_type) 100 initial['b_terminations_type'] = f'{ct.app_label}.{ct.model}' 101 102 # TODO: Temporary hack to work around list handling limitations with utils.normalize_querydict() 103 for field_name in ('a_terminations', 'b_terminations'): 104 if field_name in initial and type(initial[field_name]) is not list: 105 initial[field_name] = [initial[field_name]] 106 107 super().__init__(*args, initial=initial, **kwargs) 108 109 if self.instance and self.instance.pk: 110 # Initialize A/B terminations when modifying an existing Cable instance 111 self.initial['a_terminations'] = self.instance.a_terminations 112 self.initial['b_terminations'] = self.instance.b_terminations 113 114 def clean(self): 115 super().clean() 116 117 # Set the A/B terminations on the Cable instance 118 self.instance.a_terminations = self.cleaned_data.get('a_terminations', []) 119 self.instance.b_terminations = self.cleaned_data.get('b_terminations', []) 120 121 return _CableForm 122 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/netbox/dcim/forms/connections.py b/netbox/dcim/forms/connections.py --- a/netbox/dcim/forms/connections.py +++ b/netbox/dcim/forms/connections.py @@ -90,14 +90,14 @@ class _CableForm(CableForm, metaclass=FormMetaclass): def __init__(self, *args, initial=None, **kwargs): - initial = initial or {} + if a_type: - ct = ContentType.objects.get_for_model(a_type) - initial['a_terminations_type'] = f'{ct.app_label}.{ct.model}' + a_ct = ContentType.objects.get_for_model(a_type) + initial['a_terminations_type'] = f'{a_ct.app_label}.{a_ct.model}' if b_type: - ct = ContentType.objects.get_for_model(b_type) - initial['b_terminations_type'] = f'{ct.app_label}.{ct.model}' + b_ct = ContentType.objects.get_for_model(b_type) + initial['b_terminations_type'] = f'{b_ct.app_label}.{b_ct.model}' # TODO: Temporary hack to work around list handling limitations with utils.normalize_querydict() for field_name in ('a_terminations', 'b_terminations'): @@ -108,8 +108,17 @@ if self.instance and self.instance.pk: # Initialize A/B terminations when modifying an existing Cable instance - self.initial['a_terminations'] = self.instance.a_terminations - self.initial['b_terminations'] = self.instance.b_terminations + if a_type and self.instance.a_terminations and a_ct == ContentType.objects.get_for_model(self.instance.a_terminations[0]): + self.initial['a_terminations'] = self.instance.a_terminations + if b_type and self.instance.b_terminations and b_ct == ContentType.objects.get_for_model(self.instance.b_terminations[0]): + self.initial['b_terminations'] = self.instance.b_terminations + else: + # Need to clear terminations if swapped type - but need to do it only + # if not from instance + if a_type: + initial.pop('a_terminations', None) + if b_type: + initial.pop('b_terminations', None) def clean(self): super().clean()
{"golden_diff": "diff --git a/netbox/dcim/forms/connections.py b/netbox/dcim/forms/connections.py\n--- a/netbox/dcim/forms/connections.py\n+++ b/netbox/dcim/forms/connections.py\n@@ -90,14 +90,14 @@\n class _CableForm(CableForm, metaclass=FormMetaclass):\n \n def __init__(self, *args, initial=None, **kwargs):\n-\n initial = initial or {}\n+\n if a_type:\n- ct = ContentType.objects.get_for_model(a_type)\n- initial['a_terminations_type'] = f'{ct.app_label}.{ct.model}'\n+ a_ct = ContentType.objects.get_for_model(a_type)\n+ initial['a_terminations_type'] = f'{a_ct.app_label}.{a_ct.model}'\n if b_type:\n- ct = ContentType.objects.get_for_model(b_type)\n- initial['b_terminations_type'] = f'{ct.app_label}.{ct.model}'\n+ b_ct = ContentType.objects.get_for_model(b_type)\n+ initial['b_terminations_type'] = f'{b_ct.app_label}.{b_ct.model}'\n \n # TODO: Temporary hack to work around list handling limitations with utils.normalize_querydict()\n for field_name in ('a_terminations', 'b_terminations'):\n@@ -108,8 +108,17 @@\n \n if self.instance and self.instance.pk:\n # Initialize A/B terminations when modifying an existing Cable instance\n- self.initial['a_terminations'] = self.instance.a_terminations\n- self.initial['b_terminations'] = self.instance.b_terminations\n+ if a_type and self.instance.a_terminations and a_ct == ContentType.objects.get_for_model(self.instance.a_terminations[0]):\n+ self.initial['a_terminations'] = self.instance.a_terminations\n+ if b_type and self.instance.b_terminations and b_ct == ContentType.objects.get_for_model(self.instance.b_terminations[0]):\n+ self.initial['b_terminations'] = self.instance.b_terminations\n+ else:\n+ # Need to clear terminations if swapped type - but need to do it only\n+ # if not from instance\n+ if a_type:\n+ initial.pop('a_terminations', None)\n+ if b_type:\n+ initial.pop('b_terminations', None)\n \n def clean(self):\n super().clean()\n", "issue": "Error on cable edit after B port was deleted / wrong status\n### Deployment Type\r\n\r\nSelf-hosted (Docker)\r\n\r\n### NetBox Version\r\n\r\nv3.7.5\r\n\r\n### Python Version\r\n\r\n3.11\r\n\r\n### Steps to Reproduce\r\n\r\n1. Have a device with front- and rear-Ports (Patch Panel)\r\n2. Have a device with interfaces (switches)\r\n3. Connect switch interface (here gi43) with front-port (here 22)\r\n4. Delete rear ports on patch panel device\r\n5. Go To Connections > Cables\r\n6. Click on edit of cable --> error message\r\n\r\n### Expected Behavior\r\n\r\n- Edit Button works to connect cable again\r\n\r\n### Observed Behavior\r\n\r\n- Error Message \r\n![2024-04-19 19_20_00-Server Error and 5 more pages - Work - Microsoft\u200b Edge](https://github.com/netbox-community/netbox/assets/13959569/78d69dbd-7b02-4b1a-be0a-3523e51152a6)\r\n\nCables/UI: Unable to change from front to rear while editing cable\n### NetBox version\n\nv3.5.6\n\n### Feature type\n\nChange to existing functionality\n\n### Proposed functionality\n\nCurrently, if someone accidentally connects a cable to the rear port instead of the front port, the entire cable must be deleted and created again. It would be nice to be able to change not only the port number but also the location (front/rear) when editing the cable. This might just be an ui change as the api seems to allow to change it.\n\n### Use case\n\nIt is not necessary to delete the cable and enter all information (label, length etc) again. 
You can just reconnect it.\n\n### Database changes\n\n_No response_\n\n### External dependencies\n\n_No response_\n", "before_files": [{"content": "from django import forms\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.utils.translation import gettext_lazy as _\n\nfrom circuits.models import Circuit, CircuitTermination\nfrom dcim.models import *\nfrom utilities.forms.fields import DynamicModelChoiceField, DynamicModelMultipleChoiceField\nfrom .model_forms import CableForm\n\n\ndef get_cable_form(a_type, b_type):\n\n class FormMetaclass(forms.models.ModelFormMetaclass):\n\n def __new__(mcs, name, bases, attrs):\n\n for cable_end, term_cls in (('a', a_type), ('b', b_type)):\n\n # Device component\n if hasattr(term_cls, 'device'):\n\n attrs[f'termination_{cable_end}_device'] = DynamicModelChoiceField(\n queryset=Device.objects.all(),\n label=_('Device'),\n required=False,\n selector=True,\n initial_params={\n f'{term_cls._meta.model_name}s__in': f'${cable_end}_terminations'\n }\n )\n attrs[f'{cable_end}_terminations'] = DynamicModelMultipleChoiceField(\n queryset=term_cls.objects.all(),\n label=term_cls._meta.verbose_name.title(),\n context={\n 'disabled': '_occupied',\n },\n query_params={\n 'device_id': f'$termination_{cable_end}_device',\n 'kind': 'physical', # Exclude virtual interfaces\n }\n )\n\n # PowerFeed\n elif term_cls == PowerFeed:\n\n attrs[f'termination_{cable_end}_powerpanel'] = DynamicModelChoiceField(\n queryset=PowerPanel.objects.all(),\n label=_('Power Panel'),\n required=False,\n selector=True,\n initial_params={\n 'powerfeeds__in': f'${cable_end}_terminations'\n }\n )\n attrs[f'{cable_end}_terminations'] = DynamicModelMultipleChoiceField(\n queryset=term_cls.objects.all(),\n label=_('Power Feed'),\n context={\n 'disabled': '_occupied',\n },\n query_params={\n 'power_panel_id': f'$termination_{cable_end}_powerpanel',\n }\n )\n\n # CircuitTermination\n elif term_cls == CircuitTermination:\n\n attrs[f'termination_{cable_end}_circuit'] = DynamicModelChoiceField(\n queryset=Circuit.objects.all(),\n label=_('Circuit'),\n selector=True,\n initial_params={\n 'terminations__in': f'${cable_end}_terminations'\n }\n )\n attrs[f'{cable_end}_terminations'] = DynamicModelMultipleChoiceField(\n queryset=term_cls.objects.all(),\n label=_('Side'),\n context={\n 'disabled': '_occupied',\n },\n query_params={\n 'circuit_id': f'$termination_{cable_end}_circuit',\n }\n )\n\n return super().__new__(mcs, name, bases, attrs)\n\n class _CableForm(CableForm, metaclass=FormMetaclass):\n\n def __init__(self, *args, initial=None, **kwargs):\n\n initial = initial or {}\n if a_type:\n ct = ContentType.objects.get_for_model(a_type)\n initial['a_terminations_type'] = f'{ct.app_label}.{ct.model}'\n if b_type:\n ct = ContentType.objects.get_for_model(b_type)\n initial['b_terminations_type'] = f'{ct.app_label}.{ct.model}'\n\n # TODO: Temporary hack to work around list handling limitations with utils.normalize_querydict()\n for field_name in ('a_terminations', 'b_terminations'):\n if field_name in initial and type(initial[field_name]) is not list:\n initial[field_name] = [initial[field_name]]\n\n super().__init__(*args, initial=initial, **kwargs)\n\n if self.instance and self.instance.pk:\n # Initialize A/B terminations when modifying an existing Cable instance\n self.initial['a_terminations'] = self.instance.a_terminations\n self.initial['b_terminations'] = self.instance.b_terminations\n\n def clean(self):\n super().clean()\n\n # Set the A/B terminations on the Cable instance\n 
self.instance.a_terminations = self.cleaned_data.get('a_terminations', [])\n self.instance.b_terminations = self.cleaned_data.get('b_terminations', [])\n\n return _CableForm\n", "path": "netbox/dcim/forms/connections.py"}], "after_files": [{"content": "from django import forms\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.utils.translation import gettext_lazy as _\n\nfrom circuits.models import Circuit, CircuitTermination\nfrom dcim.models import *\nfrom utilities.forms.fields import DynamicModelChoiceField, DynamicModelMultipleChoiceField\nfrom .model_forms import CableForm\n\n\ndef get_cable_form(a_type, b_type):\n\n class FormMetaclass(forms.models.ModelFormMetaclass):\n\n def __new__(mcs, name, bases, attrs):\n\n for cable_end, term_cls in (('a', a_type), ('b', b_type)):\n\n # Device component\n if hasattr(term_cls, 'device'):\n\n attrs[f'termination_{cable_end}_device'] = DynamicModelChoiceField(\n queryset=Device.objects.all(),\n label=_('Device'),\n required=False,\n selector=True,\n initial_params={\n f'{term_cls._meta.model_name}s__in': f'${cable_end}_terminations'\n }\n )\n attrs[f'{cable_end}_terminations'] = DynamicModelMultipleChoiceField(\n queryset=term_cls.objects.all(),\n label=term_cls._meta.verbose_name.title(),\n context={\n 'disabled': '_occupied',\n },\n query_params={\n 'device_id': f'$termination_{cable_end}_device',\n 'kind': 'physical', # Exclude virtual interfaces\n }\n )\n\n # PowerFeed\n elif term_cls == PowerFeed:\n\n attrs[f'termination_{cable_end}_powerpanel'] = DynamicModelChoiceField(\n queryset=PowerPanel.objects.all(),\n label=_('Power Panel'),\n required=False,\n selector=True,\n initial_params={\n 'powerfeeds__in': f'${cable_end}_terminations'\n }\n )\n attrs[f'{cable_end}_terminations'] = DynamicModelMultipleChoiceField(\n queryset=term_cls.objects.all(),\n label=_('Power Feed'),\n context={\n 'disabled': '_occupied',\n },\n query_params={\n 'power_panel_id': f'$termination_{cable_end}_powerpanel',\n }\n )\n\n # CircuitTermination\n elif term_cls == CircuitTermination:\n\n attrs[f'termination_{cable_end}_circuit'] = DynamicModelChoiceField(\n queryset=Circuit.objects.all(),\n label=_('Circuit'),\n selector=True,\n initial_params={\n 'terminations__in': f'${cable_end}_terminations'\n }\n )\n attrs[f'{cable_end}_terminations'] = DynamicModelMultipleChoiceField(\n queryset=term_cls.objects.all(),\n label=_('Side'),\n context={\n 'disabled': '_occupied',\n },\n query_params={\n 'circuit_id': f'$termination_{cable_end}_circuit',\n }\n )\n\n return super().__new__(mcs, name, bases, attrs)\n\n class _CableForm(CableForm, metaclass=FormMetaclass):\n\n def __init__(self, *args, initial=None, **kwargs):\n initial = initial or {}\n\n if a_type:\n a_ct = ContentType.objects.get_for_model(a_type)\n initial['a_terminations_type'] = f'{a_ct.app_label}.{a_ct.model}'\n if b_type:\n b_ct = ContentType.objects.get_for_model(b_type)\n initial['b_terminations_type'] = f'{b_ct.app_label}.{b_ct.model}'\n\n # TODO: Temporary hack to work around list handling limitations with utils.normalize_querydict()\n for field_name in ('a_terminations', 'b_terminations'):\n if field_name in initial and type(initial[field_name]) is not list:\n initial[field_name] = [initial[field_name]]\n\n super().__init__(*args, initial=initial, **kwargs)\n\n if self.instance and self.instance.pk:\n # Initialize A/B terminations when modifying an existing Cable instance\n if a_type and self.instance.a_terminations and a_ct == 
ContentType.objects.get_for_model(self.instance.a_terminations[0]):\n self.initial['a_terminations'] = self.instance.a_terminations\n if b_type and self.instance.b_terminations and b_ct == ContentType.objects.get_for_model(self.instance.b_terminations[0]):\n self.initial['b_terminations'] = self.instance.b_terminations\n else:\n # Need to clear terminations if swapped type - but need to do it only\n # if not from instance\n if a_type:\n initial.pop('a_terminations', None)\n if b_type:\n initial.pop('b_terminations', None)\n\n def clean(self):\n super().clean()\n\n # Set the A/B terminations on the Cable instance\n self.instance.a_terminations = self.cleaned_data.get('a_terminations', [])\n self.instance.b_terminations = self.cleaned_data.get('b_terminations', [])\n\n return _CableForm\n", "path": "netbox/dcim/forms/connections.py"}]}
num_tokens: 1,846
num_tokens_diff: 529
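
The golden_diff above keys the prefilled `a_terminations` / `b_terminations` on whether the cable's existing terminations still match the termination type the edit form was opened for. The sketch below is a framework-free analogue, not the Django form from `netbox/dcim/forms/connections.py`: the `FrontPort` / `RearPort` dataclasses and the `seed_initial_terminations` helper are invented for illustration, and it uses `isinstance` where the real patch compares `ContentType` objects.

```python
from dataclasses import dataclass


@dataclass
class FrontPort:   # stand-in for dcim.models.FrontPort
    name: str


@dataclass
class RearPort:    # stand-in for dcim.models.RearPort
    name: str


def seed_initial_terminations(initial: dict, end: str, selected_type: type, existing: list) -> dict:
    """Prefill `{end}_terminations` only when the cable's existing terminations
    are still of the type the form was opened for; otherwise drop any stale
    value so that end can simply be re-terminated.
    """
    key = f"{end}_terminations"
    if existing and isinstance(existing[0], selected_type):
        initial[key] = existing
    else:
        initial.pop(key, None)
    return initial


# A end is currently on a front port, but the form was opened for rear ports
# (or the old port was deleted entirely): the stale prefill is cleared.
print(seed_initial_terminations({}, "a", RearPort, [FrontPort("22")]))    # {}
print(seed_initial_terminations({}, "a", FrontPort, [FrontPort("22")]))   # {'a_terminations': [FrontPort(name='22')]}
print(seed_initial_terminations({}, "b", RearPort, []))                   # {}
```

Clearing the stale initial data instead of always copying it from the instance is what lets a cable be moved from a front port to a rear port (or re-terminated after the old port was deleted) without deleting and recreating the cable.
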