problem_id
stringlengths
18
22
source
stringclasses
1 value
task_type
stringclasses
1 value
in_source_id
stringlengths
13
58
prompt
stringlengths
1.1k
25.4k
golden_diff
stringlengths
145
5.13k
verification_info
stringlengths
582
39.1k
num_tokens
int64
271
4.1k
num_tokens_diff
int64
47
1.02k
gh_patches_debug_12598
rasdani/github-patches
git_diff
Kinto__kinto-1279
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- HTTP 500 while fetching the permission endpoint using the account plugin. Error while fetching the permissions endpoint using the account plugin. ``` File "~/kinto/venv/lib/python3.6/site-packages/cornice/service.py", line 491, in wrapper response = view_() File "~/kinto/venv/lib/python3.6/site-packages/kinto/core/resource/__init__.py", line 290, in collection_get include_deleted=include_deleted) File "~/kinto/venv/lib/python3.6/site-packages/kinto/views/permissions.py", line 84, in get_records from_settings = allowed_from_settings(self.request.registry.settings, principals) File "~/kinto/venv/lib/python3.6/site-packages/kinto/views/permissions.py", line 46, in allowed_from_settings 'record': 'collection'}[resource_name] KeyError: 'account' ``` HTTP 500 while fetching the permission endpoint using the account plugin. Error while fetching the permissions endpoint using the account plugin. ``` File "~/kinto/venv/lib/python3.6/site-packages/cornice/service.py", line 491, in wrapper response = view_() File "~/kinto/venv/lib/python3.6/site-packages/kinto/core/resource/__init__.py", line 290, in collection_get include_deleted=include_deleted) File "~/kinto/venv/lib/python3.6/site-packages/kinto/views/permissions.py", line 84, in get_records from_settings = allowed_from_settings(self.request.registry.settings, principals) File "~/kinto/venv/lib/python3.6/site-packages/kinto/views/permissions.py", line 46, in allowed_from_settings 'record': 'collection'}[resource_name] KeyError: 'account' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kinto/views/permissions.py` Content: ``` 1 import colander 2 from pyramid.security import NO_PERMISSION_REQUIRED 3 from pyramid.settings import aslist 4 5 from kinto.authorization import PERMISSIONS_INHERITANCE_TREE 6 from kinto.core import utils as core_utils, resource 7 from kinto.core.storage import Sort 8 from kinto.core.storage.memory import extract_record_set 9 10 11 def allowed_from_settings(settings, principals): 12 """Returns every permissions allowed from settings for the current user. 13 :param settings dict: app settings 14 :param principals list: list of principals of current user 15 :rtype: dict 16 17 Result example:: 18 19 { 20 "bucket": {"write", "collection:create"}, 21 "collection": {"read"} 22 } 23 24 XXX: This helper will be useful for Kinto/kinto#894 25 """ 26 perms_settings = {k: aslist(v) for k, v in settings.items() 27 if k.endswith('_principals')} 28 from_settings = {} 29 for key, allowed_principals in perms_settings.items(): 30 resource_name, permission, _ = key.split('_') 31 # Keep the known permissions only. 32 if resource_name not in PERMISSIONS_INHERITANCE_TREE.keys(): 33 continue 34 # Keep the permissions of the current user only. 35 if not bool(set(principals) & set(allowed_principals)): 36 continue 37 # ``collection_create_principals`` means ``collection:create`` in bucket. 38 if permission == 'create': 39 permission = '{resource_name}:{permission}'.format( 40 resource_name=resource_name, 41 permission=permission) 42 resource_name = { # resource parents. 43 'bucket': '', 44 'collection': 'bucket', 45 'group': 'bucket', 46 'record': 'collection'}[resource_name] 47 # Store them in a convenient way. 48 from_settings.setdefault(resource_name, set()).add(permission) 49 return from_settings 50 51 52 class PermissionsModel: 53 id_field = 'id' 54 modified_field = 'last_modified' 55 deleted_field = 'deleted' 56 57 def __init__(self, request): 58 self.request = request 59 60 def timestamp(self, parent_id=None): 61 return 0 62 63 def get_records(self, filters=None, sorting=None, pagination_rules=None, 64 limit=None, include_deleted=False, parent_id=None): 65 # Invert the permissions inheritance tree. 66 perms_descending_tree = {} 67 for on_resource, tree in PERMISSIONS_INHERITANCE_TREE.items(): 68 for obtained_perm, obtained_from in tree.items(): 69 for from_resource, perms in obtained_from.items(): 70 for perm in perms: 71 perms_descending_tree.setdefault(from_resource, {})\ 72 .setdefault(perm, {})\ 73 .setdefault(on_resource, set())\ 74 .add(obtained_perm) 75 76 # Obtain current principals. 77 principals = self.request.prefixed_principals 78 79 # Query every possible permission of the current user from backend. 80 backend = self.request.registry.permission 81 perms_by_object_uri = backend.get_accessible_objects(principals) 82 83 # Check settings for every allowed resources. 84 from_settings = allowed_from_settings(self.request.registry.settings, principals) 85 86 # Expand permissions obtained from backend with the object URIs that 87 # correspond to permissions allowed from settings. 88 allowed_resources = {'bucket', 'collection', 'group'} & set(from_settings.keys()) 89 if allowed_resources: 90 storage = self.request.registry.storage 91 every_bucket, _ = storage.get_all(parent_id='', collection_id='bucket') 92 for bucket in every_bucket: 93 bucket_uri = '/buckets/{id}'.format_map(bucket) 94 for res in allowed_resources: 95 resource_perms = from_settings[res] 96 # Bucket is always fetched. 97 if res == 'bucket': 98 perms_by_object_uri.setdefault(bucket_uri, set()).update(resource_perms) 99 continue 100 # Fetch bucket collections and groups. 101 # XXX: wrong approach: query in a loop! 102 every_subobjects, _ = storage.get_all(parent_id=bucket_uri, 103 collection_id=res) 104 for subobject in every_subobjects: 105 subobj_uri = bucket_uri + '/{0}s/{1}'.format(res, subobject['id']) 106 perms_by_object_uri.setdefault(subobj_uri, set()).update(resource_perms) 107 108 entries = [] 109 for object_uri, perms in perms_by_object_uri.items(): 110 try: 111 # Obtain associated res from object URI 112 resource_name, matchdict = core_utils.view_lookup(self.request, 113 object_uri) 114 except ValueError: 115 # Skip permissions entries that are not linked to an object URI 116 continue 117 118 # For consistency with event payloads, prefix id with resource name 119 matchdict[resource_name + '_id'] = matchdict.get('id') 120 121 # Expand implicit permissions using descending tree. 122 permissions = set(perms) 123 for perm in perms: 124 obtained = perms_descending_tree[resource_name][perm] 125 # Related to same resource only and not every sub-objects. 126 # (e.g "bucket:write" gives "bucket:read" but not "group:read") 127 permissions |= obtained[resource_name] 128 129 entry = dict(uri=object_uri, 130 resource_name=resource_name, 131 permissions=list(permissions), 132 **matchdict) 133 entries.append(entry) 134 135 return extract_record_set(entries, filters=filters, sorting=sorting, 136 pagination_rules=pagination_rules, 137 limit=limit) 138 139 140 class PermissionsSchema(resource.ResourceSchema): 141 uri = colander.SchemaNode(colander.String()) 142 resource_name = colander.SchemaNode(colander.String()) 143 permissions = colander.Sequence(colander.SchemaNode(colander.String())) 144 bucket_id = colander.SchemaNode(colander.String()) 145 collection_id = colander.SchemaNode(colander.String(), 146 missing=colander.drop) 147 group_id = colander.SchemaNode(colander.String(), 148 missing=colander.drop) 149 record_id = colander.SchemaNode(colander.String(), 150 missing=colander.drop) 151 152 class Options: 153 preserve_unknown = False 154 155 156 @resource.register(name='permissions', 157 description='List of user permissions', 158 collection_path='/permissions', 159 record_path=None, 160 collection_methods=('GET',), 161 permission=NO_PERMISSION_REQUIRED) 162 class Permissions(resource.ShareableResource): 163 164 schema = PermissionsSchema 165 166 def __init__(self, request, context=None): 167 super().__init__(request, context) 168 self.model = PermissionsModel(request) 169 170 def _extract_sorting(self, limit): 171 # Permissions entries are not stored with timestamp, so do not 172 # force it. 173 result = super()._extract_sorting(limit) 174 without_last_modified = [s for s in result 175 if s.field != self.model.modified_field] 176 # For pagination, there must be at least one sort criteria. 177 # We use ``uri`` because its values are unique. 178 if "uri" not in [s.field for s in without_last_modified]: 179 without_last_modified.append(Sort("uri", -1)) 180 return without_last_modified 181 182 def _extract_filters(self): 183 result = super()._extract_filters() 184 without_last_modified = [s for s in result 185 if s.field != self.model.modified_field] 186 return without_last_modified 187 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kinto/views/permissions.py b/kinto/views/permissions.py --- a/kinto/views/permissions.py +++ b/kinto/views/permissions.py @@ -40,10 +40,9 @@ resource_name=resource_name, permission=permission) resource_name = { # resource parents. - 'bucket': '', 'collection': 'bucket', 'group': 'bucket', - 'record': 'collection'}[resource_name] + 'record': 'collection'}.get(resource_name, '') # Store them in a convenient way. from_settings.setdefault(resource_name, set()).add(permission) return from_settings
{"golden_diff": "diff --git a/kinto/views/permissions.py b/kinto/views/permissions.py\n--- a/kinto/views/permissions.py\n+++ b/kinto/views/permissions.py\n@@ -40,10 +40,9 @@\n resource_name=resource_name,\n permission=permission)\n resource_name = { # resource parents.\n- 'bucket': '',\n 'collection': 'bucket',\n 'group': 'bucket',\n- 'record': 'collection'}[resource_name]\n+ 'record': 'collection'}.get(resource_name, '')\n # Store them in a convenient way.\n from_settings.setdefault(resource_name, set()).add(permission)\n return from_settings\n", "issue": "HTTP 500 while fetching the permission endpoint using the account plugin.\nError while fetching the permissions endpoint using the account plugin.\r\n\r\n```\r\n File \"~/kinto/venv/lib/python3.6/site-packages/cornice/service.py\", line 491, in wrapper\r\n response = view_()\r\n File \"~/kinto/venv/lib/python3.6/site-packages/kinto/core/resource/__init__.py\", line 290, in collection_get\r\n include_deleted=include_deleted)\r\n File \"~/kinto/venv/lib/python3.6/site-packages/kinto/views/permissions.py\", line 84, in get_records\r\n from_settings = allowed_from_settings(self.request.registry.settings, principals)\r\n File \"~/kinto/venv/lib/python3.6/site-packages/kinto/views/permissions.py\", line 46, in allowed_from_settings\r\n 'record': 'collection'}[resource_name]\r\nKeyError: 'account'\r\n```\nHTTP 500 while fetching the permission endpoint using the account plugin.\nError while fetching the permissions endpoint using the account plugin.\r\n\r\n```\r\n File \"~/kinto/venv/lib/python3.6/site-packages/cornice/service.py\", line 491, in wrapper\r\n response = view_()\r\n File \"~/kinto/venv/lib/python3.6/site-packages/kinto/core/resource/__init__.py\", line 290, in collection_get\r\n include_deleted=include_deleted)\r\n File \"~/kinto/venv/lib/python3.6/site-packages/kinto/views/permissions.py\", line 84, in get_records\r\n from_settings = allowed_from_settings(self.request.registry.settings, principals)\r\n File \"~/kinto/venv/lib/python3.6/site-packages/kinto/views/permissions.py\", line 46, in allowed_from_settings\r\n 'record': 'collection'}[resource_name]\r\nKeyError: 'account'\r\n```\n", "before_files": [{"content": "import colander\nfrom pyramid.security import NO_PERMISSION_REQUIRED\nfrom pyramid.settings import aslist\n\nfrom kinto.authorization import PERMISSIONS_INHERITANCE_TREE\nfrom kinto.core import utils as core_utils, resource\nfrom kinto.core.storage import Sort\nfrom kinto.core.storage.memory import extract_record_set\n\n\ndef allowed_from_settings(settings, principals):\n \"\"\"Returns every permissions allowed from settings for the current user.\n :param settings dict: app settings\n :param principals list: list of principals of current user\n :rtype: dict\n\n Result example::\n\n {\n \"bucket\": {\"write\", \"collection:create\"},\n \"collection\": {\"read\"}\n }\n\n XXX: This helper will be useful for Kinto/kinto#894\n \"\"\"\n perms_settings = {k: aslist(v) for k, v in settings.items()\n if k.endswith('_principals')}\n from_settings = {}\n for key, allowed_principals in perms_settings.items():\n resource_name, permission, _ = key.split('_')\n # Keep the known permissions only.\n if resource_name not in PERMISSIONS_INHERITANCE_TREE.keys():\n continue\n # Keep the permissions of the current user only.\n if not bool(set(principals) & set(allowed_principals)):\n continue\n # ``collection_create_principals`` means ``collection:create`` in bucket.\n if permission == 'create':\n permission = '{resource_name}:{permission}'.format(\n resource_name=resource_name,\n permission=permission)\n resource_name = { # resource parents.\n 'bucket': '',\n 'collection': 'bucket',\n 'group': 'bucket',\n 'record': 'collection'}[resource_name]\n # Store them in a convenient way.\n from_settings.setdefault(resource_name, set()).add(permission)\n return from_settings\n\n\nclass PermissionsModel:\n id_field = 'id'\n modified_field = 'last_modified'\n deleted_field = 'deleted'\n\n def __init__(self, request):\n self.request = request\n\n def timestamp(self, parent_id=None):\n return 0\n\n def get_records(self, filters=None, sorting=None, pagination_rules=None,\n limit=None, include_deleted=False, parent_id=None):\n # Invert the permissions inheritance tree.\n perms_descending_tree = {}\n for on_resource, tree in PERMISSIONS_INHERITANCE_TREE.items():\n for obtained_perm, obtained_from in tree.items():\n for from_resource, perms in obtained_from.items():\n for perm in perms:\n perms_descending_tree.setdefault(from_resource, {})\\\n .setdefault(perm, {})\\\n .setdefault(on_resource, set())\\\n .add(obtained_perm)\n\n # Obtain current principals.\n principals = self.request.prefixed_principals\n\n # Query every possible permission of the current user from backend.\n backend = self.request.registry.permission\n perms_by_object_uri = backend.get_accessible_objects(principals)\n\n # Check settings for every allowed resources.\n from_settings = allowed_from_settings(self.request.registry.settings, principals)\n\n # Expand permissions obtained from backend with the object URIs that\n # correspond to permissions allowed from settings.\n allowed_resources = {'bucket', 'collection', 'group'} & set(from_settings.keys())\n if allowed_resources:\n storage = self.request.registry.storage\n every_bucket, _ = storage.get_all(parent_id='', collection_id='bucket')\n for bucket in every_bucket:\n bucket_uri = '/buckets/{id}'.format_map(bucket)\n for res in allowed_resources:\n resource_perms = from_settings[res]\n # Bucket is always fetched.\n if res == 'bucket':\n perms_by_object_uri.setdefault(bucket_uri, set()).update(resource_perms)\n continue\n # Fetch bucket collections and groups.\n # XXX: wrong approach: query in a loop!\n every_subobjects, _ = storage.get_all(parent_id=bucket_uri,\n collection_id=res)\n for subobject in every_subobjects:\n subobj_uri = bucket_uri + '/{0}s/{1}'.format(res, subobject['id'])\n perms_by_object_uri.setdefault(subobj_uri, set()).update(resource_perms)\n\n entries = []\n for object_uri, perms in perms_by_object_uri.items():\n try:\n # Obtain associated res from object URI\n resource_name, matchdict = core_utils.view_lookup(self.request,\n object_uri)\n except ValueError:\n # Skip permissions entries that are not linked to an object URI\n continue\n\n # For consistency with event payloads, prefix id with resource name\n matchdict[resource_name + '_id'] = matchdict.get('id')\n\n # Expand implicit permissions using descending tree.\n permissions = set(perms)\n for perm in perms:\n obtained = perms_descending_tree[resource_name][perm]\n # Related to same resource only and not every sub-objects.\n # (e.g \"bucket:write\" gives \"bucket:read\" but not \"group:read\")\n permissions |= obtained[resource_name]\n\n entry = dict(uri=object_uri,\n resource_name=resource_name,\n permissions=list(permissions),\n **matchdict)\n entries.append(entry)\n\n return extract_record_set(entries, filters=filters, sorting=sorting,\n pagination_rules=pagination_rules,\n limit=limit)\n\n\nclass PermissionsSchema(resource.ResourceSchema):\n uri = colander.SchemaNode(colander.String())\n resource_name = colander.SchemaNode(colander.String())\n permissions = colander.Sequence(colander.SchemaNode(colander.String()))\n bucket_id = colander.SchemaNode(colander.String())\n collection_id = colander.SchemaNode(colander.String(),\n missing=colander.drop)\n group_id = colander.SchemaNode(colander.String(),\n missing=colander.drop)\n record_id = colander.SchemaNode(colander.String(),\n missing=colander.drop)\n\n class Options:\n preserve_unknown = False\n\n\[email protected](name='permissions',\n description='List of user permissions',\n collection_path='/permissions',\n record_path=None,\n collection_methods=('GET',),\n permission=NO_PERMISSION_REQUIRED)\nclass Permissions(resource.ShareableResource):\n\n schema = PermissionsSchema\n\n def __init__(self, request, context=None):\n super().__init__(request, context)\n self.model = PermissionsModel(request)\n\n def _extract_sorting(self, limit):\n # Permissions entries are not stored with timestamp, so do not\n # force it.\n result = super()._extract_sorting(limit)\n without_last_modified = [s for s in result\n if s.field != self.model.modified_field]\n # For pagination, there must be at least one sort criteria.\n # We use ``uri`` because its values are unique.\n if \"uri\" not in [s.field for s in without_last_modified]:\n without_last_modified.append(Sort(\"uri\", -1))\n return without_last_modified\n\n def _extract_filters(self):\n result = super()._extract_filters()\n without_last_modified = [s for s in result\n if s.field != self.model.modified_field]\n return without_last_modified\n", "path": "kinto/views/permissions.py"}], "after_files": [{"content": "import colander\nfrom pyramid.security import NO_PERMISSION_REQUIRED\nfrom pyramid.settings import aslist\n\nfrom kinto.authorization import PERMISSIONS_INHERITANCE_TREE\nfrom kinto.core import utils as core_utils, resource\nfrom kinto.core.storage import Sort\nfrom kinto.core.storage.memory import extract_record_set\n\n\ndef allowed_from_settings(settings, principals):\n \"\"\"Returns every permissions allowed from settings for the current user.\n :param settings dict: app settings\n :param principals list: list of principals of current user\n :rtype: dict\n\n Result example::\n\n {\n \"bucket\": {\"write\", \"collection:create\"},\n \"collection\": {\"read\"}\n }\n\n XXX: This helper will be useful for Kinto/kinto#894\n \"\"\"\n perms_settings = {k: aslist(v) for k, v in settings.items()\n if k.endswith('_principals')}\n from_settings = {}\n for key, allowed_principals in perms_settings.items():\n resource_name, permission, _ = key.split('_')\n # Keep the known permissions only.\n if resource_name not in PERMISSIONS_INHERITANCE_TREE.keys():\n continue\n # Keep the permissions of the current user only.\n if not bool(set(principals) & set(allowed_principals)):\n continue\n # ``collection_create_principals`` means ``collection:create`` in bucket.\n if permission == 'create':\n permission = '{resource_name}:{permission}'.format(\n resource_name=resource_name,\n permission=permission)\n resource_name = { # resource parents.\n 'collection': 'bucket',\n 'group': 'bucket',\n 'record': 'collection'}.get(resource_name, '')\n # Store them in a convenient way.\n from_settings.setdefault(resource_name, set()).add(permission)\n return from_settings\n\n\nclass PermissionsModel:\n id_field = 'id'\n modified_field = 'last_modified'\n deleted_field = 'deleted'\n\n def __init__(self, request):\n self.request = request\n\n def timestamp(self, parent_id=None):\n return 0\n\n def get_records(self, filters=None, sorting=None, pagination_rules=None,\n limit=None, include_deleted=False, parent_id=None):\n # Invert the permissions inheritance tree.\n perms_descending_tree = {}\n for on_resource, tree in PERMISSIONS_INHERITANCE_TREE.items():\n for obtained_perm, obtained_from in tree.items():\n for from_resource, perms in obtained_from.items():\n for perm in perms:\n perms_descending_tree.setdefault(from_resource, {})\\\n .setdefault(perm, {})\\\n .setdefault(on_resource, set())\\\n .add(obtained_perm)\n\n # Obtain current principals.\n principals = self.request.prefixed_principals\n\n # Query every possible permission of the current user from backend.\n backend = self.request.registry.permission\n perms_by_object_uri = backend.get_accessible_objects(principals)\n\n # Check settings for every allowed resources.\n from_settings = allowed_from_settings(self.request.registry.settings, principals)\n\n # Expand permissions obtained from backend with the object URIs that\n # correspond to permissions allowed from settings.\n allowed_resources = {'bucket', 'collection', 'group'} & set(from_settings.keys())\n if allowed_resources:\n storage = self.request.registry.storage\n every_bucket, _ = storage.get_all(parent_id='', collection_id='bucket')\n for bucket in every_bucket:\n bucket_uri = '/buckets/{id}'.format_map(bucket)\n for res in allowed_resources:\n resource_perms = from_settings[res]\n # Bucket is always fetched.\n if res == 'bucket':\n perms_by_object_uri.setdefault(bucket_uri, set()).update(resource_perms)\n continue\n # Fetch bucket collections and groups.\n # XXX: wrong approach: query in a loop!\n every_subobjects, _ = storage.get_all(parent_id=bucket_uri,\n collection_id=res)\n for subobject in every_subobjects:\n subobj_uri = bucket_uri + '/{0}s/{1}'.format(res, subobject['id'])\n perms_by_object_uri.setdefault(subobj_uri, set()).update(resource_perms)\n\n entries = []\n for object_uri, perms in perms_by_object_uri.items():\n try:\n # Obtain associated res from object URI\n resource_name, matchdict = core_utils.view_lookup(self.request,\n object_uri)\n except ValueError:\n # Skip permissions entries that are not linked to an object URI\n continue\n\n # For consistency with event payloads, prefix id with resource name\n matchdict[resource_name + '_id'] = matchdict.get('id')\n\n # Expand implicit permissions using descending tree.\n permissions = set(perms)\n for perm in perms:\n obtained = perms_descending_tree[resource_name][perm]\n # Related to same resource only and not every sub-objects.\n # (e.g \"bucket:write\" gives \"bucket:read\" but not \"group:read\")\n permissions |= obtained[resource_name]\n\n entry = dict(uri=object_uri,\n resource_name=resource_name,\n permissions=list(permissions),\n **matchdict)\n entries.append(entry)\n\n return extract_record_set(entries, filters=filters, sorting=sorting,\n pagination_rules=pagination_rules,\n limit=limit)\n\n\nclass PermissionsSchema(resource.ResourceSchema):\n uri = colander.SchemaNode(colander.String())\n resource_name = colander.SchemaNode(colander.String())\n permissions = colander.Sequence(colander.SchemaNode(colander.String()))\n bucket_id = colander.SchemaNode(colander.String())\n collection_id = colander.SchemaNode(colander.String(),\n missing=colander.drop)\n group_id = colander.SchemaNode(colander.String(),\n missing=colander.drop)\n record_id = colander.SchemaNode(colander.String(),\n missing=colander.drop)\n\n class Options:\n preserve_unknown = False\n\n\[email protected](name='permissions',\n description='List of user permissions',\n collection_path='/permissions',\n record_path=None,\n collection_methods=('GET',),\n permission=NO_PERMISSION_REQUIRED)\nclass Permissions(resource.ShareableResource):\n\n schema = PermissionsSchema\n\n def __init__(self, request, context=None):\n super().__init__(request, context)\n self.model = PermissionsModel(request)\n\n def _extract_sorting(self, limit):\n # Permissions entries are not stored with timestamp, so do not\n # force it.\n result = super()._extract_sorting(limit)\n without_last_modified = [s for s in result\n if s.field != self.model.modified_field]\n # For pagination, there must be at least one sort criteria.\n # We use ``uri`` because its values are unique.\n if \"uri\" not in [s.field for s in without_last_modified]:\n without_last_modified.append(Sort(\"uri\", -1))\n return without_last_modified\n\n def _extract_filters(self):\n result = super()._extract_filters()\n without_last_modified = [s for s in result\n if s.field != self.model.modified_field]\n return without_last_modified\n", "path": "kinto/views/permissions.py"}]}
2,625
143
gh_patches_debug_22456
rasdani/github-patches
git_diff
Qiskit__qiskit-1020
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Using simulator instructions crashes the latex drawer <!-- ⚠️ If you do not respect this template, your issue will be closed --> <!-- ⚠️ Make sure to browse the opened and closed issues --> ### Informations - **Qiskit Terra version**: latest master - **Python version**: 3.7 - **Operating system**: linux ### What is the current behavior? Attempting to use the latex drawer to render a circuit with simulator instructions stack traces in the dagunroller. For example: ``` Traceback (most recent call last): File "test_qiskit.py", line 67, in <module> visualization.generate_latex_source(qc, filename='out.tex') File "/tmp/qiskit/qiskit-terra/qiskit/tools/visualization/_circuit_visualization.py", line 354, in generate_latex_source json_circuit = transpile(dag_circuit, basis_gates=basis, format='json') File "/tmp/qiskit/qiskit-terra/qiskit/transpiler/_transpiler.py", line 346, in transpile dag = dag_unroller.expand_gates() File "/tmp/qiskit/qiskit-terra/qiskit/unroll/_dagunroller.py", line 86, in expand_gates not self.dag_circuit.gates[current_node["name"]]["opaque"]: KeyError: 'snapshot' ``` It looks like it's trying to treat the snapshot instruction as a gate (which it's not) and that's causing things to crash. ### Steps to reproduce the problem I've been running: ``` import qiskit.extensions.simulator from qiskit import * from qiskit.tools import visualization q = QuantumRegister(2) c = ClassicalRegister(1) qc = QuantumCircuit(q, c) qc.x(q[0]) qc.snapshot(slot=3) qc.x(q[1]) qc.h(q[0]) qc.barrier() qc.measure(q[0], c[0]) visualization.generate_latex_source(qc, filename='out.tex') ``` Also replacing snapshot() with save(), load(), and noise() ### What is the expected behavior? This should draw a circuit (the barriers won't be drawn for the simulator instructions, that's what I was working on adding when I encountered this) and not stack trace. ### Suggested solutions Fix the crash. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `qiskit/unroll/_dagunroller.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 # Copyright 2017, IBM. 4 # 5 # This source code is licensed under the Apache License, Version 2.0 found in 6 # the LICENSE.txt file in the root directory of this source tree. 7 8 """ 9 DAG Unroller 10 """ 11 12 import networkx as nx 13 14 from qiskit.unroll import Unroller 15 from qiskit.qasm._node import Real, Id, IdList, ExpressionList, Gate, \ 16 PrimaryList, Int, IndexedId, Qreg, If, Creg, \ 17 Program, CustomUnitary 18 from ._unrollererror import UnrollerError 19 from ._dagbackend import DAGBackend 20 21 22 class DagUnroller(object): 23 """An Unroller that takes Dag circuits as the input.""" 24 def __init__(self, dag_circuit, backend=None): 25 if dag_circuit is None: 26 raise UnrollerError('Invalid dag circuit!!') 27 28 self.dag_circuit = dag_circuit 29 self.backend = backend 30 31 def set_backend(self, backend): 32 """Set the backend object.""" 33 self.backend = backend 34 35 def execute(self): 36 """Interpret OPENQASM and make appropriate backend calls.""" 37 if self.backend is not None: 38 self._process() 39 return self.backend.get_output() 40 else: 41 raise UnrollerError("backend not attached") 42 43 # TODO This method should merge with .execute(), so the output will depend 44 # on the backend associated with this DagUnroller instance 45 def expand_gates(self, basis=None): 46 """Expand all gate nodes to the given basis. 47 48 If basis is empty, each custom gate node is replaced by its 49 implementation over U and CX. If basis contains names, then 50 those custom gates are not expanded. For example, if "u3" 51 is in basis, then the gate "u3" will not be expanded wherever 52 it occurs. 53 54 This member function replicates the behavior of the unroller 55 module without using the OpenQASM parser. 56 """ 57 58 if basis is None: 59 basis = self.backend.basis 60 61 if not isinstance(self.backend, DAGBackend): 62 raise UnrollerError("expand_gates only accepts a DAGBackend!!") 63 64 # Build the Gate AST nodes for user-defined gates 65 gatedefs = [] 66 for name, gate in self.dag_circuit.gates.items(): 67 children = [Id(name, 0, "")] 68 if gate["n_args"] > 0: 69 children.append(ExpressionList(list( 70 map(lambda x: Id(x, 0, ""), 71 gate["args"]) 72 ))) 73 children.append(IdList(list( 74 map(lambda x: Id(x, 0, ""), 75 gate["bits"]) 76 ))) 77 children.append(gate["body"]) 78 gatedefs.append(Gate(children)) 79 # Walk through the DAG and examine each node 80 builtins = ["U", "CX", "measure", "reset", "barrier"] 81 topological_sorted_list = list(nx.topological_sort(self.dag_circuit.multi_graph)) 82 for node in topological_sorted_list: 83 current_node = self.dag_circuit.multi_graph.node[node] 84 if current_node["type"] == "op" and \ 85 current_node["name"] not in builtins + basis and \ 86 not self.dag_circuit.gates[current_node["name"]]["opaque"]: 87 subcircuit, wires = self._build_subcircuit(gatedefs, 88 basis, 89 current_node["name"], 90 current_node["params"], 91 current_node["qargs"], 92 current_node["condition"]) 93 self.dag_circuit.substitute_circuit_one(node, subcircuit, wires) 94 return self.dag_circuit 95 96 def _build_subcircuit(self, gatedefs, basis, gate_name, gate_params, gate_args, 97 gate_condition): 98 """Build DAGCircuit for a given user-defined gate node. 99 100 gatedefs = dictionary of Gate AST nodes for user-defined gates 101 gate_name = name of gate to expand to target_basis (nd["name"]) 102 gate_params = list of gate parameters (nd["params"]) 103 gate_args = list of gate arguments (nd["qargs"]) 104 gate_condition = None or tuple (string, int) (nd["condition"]) 105 106 Returns (subcircuit, wires) where subcircuit is the DAGCircuit 107 corresponding to the user-defined gate node expanded to target_basis 108 and wires is the list of input wires to the subcircuit in order 109 corresponding to the gate's arguments. 110 """ 111 112 children = [Id(gate_name, 0, "")] 113 if gate_params: 114 children.append( 115 ExpressionList(list(map(Real, gate_params))) 116 ) 117 new_wires = [("q", j) for j in range(len(gate_args))] 118 children.append( 119 PrimaryList( 120 list(map(lambda x: IndexedId( 121 [Id(x[0], 0, ""), Int(x[1])] 122 ), new_wires)) 123 ) 124 ) 125 gate_node = CustomUnitary(children) 126 id_int = [Id("q", 0, ""), Int(len(gate_args))] 127 # Make a list of register declaration nodes 128 reg_nodes = [ 129 Qreg( 130 [ 131 IndexedId(id_int) 132 ] 133 ) 134 ] 135 # Add an If node when there is a condition present 136 if gate_condition: 137 gate_node = If([ 138 Id(gate_condition[0], 0, ""), 139 Int(gate_condition[1]), 140 gate_node 141 ]) 142 new_wires += [(gate_condition[0], j) 143 for j in range(self.dag_circuit.cregs[gate_condition[0]])] 144 reg_nodes.append( 145 Creg([ 146 IndexedId([ 147 Id(gate_condition[0], 0, ""), 148 Int(self.dag_circuit.cregs[gate_condition[0]]) 149 ]) 150 ]) 151 ) 152 153 # Build the whole program's AST 154 sub_ast = Program(gatedefs + reg_nodes + [gate_node]) 155 # Interpret the AST to give a new DAGCircuit over backend basis 156 sub_circuit = Unroller(sub_ast, DAGBackend(basis)).execute() 157 return sub_circuit, new_wires 158 159 def _process(self): 160 for name, width in self.dag_circuit.qregs.items(): 161 self.backend.new_qreg(name, width) 162 for name, width in self.dag_circuit.cregs.items(): 163 self.backend.new_creg(name, width) 164 for name, data in self.dag_circuit.gates.items(): 165 self.backend.define_gate(name, data) 166 for n in nx.topological_sort(self.dag_circuit.multi_graph): 167 current_node = self.dag_circuit.multi_graph.node[n] 168 if current_node["type"] == "op": 169 params = map(Real, current_node["params"]) 170 params = list(params) 171 if current_node["condition"] is not None: 172 self.backend.set_condition(current_node["condition"][0], 173 current_node["condition"][1]) 174 if not current_node["cargs"]: 175 if current_node["name"] == "U": 176 self.backend.u(params, current_node["qargs"][0]) 177 elif current_node["name"] == "CX": 178 self.backend.cx(current_node["qargs"][0], current_node["qargs"][1]) 179 elif current_node["name"] == "barrier": 180 self.backend.barrier([current_node["qargs"]]) 181 elif current_node["name"] == "reset": 182 self.backend.reset(current_node["qargs"][0]) 183 184 # TODO: The schema of the snapshot gate is radically 185 # different to other QASM instructions. The current model 186 # of extensions does not support generating custom Qobj 187 # instructions (only custom QASM strings) and the default 188 # instruction generator is not enough to produce a valid 189 # snapshot instruction for the new Qobj format. 190 # 191 # This is a hack since there would be mechanisms for the 192 # extensions to provide their own Qobj instructions. 193 # Extensions should not be hardcoded in the DAGUnroller. 194 elif current_node["name"] == "snapshot": 195 self.backend.start_gate( 196 "snapshot", params, current_node["qargs"], 197 extra_fields={'type': 'MISSING', 'label': 'MISSING', 'texparams': []}) 198 self.backend.end_gate("snapshot", params, current_node["qargs"]) 199 else: 200 self.backend.start_gate(current_node["name"], params, 201 current_node["qargs"]) 202 self.backend.end_gate(current_node["name"], params, current_node["qargs"]) 203 else: 204 if current_node["name"] == "measure": 205 if len(current_node["cargs"]) != 1 or len(current_node["qargs"]) != 1 \ 206 or current_node["params"]: 207 raise UnrollerError("Bad node data!!") 208 209 self.backend.measure(current_node["qargs"][0], current_node["cargs"][0]) 210 else: 211 raise UnrollerError("Bad node data!") 212 213 self.backend.drop_condition() 214 return self.backend.get_output() 215 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/qiskit/unroll/_dagunroller.py b/qiskit/unroll/_dagunroller.py --- a/qiskit/unroll/_dagunroller.py +++ b/qiskit/unroll/_dagunroller.py @@ -78,11 +78,12 @@ gatedefs.append(Gate(children)) # Walk through the DAG and examine each node builtins = ["U", "CX", "measure", "reset", "barrier"] + simulator_builtins = ['snapshot', 'save', 'load', 'noise'] topological_sorted_list = list(nx.topological_sort(self.dag_circuit.multi_graph)) for node in topological_sorted_list: current_node = self.dag_circuit.multi_graph.node[node] if current_node["type"] == "op" and \ - current_node["name"] not in builtins + basis and \ + current_node["name"] not in builtins + basis + simulator_builtins and \ not self.dag_circuit.gates[current_node["name"]]["opaque"]: subcircuit, wires = self._build_subcircuit(gatedefs, basis,
{"golden_diff": "diff --git a/qiskit/unroll/_dagunroller.py b/qiskit/unroll/_dagunroller.py\n--- a/qiskit/unroll/_dagunroller.py\n+++ b/qiskit/unroll/_dagunroller.py\n@@ -78,11 +78,12 @@\n gatedefs.append(Gate(children))\n # Walk through the DAG and examine each node\n builtins = [\"U\", \"CX\", \"measure\", \"reset\", \"barrier\"]\n+ simulator_builtins = ['snapshot', 'save', 'load', 'noise']\n topological_sorted_list = list(nx.topological_sort(self.dag_circuit.multi_graph))\n for node in topological_sorted_list:\n current_node = self.dag_circuit.multi_graph.node[node]\n if current_node[\"type\"] == \"op\" and \\\n- current_node[\"name\"] not in builtins + basis and \\\n+ current_node[\"name\"] not in builtins + basis + simulator_builtins and \\\n not self.dag_circuit.gates[current_node[\"name\"]][\"opaque\"]:\n subcircuit, wires = self._build_subcircuit(gatedefs,\n basis,\n", "issue": "Using simulator instructions crashes the latex drawer\n<!-- \u26a0\ufe0f If you do not respect this template, your issue will be closed -->\r\n<!-- \u26a0\ufe0f Make sure to browse the opened and closed issues -->\r\n\r\n### Informations\r\n\r\n- **Qiskit Terra version**: latest master\r\n- **Python version**: 3.7\r\n- **Operating system**: linux\r\n\r\n### What is the current behavior?\r\n\r\nAttempting to use the latex drawer to render a circuit with simulator instructions stack traces in the dagunroller. For example:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"test_qiskit.py\", line 67, in <module>\r\n visualization.generate_latex_source(qc, filename='out.tex')\r\n File \"/tmp/qiskit/qiskit-terra/qiskit/tools/visualization/_circuit_visualization.py\", line 354, in generate_latex_source\r\n json_circuit = transpile(dag_circuit, basis_gates=basis, format='json')\r\n File \"/tmp/qiskit/qiskit-terra/qiskit/transpiler/_transpiler.py\", line 346, in transpile\r\n dag = dag_unroller.expand_gates()\r\n File \"/tmp/qiskit/qiskit-terra/qiskit/unroll/_dagunroller.py\", line 86, in expand_gates\r\n not self.dag_circuit.gates[current_node[\"name\"]][\"opaque\"]:\r\nKeyError: 'snapshot'\r\n```\r\nIt looks like it's trying to treat the snapshot instruction as a gate (which it's not) and that's causing things to crash.\r\n\r\n### Steps to reproduce the problem\r\n\r\nI've been running:\r\n\r\n```\r\nimport qiskit.extensions.simulator\r\nfrom qiskit import *\r\nfrom qiskit.tools import visualization\r\n\r\nq = QuantumRegister(2)\r\nc = ClassicalRegister(1)\r\nqc = QuantumCircuit(q, c)\r\n\r\nqc.x(q[0])\r\nqc.snapshot(slot=3)\r\nqc.x(q[1])\r\nqc.h(q[0])\r\nqc.barrier()\r\nqc.measure(q[0], c[0])\r\n\r\nvisualization.generate_latex_source(qc, filename='out.tex')\r\n```\r\nAlso replacing snapshot() with save(), load(), and noise()\r\n\r\n### What is the expected behavior?\r\n\r\nThis should draw a circuit (the barriers won't be drawn for the simulator instructions, that's what I was working on adding when I encountered this) and not stack trace.\r\n\r\n### Suggested solutions\r\n\r\nFix the crash.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright 2017, IBM.\n#\n# This source code is licensed under the Apache License, Version 2.0 found in\n# the LICENSE.txt file in the root directory of this source tree.\n\n\"\"\"\nDAG Unroller\n\"\"\"\n\nimport networkx as nx\n\nfrom qiskit.unroll import Unroller\nfrom qiskit.qasm._node import Real, Id, IdList, ExpressionList, Gate, \\\n PrimaryList, Int, IndexedId, Qreg, If, Creg, \\\n Program, CustomUnitary\nfrom ._unrollererror import UnrollerError\nfrom ._dagbackend import DAGBackend\n\n\nclass DagUnroller(object):\n \"\"\"An Unroller that takes Dag circuits as the input.\"\"\"\n def __init__(self, dag_circuit, backend=None):\n if dag_circuit is None:\n raise UnrollerError('Invalid dag circuit!!')\n\n self.dag_circuit = dag_circuit\n self.backend = backend\n\n def set_backend(self, backend):\n \"\"\"Set the backend object.\"\"\"\n self.backend = backend\n\n def execute(self):\n \"\"\"Interpret OPENQASM and make appropriate backend calls.\"\"\"\n if self.backend is not None:\n self._process()\n return self.backend.get_output()\n else:\n raise UnrollerError(\"backend not attached\")\n\n # TODO This method should merge with .execute(), so the output will depend\n # on the backend associated with this DagUnroller instance\n def expand_gates(self, basis=None):\n \"\"\"Expand all gate nodes to the given basis.\n\n If basis is empty, each custom gate node is replaced by its\n implementation over U and CX. If basis contains names, then\n those custom gates are not expanded. For example, if \"u3\"\n is in basis, then the gate \"u3\" will not be expanded wherever\n it occurs.\n\n This member function replicates the behavior of the unroller\n module without using the OpenQASM parser.\n \"\"\"\n\n if basis is None:\n basis = self.backend.basis\n\n if not isinstance(self.backend, DAGBackend):\n raise UnrollerError(\"expand_gates only accepts a DAGBackend!!\")\n\n # Build the Gate AST nodes for user-defined gates\n gatedefs = []\n for name, gate in self.dag_circuit.gates.items():\n children = [Id(name, 0, \"\")]\n if gate[\"n_args\"] > 0:\n children.append(ExpressionList(list(\n map(lambda x: Id(x, 0, \"\"),\n gate[\"args\"])\n )))\n children.append(IdList(list(\n map(lambda x: Id(x, 0, \"\"),\n gate[\"bits\"])\n )))\n children.append(gate[\"body\"])\n gatedefs.append(Gate(children))\n # Walk through the DAG and examine each node\n builtins = [\"U\", \"CX\", \"measure\", \"reset\", \"barrier\"]\n topological_sorted_list = list(nx.topological_sort(self.dag_circuit.multi_graph))\n for node in topological_sorted_list:\n current_node = self.dag_circuit.multi_graph.node[node]\n if current_node[\"type\"] == \"op\" and \\\n current_node[\"name\"] not in builtins + basis and \\\n not self.dag_circuit.gates[current_node[\"name\"]][\"opaque\"]:\n subcircuit, wires = self._build_subcircuit(gatedefs,\n basis,\n current_node[\"name\"],\n current_node[\"params\"],\n current_node[\"qargs\"],\n current_node[\"condition\"])\n self.dag_circuit.substitute_circuit_one(node, subcircuit, wires)\n return self.dag_circuit\n\n def _build_subcircuit(self, gatedefs, basis, gate_name, gate_params, gate_args,\n gate_condition):\n \"\"\"Build DAGCircuit for a given user-defined gate node.\n\n gatedefs = dictionary of Gate AST nodes for user-defined gates\n gate_name = name of gate to expand to target_basis (nd[\"name\"])\n gate_params = list of gate parameters (nd[\"params\"])\n gate_args = list of gate arguments (nd[\"qargs\"])\n gate_condition = None or tuple (string, int) (nd[\"condition\"])\n\n Returns (subcircuit, wires) where subcircuit is the DAGCircuit\n corresponding to the user-defined gate node expanded to target_basis\n and wires is the list of input wires to the subcircuit in order\n corresponding to the gate's arguments.\n \"\"\"\n\n children = [Id(gate_name, 0, \"\")]\n if gate_params:\n children.append(\n ExpressionList(list(map(Real, gate_params)))\n )\n new_wires = [(\"q\", j) for j in range(len(gate_args))]\n children.append(\n PrimaryList(\n list(map(lambda x: IndexedId(\n [Id(x[0], 0, \"\"), Int(x[1])]\n ), new_wires))\n )\n )\n gate_node = CustomUnitary(children)\n id_int = [Id(\"q\", 0, \"\"), Int(len(gate_args))]\n # Make a list of register declaration nodes\n reg_nodes = [\n Qreg(\n [\n IndexedId(id_int)\n ]\n )\n ]\n # Add an If node when there is a condition present\n if gate_condition:\n gate_node = If([\n Id(gate_condition[0], 0, \"\"),\n Int(gate_condition[1]),\n gate_node\n ])\n new_wires += [(gate_condition[0], j)\n for j in range(self.dag_circuit.cregs[gate_condition[0]])]\n reg_nodes.append(\n Creg([\n IndexedId([\n Id(gate_condition[0], 0, \"\"),\n Int(self.dag_circuit.cregs[gate_condition[0]])\n ])\n ])\n )\n\n # Build the whole program's AST\n sub_ast = Program(gatedefs + reg_nodes + [gate_node])\n # Interpret the AST to give a new DAGCircuit over backend basis\n sub_circuit = Unroller(sub_ast, DAGBackend(basis)).execute()\n return sub_circuit, new_wires\n\n def _process(self):\n for name, width in self.dag_circuit.qregs.items():\n self.backend.new_qreg(name, width)\n for name, width in self.dag_circuit.cregs.items():\n self.backend.new_creg(name, width)\n for name, data in self.dag_circuit.gates.items():\n self.backend.define_gate(name, data)\n for n in nx.topological_sort(self.dag_circuit.multi_graph):\n current_node = self.dag_circuit.multi_graph.node[n]\n if current_node[\"type\"] == \"op\":\n params = map(Real, current_node[\"params\"])\n params = list(params)\n if current_node[\"condition\"] is not None:\n self.backend.set_condition(current_node[\"condition\"][0],\n current_node[\"condition\"][1])\n if not current_node[\"cargs\"]:\n if current_node[\"name\"] == \"U\":\n self.backend.u(params, current_node[\"qargs\"][0])\n elif current_node[\"name\"] == \"CX\":\n self.backend.cx(current_node[\"qargs\"][0], current_node[\"qargs\"][1])\n elif current_node[\"name\"] == \"barrier\":\n self.backend.barrier([current_node[\"qargs\"]])\n elif current_node[\"name\"] == \"reset\":\n self.backend.reset(current_node[\"qargs\"][0])\n\n # TODO: The schema of the snapshot gate is radically\n # different to other QASM instructions. The current model\n # of extensions does not support generating custom Qobj\n # instructions (only custom QASM strings) and the default\n # instruction generator is not enough to produce a valid\n # snapshot instruction for the new Qobj format.\n #\n # This is a hack since there would be mechanisms for the\n # extensions to provide their own Qobj instructions.\n # Extensions should not be hardcoded in the DAGUnroller.\n elif current_node[\"name\"] == \"snapshot\":\n self.backend.start_gate(\n \"snapshot\", params, current_node[\"qargs\"],\n extra_fields={'type': 'MISSING', 'label': 'MISSING', 'texparams': []})\n self.backend.end_gate(\"snapshot\", params, current_node[\"qargs\"])\n else:\n self.backend.start_gate(current_node[\"name\"], params,\n current_node[\"qargs\"])\n self.backend.end_gate(current_node[\"name\"], params, current_node[\"qargs\"])\n else:\n if current_node[\"name\"] == \"measure\":\n if len(current_node[\"cargs\"]) != 1 or len(current_node[\"qargs\"]) != 1 \\\n or current_node[\"params\"]:\n raise UnrollerError(\"Bad node data!!\")\n\n self.backend.measure(current_node[\"qargs\"][0], current_node[\"cargs\"][0])\n else:\n raise UnrollerError(\"Bad node data!\")\n\n self.backend.drop_condition()\n return self.backend.get_output()\n", "path": "qiskit/unroll/_dagunroller.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright 2017, IBM.\n#\n# This source code is licensed under the Apache License, Version 2.0 found in\n# the LICENSE.txt file in the root directory of this source tree.\n\n\"\"\"\nDAG Unroller\n\"\"\"\n\nimport networkx as nx\n\nfrom qiskit.unroll import Unroller\nfrom qiskit.qasm._node import Real, Id, IdList, ExpressionList, Gate, \\\n PrimaryList, Int, IndexedId, Qreg, If, Creg, \\\n Program, CustomUnitary\nfrom ._unrollererror import UnrollerError\nfrom ._dagbackend import DAGBackend\n\n\nclass DagUnroller(object):\n \"\"\"An Unroller that takes Dag circuits as the input.\"\"\"\n def __init__(self, dag_circuit, backend=None):\n if dag_circuit is None:\n raise UnrollerError('Invalid dag circuit!!')\n\n self.dag_circuit = dag_circuit\n self.backend = backend\n\n def set_backend(self, backend):\n \"\"\"Set the backend object.\"\"\"\n self.backend = backend\n\n def execute(self):\n \"\"\"Interpret OPENQASM and make appropriate backend calls.\"\"\"\n if self.backend is not None:\n self._process()\n return self.backend.get_output()\n else:\n raise UnrollerError(\"backend not attached\")\n\n # TODO This method should merge with .execute(), so the output will depend\n # on the backend associated with this DagUnroller instance\n def expand_gates(self, basis=None):\n \"\"\"Expand all gate nodes to the given basis.\n\n If basis is empty, each custom gate node is replaced by its\n implementation over U and CX. If basis contains names, then\n those custom gates are not expanded. For example, if \"u3\"\n is in basis, then the gate \"u3\" will not be expanded wherever\n it occurs.\n\n This member function replicates the behavior of the unroller\n module without using the OpenQASM parser.\n \"\"\"\n\n if basis is None:\n basis = self.backend.basis\n\n if not isinstance(self.backend, DAGBackend):\n raise UnrollerError(\"expand_gates only accepts a DAGBackend!!\")\n\n # Build the Gate AST nodes for user-defined gates\n gatedefs = []\n for name, gate in self.dag_circuit.gates.items():\n children = [Id(name, 0, \"\")]\n if gate[\"n_args\"] > 0:\n children.append(ExpressionList(list(\n map(lambda x: Id(x, 0, \"\"),\n gate[\"args\"])\n )))\n children.append(IdList(list(\n map(lambda x: Id(x, 0, \"\"),\n gate[\"bits\"])\n )))\n children.append(gate[\"body\"])\n gatedefs.append(Gate(children))\n # Walk through the DAG and examine each node\n builtins = [\"U\", \"CX\", \"measure\", \"reset\", \"barrier\"]\n simulator_builtins = ['snapshot', 'save', 'load', 'noise']\n topological_sorted_list = list(nx.topological_sort(self.dag_circuit.multi_graph))\n for node in topological_sorted_list:\n current_node = self.dag_circuit.multi_graph.node[node]\n if current_node[\"type\"] == \"op\" and \\\n current_node[\"name\"] not in builtins + basis + simulator_builtins and \\\n not self.dag_circuit.gates[current_node[\"name\"]][\"opaque\"]:\n subcircuit, wires = self._build_subcircuit(gatedefs,\n basis,\n current_node[\"name\"],\n current_node[\"params\"],\n current_node[\"qargs\"],\n current_node[\"condition\"])\n self.dag_circuit.substitute_circuit_one(node, subcircuit, wires)\n return self.dag_circuit\n\n def _build_subcircuit(self, gatedefs, basis, gate_name, gate_params, gate_args,\n gate_condition):\n \"\"\"Build DAGCircuit for a given user-defined gate node.\n\n gatedefs = dictionary of Gate AST nodes for user-defined gates\n gate_name = name of gate to expand to target_basis (nd[\"name\"])\n gate_params = list of gate parameters (nd[\"params\"])\n gate_args = list of gate arguments (nd[\"qargs\"])\n gate_condition = None or tuple (string, int) (nd[\"condition\"])\n\n Returns (subcircuit, wires) where subcircuit is the DAGCircuit\n corresponding to the user-defined gate node expanded to target_basis\n and wires is the list of input wires to the subcircuit in order\n corresponding to the gate's arguments.\n \"\"\"\n\n children = [Id(gate_name, 0, \"\")]\n if gate_params:\n children.append(\n ExpressionList(list(map(Real, gate_params)))\n )\n new_wires = [(\"q\", j) for j in range(len(gate_args))]\n children.append(\n PrimaryList(\n list(map(lambda x: IndexedId(\n [Id(x[0], 0, \"\"), Int(x[1])]\n ), new_wires))\n )\n )\n gate_node = CustomUnitary(children)\n id_int = [Id(\"q\", 0, \"\"), Int(len(gate_args))]\n # Make a list of register declaration nodes\n reg_nodes = [\n Qreg(\n [\n IndexedId(id_int)\n ]\n )\n ]\n # Add an If node when there is a condition present\n if gate_condition:\n gate_node = If([\n Id(gate_condition[0], 0, \"\"),\n Int(gate_condition[1]),\n gate_node\n ])\n new_wires += [(gate_condition[0], j)\n for j in range(self.dag_circuit.cregs[gate_condition[0]])]\n reg_nodes.append(\n Creg([\n IndexedId([\n Id(gate_condition[0], 0, \"\"),\n Int(self.dag_circuit.cregs[gate_condition[0]])\n ])\n ])\n )\n\n # Build the whole program's AST\n sub_ast = Program(gatedefs + reg_nodes + [gate_node])\n # Interpret the AST to give a new DAGCircuit over backend basis\n sub_circuit = Unroller(sub_ast, DAGBackend(basis)).execute()\n return sub_circuit, new_wires\n\n def _process(self):\n for name, width in self.dag_circuit.qregs.items():\n self.backend.new_qreg(name, width)\n for name, width in self.dag_circuit.cregs.items():\n self.backend.new_creg(name, width)\n for name, data in self.dag_circuit.gates.items():\n self.backend.define_gate(name, data)\n for n in nx.topological_sort(self.dag_circuit.multi_graph):\n current_node = self.dag_circuit.multi_graph.node[n]\n if current_node[\"type\"] == \"op\":\n params = map(Real, current_node[\"params\"])\n params = list(params)\n if current_node[\"condition\"] is not None:\n self.backend.set_condition(current_node[\"condition\"][0],\n current_node[\"condition\"][1])\n if not current_node[\"cargs\"]:\n if current_node[\"name\"] == \"U\":\n self.backend.u(params, current_node[\"qargs\"][0])\n elif current_node[\"name\"] == \"CX\":\n self.backend.cx(current_node[\"qargs\"][0], current_node[\"qargs\"][1])\n elif current_node[\"name\"] == \"barrier\":\n self.backend.barrier([current_node[\"qargs\"]])\n elif current_node[\"name\"] == \"reset\":\n self.backend.reset(current_node[\"qargs\"][0])\n\n # TODO: The schema of the snapshot gate is radically\n # different to other QASM instructions. The current model\n # of extensions does not support generating custom Qobj\n # instructions (only custom QASM strings) and the default\n # instruction generator is not enough to produce a valid\n # snapshot instruction for the new Qobj format.\n #\n # This is a hack since there would be mechanisms for the\n # extensions to provide their own Qobj instructions.\n # Extensions should not be hardcoded in the DAGUnroller.\n elif current_node[\"name\"] == \"snapshot\":\n self.backend.start_gate(\n \"snapshot\", params, current_node[\"qargs\"],\n extra_fields={'type': 'MISSING', 'label': 'MISSING', 'texparams': []})\n self.backend.end_gate(\"snapshot\", params, current_node[\"qargs\"])\n else:\n self.backend.start_gate(current_node[\"name\"], params,\n current_node[\"qargs\"])\n self.backend.end_gate(current_node[\"name\"], params, current_node[\"qargs\"])\n else:\n if current_node[\"name\"] == \"measure\":\n if len(current_node[\"cargs\"]) != 1 or len(current_node[\"qargs\"]) != 1 \\\n or current_node[\"params\"]:\n raise UnrollerError(\"Bad node data!!\")\n\n self.backend.measure(current_node[\"qargs\"][0], current_node[\"cargs\"][0])\n else:\n raise UnrollerError(\"Bad node data!\")\n\n self.backend.drop_condition()\n return self.backend.get_output()\n", "path": "qiskit/unroll/_dagunroller.py"}]}
3,252
251
gh_patches_debug_330
rasdani/github-patches
git_diff
Pylons__pyramid-3272
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Bump Sphinx to >=1.7.2 Would anyone be opposed to bumping Sphinx to >=1.7.2, != 1.7.3 in `setup.py`? I really want our PDFs to have `emphasize-lines` support, at long last, and bring in support for Unicode characters in PDFs via xelatex. Refs: * #667 * #2572 * https://github.com/rtfd/readthedocs.org/issues/4015 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 ############################################################################## 2 # 3 # Copyright (c) 2008-2013 Agendaless Consulting and Contributors. 4 # All Rights Reserved. 5 # 6 # This software is subject to the provisions of the BSD-like license at 7 # http://www.repoze.org/LICENSE.txt. A copy of the license should accompany 8 # this distribution. THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL 9 # EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, 10 # THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND 11 # FITNESS FOR A PARTICULAR PURPOSE 12 # 13 ############################################################################## 14 15 import os 16 17 from setuptools import setup, find_packages 18 19 here = os.path.abspath(os.path.dirname(__file__)) 20 try: 21 with open(os.path.join(here, 'README.rst')) as f: 22 README = f.read() 23 with open(os.path.join(here, 'CHANGES.txt')) as f: 24 CHANGES = f.read() 25 except IOError: 26 README = CHANGES = '' 27 28 install_requires = [ 29 'setuptools', 30 'WebOb >= 1.7.0', # Response.has_body 31 'repoze.lru >= 0.4', # py3 compat 32 'zope.interface >= 3.8.0', # has zope.interface.registry 33 'zope.deprecation >= 3.5.0', # py3 compat 34 'venusian >= 1.0a3', # ``ignore`` 35 'translationstring >= 0.4', # py3 compat 36 'PasteDeploy >= 1.5.0', # py3 compat 37 'plaster', 38 'plaster_pastedeploy', 39 'hupper', 40 ] 41 42 tests_require = [ 43 'WebTest >= 1.3.1', # py3 compat 44 'zope.component >= 4.0', # py3 compat 45 ] 46 47 48 docs_extras = [ 49 'Sphinx >= 1.3.5, != 1.7.3', 50 'docutils', 51 'repoze.sphinx.autointerface', 52 'pylons_sphinx_latesturl', 53 'pylons-sphinx-themes', 54 'sphinxcontrib-autoprogram', 55 ] 56 57 testing_extras = tests_require + [ 58 'nose', 59 'coverage', 60 'virtualenv', # for scaffolding tests 61 ] 62 63 setup(name='pyramid', 64 version='1.9.2', 65 description='The Pyramid Web Framework, a Pylons project', 66 long_description=README + '\n\n' + CHANGES, 67 classifiers=[ 68 "Development Status :: 6 - Mature", 69 "Intended Audience :: Developers", 70 "Programming Language :: Python", 71 "Programming Language :: Python :: 2.7", 72 "Programming Language :: Python :: 3", 73 "Programming Language :: Python :: 3.4", 74 "Programming Language :: Python :: 3.5", 75 "Programming Language :: Python :: 3.6", 76 "Programming Language :: Python :: Implementation :: CPython", 77 "Programming Language :: Python :: Implementation :: PyPy", 78 "Framework :: Pyramid", 79 "Topic :: Internet :: WWW/HTTP", 80 "Topic :: Internet :: WWW/HTTP :: WSGI", 81 "License :: Repoze Public License", 82 ], 83 keywords='web wsgi pylons pyramid', 84 author="Chris McDonough, Agendaless Consulting", 85 author_email="[email protected]", 86 url="https://trypyramid.com", 87 license="BSD-derived (http://www.repoze.org/LICENSE.txt)", 88 packages=find_packages(), 89 include_package_data=True, 90 zip_safe=False, 91 python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*', 92 install_requires=install_requires, 93 extras_require={ 94 'testing': testing_extras, 95 'docs': docs_extras, 96 }, 97 tests_require=tests_require, 98 test_suite="pyramid.tests", 99 entry_points="""\ 100 [pyramid.scaffold] 101 starter=pyramid.scaffolds:StarterProjectTemplate 102 zodb=pyramid.scaffolds:ZODBProjectTemplate 103 alchemy=pyramid.scaffolds:AlchemyProjectTemplate 104 [pyramid.pshell_runner] 105 python=pyramid.scripts.pshell:python_shell_runner 106 [console_scripts] 107 pcreate = pyramid.scripts.pcreate:main 108 pserve = pyramid.scripts.pserve:main 109 pshell = pyramid.scripts.pshell:main 110 proutes = pyramid.scripts.proutes:main 111 pviews = pyramid.scripts.pviews:main 112 ptweens = pyramid.scripts.ptweens:main 113 prequest = pyramid.scripts.prequest:main 114 pdistreport = pyramid.scripts.pdistreport:main 115 [paste.server_runner] 116 wsgiref = pyramid.scripts.pserve:wsgiref_server_runner 117 cherrypy = pyramid.scripts.pserve:cherrypy_server_runner 118 """ 119 ) 120 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -46,7 +46,7 @@ docs_extras = [ - 'Sphinx >= 1.3.5, != 1.7.3', + 'Sphinx >= 1.7.4', 'docutils', 'repoze.sphinx.autointerface', 'pylons_sphinx_latesturl',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -46,7 +46,7 @@\n \n \n docs_extras = [\n- 'Sphinx >= 1.3.5, != 1.7.3',\n+ 'Sphinx >= 1.7.4',\n 'docutils',\n 'repoze.sphinx.autointerface',\n 'pylons_sphinx_latesturl',\n", "issue": "Bump Sphinx to >=1.7.2\nWould anyone be opposed to bumping Sphinx to >=1.7.2, != 1.7.3 in `setup.py`? I really want our PDFs to have `emphasize-lines` support, at long last, and bring in support for Unicode characters in PDFs via xelatex.\r\n\r\nRefs:\r\n* #667\r\n* #2572\r\n* https://github.com/rtfd/readthedocs.org/issues/4015\r\n\n", "before_files": [{"content": "##############################################################################\n#\n# Copyright (c) 2008-2013 Agendaless Consulting and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the BSD-like license at\n# http://www.repoze.org/LICENSE.txt. A copy of the license should accompany\n# this distribution. THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL\n# EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND\n# FITNESS FOR A PARTICULAR PURPOSE\n#\n##############################################################################\n\nimport os\n\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\ntry:\n with open(os.path.join(here, 'README.rst')) as f:\n README = f.read()\n with open(os.path.join(here, 'CHANGES.txt')) as f:\n CHANGES = f.read()\nexcept IOError:\n README = CHANGES = ''\n\ninstall_requires = [\n 'setuptools',\n 'WebOb >= 1.7.0', # Response.has_body\n 'repoze.lru >= 0.4', # py3 compat\n 'zope.interface >= 3.8.0', # has zope.interface.registry\n 'zope.deprecation >= 3.5.0', # py3 compat\n 'venusian >= 1.0a3', # ``ignore``\n 'translationstring >= 0.4', # py3 compat\n 'PasteDeploy >= 1.5.0', # py3 compat\n 'plaster',\n 'plaster_pastedeploy',\n 'hupper',\n ]\n\ntests_require = [\n 'WebTest >= 1.3.1', # py3 compat\n 'zope.component >= 4.0', # py3 compat\n ]\n\n\ndocs_extras = [\n 'Sphinx >= 1.3.5, != 1.7.3',\n 'docutils',\n 'repoze.sphinx.autointerface',\n 'pylons_sphinx_latesturl',\n 'pylons-sphinx-themes',\n 'sphinxcontrib-autoprogram',\n ]\n\ntesting_extras = tests_require + [\n 'nose',\n 'coverage',\n 'virtualenv', # for scaffolding tests\n ]\n\nsetup(name='pyramid',\n version='1.9.2',\n description='The Pyramid Web Framework, a Pylons project',\n long_description=README + '\\n\\n' + CHANGES,\n classifiers=[\n \"Development Status :: 6 - Mature\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Framework :: Pyramid\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI\",\n \"License :: Repoze Public License\",\n ],\n keywords='web wsgi pylons pyramid',\n author=\"Chris McDonough, Agendaless Consulting\",\n author_email=\"[email protected]\",\n url=\"https://trypyramid.com\",\n license=\"BSD-derived (http://www.repoze.org/LICENSE.txt)\",\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*',\n install_requires=install_requires,\n extras_require={\n 'testing': testing_extras,\n 'docs': docs_extras,\n },\n tests_require=tests_require,\n test_suite=\"pyramid.tests\",\n entry_points=\"\"\"\\\n [pyramid.scaffold]\n starter=pyramid.scaffolds:StarterProjectTemplate\n zodb=pyramid.scaffolds:ZODBProjectTemplate\n alchemy=pyramid.scaffolds:AlchemyProjectTemplate\n [pyramid.pshell_runner]\n python=pyramid.scripts.pshell:python_shell_runner\n [console_scripts]\n pcreate = pyramid.scripts.pcreate:main\n pserve = pyramid.scripts.pserve:main\n pshell = pyramid.scripts.pshell:main\n proutes = pyramid.scripts.proutes:main\n pviews = pyramid.scripts.pviews:main\n ptweens = pyramid.scripts.ptweens:main\n prequest = pyramid.scripts.prequest:main\n pdistreport = pyramid.scripts.pdistreport:main\n [paste.server_runner]\n wsgiref = pyramid.scripts.pserve:wsgiref_server_runner\n cherrypy = pyramid.scripts.pserve:cherrypy_server_runner\n \"\"\"\n )\n", "path": "setup.py"}], "after_files": [{"content": "##############################################################################\n#\n# Copyright (c) 2008-2013 Agendaless Consulting and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the BSD-like license at\n# http://www.repoze.org/LICENSE.txt. A copy of the license should accompany\n# this distribution. THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL\n# EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND\n# FITNESS FOR A PARTICULAR PURPOSE\n#\n##############################################################################\n\nimport os\n\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\ntry:\n with open(os.path.join(here, 'README.rst')) as f:\n README = f.read()\n with open(os.path.join(here, 'CHANGES.txt')) as f:\n CHANGES = f.read()\nexcept IOError:\n README = CHANGES = ''\n\ninstall_requires = [\n 'setuptools',\n 'WebOb >= 1.7.0', # Response.has_body\n 'repoze.lru >= 0.4', # py3 compat\n 'zope.interface >= 3.8.0', # has zope.interface.registry\n 'zope.deprecation >= 3.5.0', # py3 compat\n 'venusian >= 1.0a3', # ``ignore``\n 'translationstring >= 0.4', # py3 compat\n 'PasteDeploy >= 1.5.0', # py3 compat\n 'plaster',\n 'plaster_pastedeploy',\n 'hupper',\n ]\n\ntests_require = [\n 'WebTest >= 1.3.1', # py3 compat\n 'zope.component >= 4.0', # py3 compat\n ]\n\n\ndocs_extras = [\n 'Sphinx >= 1.7.4',\n 'docutils',\n 'repoze.sphinx.autointerface',\n 'pylons_sphinx_latesturl',\n 'pylons-sphinx-themes',\n 'sphinxcontrib-autoprogram',\n ]\n\ntesting_extras = tests_require + [\n 'nose',\n 'coverage',\n 'virtualenv', # for scaffolding tests\n ]\n\nsetup(name='pyramid',\n version='1.9.2',\n description='The Pyramid Web Framework, a Pylons project',\n long_description=README + '\\n\\n' + CHANGES,\n classifiers=[\n \"Development Status :: 6 - Mature\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Framework :: Pyramid\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI\",\n \"License :: Repoze Public License\",\n ],\n keywords='web wsgi pylons pyramid',\n author=\"Chris McDonough, Agendaless Consulting\",\n author_email=\"[email protected]\",\n url=\"https://trypyramid.com\",\n license=\"BSD-derived (http://www.repoze.org/LICENSE.txt)\",\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*',\n install_requires=install_requires,\n extras_require={\n 'testing': testing_extras,\n 'docs': docs_extras,\n },\n tests_require=tests_require,\n test_suite=\"pyramid.tests\",\n entry_points=\"\"\"\\\n [pyramid.scaffold]\n starter=pyramid.scaffolds:StarterProjectTemplate\n zodb=pyramid.scaffolds:ZODBProjectTemplate\n alchemy=pyramid.scaffolds:AlchemyProjectTemplate\n [pyramid.pshell_runner]\n python=pyramid.scripts.pshell:python_shell_runner\n [console_scripts]\n pcreate = pyramid.scripts.pcreate:main\n pserve = pyramid.scripts.pserve:main\n pshell = pyramid.scripts.pshell:main\n proutes = pyramid.scripts.proutes:main\n pviews = pyramid.scripts.pviews:main\n ptweens = pyramid.scripts.ptweens:main\n prequest = pyramid.scripts.prequest:main\n pdistreport = pyramid.scripts.pdistreport:main\n [paste.server_runner]\n wsgiref = pyramid.scripts.pserve:wsgiref_server_runner\n cherrypy = pyramid.scripts.pserve:cherrypy_server_runner\n \"\"\"\n )\n", "path": "setup.py"}]}
1,699
98
gh_patches_debug_8003
rasdani/github-patches
git_diff
pypa__setuptools-1625
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- AssertionError on len(sdists) == 1 when sdist format is zip In some Windows projects, users expect sdists to be released in zip form. For that reason, I've added the following to a project's setup.cfg: ``` [sdist] formats=zip ``` With this setting in place, performing a `pep517.build .` on such a project results in the following traceback: ``` Traceback (most recent call last): File "/Users/jaraco/code/main/jaraco.video/.tox/release/lib/python3.7/site-packages/pep517/_in_process.py", line 207, in <module> main() File "/Users/jaraco/code/main/jaraco.video/.tox/release/lib/python3.7/site-packages/pep517/_in_process.py", line 197, in main json_out['return_val'] = hook(**hook_input['kwargs']) File "/Users/jaraco/code/main/jaraco.video/.tox/release/lib/python3.7/site-packages/pep517/_in_process.py", line 170, in build_sdist return backend.build_sdist(sdist_directory, config_settings) File "/var/folders/c6/v7hnmq453xb6p2dbz1gqc6rr0000gn/T/pep517-build-env-rgz_zb4_/lib/python3.7/site-packages/setuptools/build_meta.py", line 181, in build_sdist assert len(sdists) == 1 AssertionError ``` Even though the output from building the sdist was seen above. I suspect the builder expects the sdist to have a particular extension. AssertionError on len(sdists) == 1 when sdist format is zip In some Windows projects, users expect sdists to be released in zip form. For that reason, I've added the following to a project's setup.cfg: ``` [sdist] formats=zip ``` With this setting in place, performing a `pep517.build .` on such a project results in the following traceback: ``` Traceback (most recent call last): File "/Users/jaraco/code/main/jaraco.video/.tox/release/lib/python3.7/site-packages/pep517/_in_process.py", line 207, in <module> main() File "/Users/jaraco/code/main/jaraco.video/.tox/release/lib/python3.7/site-packages/pep517/_in_process.py", line 197, in main json_out['return_val'] = hook(**hook_input['kwargs']) File "/Users/jaraco/code/main/jaraco.video/.tox/release/lib/python3.7/site-packages/pep517/_in_process.py", line 170, in build_sdist return backend.build_sdist(sdist_directory, config_settings) File "/var/folders/c6/v7hnmq453xb6p2dbz1gqc6rr0000gn/T/pep517-build-env-rgz_zb4_/lib/python3.7/site-packages/setuptools/build_meta.py", line 181, in build_sdist assert len(sdists) == 1 AssertionError ``` Even though the output from building the sdist was seen above. I suspect the builder expects the sdist to have a particular extension. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setuptools/build_meta.py` Content: ``` 1 """A PEP 517 interface to setuptools 2 3 Previously, when a user or a command line tool (let's call it a "frontend") 4 needed to make a request of setuptools to take a certain action, for 5 example, generating a list of installation requirements, the frontend would 6 would call "setup.py egg_info" or "setup.py bdist_wheel" on the command line. 7 8 PEP 517 defines a different method of interfacing with setuptools. Rather 9 than calling "setup.py" directly, the frontend should: 10 11 1. Set the current directory to the directory with a setup.py file 12 2. Import this module into a safe python interpreter (one in which 13 setuptools can potentially set global variables or crash hard). 14 3. Call one of the functions defined in PEP 517. 15 16 What each function does is defined in PEP 517. However, here is a "casual" 17 definition of the functions (this definition should not be relied on for 18 bug reports or API stability): 19 20 - `build_wheel`: build a wheel in the folder and return the basename 21 - `get_requires_for_build_wheel`: get the `setup_requires` to build 22 - `prepare_metadata_for_build_wheel`: get the `install_requires` 23 - `build_sdist`: build an sdist in the folder and return the basename 24 - `get_requires_for_build_sdist`: get the `setup_requires` to build 25 26 Again, this is not a formal definition! Just a "taste" of the module. 27 """ 28 29 import os 30 import sys 31 import tokenize 32 import shutil 33 import contextlib 34 35 import setuptools 36 import distutils 37 38 39 class SetupRequirementsError(BaseException): 40 def __init__(self, specifiers): 41 self.specifiers = specifiers 42 43 44 class Distribution(setuptools.dist.Distribution): 45 def fetch_build_eggs(self, specifiers): 46 raise SetupRequirementsError(specifiers) 47 48 @classmethod 49 @contextlib.contextmanager 50 def patch(cls): 51 """ 52 Replace 53 distutils.dist.Distribution with this class 54 for the duration of this context. 55 """ 56 orig = distutils.core.Distribution 57 distutils.core.Distribution = cls 58 try: 59 yield 60 finally: 61 distutils.core.Distribution = orig 62 63 64 def _to_str(s): 65 """ 66 Convert a filename to a string (on Python 2, explicitly 67 a byte string, not Unicode) as distutils checks for the 68 exact type str. 69 """ 70 if sys.version_info[0] == 2 and not isinstance(s, str): 71 # Assume it's Unicode, as that's what the PEP says 72 # should be provided. 73 return s.encode(sys.getfilesystemencoding()) 74 return s 75 76 77 def _run_setup(setup_script='setup.py'): 78 # Note that we can reuse our build directory between calls 79 # Correctness comes first, then optimization later 80 __file__ = setup_script 81 __name__ = '__main__' 82 f = getattr(tokenize, 'open', open)(__file__) 83 code = f.read().replace('\\r\\n', '\\n') 84 f.close() 85 exec(compile(code, __file__, 'exec'), locals()) 86 87 88 def _fix_config(config_settings): 89 config_settings = config_settings or {} 90 config_settings.setdefault('--global-option', []) 91 return config_settings 92 93 94 def _get_build_requires(config_settings, requirements): 95 config_settings = _fix_config(config_settings) 96 97 sys.argv = sys.argv[:1] + ['egg_info'] + \ 98 config_settings["--global-option"] 99 try: 100 with Distribution.patch(): 101 _run_setup() 102 except SetupRequirementsError as e: 103 requirements += e.specifiers 104 105 return requirements 106 107 108 def _get_immediate_subdirectories(a_dir): 109 return [name for name in os.listdir(a_dir) 110 if os.path.isdir(os.path.join(a_dir, name))] 111 112 113 def get_requires_for_build_wheel(config_settings=None): 114 config_settings = _fix_config(config_settings) 115 return _get_build_requires(config_settings, requirements=['wheel']) 116 117 118 def get_requires_for_build_sdist(config_settings=None): 119 config_settings = _fix_config(config_settings) 120 return _get_build_requires(config_settings, requirements=[]) 121 122 123 def prepare_metadata_for_build_wheel(metadata_directory, config_settings=None): 124 sys.argv = sys.argv[:1] + ['dist_info', '--egg-base', _to_str(metadata_directory)] 125 _run_setup() 126 127 dist_info_directory = metadata_directory 128 while True: 129 dist_infos = [f for f in os.listdir(dist_info_directory) 130 if f.endswith('.dist-info')] 131 132 if len(dist_infos) == 0 and \ 133 len(_get_immediate_subdirectories(dist_info_directory)) == 1: 134 dist_info_directory = os.path.join( 135 dist_info_directory, os.listdir(dist_info_directory)[0]) 136 continue 137 138 assert len(dist_infos) == 1 139 break 140 141 # PEP 517 requires that the .dist-info directory be placed in the 142 # metadata_directory. To comply, we MUST copy the directory to the root 143 if dist_info_directory != metadata_directory: 144 shutil.move( 145 os.path.join(dist_info_directory, dist_infos[0]), 146 metadata_directory) 147 shutil.rmtree(dist_info_directory, ignore_errors=True) 148 149 return dist_infos[0] 150 151 152 def build_wheel(wheel_directory, config_settings=None, 153 metadata_directory=None): 154 config_settings = _fix_config(config_settings) 155 wheel_directory = os.path.abspath(wheel_directory) 156 sys.argv = sys.argv[:1] + ['bdist_wheel'] + \ 157 config_settings["--global-option"] 158 _run_setup() 159 if wheel_directory != 'dist': 160 shutil.rmtree(wheel_directory) 161 shutil.copytree('dist', wheel_directory) 162 163 wheels = [f for f in os.listdir(wheel_directory) 164 if f.endswith('.whl')] 165 166 assert len(wheels) == 1 167 return wheels[0] 168 169 170 def build_sdist(sdist_directory, config_settings=None): 171 config_settings = _fix_config(config_settings) 172 sdist_directory = os.path.abspath(sdist_directory) 173 sys.argv = sys.argv[:1] + ['sdist'] + \ 174 config_settings["--global-option"] + \ 175 ["--dist-dir", sdist_directory] 176 _run_setup() 177 178 sdists = [f for f in os.listdir(sdist_directory) 179 if f.endswith('.tar.gz')] 180 181 assert len(sdists) == 1 182 return sdists[0] 183 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setuptools/build_meta.py b/setuptools/build_meta.py --- a/setuptools/build_meta.py +++ b/setuptools/build_meta.py @@ -170,7 +170,7 @@ def build_sdist(sdist_directory, config_settings=None): config_settings = _fix_config(config_settings) sdist_directory = os.path.abspath(sdist_directory) - sys.argv = sys.argv[:1] + ['sdist'] + \ + sys.argv = sys.argv[:1] + ['sdist', '--formats', 'gztar'] + \ config_settings["--global-option"] + \ ["--dist-dir", sdist_directory] _run_setup()
{"golden_diff": "diff --git a/setuptools/build_meta.py b/setuptools/build_meta.py\n--- a/setuptools/build_meta.py\n+++ b/setuptools/build_meta.py\n@@ -170,7 +170,7 @@\n def build_sdist(sdist_directory, config_settings=None):\n config_settings = _fix_config(config_settings)\n sdist_directory = os.path.abspath(sdist_directory)\n- sys.argv = sys.argv[:1] + ['sdist'] + \\\n+ sys.argv = sys.argv[:1] + ['sdist', '--formats', 'gztar'] + \\\n config_settings[\"--global-option\"] + \\\n [\"--dist-dir\", sdist_directory]\n _run_setup()\n", "issue": "AssertionError on len(sdists) == 1 when sdist format is zip\nIn some Windows projects, users expect sdists to be released in zip form. For that reason, I've added the following to a project's setup.cfg:\r\n\r\n```\r\n[sdist]\r\nformats=zip\r\n```\r\n\r\nWith this setting in place, performing a `pep517.build .` on such a project results in the following traceback:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/Users/jaraco/code/main/jaraco.video/.tox/release/lib/python3.7/site-packages/pep517/_in_process.py\", line 207, in <module>\r\n main()\r\n File \"/Users/jaraco/code/main/jaraco.video/.tox/release/lib/python3.7/site-packages/pep517/_in_process.py\", line 197, in main\r\n json_out['return_val'] = hook(**hook_input['kwargs'])\r\n File \"/Users/jaraco/code/main/jaraco.video/.tox/release/lib/python3.7/site-packages/pep517/_in_process.py\", line 170, in build_sdist\r\n return backend.build_sdist(sdist_directory, config_settings)\r\n File \"/var/folders/c6/v7hnmq453xb6p2dbz1gqc6rr0000gn/T/pep517-build-env-rgz_zb4_/lib/python3.7/site-packages/setuptools/build_meta.py\", line 181, in build_sdist\r\n assert len(sdists) == 1\r\nAssertionError\r\n```\r\n\r\nEven though the output from building the sdist was seen above. I suspect the builder expects the sdist to have a particular extension.\nAssertionError on len(sdists) == 1 when sdist format is zip\nIn some Windows projects, users expect sdists to be released in zip form. For that reason, I've added the following to a project's setup.cfg:\r\n\r\n```\r\n[sdist]\r\nformats=zip\r\n```\r\n\r\nWith this setting in place, performing a `pep517.build .` on such a project results in the following traceback:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/Users/jaraco/code/main/jaraco.video/.tox/release/lib/python3.7/site-packages/pep517/_in_process.py\", line 207, in <module>\r\n main()\r\n File \"/Users/jaraco/code/main/jaraco.video/.tox/release/lib/python3.7/site-packages/pep517/_in_process.py\", line 197, in main\r\n json_out['return_val'] = hook(**hook_input['kwargs'])\r\n File \"/Users/jaraco/code/main/jaraco.video/.tox/release/lib/python3.7/site-packages/pep517/_in_process.py\", line 170, in build_sdist\r\n return backend.build_sdist(sdist_directory, config_settings)\r\n File \"/var/folders/c6/v7hnmq453xb6p2dbz1gqc6rr0000gn/T/pep517-build-env-rgz_zb4_/lib/python3.7/site-packages/setuptools/build_meta.py\", line 181, in build_sdist\r\n assert len(sdists) == 1\r\nAssertionError\r\n```\r\n\r\nEven though the output from building the sdist was seen above. I suspect the builder expects the sdist to have a particular extension.\n", "before_files": [{"content": "\"\"\"A PEP 517 interface to setuptools\n\nPreviously, when a user or a command line tool (let's call it a \"frontend\")\nneeded to make a request of setuptools to take a certain action, for\nexample, generating a list of installation requirements, the frontend would\nwould call \"setup.py egg_info\" or \"setup.py bdist_wheel\" on the command line.\n\nPEP 517 defines a different method of interfacing with setuptools. Rather\nthan calling \"setup.py\" directly, the frontend should:\n\n 1. Set the current directory to the directory with a setup.py file\n 2. Import this module into a safe python interpreter (one in which\n setuptools can potentially set global variables or crash hard).\n 3. Call one of the functions defined in PEP 517.\n\nWhat each function does is defined in PEP 517. However, here is a \"casual\"\ndefinition of the functions (this definition should not be relied on for\nbug reports or API stability):\n\n - `build_wheel`: build a wheel in the folder and return the basename\n - `get_requires_for_build_wheel`: get the `setup_requires` to build\n - `prepare_metadata_for_build_wheel`: get the `install_requires`\n - `build_sdist`: build an sdist in the folder and return the basename\n - `get_requires_for_build_sdist`: get the `setup_requires` to build\n\nAgain, this is not a formal definition! Just a \"taste\" of the module.\n\"\"\"\n\nimport os\nimport sys\nimport tokenize\nimport shutil\nimport contextlib\n\nimport setuptools\nimport distutils\n\n\nclass SetupRequirementsError(BaseException):\n def __init__(self, specifiers):\n self.specifiers = specifiers\n\n\nclass Distribution(setuptools.dist.Distribution):\n def fetch_build_eggs(self, specifiers):\n raise SetupRequirementsError(specifiers)\n\n @classmethod\n @contextlib.contextmanager\n def patch(cls):\n \"\"\"\n Replace\n distutils.dist.Distribution with this class\n for the duration of this context.\n \"\"\"\n orig = distutils.core.Distribution\n distutils.core.Distribution = cls\n try:\n yield\n finally:\n distutils.core.Distribution = orig\n\n\ndef _to_str(s):\n \"\"\"\n Convert a filename to a string (on Python 2, explicitly\n a byte string, not Unicode) as distutils checks for the\n exact type str.\n \"\"\"\n if sys.version_info[0] == 2 and not isinstance(s, str):\n # Assume it's Unicode, as that's what the PEP says\n # should be provided.\n return s.encode(sys.getfilesystemencoding())\n return s\n\n\ndef _run_setup(setup_script='setup.py'):\n # Note that we can reuse our build directory between calls\n # Correctness comes first, then optimization later\n __file__ = setup_script\n __name__ = '__main__'\n f = getattr(tokenize, 'open', open)(__file__)\n code = f.read().replace('\\\\r\\\\n', '\\\\n')\n f.close()\n exec(compile(code, __file__, 'exec'), locals())\n\n\ndef _fix_config(config_settings):\n config_settings = config_settings or {}\n config_settings.setdefault('--global-option', [])\n return config_settings\n\n\ndef _get_build_requires(config_settings, requirements):\n config_settings = _fix_config(config_settings)\n\n sys.argv = sys.argv[:1] + ['egg_info'] + \\\n config_settings[\"--global-option\"]\n try:\n with Distribution.patch():\n _run_setup()\n except SetupRequirementsError as e:\n requirements += e.specifiers\n\n return requirements\n\n\ndef _get_immediate_subdirectories(a_dir):\n return [name for name in os.listdir(a_dir)\n if os.path.isdir(os.path.join(a_dir, name))]\n\n\ndef get_requires_for_build_wheel(config_settings=None):\n config_settings = _fix_config(config_settings)\n return _get_build_requires(config_settings, requirements=['wheel'])\n\n\ndef get_requires_for_build_sdist(config_settings=None):\n config_settings = _fix_config(config_settings)\n return _get_build_requires(config_settings, requirements=[])\n\n\ndef prepare_metadata_for_build_wheel(metadata_directory, config_settings=None):\n sys.argv = sys.argv[:1] + ['dist_info', '--egg-base', _to_str(metadata_directory)]\n _run_setup()\n\n dist_info_directory = metadata_directory\n while True:\n dist_infos = [f for f in os.listdir(dist_info_directory)\n if f.endswith('.dist-info')]\n\n if len(dist_infos) == 0 and \\\n len(_get_immediate_subdirectories(dist_info_directory)) == 1:\n dist_info_directory = os.path.join(\n dist_info_directory, os.listdir(dist_info_directory)[0])\n continue\n\n assert len(dist_infos) == 1\n break\n\n # PEP 517 requires that the .dist-info directory be placed in the\n # metadata_directory. To comply, we MUST copy the directory to the root\n if dist_info_directory != metadata_directory:\n shutil.move(\n os.path.join(dist_info_directory, dist_infos[0]),\n metadata_directory)\n shutil.rmtree(dist_info_directory, ignore_errors=True)\n\n return dist_infos[0]\n\n\ndef build_wheel(wheel_directory, config_settings=None,\n metadata_directory=None):\n config_settings = _fix_config(config_settings)\n wheel_directory = os.path.abspath(wheel_directory)\n sys.argv = sys.argv[:1] + ['bdist_wheel'] + \\\n config_settings[\"--global-option\"]\n _run_setup()\n if wheel_directory != 'dist':\n shutil.rmtree(wheel_directory)\n shutil.copytree('dist', wheel_directory)\n\n wheels = [f for f in os.listdir(wheel_directory)\n if f.endswith('.whl')]\n\n assert len(wheels) == 1\n return wheels[0]\n\n\ndef build_sdist(sdist_directory, config_settings=None):\n config_settings = _fix_config(config_settings)\n sdist_directory = os.path.abspath(sdist_directory)\n sys.argv = sys.argv[:1] + ['sdist'] + \\\n config_settings[\"--global-option\"] + \\\n [\"--dist-dir\", sdist_directory]\n _run_setup()\n\n sdists = [f for f in os.listdir(sdist_directory)\n if f.endswith('.tar.gz')]\n\n assert len(sdists) == 1\n return sdists[0]\n", "path": "setuptools/build_meta.py"}], "after_files": [{"content": "\"\"\"A PEP 517 interface to setuptools\n\nPreviously, when a user or a command line tool (let's call it a \"frontend\")\nneeded to make a request of setuptools to take a certain action, for\nexample, generating a list of installation requirements, the frontend would\nwould call \"setup.py egg_info\" or \"setup.py bdist_wheel\" on the command line.\n\nPEP 517 defines a different method of interfacing with setuptools. Rather\nthan calling \"setup.py\" directly, the frontend should:\n\n 1. Set the current directory to the directory with a setup.py file\n 2. Import this module into a safe python interpreter (one in which\n setuptools can potentially set global variables or crash hard).\n 3. Call one of the functions defined in PEP 517.\n\nWhat each function does is defined in PEP 517. However, here is a \"casual\"\ndefinition of the functions (this definition should not be relied on for\nbug reports or API stability):\n\n - `build_wheel`: build a wheel in the folder and return the basename\n - `get_requires_for_build_wheel`: get the `setup_requires` to build\n - `prepare_metadata_for_build_wheel`: get the `install_requires`\n - `build_sdist`: build an sdist in the folder and return the basename\n - `get_requires_for_build_sdist`: get the `setup_requires` to build\n\nAgain, this is not a formal definition! Just a \"taste\" of the module.\n\"\"\"\n\nimport os\nimport sys\nimport tokenize\nimport shutil\nimport contextlib\n\nimport setuptools\nimport distutils\n\n\nclass SetupRequirementsError(BaseException):\n def __init__(self, specifiers):\n self.specifiers = specifiers\n\n\nclass Distribution(setuptools.dist.Distribution):\n def fetch_build_eggs(self, specifiers):\n raise SetupRequirementsError(specifiers)\n\n @classmethod\n @contextlib.contextmanager\n def patch(cls):\n \"\"\"\n Replace\n distutils.dist.Distribution with this class\n for the duration of this context.\n \"\"\"\n orig = distutils.core.Distribution\n distutils.core.Distribution = cls\n try:\n yield\n finally:\n distutils.core.Distribution = orig\n\n\ndef _to_str(s):\n \"\"\"\n Convert a filename to a string (on Python 2, explicitly\n a byte string, not Unicode) as distutils checks for the\n exact type str.\n \"\"\"\n if sys.version_info[0] == 2 and not isinstance(s, str):\n # Assume it's Unicode, as that's what the PEP says\n # should be provided.\n return s.encode(sys.getfilesystemencoding())\n return s\n\n\ndef _run_setup(setup_script='setup.py'):\n # Note that we can reuse our build directory between calls\n # Correctness comes first, then optimization later\n __file__ = setup_script\n __name__ = '__main__'\n f = getattr(tokenize, 'open', open)(__file__)\n code = f.read().replace('\\\\r\\\\n', '\\\\n')\n f.close()\n exec(compile(code, __file__, 'exec'), locals())\n\n\ndef _fix_config(config_settings):\n config_settings = config_settings or {}\n config_settings.setdefault('--global-option', [])\n return config_settings\n\n\ndef _get_build_requires(config_settings, requirements):\n config_settings = _fix_config(config_settings)\n\n sys.argv = sys.argv[:1] + ['egg_info'] + \\\n config_settings[\"--global-option\"]\n try:\n with Distribution.patch():\n _run_setup()\n except SetupRequirementsError as e:\n requirements += e.specifiers\n\n return requirements\n\n\ndef _get_immediate_subdirectories(a_dir):\n return [name for name in os.listdir(a_dir)\n if os.path.isdir(os.path.join(a_dir, name))]\n\n\ndef get_requires_for_build_wheel(config_settings=None):\n config_settings = _fix_config(config_settings)\n return _get_build_requires(config_settings, requirements=['wheel'])\n\n\ndef get_requires_for_build_sdist(config_settings=None):\n config_settings = _fix_config(config_settings)\n return _get_build_requires(config_settings, requirements=[])\n\n\ndef prepare_metadata_for_build_wheel(metadata_directory, config_settings=None):\n sys.argv = sys.argv[:1] + ['dist_info', '--egg-base', _to_str(metadata_directory)]\n _run_setup()\n\n dist_info_directory = metadata_directory\n while True:\n dist_infos = [f for f in os.listdir(dist_info_directory)\n if f.endswith('.dist-info')]\n\n if len(dist_infos) == 0 and \\\n len(_get_immediate_subdirectories(dist_info_directory)) == 1:\n dist_info_directory = os.path.join(\n dist_info_directory, os.listdir(dist_info_directory)[0])\n continue\n\n assert len(dist_infos) == 1\n break\n\n # PEP 517 requires that the .dist-info directory be placed in the\n # metadata_directory. To comply, we MUST copy the directory to the root\n if dist_info_directory != metadata_directory:\n shutil.move(\n os.path.join(dist_info_directory, dist_infos[0]),\n metadata_directory)\n shutil.rmtree(dist_info_directory, ignore_errors=True)\n\n return dist_infos[0]\n\n\ndef build_wheel(wheel_directory, config_settings=None,\n metadata_directory=None):\n config_settings = _fix_config(config_settings)\n wheel_directory = os.path.abspath(wheel_directory)\n sys.argv = sys.argv[:1] + ['bdist_wheel'] + \\\n config_settings[\"--global-option\"]\n _run_setup()\n if wheel_directory != 'dist':\n shutil.rmtree(wheel_directory)\n shutil.copytree('dist', wheel_directory)\n\n wheels = [f for f in os.listdir(wheel_directory)\n if f.endswith('.whl')]\n\n assert len(wheels) == 1\n return wheels[0]\n\n\ndef build_sdist(sdist_directory, config_settings=None):\n config_settings = _fix_config(config_settings)\n sdist_directory = os.path.abspath(sdist_directory)\n sys.argv = sys.argv[:1] + ['sdist', '--formats', 'gztar'] + \\\n config_settings[\"--global-option\"] + \\\n [\"--dist-dir\", sdist_directory]\n _run_setup()\n\n sdists = [f for f in os.listdir(sdist_directory)\n if f.endswith('.tar.gz')]\n\n assert len(sdists) == 1\n return sdists[0]\n", "path": "setuptools/build_meta.py"}]}
2,849
147
gh_patches_debug_23277
rasdani/github-patches
git_diff
fidals__shopelectro-1006
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Search shows products with no category It should not, of course ![image](https://user-images.githubusercontent.com/3526796/67692684-9a9cb680-f9b1-11e9-8038-99534e063e03.png) Search link: https://www.shopelectro.ru/search/?term=MK1215NC Link to the product: https://www.shopelectro.ru/catalog/products/7608/ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `shopelectro/views/search.py` Content: ``` 1 from django.conf import settings 2 3 from search import views as search_views, search as search_engine 4 5 from pages.models import Page 6 7 from shopelectro.models import Category, Product 8 9 10 class Search(search_views.SearchView): 11 def get_redirect_search_entity(self): 12 return next(s for s in self.search_entities if s.name == 'product') 13 14 # ignore CPDBear 15 search_entities = [ 16 search_engine.Search( 17 name='category', 18 qs=Category.objects.active(), 19 fields=['name'], # Ignore CPDBear 20 min_similarity=settings.TRIGRAM_MIN_SIMILARITY, 21 ), 22 search_engine.Search( 23 name='product', 24 qs=Product.objects.active(), 25 fields=['name'], 26 redirect_field='vendor_code', 27 min_similarity=settings.TRIGRAM_MIN_SIMILARITY, 28 ), 29 search_engine.Search( 30 name='page', # Ignore CPDBear 31 qs=Page.objects.filter(is_active=True).exclude(type=Page.MODEL_TYPE), 32 fields=['name'], 33 min_similarity=settings.TRIGRAM_MIN_SIMILARITY, 34 ) 35 ] 36 37 redirect_field = 'vendor_code' 38 39 40 class Autocomplete(search_views.AutocompleteView): 41 42 # ignore CPDBear 43 search_entities = [ 44 search_engine.Search( 45 name='category', 46 qs=Category.objects.filter(page__is_active=True), 47 fields=['name', 'id'], 48 template_fields=['name', 'url'], 49 min_similarity=settings.TRIGRAM_MIN_SIMILARITY, 50 ), 51 search_engine.Search( 52 name='product', 53 qs=Product.objects.active(), 54 fields=['name', 'id', 'vendor_code'], 55 template_fields=['name', 'price', 'url'], # Ignore CPDBear 56 min_similarity=settings.TRIGRAM_MIN_SIMILARITY, 57 ), 58 search_engine.Search( 59 name='pages', 60 qs=Page.objects.filter(is_active=True).exclude(type=Page.MODEL_TYPE), 61 fields=['name'], 62 template_fields=['name', 'url'], 63 min_similarity=settings.TRIGRAM_MIN_SIMILARITY, 64 ) 65 ] 66 67 see_all_label = settings.SEARCH_SEE_ALL_LABEL 68 69 70 class AdminAutocomplete(search_views.AdminAutocompleteView): 71 72 # ignore CPDBear 73 search_entities = [ 74 search_engine.Search( 75 name='category', 76 qs=Category.objects.filter(page__is_active=True), 77 fields=['name'], 78 min_similarity=settings.TRIGRAM_MIN_SIMILARITY, 79 ), 80 search_engine.Search( 81 name='product', 82 qs=Product.objects.active(), 83 fields=['name'], 84 min_similarity=settings.TRIGRAM_MIN_SIMILARITY, 85 ), 86 search_engine.Search( 87 name='pages', 88 qs=Page.objects.filter(is_active=True).exclude(type=Page.MODEL_TYPE), 89 fields=['name'], 90 min_similarity=settings.TRIGRAM_MIN_SIMILARITY, 91 ) 92 ] 93 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/shopelectro/views/search.py b/shopelectro/views/search.py --- a/shopelectro/views/search.py +++ b/shopelectro/views/search.py @@ -1,9 +1,7 @@ from django.conf import settings -from search import views as search_views, search as search_engine - from pages.models import Page - +from search import views as search_views, search as search_engine from shopelectro.models import Category, Product @@ -21,14 +19,14 @@ ), search_engine.Search( name='product', - qs=Product.objects.active(), + qs=Product.objects.active().exclude(category__isnull=True), fields=['name'], redirect_field='vendor_code', min_similarity=settings.TRIGRAM_MIN_SIMILARITY, ), search_engine.Search( name='page', # Ignore CPDBear - qs=Page.objects.filter(is_active=True).exclude(type=Page.MODEL_TYPE), + qs=Page.objects.active().exclude(type=Page.MODEL_TYPE), fields=['name'], min_similarity=settings.TRIGRAM_MIN_SIMILARITY, )
{"golden_diff": "diff --git a/shopelectro/views/search.py b/shopelectro/views/search.py\n--- a/shopelectro/views/search.py\n+++ b/shopelectro/views/search.py\n@@ -1,9 +1,7 @@\n from django.conf import settings\n \n-from search import views as search_views, search as search_engine\n-\n from pages.models import Page\n-\n+from search import views as search_views, search as search_engine\n from shopelectro.models import Category, Product\n \n \n@@ -21,14 +19,14 @@\n ),\n search_engine.Search(\n name='product',\n- qs=Product.objects.active(),\n+ qs=Product.objects.active().exclude(category__isnull=True),\n fields=['name'],\n redirect_field='vendor_code',\n min_similarity=settings.TRIGRAM_MIN_SIMILARITY,\n ),\n search_engine.Search(\n name='page', # Ignore CPDBear\n- qs=Page.objects.filter(is_active=True).exclude(type=Page.MODEL_TYPE),\n+ qs=Page.objects.active().exclude(type=Page.MODEL_TYPE),\n fields=['name'],\n min_similarity=settings.TRIGRAM_MIN_SIMILARITY,\n )\n", "issue": "Search shows products with no category\nIt should not, of course\r\n\r\n![image](https://user-images.githubusercontent.com/3526796/67692684-9a9cb680-f9b1-11e9-8038-99534e063e03.png)\r\n\r\nSearch link: https://www.shopelectro.ru/search/?term=MK1215NC\r\nLink to the product: https://www.shopelectro.ru/catalog/products/7608/\r\n\n", "before_files": [{"content": "from django.conf import settings\n\nfrom search import views as search_views, search as search_engine\n\nfrom pages.models import Page\n\nfrom shopelectro.models import Category, Product\n\n\nclass Search(search_views.SearchView):\n def get_redirect_search_entity(self):\n return next(s for s in self.search_entities if s.name == 'product')\n\n # ignore CPDBear\n search_entities = [\n search_engine.Search(\n name='category',\n qs=Category.objects.active(),\n fields=['name'], # Ignore CPDBear\n min_similarity=settings.TRIGRAM_MIN_SIMILARITY,\n ),\n search_engine.Search(\n name='product',\n qs=Product.objects.active(),\n fields=['name'],\n redirect_field='vendor_code',\n min_similarity=settings.TRIGRAM_MIN_SIMILARITY,\n ),\n search_engine.Search(\n name='page', # Ignore CPDBear\n qs=Page.objects.filter(is_active=True).exclude(type=Page.MODEL_TYPE),\n fields=['name'],\n min_similarity=settings.TRIGRAM_MIN_SIMILARITY,\n )\n ]\n\n redirect_field = 'vendor_code'\n\n\nclass Autocomplete(search_views.AutocompleteView):\n\n # ignore CPDBear\n search_entities = [\n search_engine.Search(\n name='category',\n qs=Category.objects.filter(page__is_active=True),\n fields=['name', 'id'],\n template_fields=['name', 'url'],\n min_similarity=settings.TRIGRAM_MIN_SIMILARITY,\n ),\n search_engine.Search(\n name='product',\n qs=Product.objects.active(),\n fields=['name', 'id', 'vendor_code'],\n template_fields=['name', 'price', 'url'], # Ignore CPDBear\n min_similarity=settings.TRIGRAM_MIN_SIMILARITY,\n ),\n search_engine.Search(\n name='pages',\n qs=Page.objects.filter(is_active=True).exclude(type=Page.MODEL_TYPE),\n fields=['name'],\n template_fields=['name', 'url'],\n min_similarity=settings.TRIGRAM_MIN_SIMILARITY,\n )\n ]\n\n see_all_label = settings.SEARCH_SEE_ALL_LABEL\n\n\nclass AdminAutocomplete(search_views.AdminAutocompleteView):\n\n # ignore CPDBear\n search_entities = [\n search_engine.Search(\n name='category',\n qs=Category.objects.filter(page__is_active=True),\n fields=['name'],\n min_similarity=settings.TRIGRAM_MIN_SIMILARITY,\n ),\n search_engine.Search(\n name='product',\n qs=Product.objects.active(),\n fields=['name'],\n min_similarity=settings.TRIGRAM_MIN_SIMILARITY,\n ),\n search_engine.Search(\n name='pages',\n qs=Page.objects.filter(is_active=True).exclude(type=Page.MODEL_TYPE),\n fields=['name'],\n min_similarity=settings.TRIGRAM_MIN_SIMILARITY,\n )\n ]\n", "path": "shopelectro/views/search.py"}], "after_files": [{"content": "from django.conf import settings\n\nfrom pages.models import Page\nfrom search import views as search_views, search as search_engine\nfrom shopelectro.models import Category, Product\n\n\nclass Search(search_views.SearchView):\n def get_redirect_search_entity(self):\n return next(s for s in self.search_entities if s.name == 'product')\n\n # ignore CPDBear\n search_entities = [\n search_engine.Search(\n name='category',\n qs=Category.objects.active(),\n fields=['name'], # Ignore CPDBear\n min_similarity=settings.TRIGRAM_MIN_SIMILARITY,\n ),\n search_engine.Search(\n name='product',\n qs=Product.objects.active().exclude(category__isnull=True),\n fields=['name'],\n redirect_field='vendor_code',\n min_similarity=settings.TRIGRAM_MIN_SIMILARITY,\n ),\n search_engine.Search(\n name='page', # Ignore CPDBear\n qs=Page.objects.active().exclude(type=Page.MODEL_TYPE),\n fields=['name'],\n min_similarity=settings.TRIGRAM_MIN_SIMILARITY,\n )\n ]\n\n redirect_field = 'vendor_code'\n\n\nclass Autocomplete(search_views.AutocompleteView):\n\n # ignore CPDBear\n search_entities = [\n search_engine.Search(\n name='category',\n qs=Category.objects.filter(page__is_active=True),\n fields=['name', 'id'],\n template_fields=['name', 'url'],\n min_similarity=settings.TRIGRAM_MIN_SIMILARITY,\n ),\n search_engine.Search(\n name='product',\n qs=Product.objects.active(),\n fields=['name', 'id', 'vendor_code'],\n template_fields=['name', 'price', 'url'], # Ignore CPDBear\n min_similarity=settings.TRIGRAM_MIN_SIMILARITY,\n ),\n search_engine.Search(\n name='pages',\n qs=Page.objects.filter(is_active=True).exclude(type=Page.MODEL_TYPE),\n fields=['name'],\n template_fields=['name', 'url'],\n min_similarity=settings.TRIGRAM_MIN_SIMILARITY,\n )\n ]\n\n see_all_label = settings.SEARCH_SEE_ALL_LABEL\n\n\nclass AdminAutocomplete(search_views.AdminAutocompleteView):\n\n # ignore CPDBear\n search_entities = [\n search_engine.Search(\n name='category',\n qs=Category.objects.filter(page__is_active=True),\n fields=['name'],\n min_similarity=settings.TRIGRAM_MIN_SIMILARITY,\n ),\n search_engine.Search(\n name='product',\n qs=Product.objects.active(),\n fields=['name'],\n min_similarity=settings.TRIGRAM_MIN_SIMILARITY,\n ),\n search_engine.Search(\n name='pages',\n qs=Page.objects.filter(is_active=True).exclude(type=Page.MODEL_TYPE),\n fields=['name'],\n min_similarity=settings.TRIGRAM_MIN_SIMILARITY,\n )\n ]\n", "path": "shopelectro/views/search.py"}]}
1,167
249
gh_patches_debug_25998
rasdani/github-patches
git_diff
doccano__doccano-964
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Expose timestamps associated with annotations with the API The `..Annotation` models currently stores the `created_at` and `updated_at` fields but they are not exposed by the API. I'd like to propose exposing them through the API so that downstream analysis can be conducted using simple API calls, like those made using [`doccano-client`](https://github.com/doccano/doccano-client), for instance. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `app/api/serializers.py` Content: ``` 1 from django.conf import settings 2 from django.contrib.auth import get_user_model 3 from django.shortcuts import get_object_or_404 4 from rest_framework import serializers 5 from rest_polymorphic.serializers import PolymorphicSerializer 6 from rest_framework.exceptions import ValidationError 7 8 9 from .models import Label, Project, Document, RoleMapping, Role 10 from .models import TextClassificationProject, SequenceLabelingProject, Seq2seqProject, Speech2textProject 11 from .models import DocumentAnnotation, SequenceAnnotation, Seq2seqAnnotation, Speech2textAnnotation 12 13 14 class UserSerializer(serializers.ModelSerializer): 15 16 class Meta: 17 model = get_user_model() 18 fields = ('id', 'username', 'first_name', 'last_name', 'email', 'is_superuser') 19 20 21 class LabelSerializer(serializers.ModelSerializer): 22 23 def validate(self, attrs): 24 prefix_key = attrs.get('prefix_key') 25 suffix_key = attrs.get('suffix_key') 26 27 # In the case of user don't set any shortcut key. 28 if prefix_key is None and suffix_key is None: 29 return super().validate(attrs) 30 31 # Don't allow shortcut key not to have a suffix key. 32 if prefix_key and not suffix_key: 33 raise ValidationError('Shortcut key may not have a suffix key.') 34 35 # Don't allow to save same shortcut key when prefix_key is null. 36 try: 37 context = self.context['request'].parser_context 38 project_id = context['kwargs']['project_id'] 39 label_id = context['kwargs'].get('label_id') 40 except (AttributeError, KeyError): 41 pass # unit tests don't always have the correct context set up 42 else: 43 conflicting_labels = Label.objects.filter( 44 suffix_key=suffix_key, 45 prefix_key=prefix_key, 46 project=project_id, 47 ) 48 49 if label_id is not None: 50 conflicting_labels = conflicting_labels.exclude(id=label_id) 51 52 if conflicting_labels.exists(): 53 raise ValidationError('Duplicate shortcut key.') 54 55 return super().validate(attrs) 56 57 class Meta: 58 model = Label 59 fields = ('id', 'text', 'prefix_key', 'suffix_key', 'background_color', 'text_color') 60 61 62 class DocumentSerializer(serializers.ModelSerializer): 63 annotations = serializers.SerializerMethodField() 64 annotation_approver = serializers.SerializerMethodField() 65 66 def get_annotations(self, instance): 67 request = self.context.get('request') 68 project = instance.project 69 model = project.get_annotation_class() 70 serializer = project.get_annotation_serializer() 71 annotations = model.objects.filter(document=instance.id) 72 if request and not project.collaborative_annotation: 73 annotations = annotations.filter(user=request.user) 74 serializer = serializer(annotations, many=True) 75 return serializer.data 76 77 @classmethod 78 def get_annotation_approver(cls, instance): 79 approver = instance.annotations_approved_by 80 return approver.username if approver else None 81 82 class Meta: 83 model = Document 84 fields = ('id', 'text', 'annotations', 'meta', 'annotation_approver') 85 86 87 class ApproverSerializer(DocumentSerializer): 88 89 class Meta: 90 model = Document 91 fields = ('id', 'annotation_approver') 92 93 94 class ProjectSerializer(serializers.ModelSerializer): 95 current_users_role = serializers.SerializerMethodField() 96 97 def get_current_users_role(self, instance): 98 role_abstractor = { 99 "is_project_admin": settings.ROLE_PROJECT_ADMIN, 100 "is_annotator": settings.ROLE_ANNOTATOR, 101 "is_annotation_approver": settings.ROLE_ANNOTATION_APPROVER, 102 } 103 queryset = RoleMapping.objects.values("role_id__name") 104 if queryset: 105 users_role = get_object_or_404( 106 queryset, project=instance.id, user=self.context.get("request").user.id 107 ) 108 for key, val in role_abstractor.items(): 109 role_abstractor[key] = users_role["role_id__name"] == val 110 return role_abstractor 111 112 class Meta: 113 model = Project 114 fields = ('id', 'name', 'description', 'guideline', 'users', 'current_users_role', 'project_type', 'image', 115 'updated_at', 'randomize_document_order', 'collaborative_annotation', 'single_class_classification') 116 read_only_fields = ('image', 'updated_at', 'users', 'current_users_role') 117 118 119 class TextClassificationProjectSerializer(ProjectSerializer): 120 121 class Meta: 122 model = TextClassificationProject 123 fields = ProjectSerializer.Meta.fields 124 read_only_fields = ProjectSerializer.Meta.read_only_fields 125 126 127 class SequenceLabelingProjectSerializer(ProjectSerializer): 128 129 class Meta: 130 model = SequenceLabelingProject 131 fields = ProjectSerializer.Meta.fields 132 read_only_fields = ProjectSerializer.Meta.read_only_fields 133 134 135 class Seq2seqProjectSerializer(ProjectSerializer): 136 137 class Meta: 138 model = Seq2seqProject 139 fields = ProjectSerializer.Meta.fields 140 read_only_fields = ProjectSerializer.Meta.read_only_fields 141 142 143 class Speech2textProjectSerializer(ProjectSerializer): 144 145 class Meta: 146 model = Speech2textProject 147 fields = ('id', 'name', 'description', 'guideline', 'users', 'current_users_role', 'project_type', 'image', 148 'updated_at', 'randomize_document_order') 149 read_only_fields = ('image', 'updated_at', 'users', 'current_users_role') 150 151 152 class ProjectPolymorphicSerializer(PolymorphicSerializer): 153 model_serializer_mapping = { 154 Project: ProjectSerializer, 155 TextClassificationProject: TextClassificationProjectSerializer, 156 SequenceLabelingProject: SequenceLabelingProjectSerializer, 157 Seq2seqProject: Seq2seqProjectSerializer, 158 Speech2textProject: Speech2textProjectSerializer, 159 } 160 161 162 class ProjectFilteredPrimaryKeyRelatedField(serializers.PrimaryKeyRelatedField): 163 164 def get_queryset(self): 165 view = self.context.get('view', None) 166 request = self.context.get('request', None) 167 queryset = super(ProjectFilteredPrimaryKeyRelatedField, self).get_queryset() 168 if not request or not queryset or not view: 169 return None 170 return queryset.filter(project=view.kwargs['project_id']) 171 172 173 class DocumentAnnotationSerializer(serializers.ModelSerializer): 174 # label = ProjectFilteredPrimaryKeyRelatedField(queryset=Label.objects.all()) 175 label = serializers.PrimaryKeyRelatedField(queryset=Label.objects.all()) 176 document = serializers.PrimaryKeyRelatedField(queryset=Document.objects.all()) 177 178 class Meta: 179 model = DocumentAnnotation 180 fields = ('id', 'prob', 'label', 'user', 'document') 181 read_only_fields = ('user', ) 182 183 184 class SequenceAnnotationSerializer(serializers.ModelSerializer): 185 #label = ProjectFilteredPrimaryKeyRelatedField(queryset=Label.objects.all()) 186 label = serializers.PrimaryKeyRelatedField(queryset=Label.objects.all()) 187 document = serializers.PrimaryKeyRelatedField(queryset=Document.objects.all()) 188 189 class Meta: 190 model = SequenceAnnotation 191 fields = ('id', 'prob', 'label', 'start_offset', 'end_offset', 'user', 'document') 192 read_only_fields = ('user',) 193 194 195 class Seq2seqAnnotationSerializer(serializers.ModelSerializer): 196 document = serializers.PrimaryKeyRelatedField(queryset=Document.objects.all()) 197 198 class Meta: 199 model = Seq2seqAnnotation 200 fields = ('id', 'text', 'user', 'document', 'prob') 201 read_only_fields = ('user',) 202 203 204 class Speech2textAnnotationSerializer(serializers.ModelSerializer): 205 document = serializers.PrimaryKeyRelatedField(queryset=Document.objects.all()) 206 207 class Meta: 208 model = Speech2textAnnotation 209 fields = ('id', 'prob', 'text', 'user', 'document') 210 read_only_fields = ('user',) 211 212 213 class RoleSerializer(serializers.ModelSerializer): 214 class Meta: 215 model = Role 216 fields = ('id', 'name') 217 218 219 class RoleMappingSerializer(serializers.ModelSerializer): 220 username = serializers.SerializerMethodField() 221 rolename = serializers.SerializerMethodField() 222 223 @classmethod 224 def get_username(cls, instance): 225 user = instance.user 226 return user.username if user else None 227 228 @classmethod 229 def get_rolename(cls, instance): 230 role = instance.role 231 return role.name if role else None 232 233 class Meta: 234 model = RoleMapping 235 fields = ('id', 'user', 'role', 'username', 'rolename') 236 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/app/api/serializers.py b/app/api/serializers.py --- a/app/api/serializers.py +++ b/app/api/serializers.py @@ -177,7 +177,7 @@ class Meta: model = DocumentAnnotation - fields = ('id', 'prob', 'label', 'user', 'document') + fields = ('id', 'prob', 'label', 'user', 'document', 'created_at', 'updated_at') read_only_fields = ('user', ) @@ -188,7 +188,7 @@ class Meta: model = SequenceAnnotation - fields = ('id', 'prob', 'label', 'start_offset', 'end_offset', 'user', 'document') + fields = ('id', 'prob', 'label', 'start_offset', 'end_offset', 'user', 'document', 'created_at', 'updated_at') read_only_fields = ('user',) @@ -197,7 +197,7 @@ class Meta: model = Seq2seqAnnotation - fields = ('id', 'text', 'user', 'document', 'prob') + fields = ('id', 'text', 'user', 'document', 'prob', 'created_at', 'updated_at') read_only_fields = ('user',) @@ -206,7 +206,7 @@ class Meta: model = Speech2textAnnotation - fields = ('id', 'prob', 'text', 'user', 'document') + fields = ('id', 'prob', 'text', 'user', 'document', 'created_at', 'updated_at') read_only_fields = ('user',)
{"golden_diff": "diff --git a/app/api/serializers.py b/app/api/serializers.py\n--- a/app/api/serializers.py\n+++ b/app/api/serializers.py\n@@ -177,7 +177,7 @@\n \n class Meta:\n model = DocumentAnnotation\n- fields = ('id', 'prob', 'label', 'user', 'document')\n+ fields = ('id', 'prob', 'label', 'user', 'document', 'created_at', 'updated_at')\n read_only_fields = ('user', )\n \n \n@@ -188,7 +188,7 @@\n \n class Meta:\n model = SequenceAnnotation\n- fields = ('id', 'prob', 'label', 'start_offset', 'end_offset', 'user', 'document')\n+ fields = ('id', 'prob', 'label', 'start_offset', 'end_offset', 'user', 'document', 'created_at', 'updated_at')\n read_only_fields = ('user',)\n \n \n@@ -197,7 +197,7 @@\n \n class Meta:\n model = Seq2seqAnnotation\n- fields = ('id', 'text', 'user', 'document', 'prob')\n+ fields = ('id', 'text', 'user', 'document', 'prob', 'created_at', 'updated_at')\n read_only_fields = ('user',)\n \n \n@@ -206,7 +206,7 @@\n \n class Meta:\n model = Speech2textAnnotation\n- fields = ('id', 'prob', 'text', 'user', 'document')\n+ fields = ('id', 'prob', 'text', 'user', 'document', 'created_at', 'updated_at')\n read_only_fields = ('user',)\n", "issue": "Expose timestamps associated with annotations with the API\nThe `..Annotation` models currently stores the `created_at` and `updated_at` fields but they are not exposed by the API.\r\n\r\nI'd like to propose exposing them through the API so that downstream analysis can be conducted using simple API calls, like those made using [`doccano-client`](https://github.com/doccano/doccano-client), for instance.\n", "before_files": [{"content": "from django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework import serializers\nfrom rest_polymorphic.serializers import PolymorphicSerializer\nfrom rest_framework.exceptions import ValidationError\n\n\nfrom .models import Label, Project, Document, RoleMapping, Role\nfrom .models import TextClassificationProject, SequenceLabelingProject, Seq2seqProject, Speech2textProject\nfrom .models import DocumentAnnotation, SequenceAnnotation, Seq2seqAnnotation, Speech2textAnnotation\n\n\nclass UserSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = get_user_model()\n fields = ('id', 'username', 'first_name', 'last_name', 'email', 'is_superuser')\n\n\nclass LabelSerializer(serializers.ModelSerializer):\n\n def validate(self, attrs):\n prefix_key = attrs.get('prefix_key')\n suffix_key = attrs.get('suffix_key')\n\n # In the case of user don't set any shortcut key.\n if prefix_key is None and suffix_key is None:\n return super().validate(attrs)\n\n # Don't allow shortcut key not to have a suffix key.\n if prefix_key and not suffix_key:\n raise ValidationError('Shortcut key may not have a suffix key.')\n\n # Don't allow to save same shortcut key when prefix_key is null.\n try:\n context = self.context['request'].parser_context\n project_id = context['kwargs']['project_id']\n label_id = context['kwargs'].get('label_id')\n except (AttributeError, KeyError):\n pass # unit tests don't always have the correct context set up\n else:\n conflicting_labels = Label.objects.filter(\n suffix_key=suffix_key,\n prefix_key=prefix_key,\n project=project_id,\n )\n\n if label_id is not None:\n conflicting_labels = conflicting_labels.exclude(id=label_id)\n\n if conflicting_labels.exists():\n raise ValidationError('Duplicate shortcut key.')\n\n return super().validate(attrs)\n\n class Meta:\n model = Label\n fields = ('id', 'text', 'prefix_key', 'suffix_key', 'background_color', 'text_color')\n\n\nclass DocumentSerializer(serializers.ModelSerializer):\n annotations = serializers.SerializerMethodField()\n annotation_approver = serializers.SerializerMethodField()\n\n def get_annotations(self, instance):\n request = self.context.get('request')\n project = instance.project\n model = project.get_annotation_class()\n serializer = project.get_annotation_serializer()\n annotations = model.objects.filter(document=instance.id)\n if request and not project.collaborative_annotation:\n annotations = annotations.filter(user=request.user)\n serializer = serializer(annotations, many=True)\n return serializer.data\n\n @classmethod\n def get_annotation_approver(cls, instance):\n approver = instance.annotations_approved_by\n return approver.username if approver else None\n\n class Meta:\n model = Document\n fields = ('id', 'text', 'annotations', 'meta', 'annotation_approver')\n\n\nclass ApproverSerializer(DocumentSerializer):\n\n class Meta:\n model = Document\n fields = ('id', 'annotation_approver')\n\n\nclass ProjectSerializer(serializers.ModelSerializer):\n current_users_role = serializers.SerializerMethodField()\n\n def get_current_users_role(self, instance):\n role_abstractor = {\n \"is_project_admin\": settings.ROLE_PROJECT_ADMIN,\n \"is_annotator\": settings.ROLE_ANNOTATOR,\n \"is_annotation_approver\": settings.ROLE_ANNOTATION_APPROVER,\n }\n queryset = RoleMapping.objects.values(\"role_id__name\")\n if queryset:\n users_role = get_object_or_404(\n queryset, project=instance.id, user=self.context.get(\"request\").user.id\n )\n for key, val in role_abstractor.items():\n role_abstractor[key] = users_role[\"role_id__name\"] == val\n return role_abstractor\n\n class Meta:\n model = Project\n fields = ('id', 'name', 'description', 'guideline', 'users', 'current_users_role', 'project_type', 'image',\n 'updated_at', 'randomize_document_order', 'collaborative_annotation', 'single_class_classification')\n read_only_fields = ('image', 'updated_at', 'users', 'current_users_role')\n\n\nclass TextClassificationProjectSerializer(ProjectSerializer):\n\n class Meta:\n model = TextClassificationProject\n fields = ProjectSerializer.Meta.fields\n read_only_fields = ProjectSerializer.Meta.read_only_fields\n\n\nclass SequenceLabelingProjectSerializer(ProjectSerializer):\n\n class Meta:\n model = SequenceLabelingProject\n fields = ProjectSerializer.Meta.fields\n read_only_fields = ProjectSerializer.Meta.read_only_fields\n\n\nclass Seq2seqProjectSerializer(ProjectSerializer):\n\n class Meta:\n model = Seq2seqProject\n fields = ProjectSerializer.Meta.fields\n read_only_fields = ProjectSerializer.Meta.read_only_fields\n\n\nclass Speech2textProjectSerializer(ProjectSerializer):\n\n class Meta:\n model = Speech2textProject\n fields = ('id', 'name', 'description', 'guideline', 'users', 'current_users_role', 'project_type', 'image',\n 'updated_at', 'randomize_document_order')\n read_only_fields = ('image', 'updated_at', 'users', 'current_users_role')\n\n\nclass ProjectPolymorphicSerializer(PolymorphicSerializer):\n model_serializer_mapping = {\n Project: ProjectSerializer,\n TextClassificationProject: TextClassificationProjectSerializer,\n SequenceLabelingProject: SequenceLabelingProjectSerializer,\n Seq2seqProject: Seq2seqProjectSerializer,\n Speech2textProject: Speech2textProjectSerializer,\n }\n\n\nclass ProjectFilteredPrimaryKeyRelatedField(serializers.PrimaryKeyRelatedField):\n\n def get_queryset(self):\n view = self.context.get('view', None)\n request = self.context.get('request', None)\n queryset = super(ProjectFilteredPrimaryKeyRelatedField, self).get_queryset()\n if not request or not queryset or not view:\n return None\n return queryset.filter(project=view.kwargs['project_id'])\n\n\nclass DocumentAnnotationSerializer(serializers.ModelSerializer):\n # label = ProjectFilteredPrimaryKeyRelatedField(queryset=Label.objects.all())\n label = serializers.PrimaryKeyRelatedField(queryset=Label.objects.all())\n document = serializers.PrimaryKeyRelatedField(queryset=Document.objects.all())\n\n class Meta:\n model = DocumentAnnotation\n fields = ('id', 'prob', 'label', 'user', 'document')\n read_only_fields = ('user', )\n\n\nclass SequenceAnnotationSerializer(serializers.ModelSerializer):\n #label = ProjectFilteredPrimaryKeyRelatedField(queryset=Label.objects.all())\n label = serializers.PrimaryKeyRelatedField(queryset=Label.objects.all())\n document = serializers.PrimaryKeyRelatedField(queryset=Document.objects.all())\n\n class Meta:\n model = SequenceAnnotation\n fields = ('id', 'prob', 'label', 'start_offset', 'end_offset', 'user', 'document')\n read_only_fields = ('user',)\n\n\nclass Seq2seqAnnotationSerializer(serializers.ModelSerializer):\n document = serializers.PrimaryKeyRelatedField(queryset=Document.objects.all())\n\n class Meta:\n model = Seq2seqAnnotation\n fields = ('id', 'text', 'user', 'document', 'prob')\n read_only_fields = ('user',)\n\n\nclass Speech2textAnnotationSerializer(serializers.ModelSerializer):\n document = serializers.PrimaryKeyRelatedField(queryset=Document.objects.all())\n\n class Meta:\n model = Speech2textAnnotation\n fields = ('id', 'prob', 'text', 'user', 'document')\n read_only_fields = ('user',)\n\n\nclass RoleSerializer(serializers.ModelSerializer):\n class Meta:\n model = Role\n fields = ('id', 'name')\n\n\nclass RoleMappingSerializer(serializers.ModelSerializer):\n username = serializers.SerializerMethodField()\n rolename = serializers.SerializerMethodField()\n\n @classmethod\n def get_username(cls, instance):\n user = instance.user\n return user.username if user else None\n\n @classmethod\n def get_rolename(cls, instance):\n role = instance.role\n return role.name if role else None\n\n class Meta:\n model = RoleMapping\n fields = ('id', 'user', 'role', 'username', 'rolename')\n", "path": "app/api/serializers.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework import serializers\nfrom rest_polymorphic.serializers import PolymorphicSerializer\nfrom rest_framework.exceptions import ValidationError\n\n\nfrom .models import Label, Project, Document, RoleMapping, Role\nfrom .models import TextClassificationProject, SequenceLabelingProject, Seq2seqProject, Speech2textProject\nfrom .models import DocumentAnnotation, SequenceAnnotation, Seq2seqAnnotation, Speech2textAnnotation\n\n\nclass UserSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = get_user_model()\n fields = ('id', 'username', 'first_name', 'last_name', 'email', 'is_superuser')\n\n\nclass LabelSerializer(serializers.ModelSerializer):\n\n def validate(self, attrs):\n prefix_key = attrs.get('prefix_key')\n suffix_key = attrs.get('suffix_key')\n\n # In the case of user don't set any shortcut key.\n if prefix_key is None and suffix_key is None:\n return super().validate(attrs)\n\n # Don't allow shortcut key not to have a suffix key.\n if prefix_key and not suffix_key:\n raise ValidationError('Shortcut key may not have a suffix key.')\n\n # Don't allow to save same shortcut key when prefix_key is null.\n try:\n context = self.context['request'].parser_context\n project_id = context['kwargs']['project_id']\n label_id = context['kwargs'].get('label_id')\n except (AttributeError, KeyError):\n pass # unit tests don't always have the correct context set up\n else:\n conflicting_labels = Label.objects.filter(\n suffix_key=suffix_key,\n prefix_key=prefix_key,\n project=project_id,\n )\n\n if label_id is not None:\n conflicting_labels = conflicting_labels.exclude(id=label_id)\n\n if conflicting_labels.exists():\n raise ValidationError('Duplicate shortcut key.')\n\n return super().validate(attrs)\n\n class Meta:\n model = Label\n fields = ('id', 'text', 'prefix_key', 'suffix_key', 'background_color', 'text_color')\n\n\nclass DocumentSerializer(serializers.ModelSerializer):\n annotations = serializers.SerializerMethodField()\n annotation_approver = serializers.SerializerMethodField()\n\n def get_annotations(self, instance):\n request = self.context.get('request')\n project = instance.project\n model = project.get_annotation_class()\n serializer = project.get_annotation_serializer()\n annotations = model.objects.filter(document=instance.id)\n if request and not project.collaborative_annotation:\n annotations = annotations.filter(user=request.user)\n serializer = serializer(annotations, many=True)\n return serializer.data\n\n @classmethod\n def get_annotation_approver(cls, instance):\n approver = instance.annotations_approved_by\n return approver.username if approver else None\n\n class Meta:\n model = Document\n fields = ('id', 'text', 'annotations', 'meta', 'annotation_approver')\n\n\nclass ApproverSerializer(DocumentSerializer):\n\n class Meta:\n model = Document\n fields = ('id', 'annotation_approver')\n\n\nclass ProjectSerializer(serializers.ModelSerializer):\n current_users_role = serializers.SerializerMethodField()\n\n def get_current_users_role(self, instance):\n role_abstractor = {\n \"is_project_admin\": settings.ROLE_PROJECT_ADMIN,\n \"is_annotator\": settings.ROLE_ANNOTATOR,\n \"is_annotation_approver\": settings.ROLE_ANNOTATION_APPROVER,\n }\n queryset = RoleMapping.objects.values(\"role_id__name\")\n if queryset:\n users_role = get_object_or_404(\n queryset, project=instance.id, user=self.context.get(\"request\").user.id\n )\n for key, val in role_abstractor.items():\n role_abstractor[key] = users_role[\"role_id__name\"] == val\n return role_abstractor\n\n class Meta:\n model = Project\n fields = ('id', 'name', 'description', 'guideline', 'users', 'current_users_role', 'project_type', 'image',\n 'updated_at', 'randomize_document_order', 'collaborative_annotation', 'single_class_classification')\n read_only_fields = ('image', 'updated_at', 'users', 'current_users_role')\n\n\nclass TextClassificationProjectSerializer(ProjectSerializer):\n\n class Meta:\n model = TextClassificationProject\n fields = ProjectSerializer.Meta.fields\n read_only_fields = ProjectSerializer.Meta.read_only_fields\n\n\nclass SequenceLabelingProjectSerializer(ProjectSerializer):\n\n class Meta:\n model = SequenceLabelingProject\n fields = ProjectSerializer.Meta.fields\n read_only_fields = ProjectSerializer.Meta.read_only_fields\n\n\nclass Seq2seqProjectSerializer(ProjectSerializer):\n\n class Meta:\n model = Seq2seqProject\n fields = ProjectSerializer.Meta.fields\n read_only_fields = ProjectSerializer.Meta.read_only_fields\n\n\nclass Speech2textProjectSerializer(ProjectSerializer):\n\n class Meta:\n model = Speech2textProject\n fields = ('id', 'name', 'description', 'guideline', 'users', 'current_users_role', 'project_type', 'image',\n 'updated_at', 'randomize_document_order')\n read_only_fields = ('image', 'updated_at', 'users', 'current_users_role')\n\n\nclass ProjectPolymorphicSerializer(PolymorphicSerializer):\n model_serializer_mapping = {\n Project: ProjectSerializer,\n TextClassificationProject: TextClassificationProjectSerializer,\n SequenceLabelingProject: SequenceLabelingProjectSerializer,\n Seq2seqProject: Seq2seqProjectSerializer,\n Speech2textProject: Speech2textProjectSerializer,\n }\n\n\nclass ProjectFilteredPrimaryKeyRelatedField(serializers.PrimaryKeyRelatedField):\n\n def get_queryset(self):\n view = self.context.get('view', None)\n request = self.context.get('request', None)\n queryset = super(ProjectFilteredPrimaryKeyRelatedField, self).get_queryset()\n if not request or not queryset or not view:\n return None\n return queryset.filter(project=view.kwargs['project_id'])\n\n\nclass DocumentAnnotationSerializer(serializers.ModelSerializer):\n # label = ProjectFilteredPrimaryKeyRelatedField(queryset=Label.objects.all())\n label = serializers.PrimaryKeyRelatedField(queryset=Label.objects.all())\n document = serializers.PrimaryKeyRelatedField(queryset=Document.objects.all())\n\n class Meta:\n model = DocumentAnnotation\n fields = ('id', 'prob', 'label', 'user', 'document', 'created_at', 'updated_at')\n read_only_fields = ('user', )\n\n\nclass SequenceAnnotationSerializer(serializers.ModelSerializer):\n #label = ProjectFilteredPrimaryKeyRelatedField(queryset=Label.objects.all())\n label = serializers.PrimaryKeyRelatedField(queryset=Label.objects.all())\n document = serializers.PrimaryKeyRelatedField(queryset=Document.objects.all())\n\n class Meta:\n model = SequenceAnnotation\n fields = ('id', 'prob', 'label', 'start_offset', 'end_offset', 'user', 'document', 'created_at', 'updated_at')\n read_only_fields = ('user',)\n\n\nclass Seq2seqAnnotationSerializer(serializers.ModelSerializer):\n document = serializers.PrimaryKeyRelatedField(queryset=Document.objects.all())\n\n class Meta:\n model = Seq2seqAnnotation\n fields = ('id', 'text', 'user', 'document', 'prob', 'created_at', 'updated_at')\n read_only_fields = ('user',)\n\n\nclass Speech2textAnnotationSerializer(serializers.ModelSerializer):\n document = serializers.PrimaryKeyRelatedField(queryset=Document.objects.all())\n\n class Meta:\n model = Speech2textAnnotation\n fields = ('id', 'prob', 'text', 'user', 'document', 'created_at', 'updated_at')\n read_only_fields = ('user',)\n\n\nclass RoleSerializer(serializers.ModelSerializer):\n class Meta:\n model = Role\n fields = ('id', 'name')\n\n\nclass RoleMappingSerializer(serializers.ModelSerializer):\n username = serializers.SerializerMethodField()\n rolename = serializers.SerializerMethodField()\n\n @classmethod\n def get_username(cls, instance):\n user = instance.user\n return user.username if user else None\n\n @classmethod\n def get_rolename(cls, instance):\n role = instance.role\n return role.name if role else None\n\n class Meta:\n model = RoleMapping\n fields = ('id', 'user', 'role', 'username', 'rolename')\n", "path": "app/api/serializers.py"}]}
2,678
376
gh_patches_debug_6171
rasdani/github-patches
git_diff
yt-project__yt-4205
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ENH: "local" config file upward lookup ### Bug report **Bug summary** Since yt 4.0 we support two locations to store a configuration file `yt.toml`, namely `$XDG_CONFIG/yt/yt.toml` (this is the global configuration) and `./yt.toml` (local) Now, assuming a data exploration project organised into subfolders, for instance ```bash . ├── scripts │   ├── exp1 │   │   ├── t1.py │   │   ├── t2.py │   │   └── t3.py │   └── exp2 │   ├── t1.py │   ├── t2.py │   └── t3.py └── yt.toml ``` The results of any script will differ depending on wether it's launched from the top level of the project (where `yt.toml` lives) or from within their respective containing directories. To solve this, we could implement an upward lookup routine to check for `yt.toml` files in all parents directories until it is found (or we reach root `/`). There is a precedent to the proposed behaviour: many tools already implement this mechanism, for instance - flake8 https://github.com/PyCQA/flake8/blob/ca573a7ccf2d4a1c7df0b577bb6d3455c941e828/src/flake8/options/config.py#L17 - black https://github.com/psf/black/blob/1af29fbfa507daa8166e7aac659e9b2ff2b47a3c/src/black/files.py#L84 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `yt/utilities/configure.py` Content: ``` 1 import os 2 import sys 3 import warnings 4 from pathlib import Path 5 from typing import Callable, List 6 7 import tomli_w 8 from more_itertools import always_iterable 9 10 from yt.utilities.configuration_tree import ConfigLeaf, ConfigNode 11 12 if sys.version_info >= (3, 11): 13 import tomllib 14 else: 15 import tomli as tomllib 16 17 configuration_callbacks: List[Callable[["YTConfig"], None]] = [] 18 19 20 def config_dir(): 21 config_root = os.environ.get( 22 "XDG_CONFIG_HOME", os.path.join(os.path.expanduser("~"), ".config") 23 ) 24 conf_dir = os.path.join(config_root, "yt") 25 return conf_dir 26 27 28 class YTConfig: 29 def __init__(self, defaults=None): 30 if defaults is None: 31 defaults = {} 32 self.config_root = ConfigNode(None) 33 34 def get(self, section, *keys, callback=None): 35 node_or_leaf = self.config_root.get(section, *keys) 36 if isinstance(node_or_leaf, ConfigLeaf): 37 if callback is not None: 38 return callback(node_or_leaf) 39 return node_or_leaf.value 40 return node_or_leaf 41 42 def get_most_specific(self, section, *keys, **kwargs): 43 use_fallback = "fallback" in kwargs 44 fallback = kwargs.pop("fallback", None) 45 try: 46 return self.config_root.get_deepest_leaf(section, *keys) 47 except KeyError as err: 48 if use_fallback: 49 return fallback 50 else: 51 raise err 52 53 def update(self, new_values, metadata=None): 54 if metadata is None: 55 metadata = {} 56 self.config_root.update(new_values, metadata) 57 58 def has_section(self, section): 59 try: 60 self.config_root.get_child(section) 61 return True 62 except KeyError: 63 return False 64 65 def add_section(self, section): 66 self.config_root.add_child(section) 67 68 def remove_section(self, section): 69 if self.has_section(section): 70 self.config_root.remove_child(section) 71 return True 72 else: 73 return False 74 75 def set(self, *args, metadata=None): 76 section, *keys, value = args 77 if metadata is None: 78 metadata = {"source": "runtime"} 79 self.config_root.upsert_from_list( 80 [section] + list(keys), value, extra_data=metadata 81 ) 82 83 def remove(self, *args): 84 self.config_root.pop_leaf(args) 85 86 def read(self, file_names): 87 file_names_read = [] 88 for fname in always_iterable(file_names): 89 if not os.path.exists(fname): 90 continue 91 metadata = {"source": f"file: {fname}"} 92 try: 93 with open(fname, "rb") as fh: 94 data = tomllib.load(fh) 95 except tomllib.TOMLDecodeError as exc: 96 warnings.warn( 97 f"Could not load configuration file {fname} (invalid TOML: {exc})" 98 ) 99 else: 100 self.update(data, metadata=metadata) 101 file_names_read.append(fname) 102 103 return file_names_read 104 105 def write(self, file_handler): 106 value = self.config_root.as_dict() 107 config_as_str = tomli_w.dumps(value) 108 109 try: 110 file_path = Path(file_handler) 111 except TypeError: 112 if not hasattr(file_handler, "write"): 113 raise TypeError( 114 f"Expected a path to a file, or a writable object, got {file_handler}" 115 ) from None 116 file_handler.write(config_as_str) 117 else: 118 pdir = file_path.parent 119 if not pdir.exists(): 120 warnings.warn(f"{pdir!s} does not exist, creating it (recursively)") 121 os.makedirs(pdir) 122 file_path.write_text(config_as_str) 123 124 @staticmethod 125 def get_global_config_file(): 126 return os.path.join(config_dir(), "yt.toml") 127 128 @staticmethod 129 def get_local_config_file(): 130 return os.path.join(os.path.abspath(os.curdir), "yt.toml") 131 132 def __setitem__(self, args, value): 133 section, *keys = always_iterable(args) 134 self.set(section, *keys, value, metadata=None) 135 136 def __getitem__(self, key): 137 section, *keys = always_iterable(key) 138 return self.get(section, *keys) 139 140 def __contains__(self, item): 141 return item in self.config_root 142 143 # Add support for IPython rich display 144 # see https://ipython.readthedocs.io/en/stable/config/integrating.html 145 def _repr_json_(self): 146 return self.config_root._repr_json_() 147 148 149 CONFIG = YTConfig() 150 151 152 def _cast_bool_helper(value): 153 if value == "True": 154 return True 155 elif value == "False": 156 return False 157 else: 158 raise ValueError("Cannot safely cast to bool") 159 160 161 def _expand_all(s): 162 return os.path.expandvars(os.path.expanduser(s)) 163 164 165 def _cast_value_helper(value, types=(_cast_bool_helper, int, float, _expand_all)): 166 for t in types: 167 try: 168 retval = t(value) 169 return retval 170 except ValueError: 171 pass 172 173 174 def get_config(section, option): 175 *option_path, option_name = option.split(".") 176 return CONFIG.get(section, *option_path, option_name) 177 178 179 def set_config(section, option, value, config_file): 180 if not CONFIG.has_section(section): 181 CONFIG.add_section(section) 182 183 option_path = option.split(".") 184 CONFIG.set(section, *option_path, _cast_value_helper(value)) 185 write_config(config_file) 186 187 188 def write_config(config_file): 189 CONFIG.write(config_file) 190 191 192 def rm_config(section, option, config_file): 193 option_path = option.split(".") 194 CONFIG.remove(section, *option_path) 195 write_config(config_file) 196 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/yt/utilities/configure.py b/yt/utilities/configure.py --- a/yt/utilities/configure.py +++ b/yt/utilities/configure.py @@ -127,6 +127,14 @@ @staticmethod def get_local_config_file(): + path = Path.cwd() + while path.parent is not path: + candidate = path.joinpath("yt.toml") + if candidate.is_file(): + return os.path.abspath(candidate) + else: + path = path.parent + return os.path.join(os.path.abspath(os.curdir), "yt.toml") def __setitem__(self, args, value):
{"golden_diff": "diff --git a/yt/utilities/configure.py b/yt/utilities/configure.py\n--- a/yt/utilities/configure.py\n+++ b/yt/utilities/configure.py\n@@ -127,6 +127,14 @@\n \n @staticmethod\n def get_local_config_file():\n+ path = Path.cwd()\n+ while path.parent is not path:\n+ candidate = path.joinpath(\"yt.toml\")\n+ if candidate.is_file():\n+ return os.path.abspath(candidate)\n+ else:\n+ path = path.parent\n+\n return os.path.join(os.path.abspath(os.curdir), \"yt.toml\")\n \n def __setitem__(self, args, value):\n", "issue": "ENH: \"local\" config file upward lookup\n### Bug report\r\n\r\n**Bug summary**\r\n\r\nSince yt 4.0 we support two locations to store a configuration file `yt.toml`, namely `$XDG_CONFIG/yt/yt.toml` (this is the global configuration) and `./yt.toml` (local)\r\n\r\nNow, assuming a data exploration project organised into subfolders, for instance\r\n\r\n```bash\r\n.\r\n\u251c\u2500\u2500 scripts\r\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 exp1\r\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 t1.py\r\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 t2.py\r\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 t3.py\r\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 exp2\r\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 t1.py\r\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 t2.py\r\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 t3.py\r\n\u2514\u2500\u2500 yt.toml\r\n```\r\n\r\nThe results of any script will differ depending on wether it's launched from the top level of the project (where `yt.toml` lives) or from within their respective containing directories.\r\n\r\nTo solve this, we could implement an upward lookup routine to check for `yt.toml` files in all parents directories until it is found (or we reach root `/`).\r\n\r\nThere is a precedent to the proposed behaviour: many tools already implement this mechanism, for instance\r\n - flake8 \r\n https://github.com/PyCQA/flake8/blob/ca573a7ccf2d4a1c7df0b577bb6d3455c941e828/src/flake8/options/config.py#L17\r\n- black https://github.com/psf/black/blob/1af29fbfa507daa8166e7aac659e9b2ff2b47a3c/src/black/files.py#L84\n", "before_files": [{"content": "import os\nimport sys\nimport warnings\nfrom pathlib import Path\nfrom typing import Callable, List\n\nimport tomli_w\nfrom more_itertools import always_iterable\n\nfrom yt.utilities.configuration_tree import ConfigLeaf, ConfigNode\n\nif sys.version_info >= (3, 11):\n import tomllib\nelse:\n import tomli as tomllib\n\nconfiguration_callbacks: List[Callable[[\"YTConfig\"], None]] = []\n\n\ndef config_dir():\n config_root = os.environ.get(\n \"XDG_CONFIG_HOME\", os.path.join(os.path.expanduser(\"~\"), \".config\")\n )\n conf_dir = os.path.join(config_root, \"yt\")\n return conf_dir\n\n\nclass YTConfig:\n def __init__(self, defaults=None):\n if defaults is None:\n defaults = {}\n self.config_root = ConfigNode(None)\n\n def get(self, section, *keys, callback=None):\n node_or_leaf = self.config_root.get(section, *keys)\n if isinstance(node_or_leaf, ConfigLeaf):\n if callback is not None:\n return callback(node_or_leaf)\n return node_or_leaf.value\n return node_or_leaf\n\n def get_most_specific(self, section, *keys, **kwargs):\n use_fallback = \"fallback\" in kwargs\n fallback = kwargs.pop(\"fallback\", None)\n try:\n return self.config_root.get_deepest_leaf(section, *keys)\n except KeyError as err:\n if use_fallback:\n return fallback\n else:\n raise err\n\n def update(self, new_values, metadata=None):\n if metadata is None:\n metadata = {}\n self.config_root.update(new_values, metadata)\n\n def has_section(self, section):\n try:\n self.config_root.get_child(section)\n return True\n except KeyError:\n return False\n\n def add_section(self, section):\n self.config_root.add_child(section)\n\n def remove_section(self, section):\n if self.has_section(section):\n self.config_root.remove_child(section)\n return True\n else:\n return False\n\n def set(self, *args, metadata=None):\n section, *keys, value = args\n if metadata is None:\n metadata = {\"source\": \"runtime\"}\n self.config_root.upsert_from_list(\n [section] + list(keys), value, extra_data=metadata\n )\n\n def remove(self, *args):\n self.config_root.pop_leaf(args)\n\n def read(self, file_names):\n file_names_read = []\n for fname in always_iterable(file_names):\n if not os.path.exists(fname):\n continue\n metadata = {\"source\": f\"file: {fname}\"}\n try:\n with open(fname, \"rb\") as fh:\n data = tomllib.load(fh)\n except tomllib.TOMLDecodeError as exc:\n warnings.warn(\n f\"Could not load configuration file {fname} (invalid TOML: {exc})\"\n )\n else:\n self.update(data, metadata=metadata)\n file_names_read.append(fname)\n\n return file_names_read\n\n def write(self, file_handler):\n value = self.config_root.as_dict()\n config_as_str = tomli_w.dumps(value)\n\n try:\n file_path = Path(file_handler)\n except TypeError:\n if not hasattr(file_handler, \"write\"):\n raise TypeError(\n f\"Expected a path to a file, or a writable object, got {file_handler}\"\n ) from None\n file_handler.write(config_as_str)\n else:\n pdir = file_path.parent\n if not pdir.exists():\n warnings.warn(f\"{pdir!s} does not exist, creating it (recursively)\")\n os.makedirs(pdir)\n file_path.write_text(config_as_str)\n\n @staticmethod\n def get_global_config_file():\n return os.path.join(config_dir(), \"yt.toml\")\n\n @staticmethod\n def get_local_config_file():\n return os.path.join(os.path.abspath(os.curdir), \"yt.toml\")\n\n def __setitem__(self, args, value):\n section, *keys = always_iterable(args)\n self.set(section, *keys, value, metadata=None)\n\n def __getitem__(self, key):\n section, *keys = always_iterable(key)\n return self.get(section, *keys)\n\n def __contains__(self, item):\n return item in self.config_root\n\n # Add support for IPython rich display\n # see https://ipython.readthedocs.io/en/stable/config/integrating.html\n def _repr_json_(self):\n return self.config_root._repr_json_()\n\n\nCONFIG = YTConfig()\n\n\ndef _cast_bool_helper(value):\n if value == \"True\":\n return True\n elif value == \"False\":\n return False\n else:\n raise ValueError(\"Cannot safely cast to bool\")\n\n\ndef _expand_all(s):\n return os.path.expandvars(os.path.expanduser(s))\n\n\ndef _cast_value_helper(value, types=(_cast_bool_helper, int, float, _expand_all)):\n for t in types:\n try:\n retval = t(value)\n return retval\n except ValueError:\n pass\n\n\ndef get_config(section, option):\n *option_path, option_name = option.split(\".\")\n return CONFIG.get(section, *option_path, option_name)\n\n\ndef set_config(section, option, value, config_file):\n if not CONFIG.has_section(section):\n CONFIG.add_section(section)\n\n option_path = option.split(\".\")\n CONFIG.set(section, *option_path, _cast_value_helper(value))\n write_config(config_file)\n\n\ndef write_config(config_file):\n CONFIG.write(config_file)\n\n\ndef rm_config(section, option, config_file):\n option_path = option.split(\".\")\n CONFIG.remove(section, *option_path)\n write_config(config_file)\n", "path": "yt/utilities/configure.py"}], "after_files": [{"content": "import os\nimport sys\nimport warnings\nfrom pathlib import Path\nfrom typing import Callable, List\n\nimport tomli_w\nfrom more_itertools import always_iterable\n\nfrom yt.utilities.configuration_tree import ConfigLeaf, ConfigNode\n\nif sys.version_info >= (3, 11):\n import tomllib\nelse:\n import tomli as tomllib\n\nconfiguration_callbacks: List[Callable[[\"YTConfig\"], None]] = []\n\n\ndef config_dir():\n config_root = os.environ.get(\n \"XDG_CONFIG_HOME\", os.path.join(os.path.expanduser(\"~\"), \".config\")\n )\n conf_dir = os.path.join(config_root, \"yt\")\n return conf_dir\n\n\nclass YTConfig:\n def __init__(self, defaults=None):\n if defaults is None:\n defaults = {}\n self.config_root = ConfigNode(None)\n\n def get(self, section, *keys, callback=None):\n node_or_leaf = self.config_root.get(section, *keys)\n if isinstance(node_or_leaf, ConfigLeaf):\n if callback is not None:\n return callback(node_or_leaf)\n return node_or_leaf.value\n return node_or_leaf\n\n def get_most_specific(self, section, *keys, **kwargs):\n use_fallback = \"fallback\" in kwargs\n fallback = kwargs.pop(\"fallback\", None)\n try:\n return self.config_root.get_deepest_leaf(section, *keys)\n except KeyError as err:\n if use_fallback:\n return fallback\n else:\n raise err\n\n def update(self, new_values, metadata=None):\n if metadata is None:\n metadata = {}\n self.config_root.update(new_values, metadata)\n\n def has_section(self, section):\n try:\n self.config_root.get_child(section)\n return True\n except KeyError:\n return False\n\n def add_section(self, section):\n self.config_root.add_child(section)\n\n def remove_section(self, section):\n if self.has_section(section):\n self.config_root.remove_child(section)\n return True\n else:\n return False\n\n def set(self, *args, metadata=None):\n section, *keys, value = args\n if metadata is None:\n metadata = {\"source\": \"runtime\"}\n self.config_root.upsert_from_list(\n [section] + list(keys), value, extra_data=metadata\n )\n\n def remove(self, *args):\n self.config_root.pop_leaf(args)\n\n def read(self, file_names):\n file_names_read = []\n for fname in always_iterable(file_names):\n if not os.path.exists(fname):\n continue\n metadata = {\"source\": f\"file: {fname}\"}\n try:\n with open(fname, \"rb\") as fh:\n data = tomllib.load(fh)\n except tomllib.TOMLDecodeError as exc:\n warnings.warn(\n f\"Could not load configuration file {fname} (invalid TOML: {exc})\"\n )\n else:\n self.update(data, metadata=metadata)\n file_names_read.append(fname)\n\n return file_names_read\n\n def write(self, file_handler):\n value = self.config_root.as_dict()\n config_as_str = tomli_w.dumps(value)\n\n try:\n file_path = Path(file_handler)\n except TypeError:\n if not hasattr(file_handler, \"write\"):\n raise TypeError(\n f\"Expected a path to a file, or a writable object, got {file_handler}\"\n ) from None\n file_handler.write(config_as_str)\n else:\n pdir = file_path.parent\n if not pdir.exists():\n warnings.warn(f\"{pdir!s} does not exist, creating it (recursively)\")\n os.makedirs(pdir)\n file_path.write_text(config_as_str)\n\n @staticmethod\n def get_global_config_file():\n return os.path.join(config_dir(), \"yt.toml\")\n\n @staticmethod\n def get_local_config_file():\n path = Path.cwd()\n while path.parent is not path:\n candidate = path.joinpath(\"yt.toml\")\n if candidate.is_file():\n return os.path.abspath(candidate)\n else:\n path = path.parent\n\n return os.path.join(os.path.abspath(os.curdir), \"yt.toml\")\n\n def __setitem__(self, args, value):\n section, *keys = always_iterable(args)\n self.set(section, *keys, value, metadata=None)\n\n def __getitem__(self, key):\n section, *keys = always_iterable(key)\n return self.get(section, *keys)\n\n def __contains__(self, item):\n return item in self.config_root\n\n # Add support for IPython rich display\n # see https://ipython.readthedocs.io/en/stable/config/integrating.html\n def _repr_json_(self):\n return self.config_root._repr_json_()\n\n\nCONFIG = YTConfig()\n\n\ndef _cast_bool_helper(value):\n if value == \"True\":\n return True\n elif value == \"False\":\n return False\n else:\n raise ValueError(\"Cannot safely cast to bool\")\n\n\ndef _expand_all(s):\n return os.path.expandvars(os.path.expanduser(s))\n\n\ndef _cast_value_helper(value, types=(_cast_bool_helper, int, float, _expand_all)):\n for t in types:\n try:\n retval = t(value)\n return retval\n except ValueError:\n pass\n\n\ndef get_config(section, option):\n *option_path, option_name = option.split(\".\")\n return CONFIG.get(section, *option_path, option_name)\n\n\ndef set_config(section, option, value, config_file):\n if not CONFIG.has_section(section):\n CONFIG.add_section(section)\n\n option_path = option.split(\".\")\n CONFIG.set(section, *option_path, _cast_value_helper(value))\n write_config(config_file)\n\n\ndef write_config(config_file):\n CONFIG.write(config_file)\n\n\ndef rm_config(section, option, config_file):\n option_path = option.split(\".\")\n CONFIG.remove(section, *option_path)\n write_config(config_file)\n", "path": "yt/utilities/configure.py"}]}
2,370
152
gh_patches_debug_14820
rasdani/github-patches
git_diff
crytic__slither-786
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- AttributeError: 'StructureTopLevel' object has no attribute 'contract' On 0x0cf55d57d241161e0ec68e72cbb175dbfe84173a Here there should be a different case for top-level elements and non-top-level: https://github.com/crytic/slither/blob/c0c581b3ba830b6ce8dc3f4be82592a7a42e9752/slither/core/solidity_types/user_defined_type.py#L65-L66 AttributeError: 'StructureTopLevel' object has no attribute 'contract' On 0x0cf55d57d241161e0ec68e72cbb175dbfe84173a Here there should be a different case for top-level elements and non-top-level: https://github.com/crytic/slither/blob/c0c581b3ba830b6ce8dc3f4be82592a7a42e9752/slither/core/solidity_types/user_defined_type.py#L65-L66 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `slither/core/solidity_types/user_defined_type.py` Content: ``` 1 from typing import Union, TYPE_CHECKING, Tuple 2 import math 3 4 from slither.core.solidity_types.type import Type 5 from slither.exceptions import SlitherException 6 7 if TYPE_CHECKING: 8 from slither.core.declarations.structure import Structure 9 from slither.core.declarations.enum import Enum 10 from slither.core.declarations.contract import Contract 11 12 # pylint: disable=import-outside-toplevel 13 class UserDefinedType(Type): 14 def __init__(self, t): 15 from slither.core.declarations.structure import Structure 16 from slither.core.declarations.enum import Enum 17 from slither.core.declarations.contract import Contract 18 19 assert isinstance(t, (Contract, Enum, Structure)) 20 super().__init__() 21 self._type = t 22 23 @property 24 def type(self) -> Union["Contract", "Enum", "Structure"]: 25 return self._type 26 27 @property 28 def storage_size(self) -> Tuple[int, bool]: 29 from slither.core.declarations.structure import Structure 30 from slither.core.declarations.enum import Enum 31 from slither.core.declarations.contract import Contract 32 33 if isinstance(self._type, Contract): 34 return 20, False 35 if isinstance(self._type, Enum): 36 return int(math.ceil(math.log2(len(self._type.values)) / 8)), False 37 if isinstance(self._type, Structure): 38 # todo there's some duplicate logic here and slither_core, can we refactor this? 39 slot = 0 40 offset = 0 41 for elem in self._type.elems_ordered: 42 size, new_slot = elem.type.storage_size 43 if new_slot: 44 if offset > 0: 45 slot += 1 46 offset = 0 47 elif size + offset > 32: 48 slot += 1 49 offset = 0 50 51 if new_slot: 52 slot += math.ceil(size / 32) 53 else: 54 offset += size 55 if offset > 0: 56 slot += 1 57 return slot * 32, True 58 to_log = f"{self} does not have storage size" 59 raise SlitherException(to_log) 60 61 def __str__(self): 62 from slither.core.declarations.structure import Structure 63 from slither.core.declarations.enum import Enum 64 65 if isinstance(self.type, (Enum, Structure)): 66 return str(self.type.contract) + "." + str(self.type.name) 67 return str(self.type.name) 68 69 def __eq__(self, other): 70 if not isinstance(other, UserDefinedType): 71 return False 72 return self.type == other.type 73 74 def __hash__(self): 75 return hash(str(self)) 76 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/slither/core/solidity_types/user_defined_type.py b/slither/core/solidity_types/user_defined_type.py --- a/slither/core/solidity_types/user_defined_type.py +++ b/slither/core/solidity_types/user_defined_type.py @@ -59,12 +59,13 @@ raise SlitherException(to_log) def __str__(self): - from slither.core.declarations.structure import Structure - from slither.core.declarations.enum import Enum + from slither.core.declarations.structure_contract import StructureContract + from slither.core.declarations.enum_contract import EnumContract - if isinstance(self.type, (Enum, Structure)): - return str(self.type.contract) + "." + str(self.type.name) - return str(self.type.name) + type_used = self.type + if isinstance(type_used, (EnumContract, StructureContract)): + return str(type_used.contract) + "." + str(type_used.name) + return str(type_used.name) def __eq__(self, other): if not isinstance(other, UserDefinedType):
{"golden_diff": "diff --git a/slither/core/solidity_types/user_defined_type.py b/slither/core/solidity_types/user_defined_type.py\n--- a/slither/core/solidity_types/user_defined_type.py\n+++ b/slither/core/solidity_types/user_defined_type.py\n@@ -59,12 +59,13 @@\n raise SlitherException(to_log)\n \n def __str__(self):\n- from slither.core.declarations.structure import Structure\n- from slither.core.declarations.enum import Enum\n+ from slither.core.declarations.structure_contract import StructureContract\n+ from slither.core.declarations.enum_contract import EnumContract\n \n- if isinstance(self.type, (Enum, Structure)):\n- return str(self.type.contract) + \".\" + str(self.type.name)\n- return str(self.type.name)\n+ type_used = self.type\n+ if isinstance(type_used, (EnumContract, StructureContract)):\n+ return str(type_used.contract) + \".\" + str(type_used.name)\n+ return str(type_used.name)\n \n def __eq__(self, other):\n if not isinstance(other, UserDefinedType):\n", "issue": "AttributeError: 'StructureTopLevel' object has no attribute 'contract'\nOn 0x0cf55d57d241161e0ec68e72cbb175dbfe84173a\r\n\r\nHere there should be a different case for top-level elements and non-top-level:\r\n\r\nhttps://github.com/crytic/slither/blob/c0c581b3ba830b6ce8dc3f4be82592a7a42e9752/slither/core/solidity_types/user_defined_type.py#L65-L66\nAttributeError: 'StructureTopLevel' object has no attribute 'contract'\nOn 0x0cf55d57d241161e0ec68e72cbb175dbfe84173a\r\n\r\nHere there should be a different case for top-level elements and non-top-level:\r\n\r\nhttps://github.com/crytic/slither/blob/c0c581b3ba830b6ce8dc3f4be82592a7a42e9752/slither/core/solidity_types/user_defined_type.py#L65-L66\n", "before_files": [{"content": "from typing import Union, TYPE_CHECKING, Tuple\nimport math\n\nfrom slither.core.solidity_types.type import Type\nfrom slither.exceptions import SlitherException\n\nif TYPE_CHECKING:\n from slither.core.declarations.structure import Structure\n from slither.core.declarations.enum import Enum\n from slither.core.declarations.contract import Contract\n\n# pylint: disable=import-outside-toplevel\nclass UserDefinedType(Type):\n def __init__(self, t):\n from slither.core.declarations.structure import Structure\n from slither.core.declarations.enum import Enum\n from slither.core.declarations.contract import Contract\n\n assert isinstance(t, (Contract, Enum, Structure))\n super().__init__()\n self._type = t\n\n @property\n def type(self) -> Union[\"Contract\", \"Enum\", \"Structure\"]:\n return self._type\n\n @property\n def storage_size(self) -> Tuple[int, bool]:\n from slither.core.declarations.structure import Structure\n from slither.core.declarations.enum import Enum\n from slither.core.declarations.contract import Contract\n\n if isinstance(self._type, Contract):\n return 20, False\n if isinstance(self._type, Enum):\n return int(math.ceil(math.log2(len(self._type.values)) / 8)), False\n if isinstance(self._type, Structure):\n # todo there's some duplicate logic here and slither_core, can we refactor this?\n slot = 0\n offset = 0\n for elem in self._type.elems_ordered:\n size, new_slot = elem.type.storage_size\n if new_slot:\n if offset > 0:\n slot += 1\n offset = 0\n elif size + offset > 32:\n slot += 1\n offset = 0\n\n if new_slot:\n slot += math.ceil(size / 32)\n else:\n offset += size\n if offset > 0:\n slot += 1\n return slot * 32, True\n to_log = f\"{self} does not have storage size\"\n raise SlitherException(to_log)\n\n def __str__(self):\n from slither.core.declarations.structure import Structure\n from slither.core.declarations.enum import Enum\n\n if isinstance(self.type, (Enum, Structure)):\n return str(self.type.contract) + \".\" + str(self.type.name)\n return str(self.type.name)\n\n def __eq__(self, other):\n if not isinstance(other, UserDefinedType):\n return False\n return self.type == other.type\n\n def __hash__(self):\n return hash(str(self))\n", "path": "slither/core/solidity_types/user_defined_type.py"}], "after_files": [{"content": "from typing import Union, TYPE_CHECKING, Tuple\nimport math\n\nfrom slither.core.solidity_types.type import Type\nfrom slither.exceptions import SlitherException\n\nif TYPE_CHECKING:\n from slither.core.declarations.structure import Structure\n from slither.core.declarations.enum import Enum\n from slither.core.declarations.contract import Contract\n\n# pylint: disable=import-outside-toplevel\nclass UserDefinedType(Type):\n def __init__(self, t):\n from slither.core.declarations.structure import Structure\n from slither.core.declarations.enum import Enum\n from slither.core.declarations.contract import Contract\n\n assert isinstance(t, (Contract, Enum, Structure))\n super().__init__()\n self._type = t\n\n @property\n def type(self) -> Union[\"Contract\", \"Enum\", \"Structure\"]:\n return self._type\n\n @property\n def storage_size(self) -> Tuple[int, bool]:\n from slither.core.declarations.structure import Structure\n from slither.core.declarations.enum import Enum\n from slither.core.declarations.contract import Contract\n\n if isinstance(self._type, Contract):\n return 20, False\n if isinstance(self._type, Enum):\n return int(math.ceil(math.log2(len(self._type.values)) / 8)), False\n if isinstance(self._type, Structure):\n # todo there's some duplicate logic here and slither_core, can we refactor this?\n slot = 0\n offset = 0\n for elem in self._type.elems_ordered:\n size, new_slot = elem.type.storage_size\n if new_slot:\n if offset > 0:\n slot += 1\n offset = 0\n elif size + offset > 32:\n slot += 1\n offset = 0\n\n if new_slot:\n slot += math.ceil(size / 32)\n else:\n offset += size\n if offset > 0:\n slot += 1\n return slot * 32, True\n to_log = f\"{self} does not have storage size\"\n raise SlitherException(to_log)\n\n def __str__(self):\n from slither.core.declarations.structure_contract import StructureContract\n from slither.core.declarations.enum_contract import EnumContract\n\n type_used = self.type\n if isinstance(type_used, (EnumContract, StructureContract)):\n return str(type_used.contract) + \".\" + str(type_used.name)\n return str(type_used.name)\n\n def __eq__(self, other):\n if not isinstance(other, UserDefinedType):\n return False\n return self.type == other.type\n\n def __hash__(self):\n return hash(str(self))\n", "path": "slither/core/solidity_types/user_defined_type.py"}]}
1,252
240
gh_patches_debug_9673
rasdani/github-patches
git_diff
scipy__scipy-16111
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- BUG: scipy.sparse.linalg.norm does not work on sparse arrays ### Describe your issue. Greetings. As from the document of sparse matrices : https://docs.scipy.org/doc/scipy/reference/sparse.html, both "Sparse array classes" and "Sparse matrix classes" belong to sparse matrices. Also, from the document of sparse.linalg.norm, the input array arr is only mentioned as sparse matrix, so I expect that norm function should work on both sparse array classes and sparse matrix classes. However, norm does not work on sparse array classes, and the error message is shown below. I think the main reason of this is that: Since the intermediate output M in: https://github.com/scipy/scipy/blob/main/scipy/sparse/linalg/_norm.py#L180 returns numpy.ndarray rather than numpy.matrix, and numpy.ndarray does not have attribute A. Since sparse array classes will pass issparse(), maybe we should add another assert for this if norm only supports sparse matrix classes. Thank you. ### Reproducing Code Example ```python import numpy as np from scipy.sparse import coo_array from scipy.sparse import linalg row = np.array([0, 0, 1, 1]) col = np.array([0, 1, 2, 3]) data = np.array([4, 5, 7, 9]) test_arr = coo_array((data, (row, col)), shape=(2, 4)) linalg.norm(test_arr, ord=1, axis=0) ``` ### Error message ```shell --------------------------------------------------------------------------- AttributeError Traceback (most recent call last) Input In [73], in <cell line: 8>() 6 data = np.array([4, 5, 7, 9]) 7 test_arr = coo_array((data, (row, col)), shape=(2, 4)) ----> 8 linalg.norm(test_arr, ord=1, axis=0) File ~/.local/lib/python3.9/site-packages/scipy/sparse/linalg/_norm.py:180, in norm(x, ord, axis) 178 raise ValueError('Invalid norm order for vectors.') from e 179 M = np.power(abs(x).power(ord).sum(axis=a), 1 / ord) --> 180 return M.A.ravel() 181 else: 182 raise ValueError("Improper number of dimensions to norm.") AttributeError: 'numpy.ndarray' object has no attribute 'A' ``` ### SciPy/NumPy/Python version information 1.8.0 1.22.3 sys.version_info(major=3, minor=9, micro=10, releaselevel='final', serial=0) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scipy/sparse/linalg/_norm.py` Content: ``` 1 """Sparse matrix norms. 2 3 """ 4 import numpy as np 5 from scipy.sparse import issparse 6 7 from numpy import Inf, sqrt, abs 8 9 __all__ = ['norm'] 10 11 12 def _sparse_frobenius_norm(x): 13 if np.issubdtype(x.dtype, np.complexfloating): 14 sqnorm = abs(x).power(2).sum() 15 else: 16 sqnorm = x.power(2).sum() 17 return sqrt(sqnorm) 18 19 20 def norm(x, ord=None, axis=None): 21 """ 22 Norm of a sparse matrix 23 24 This function is able to return one of seven different matrix norms, 25 depending on the value of the ``ord`` parameter. 26 27 Parameters 28 ---------- 29 x : a sparse matrix 30 Input sparse matrix. 31 ord : {non-zero int, inf, -inf, 'fro'}, optional 32 Order of the norm (see table under ``Notes``). inf means numpy's 33 `inf` object. 34 axis : {int, 2-tuple of ints, None}, optional 35 If `axis` is an integer, it specifies the axis of `x` along which to 36 compute the vector norms. If `axis` is a 2-tuple, it specifies the 37 axes that hold 2-D matrices, and the matrix norms of these matrices 38 are computed. If `axis` is None then either a vector norm (when `x` 39 is 1-D) or a matrix norm (when `x` is 2-D) is returned. 40 41 Returns 42 ------- 43 n : float or ndarray 44 45 Notes 46 ----- 47 Some of the ord are not implemented because some associated functions like, 48 _multi_svd_norm, are not yet available for sparse matrix. 49 50 This docstring is modified based on numpy.linalg.norm. 51 https://github.com/numpy/numpy/blob/main/numpy/linalg/linalg.py 52 53 The following norms can be calculated: 54 55 ===== ============================ 56 ord norm for sparse matrices 57 ===== ============================ 58 None Frobenius norm 59 'fro' Frobenius norm 60 inf max(sum(abs(x), axis=1)) 61 -inf min(sum(abs(x), axis=1)) 62 0 abs(x).sum(axis=axis) 63 1 max(sum(abs(x), axis=0)) 64 -1 min(sum(abs(x), axis=0)) 65 2 Not implemented 66 -2 Not implemented 67 other Not implemented 68 ===== ============================ 69 70 The Frobenius norm is given by [1]_: 71 72 :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}` 73 74 References 75 ---------- 76 .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*, 77 Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15 78 79 Examples 80 -------- 81 >>> from scipy.sparse import * 82 >>> import numpy as np 83 >>> from scipy.sparse.linalg import norm 84 >>> a = np.arange(9) - 4 85 >>> a 86 array([-4, -3, -2, -1, 0, 1, 2, 3, 4]) 87 >>> b = a.reshape((3, 3)) 88 >>> b 89 array([[-4, -3, -2], 90 [-1, 0, 1], 91 [ 2, 3, 4]]) 92 93 >>> b = csr_matrix(b) 94 >>> norm(b) 95 7.745966692414834 96 >>> norm(b, 'fro') 97 7.745966692414834 98 >>> norm(b, np.inf) 99 9 100 >>> norm(b, -np.inf) 101 2 102 >>> norm(b, 1) 103 7 104 >>> norm(b, -1) 105 6 106 107 """ 108 if not issparse(x): 109 raise TypeError("input is not sparse. use numpy.linalg.norm") 110 111 # Check the default case first and handle it immediately. 112 if axis is None and ord in (None, 'fro', 'f'): 113 return _sparse_frobenius_norm(x) 114 115 # Some norms require functions that are not implemented for all types. 116 x = x.tocsr() 117 118 if axis is None: 119 axis = (0, 1) 120 elif not isinstance(axis, tuple): 121 msg = "'axis' must be None, an integer or a tuple of integers" 122 try: 123 int_axis = int(axis) 124 except TypeError as e: 125 raise TypeError(msg) from e 126 if axis != int_axis: 127 raise TypeError(msg) 128 axis = (int_axis,) 129 130 nd = 2 131 if len(axis) == 2: 132 row_axis, col_axis = axis 133 if not (-nd <= row_axis < nd and -nd <= col_axis < nd): 134 raise ValueError('Invalid axis %r for an array with shape %r' % 135 (axis, x.shape)) 136 if row_axis % nd == col_axis % nd: 137 raise ValueError('Duplicate axes given.') 138 if ord == 2: 139 raise NotImplementedError 140 #return _multi_svd_norm(x, row_axis, col_axis, amax) 141 elif ord == -2: 142 raise NotImplementedError 143 #return _multi_svd_norm(x, row_axis, col_axis, amin) 144 elif ord == 1: 145 return abs(x).sum(axis=row_axis).max(axis=col_axis)[0,0] 146 elif ord == Inf: 147 return abs(x).sum(axis=col_axis).max(axis=row_axis)[0,0] 148 elif ord == -1: 149 return abs(x).sum(axis=row_axis).min(axis=col_axis)[0,0] 150 elif ord == -Inf: 151 return abs(x).sum(axis=col_axis).min(axis=row_axis)[0,0] 152 elif ord in (None, 'f', 'fro'): 153 # The axis order does not matter for this norm. 154 return _sparse_frobenius_norm(x) 155 else: 156 raise ValueError("Invalid norm order for matrices.") 157 elif len(axis) == 1: 158 a, = axis 159 if not (-nd <= a < nd): 160 raise ValueError('Invalid axis %r for an array with shape %r' % 161 (axis, x.shape)) 162 if ord == Inf: 163 M = abs(x).max(axis=a) 164 elif ord == -Inf: 165 M = abs(x).min(axis=a) 166 elif ord == 0: 167 # Zero norm 168 M = (x != 0).sum(axis=a) 169 elif ord == 1: 170 # special case for speedup 171 M = abs(x).sum(axis=a) 172 elif ord in (2, None): 173 M = sqrt(abs(x).power(2).sum(axis=a)) 174 else: 175 try: 176 ord + 1 177 except TypeError as e: 178 raise ValueError('Invalid norm order for vectors.') from e 179 M = np.power(abs(x).power(ord).sum(axis=a), 1 / ord) 180 return M.A.ravel() 181 else: 182 raise ValueError("Improper number of dimensions to norm.") 183 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scipy/sparse/linalg/_norm.py b/scipy/sparse/linalg/_norm.py --- a/scipy/sparse/linalg/_norm.py +++ b/scipy/sparse/linalg/_norm.py @@ -177,6 +177,11 @@ except TypeError as e: raise ValueError('Invalid norm order for vectors.') from e M = np.power(abs(x).power(ord).sum(axis=a), 1 / ord) - return M.A.ravel() + if hasattr(M, 'toarray'): + return M.toarray().ravel() + elif hasattr(M, 'A'): + return M.A.ravel() + else: + return M.ravel() else: raise ValueError("Improper number of dimensions to norm.")
{"golden_diff": "diff --git a/scipy/sparse/linalg/_norm.py b/scipy/sparse/linalg/_norm.py\n--- a/scipy/sparse/linalg/_norm.py\n+++ b/scipy/sparse/linalg/_norm.py\n@@ -177,6 +177,11 @@\n except TypeError as e:\n raise ValueError('Invalid norm order for vectors.') from e\n M = np.power(abs(x).power(ord).sum(axis=a), 1 / ord)\n- return M.A.ravel()\n+ if hasattr(M, 'toarray'):\n+ return M.toarray().ravel()\n+ elif hasattr(M, 'A'):\n+ return M.A.ravel()\n+ else:\n+ return M.ravel()\n else:\n raise ValueError(\"Improper number of dimensions to norm.\")\n", "issue": "BUG: scipy.sparse.linalg.norm does not work on sparse arrays\n### Describe your issue.\n\nGreetings.\r\nAs from the document of sparse matrices : https://docs.scipy.org/doc/scipy/reference/sparse.html,\r\nboth \"Sparse array classes\" and \"Sparse matrix classes\" belong to sparse matrices.\r\n\r\nAlso, from the document of sparse.linalg.norm, the input array arr is only mentioned as sparse matrix, \r\nso I expect that norm function should work on both sparse array classes and sparse matrix classes.\r\n\r\nHowever, norm does not work on sparse array classes, and the error message is shown below.\r\nI think the main reason of this is that: Since the intermediate output M in: https://github.com/scipy/scipy/blob/main/scipy/sparse/linalg/_norm.py#L180\r\nreturns numpy.ndarray rather than numpy.matrix, and numpy.ndarray does not have attribute A.\r\n\r\nSince sparse array classes will pass issparse(), maybe we should add another assert for this if norm only supports sparse matrix classes.\r\n\r\nThank you.\n\n### Reproducing Code Example\n\n```python\nimport numpy as np\r\nfrom scipy.sparse import coo_array\r\nfrom scipy.sparse import linalg\r\nrow = np.array([0, 0, 1, 1])\r\ncol = np.array([0, 1, 2, 3])\r\ndata = np.array([4, 5, 7, 9])\r\ntest_arr = coo_array((data, (row, col)), shape=(2, 4))\r\nlinalg.norm(test_arr, ord=1, axis=0)\n```\n\n\n### Error message\n\n```shell\n---------------------------------------------------------------------------\r\nAttributeError Traceback (most recent call last)\r\nInput In [73], in <cell line: 8>()\r\n 6 data = np.array([4, 5, 7, 9])\r\n 7 test_arr = coo_array((data, (row, col)), shape=(2, 4))\r\n----> 8 linalg.norm(test_arr, ord=1, axis=0)\r\n\r\nFile ~/.local/lib/python3.9/site-packages/scipy/sparse/linalg/_norm.py:180, in norm(x, ord, axis)\r\n 178 raise ValueError('Invalid norm order for vectors.') from e\r\n 179 M = np.power(abs(x).power(ord).sum(axis=a), 1 / ord)\r\n--> 180 return M.A.ravel()\r\n 181 else:\r\n 182 raise ValueError(\"Improper number of dimensions to norm.\")\r\n\r\nAttributeError: 'numpy.ndarray' object has no attribute 'A'\n```\n\n\n### SciPy/NumPy/Python version information\n\n1.8.0 1.22.3 sys.version_info(major=3, minor=9, micro=10, releaselevel='final', serial=0)\n", "before_files": [{"content": "\"\"\"Sparse matrix norms.\n\n\"\"\"\nimport numpy as np\nfrom scipy.sparse import issparse\n\nfrom numpy import Inf, sqrt, abs\n\n__all__ = ['norm']\n\n\ndef _sparse_frobenius_norm(x):\n if np.issubdtype(x.dtype, np.complexfloating):\n sqnorm = abs(x).power(2).sum()\n else:\n sqnorm = x.power(2).sum()\n return sqrt(sqnorm)\n\n\ndef norm(x, ord=None, axis=None):\n \"\"\"\n Norm of a sparse matrix\n\n This function is able to return one of seven different matrix norms,\n depending on the value of the ``ord`` parameter.\n\n Parameters\n ----------\n x : a sparse matrix\n Input sparse matrix.\n ord : {non-zero int, inf, -inf, 'fro'}, optional\n Order of the norm (see table under ``Notes``). inf means numpy's\n `inf` object.\n axis : {int, 2-tuple of ints, None}, optional\n If `axis` is an integer, it specifies the axis of `x` along which to\n compute the vector norms. If `axis` is a 2-tuple, it specifies the\n axes that hold 2-D matrices, and the matrix norms of these matrices\n are computed. If `axis` is None then either a vector norm (when `x`\n is 1-D) or a matrix norm (when `x` is 2-D) is returned.\n\n Returns\n -------\n n : float or ndarray\n\n Notes\n -----\n Some of the ord are not implemented because some associated functions like,\n _multi_svd_norm, are not yet available for sparse matrix.\n\n This docstring is modified based on numpy.linalg.norm.\n https://github.com/numpy/numpy/blob/main/numpy/linalg/linalg.py\n\n The following norms can be calculated:\n\n ===== ============================\n ord norm for sparse matrices\n ===== ============================\n None Frobenius norm\n 'fro' Frobenius norm\n inf max(sum(abs(x), axis=1))\n -inf min(sum(abs(x), axis=1))\n 0 abs(x).sum(axis=axis)\n 1 max(sum(abs(x), axis=0))\n -1 min(sum(abs(x), axis=0))\n 2 Not implemented\n -2 Not implemented\n other Not implemented\n ===== ============================\n\n The Frobenius norm is given by [1]_:\n\n :math:`||A||_F = [\\\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`\n\n References\n ----------\n .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,\n Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15\n\n Examples\n --------\n >>> from scipy.sparse import *\n >>> import numpy as np\n >>> from scipy.sparse.linalg import norm\n >>> a = np.arange(9) - 4\n >>> a\n array([-4, -3, -2, -1, 0, 1, 2, 3, 4])\n >>> b = a.reshape((3, 3))\n >>> b\n array([[-4, -3, -2],\n [-1, 0, 1],\n [ 2, 3, 4]])\n\n >>> b = csr_matrix(b)\n >>> norm(b)\n 7.745966692414834\n >>> norm(b, 'fro')\n 7.745966692414834\n >>> norm(b, np.inf)\n 9\n >>> norm(b, -np.inf)\n 2\n >>> norm(b, 1)\n 7\n >>> norm(b, -1)\n 6\n\n \"\"\"\n if not issparse(x):\n raise TypeError(\"input is not sparse. use numpy.linalg.norm\")\n\n # Check the default case first and handle it immediately.\n if axis is None and ord in (None, 'fro', 'f'):\n return _sparse_frobenius_norm(x)\n\n # Some norms require functions that are not implemented for all types.\n x = x.tocsr()\n\n if axis is None:\n axis = (0, 1)\n elif not isinstance(axis, tuple):\n msg = \"'axis' must be None, an integer or a tuple of integers\"\n try:\n int_axis = int(axis)\n except TypeError as e:\n raise TypeError(msg) from e\n if axis != int_axis:\n raise TypeError(msg)\n axis = (int_axis,)\n\n nd = 2\n if len(axis) == 2:\n row_axis, col_axis = axis\n if not (-nd <= row_axis < nd and -nd <= col_axis < nd):\n raise ValueError('Invalid axis %r for an array with shape %r' %\n (axis, x.shape))\n if row_axis % nd == col_axis % nd:\n raise ValueError('Duplicate axes given.')\n if ord == 2:\n raise NotImplementedError\n #return _multi_svd_norm(x, row_axis, col_axis, amax)\n elif ord == -2:\n raise NotImplementedError\n #return _multi_svd_norm(x, row_axis, col_axis, amin)\n elif ord == 1:\n return abs(x).sum(axis=row_axis).max(axis=col_axis)[0,0]\n elif ord == Inf:\n return abs(x).sum(axis=col_axis).max(axis=row_axis)[0,0]\n elif ord == -1:\n return abs(x).sum(axis=row_axis).min(axis=col_axis)[0,0]\n elif ord == -Inf:\n return abs(x).sum(axis=col_axis).min(axis=row_axis)[0,0]\n elif ord in (None, 'f', 'fro'):\n # The axis order does not matter for this norm.\n return _sparse_frobenius_norm(x)\n else:\n raise ValueError(\"Invalid norm order for matrices.\")\n elif len(axis) == 1:\n a, = axis\n if not (-nd <= a < nd):\n raise ValueError('Invalid axis %r for an array with shape %r' %\n (axis, x.shape))\n if ord == Inf:\n M = abs(x).max(axis=a)\n elif ord == -Inf:\n M = abs(x).min(axis=a)\n elif ord == 0:\n # Zero norm\n M = (x != 0).sum(axis=a)\n elif ord == 1:\n # special case for speedup\n M = abs(x).sum(axis=a)\n elif ord in (2, None):\n M = sqrt(abs(x).power(2).sum(axis=a))\n else:\n try:\n ord + 1\n except TypeError as e:\n raise ValueError('Invalid norm order for vectors.') from e\n M = np.power(abs(x).power(ord).sum(axis=a), 1 / ord)\n return M.A.ravel()\n else:\n raise ValueError(\"Improper number of dimensions to norm.\")\n", "path": "scipy/sparse/linalg/_norm.py"}], "after_files": [{"content": "\"\"\"Sparse matrix norms.\n\n\"\"\"\nimport numpy as np\nfrom scipy.sparse import issparse\n\nfrom numpy import Inf, sqrt, abs\n\n__all__ = ['norm']\n\n\ndef _sparse_frobenius_norm(x):\n if np.issubdtype(x.dtype, np.complexfloating):\n sqnorm = abs(x).power(2).sum()\n else:\n sqnorm = x.power(2).sum()\n return sqrt(sqnorm)\n\n\ndef norm(x, ord=None, axis=None):\n \"\"\"\n Norm of a sparse matrix\n\n This function is able to return one of seven different matrix norms,\n depending on the value of the ``ord`` parameter.\n\n Parameters\n ----------\n x : a sparse matrix\n Input sparse matrix.\n ord : {non-zero int, inf, -inf, 'fro'}, optional\n Order of the norm (see table under ``Notes``). inf means numpy's\n `inf` object.\n axis : {int, 2-tuple of ints, None}, optional\n If `axis` is an integer, it specifies the axis of `x` along which to\n compute the vector norms. If `axis` is a 2-tuple, it specifies the\n axes that hold 2-D matrices, and the matrix norms of these matrices\n are computed. If `axis` is None then either a vector norm (when `x`\n is 1-D) or a matrix norm (when `x` is 2-D) is returned.\n\n Returns\n -------\n n : float or ndarray\n\n Notes\n -----\n Some of the ord are not implemented because some associated functions like,\n _multi_svd_norm, are not yet available for sparse matrix.\n\n This docstring is modified based on numpy.linalg.norm.\n https://github.com/numpy/numpy/blob/main/numpy/linalg/linalg.py\n\n The following norms can be calculated:\n\n ===== ============================\n ord norm for sparse matrices\n ===== ============================\n None Frobenius norm\n 'fro' Frobenius norm\n inf max(sum(abs(x), axis=1))\n -inf min(sum(abs(x), axis=1))\n 0 abs(x).sum(axis=axis)\n 1 max(sum(abs(x), axis=0))\n -1 min(sum(abs(x), axis=0))\n 2 Not implemented\n -2 Not implemented\n other Not implemented\n ===== ============================\n\n The Frobenius norm is given by [1]_:\n\n :math:`||A||_F = [\\\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`\n\n References\n ----------\n .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,\n Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15\n\n Examples\n --------\n >>> from scipy.sparse import *\n >>> import numpy as np\n >>> from scipy.sparse.linalg import norm\n >>> a = np.arange(9) - 4\n >>> a\n array([-4, -3, -2, -1, 0, 1, 2, 3, 4])\n >>> b = a.reshape((3, 3))\n >>> b\n array([[-4, -3, -2],\n [-1, 0, 1],\n [ 2, 3, 4]])\n\n >>> b = csr_matrix(b)\n >>> norm(b)\n 7.745966692414834\n >>> norm(b, 'fro')\n 7.745966692414834\n >>> norm(b, np.inf)\n 9\n >>> norm(b, -np.inf)\n 2\n >>> norm(b, 1)\n 7\n >>> norm(b, -1)\n 6\n\n \"\"\"\n if not issparse(x):\n raise TypeError(\"input is not sparse. use numpy.linalg.norm\")\n\n # Check the default case first and handle it immediately.\n if axis is None and ord in (None, 'fro', 'f'):\n return _sparse_frobenius_norm(x)\n\n # Some norms require functions that are not implemented for all types.\n x = x.tocsr()\n\n if axis is None:\n axis = (0, 1)\n elif not isinstance(axis, tuple):\n msg = \"'axis' must be None, an integer or a tuple of integers\"\n try:\n int_axis = int(axis)\n except TypeError as e:\n raise TypeError(msg) from e\n if axis != int_axis:\n raise TypeError(msg)\n axis = (int_axis,)\n\n nd = 2\n if len(axis) == 2:\n row_axis, col_axis = axis\n if not (-nd <= row_axis < nd and -nd <= col_axis < nd):\n raise ValueError('Invalid axis %r for an array with shape %r' %\n (axis, x.shape))\n if row_axis % nd == col_axis % nd:\n raise ValueError('Duplicate axes given.')\n if ord == 2:\n raise NotImplementedError\n #return _multi_svd_norm(x, row_axis, col_axis, amax)\n elif ord == -2:\n raise NotImplementedError\n #return _multi_svd_norm(x, row_axis, col_axis, amin)\n elif ord == 1:\n return abs(x).sum(axis=row_axis).max(axis=col_axis)[0,0]\n elif ord == Inf:\n return abs(x).sum(axis=col_axis).max(axis=row_axis)[0,0]\n elif ord == -1:\n return abs(x).sum(axis=row_axis).min(axis=col_axis)[0,0]\n elif ord == -Inf:\n return abs(x).sum(axis=col_axis).min(axis=row_axis)[0,0]\n elif ord in (None, 'f', 'fro'):\n # The axis order does not matter for this norm.\n return _sparse_frobenius_norm(x)\n else:\n raise ValueError(\"Invalid norm order for matrices.\")\n elif len(axis) == 1:\n a, = axis\n if not (-nd <= a < nd):\n raise ValueError('Invalid axis %r for an array with shape %r' %\n (axis, x.shape))\n if ord == Inf:\n M = abs(x).max(axis=a)\n elif ord == -Inf:\n M = abs(x).min(axis=a)\n elif ord == 0:\n # Zero norm\n M = (x != 0).sum(axis=a)\n elif ord == 1:\n # special case for speedup\n M = abs(x).sum(axis=a)\n elif ord in (2, None):\n M = sqrt(abs(x).power(2).sum(axis=a))\n else:\n try:\n ord + 1\n except TypeError as e:\n raise ValueError('Invalid norm order for vectors.') from e\n M = np.power(abs(x).power(ord).sum(axis=a), 1 / ord)\n if hasattr(M, 'toarray'):\n return M.toarray().ravel()\n elif hasattr(M, 'A'):\n return M.A.ravel()\n else:\n return M.ravel()\n else:\n raise ValueError(\"Improper number of dimensions to norm.\")\n", "path": "scipy/sparse/linalg/_norm.py"}]}
2,883
170
gh_patches_debug_867
rasdani/github-patches
git_diff
ansible__ansible-modules-extras-387
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Freshly installed bower raises json error I ran into an issue where the ansible bower module when attempting to run bower install can't parse the json from `bower list --json` Here is the stacktrace ``` failed: [default] => {"failed": true, "parsed": false} BECOME-SUCCESS-bcokpjdhrlrcdlrfpmvdgmahrbmtzoqk Traceback (most recent call last): File "/home/vagrant/.ansible/tmp/ansible-tmp-1427221462.07-279423510478512/bower", line 1781, in <module> main() File "/home/vagrant/.ansible/tmp/ansible-tmp-1427221462.07-279423510478512/bower", line 168, in main installed, missing, outdated = bower.list() File "/home/vagrant/.ansible/tmp/ansible-tmp-1427221462.07-279423510478512/bower", line 116, in list data = json.loads(self._exec(cmd, True, False)) File "/usr/lib/python2.7/json/__init__.py", line 338, in loads return _default_decoder.decode(s) File "/usr/lib/python2.7/json/decoder.py", line 366, in decode obj, end = self.raw_decode(s, idx=_w(s, 0).end()) File "/usr/lib/python2.7/json/decoder.py", line 384, in raw_decode raise ValueError("No JSON object could be decoded") ``` So, when I logged into run the bower list --json command manually I saw this ``` vagrant@vagrant-ubuntu-trusty-64:~/catdoor/opus$ bower list --json [?] May bower anonymously report usage statistics to improve the tool over time? Yes ``` Which makes me wonder if a freshly installed bower will always ask that question, thus not producing json output. When i subsquently run the provision it fails the same way. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `packaging/language/bower.py` Content: ``` 1 #!/usr/bin/python 2 # -*- coding: utf-8 -*- 3 4 # (c) 2014, Michael Warkentin <[email protected]> 5 # 6 # This file is part of Ansible 7 # 8 # Ansible is free software: you can redistribute it and/or modify 9 # it under the terms of the GNU General Public License as published by 10 # the Free Software Foundation, either version 3 of the License, or 11 # (at your option) any later version. 12 # 13 # Ansible is distributed in the hope that it will be useful, 14 # but WITHOUT ANY WARRANTY; without even the implied warranty of 15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 # GNU General Public License for more details. 17 # 18 # You should have received a copy of the GNU General Public License 19 # along with Ansible. If not, see <http://www.gnu.org/licenses/>. 20 21 DOCUMENTATION = ''' 22 --- 23 module: bower 24 short_description: Manage bower packages with bower 25 description: 26 - Manage bower packages with bower 27 version_added: 1.9 28 author: Michael Warkentin 29 options: 30 name: 31 description: 32 - The name of a bower package to install 33 required: false 34 offline: 35 description: 36 - Install packages from local cache, if the packages were installed before 37 required: false 38 default: no 39 choices: [ "yes", "no" ] 40 path: 41 description: 42 - The base path where to install the bower packages 43 required: true 44 state: 45 description: 46 - The state of the bower package 47 required: false 48 default: present 49 choices: [ "present", "absent", "latest" ] 50 version: 51 description: 52 - The version to be installed 53 required: false 54 ''' 55 56 EXAMPLES = ''' 57 description: Install "bootstrap" bower package. 58 - bower: name=bootstrap 59 60 description: Install "bootstrap" bower package on version 3.1.1. 61 - bower: name=bootstrap version=3.1.1 62 63 description: Remove the "bootstrap" bower package. 64 - bower: name=bootstrap state=absent 65 66 description: Install packages based on bower.json. 67 - bower: path=/app/location 68 69 description: Update packages based on bower.json to their latest version. 70 - bower: path=/app/location state=latest 71 ''' 72 73 74 class Bower(object): 75 def __init__(self, module, **kwargs): 76 self.module = module 77 self.name = kwargs['name'] 78 self.offline = kwargs['offline'] 79 self.path = kwargs['path'] 80 self.version = kwargs['version'] 81 82 if kwargs['version']: 83 self.name_version = self.name + '#' + self.version 84 else: 85 self.name_version = self.name 86 87 def _exec(self, args, run_in_check_mode=False, check_rc=True): 88 if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): 89 cmd = ["bower"] + args 90 91 if self.name: 92 cmd.append(self.name_version) 93 94 if self.offline: 95 cmd.append('--offline') 96 97 # If path is specified, cd into that path and run the command. 98 cwd = None 99 if self.path: 100 if not os.path.exists(self.path): 101 os.makedirs(self.path) 102 if not os.path.isdir(self.path): 103 self.module.fail_json(msg="path %s is not a directory" % self.path) 104 cwd = self.path 105 106 rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd) 107 return out 108 return '' 109 110 def list(self): 111 cmd = ['list', '--json'] 112 113 installed = list() 114 missing = list() 115 outdated = list() 116 data = json.loads(self._exec(cmd, True, False)) 117 if 'dependencies' in data: 118 for dep in data['dependencies']: 119 if 'missing' in data['dependencies'][dep] and data['dependencies'][dep]['missing']: 120 missing.append(dep) 121 elif data['dependencies'][dep]['pkgMeta']['version'] != data['dependencies'][dep]['update']['latest']: 122 outdated.append(dep) 123 elif 'incompatible' in data['dependencies'][dep] and data['dependencies'][dep]['incompatible']: 124 outdated.append(dep) 125 else: 126 installed.append(dep) 127 # Named dependency not installed 128 else: 129 missing.append(self.name) 130 131 return installed, missing, outdated 132 133 def install(self): 134 return self._exec(['install']) 135 136 def update(self): 137 return self._exec(['update']) 138 139 def uninstall(self): 140 return self._exec(['uninstall']) 141 142 143 def main(): 144 arg_spec = dict( 145 name=dict(default=None), 146 offline=dict(default='no', type='bool'), 147 path=dict(required=True), 148 state=dict(default='present', choices=['present', 'absent', 'latest', ]), 149 version=dict(default=None), 150 ) 151 module = AnsibleModule( 152 argument_spec=arg_spec 153 ) 154 155 name = module.params['name'] 156 offline = module.params['offline'] 157 path = module.params['path'] 158 state = module.params['state'] 159 version = module.params['version'] 160 161 if state == 'absent' and not name: 162 module.fail_json(msg='uninstalling a package is only available for named packages') 163 164 bower = Bower(module, name=name, offline=offline, path=path, version=version) 165 166 changed = False 167 if state == 'present': 168 installed, missing, outdated = bower.list() 169 if len(missing): 170 changed = True 171 bower.install() 172 elif state == 'latest': 173 installed, missing, outdated = bower.list() 174 if len(missing) or len(outdated): 175 changed = True 176 bower.update() 177 else: # Absent 178 installed, missing, outdated = bower.list() 179 if name in installed: 180 changed = True 181 bower.uninstall() 182 183 module.exit_json(changed=changed) 184 185 # Import module snippets 186 from ansible.module_utils.basic import * 187 main() 188 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/packaging/language/bower.py b/packaging/language/bower.py --- a/packaging/language/bower.py +++ b/packaging/language/bower.py @@ -108,7 +108,7 @@ return '' def list(self): - cmd = ['list', '--json'] + cmd = ['list', '--json', '--config.interactive=false', '--allow-root'] installed = list() missing = list()
{"golden_diff": "diff --git a/packaging/language/bower.py b/packaging/language/bower.py\n--- a/packaging/language/bower.py\n+++ b/packaging/language/bower.py\n@@ -108,7 +108,7 @@\n return ''\n \n def list(self):\n- cmd = ['list', '--json']\n+ cmd = ['list', '--json', '--config.interactive=false', '--allow-root']\n \n installed = list()\n missing = list()\n", "issue": "Freshly installed bower raises json error\nI ran into an issue where the ansible bower module when attempting to run bower install can't parse the json from `bower list --json`\n\nHere is the stacktrace\n\n```\nfailed: [default] => {\"failed\": true, \"parsed\": false}\nBECOME-SUCCESS-bcokpjdhrlrcdlrfpmvdgmahrbmtzoqk\nTraceback (most recent call last):\n File \"/home/vagrant/.ansible/tmp/ansible-tmp-1427221462.07-279423510478512/bower\", line 1781, in <module>\n main()\n File \"/home/vagrant/.ansible/tmp/ansible-tmp-1427221462.07-279423510478512/bower\", line 168, in main\n installed, missing, outdated = bower.list()\n File \"/home/vagrant/.ansible/tmp/ansible-tmp-1427221462.07-279423510478512/bower\", line 116, in list\n data = json.loads(self._exec(cmd, True, False))\n File \"/usr/lib/python2.7/json/__init__.py\", line 338, in loads\n return _default_decoder.decode(s)\n File \"/usr/lib/python2.7/json/decoder.py\", line 366, in decode\n obj, end = self.raw_decode(s, idx=_w(s, 0).end())\n File \"/usr/lib/python2.7/json/decoder.py\", line 384, in raw_decode\n raise ValueError(\"No JSON object could be decoded\")\n```\n\nSo, when I logged into run the bower list --json command manually I saw this\n\n```\nvagrant@vagrant-ubuntu-trusty-64:~/catdoor/opus$ bower list --json\n[?] May bower anonymously report usage statistics to improve the tool over time? Yes\n```\n\nWhich makes me wonder if a freshly installed bower will always ask that question, thus not producing json output.\n\nWhen i subsquently run the provision it fails the same way.\n\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2014, Michael Warkentin <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\nDOCUMENTATION = '''\n---\nmodule: bower\nshort_description: Manage bower packages with bower\ndescription:\n - Manage bower packages with bower\nversion_added: 1.9\nauthor: Michael Warkentin\noptions:\n name:\n description:\n - The name of a bower package to install\n required: false\n offline:\n description:\n - Install packages from local cache, if the packages were installed before\n required: false\n default: no\n choices: [ \"yes\", \"no\" ]\n path:\n description:\n - The base path where to install the bower packages\n required: true\n state:\n description:\n - The state of the bower package\n required: false\n default: present\n choices: [ \"present\", \"absent\", \"latest\" ]\n version:\n description:\n - The version to be installed\n required: false\n'''\n\nEXAMPLES = '''\ndescription: Install \"bootstrap\" bower package.\n- bower: name=bootstrap\n\ndescription: Install \"bootstrap\" bower package on version 3.1.1.\n- bower: name=bootstrap version=3.1.1\n\ndescription: Remove the \"bootstrap\" bower package.\n- bower: name=bootstrap state=absent\n\ndescription: Install packages based on bower.json.\n- bower: path=/app/location\n\ndescription: Update packages based on bower.json to their latest version.\n- bower: path=/app/location state=latest\n'''\n\n\nclass Bower(object):\n def __init__(self, module, **kwargs):\n self.module = module\n self.name = kwargs['name']\n self.offline = kwargs['offline']\n self.path = kwargs['path']\n self.version = kwargs['version']\n\n if kwargs['version']:\n self.name_version = self.name + '#' + self.version\n else:\n self.name_version = self.name\n\n def _exec(self, args, run_in_check_mode=False, check_rc=True):\n if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):\n cmd = [\"bower\"] + args\n\n if self.name:\n cmd.append(self.name_version)\n\n if self.offline:\n cmd.append('--offline')\n\n # If path is specified, cd into that path and run the command.\n cwd = None\n if self.path:\n if not os.path.exists(self.path):\n os.makedirs(self.path)\n if not os.path.isdir(self.path):\n self.module.fail_json(msg=\"path %s is not a directory\" % self.path)\n cwd = self.path\n\n rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd)\n return out\n return ''\n\n def list(self):\n cmd = ['list', '--json']\n\n installed = list()\n missing = list()\n outdated = list()\n data = json.loads(self._exec(cmd, True, False))\n if 'dependencies' in data:\n for dep in data['dependencies']:\n if 'missing' in data['dependencies'][dep] and data['dependencies'][dep]['missing']:\n missing.append(dep)\n elif data['dependencies'][dep]['pkgMeta']['version'] != data['dependencies'][dep]['update']['latest']:\n outdated.append(dep)\n elif 'incompatible' in data['dependencies'][dep] and data['dependencies'][dep]['incompatible']:\n outdated.append(dep)\n else:\n installed.append(dep)\n # Named dependency not installed\n else:\n missing.append(self.name)\n\n return installed, missing, outdated\n\n def install(self):\n return self._exec(['install'])\n\n def update(self):\n return self._exec(['update'])\n\n def uninstall(self):\n return self._exec(['uninstall'])\n\n\ndef main():\n arg_spec = dict(\n name=dict(default=None),\n offline=dict(default='no', type='bool'),\n path=dict(required=True),\n state=dict(default='present', choices=['present', 'absent', 'latest', ]),\n version=dict(default=None),\n )\n module = AnsibleModule(\n argument_spec=arg_spec\n )\n\n name = module.params['name']\n offline = module.params['offline']\n path = module.params['path']\n state = module.params['state']\n version = module.params['version']\n\n if state == 'absent' and not name:\n module.fail_json(msg='uninstalling a package is only available for named packages')\n\n bower = Bower(module, name=name, offline=offline, path=path, version=version)\n\n changed = False\n if state == 'present':\n installed, missing, outdated = bower.list()\n if len(missing):\n changed = True\n bower.install()\n elif state == 'latest':\n installed, missing, outdated = bower.list()\n if len(missing) or len(outdated):\n changed = True\n bower.update()\n else: # Absent\n installed, missing, outdated = bower.list()\n if name in installed:\n changed = True\n bower.uninstall()\n\n module.exit_json(changed=changed)\n\n# Import module snippets\nfrom ansible.module_utils.basic import *\nmain()\n", "path": "packaging/language/bower.py"}], "after_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2014, Michael Warkentin <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\nDOCUMENTATION = '''\n---\nmodule: bower\nshort_description: Manage bower packages with bower\ndescription:\n - Manage bower packages with bower\nversion_added: 1.9\nauthor: Michael Warkentin\noptions:\n name:\n description:\n - The name of a bower package to install\n required: false\n offline:\n description:\n - Install packages from local cache, if the packages were installed before\n required: false\n default: no\n choices: [ \"yes\", \"no\" ]\n path:\n description:\n - The base path where to install the bower packages\n required: true\n state:\n description:\n - The state of the bower package\n required: false\n default: present\n choices: [ \"present\", \"absent\", \"latest\" ]\n version:\n description:\n - The version to be installed\n required: false\n'''\n\nEXAMPLES = '''\ndescription: Install \"bootstrap\" bower package.\n- bower: name=bootstrap\n\ndescription: Install \"bootstrap\" bower package on version 3.1.1.\n- bower: name=bootstrap version=3.1.1\n\ndescription: Remove the \"bootstrap\" bower package.\n- bower: name=bootstrap state=absent\n\ndescription: Install packages based on bower.json.\n- bower: path=/app/location\n\ndescription: Update packages based on bower.json to their latest version.\n- bower: path=/app/location state=latest\n'''\n\n\nclass Bower(object):\n def __init__(self, module, **kwargs):\n self.module = module\n self.name = kwargs['name']\n self.offline = kwargs['offline']\n self.path = kwargs['path']\n self.version = kwargs['version']\n\n if kwargs['version']:\n self.name_version = self.name + '#' + self.version\n else:\n self.name_version = self.name\n\n def _exec(self, args, run_in_check_mode=False, check_rc=True):\n if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):\n cmd = [\"bower\"] + args\n\n if self.name:\n cmd.append(self.name_version)\n\n if self.offline:\n cmd.append('--offline')\n\n # If path is specified, cd into that path and run the command.\n cwd = None\n if self.path:\n if not os.path.exists(self.path):\n os.makedirs(self.path)\n if not os.path.isdir(self.path):\n self.module.fail_json(msg=\"path %s is not a directory\" % self.path)\n cwd = self.path\n\n rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd)\n return out\n return ''\n\n def list(self):\n cmd = ['list', '--json', '--config.interactive=false', '--allow-root']\n\n installed = list()\n missing = list()\n outdated = list()\n data = json.loads(self._exec(cmd, True, False))\n if 'dependencies' in data:\n for dep in data['dependencies']:\n if 'missing' in data['dependencies'][dep] and data['dependencies'][dep]['missing']:\n missing.append(dep)\n elif data['dependencies'][dep]['pkgMeta']['version'] != data['dependencies'][dep]['update']['latest']:\n outdated.append(dep)\n elif 'incompatible' in data['dependencies'][dep] and data['dependencies'][dep]['incompatible']:\n outdated.append(dep)\n else:\n installed.append(dep)\n # Named dependency not installed\n else:\n missing.append(self.name)\n\n return installed, missing, outdated\n\n def install(self):\n return self._exec(['install'])\n\n def update(self):\n return self._exec(['update'])\n\n def uninstall(self):\n return self._exec(['uninstall'])\n\n\ndef main():\n arg_spec = dict(\n name=dict(default=None),\n offline=dict(default='no', type='bool'),\n path=dict(required=True),\n state=dict(default='present', choices=['present', 'absent', 'latest', ]),\n version=dict(default=None),\n )\n module = AnsibleModule(\n argument_spec=arg_spec\n )\n\n name = module.params['name']\n offline = module.params['offline']\n path = module.params['path']\n state = module.params['state']\n version = module.params['version']\n\n if state == 'absent' and not name:\n module.fail_json(msg='uninstalling a package is only available for named packages')\n\n bower = Bower(module, name=name, offline=offline, path=path, version=version)\n\n changed = False\n if state == 'present':\n installed, missing, outdated = bower.list()\n if len(missing):\n changed = True\n bower.install()\n elif state == 'latest':\n installed, missing, outdated = bower.list()\n if len(missing) or len(outdated):\n changed = True\n bower.update()\n else: # Absent\n installed, missing, outdated = bower.list()\n if name in installed:\n changed = True\n bower.uninstall()\n\n module.exit_json(changed=changed)\n\n# Import module snippets\nfrom ansible.module_utils.basic import *\nmain()\n", "path": "packaging/language/bower.py"}]}
2,534
100
gh_patches_debug_66494
rasdani/github-patches
git_diff
conan-io__conan-4965
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- tar_extract is affected by symlinks in current directory If a current directory has a symlink with the name matching a directory in the installed conan_package.tgz, then the directory is not extracted to the package directory. Reproduced on 1.15.0-dev. Notice the `include` directory disappears from the package directory. ``` $ mkdir /tmp/testing && cd /tmp/testing $ conan new Hello/0.1 -t $ conan create . demo/testing $ conan upload Hello/0.1@demo/testing --all $ conan remove Hello/0.1@demo/testing --force $ conan install Hello/0.1@demo/testing $ conan get Hello/0.1@demo/testing:57547fe65fffc300f05aa42ee64b3b02eeabb6d7 . Listing directory '.': conaninfo.txt conanmanifest.txt include lib $ ln -s /tmp include $ conan remove Hello/0.1@demo/testing --force $ conan install Hello/0.1@demo/testing $ conan get Hello/0.1@demo/testing:57547fe65fffc300f05aa42ee64b3b02eeabb6d7 . Listing directory '.': conaninfo.txt conanmanifest.txt lib ``` The issue is caused by `badpath` check. The `base` is the current directory. Joining current directory and `include`, and resolving the symlink makes it to not to start with the current directory. What is the true intent for the `badpath` method? Is there a good test case to show it's necessary? ```python 293 def tar_extract(fileobj, destination_dir): 294 """Extract tar file controlling not absolute paths and fixing the routes 295 if the tar was zipped in windows""" 296 def badpath(path, base): 297 # joinpath will ignore base if path is absolute 298 return not realpath(abspath(joinpath(base, path))).startswith(base) 299 300 def safemembers(members): 301 base = realpath(abspath(".")) 302 303 for finfo in members: 304 if badpath(finfo.name, base) or finfo.islnk(): 305 continue 306 else: 307 # Fixes unzip a windows zipped file in linux 308 finfo.name = finfo.name.replace("\\", "/") 309 yield finfo ``` - [x] I've read the [CONTRIBUTING guide](https://github.com/conan-io/conan/blob/develop/.github/CONTRIBUTING.md). - [x] I've specified the Conan version, operating system version and any tool that can be relevant. - [x] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `conans/util/files.py` Content: ``` 1 import errno 2 import hashlib 3 import os 4 import platform 5 import re 6 import shutil 7 import stat 8 import sys 9 import tarfile 10 import tempfile 11 12 from os.path import abspath, join as joinpath, realpath 13 14 import six 15 16 from conans.util.log import logger 17 18 19 def walk(top, **kwargs): 20 if six.PY2: 21 # If py2 os.walk receives a unicode object, it will fail if a non-ascii file name is found 22 # during the iteration. More info: 23 # https://stackoverflow.com/questions/21772271/unicodedecodeerror-when-performing-os-walk 24 try: 25 top = str(top) 26 except UnicodeDecodeError: 27 pass 28 29 return os.walk(top, **kwargs) 30 31 32 def make_read_only(path): 33 for root, _, files in walk(path): 34 for f in files: 35 full_path = os.path.join(root, f) 36 mode = os.stat(full_path).st_mode 37 os.chmod(full_path, mode & ~ stat.S_IWRITE) 38 39 40 _DIRTY_FOLDER = ".dirty" 41 42 43 def set_dirty(folder): 44 dirty_file = os.path.normpath(folder) + _DIRTY_FOLDER 45 save(dirty_file, "") 46 47 48 def clean_dirty(folder): 49 dirty_file = os.path.normpath(folder) + _DIRTY_FOLDER 50 os.remove(dirty_file) 51 52 53 def is_dirty(folder): 54 dirty_file = os.path.normpath(folder) + _DIRTY_FOLDER 55 return os.path.exists(dirty_file) 56 57 58 def decode_text(text): 59 decoders = ["utf-8", "Windows-1252"] 60 for decoder in decoders: 61 try: 62 return text.decode(decoder) 63 except UnicodeDecodeError: 64 continue 65 logger.warning("can't decode %s" % str(text)) 66 return text.decode("utf-8", "ignore") # Ignore not compatible characters 67 68 69 def touch(fname, times=None): 70 os.utime(fname, times) 71 72 73 def touch_folder(folder): 74 for dirname, _, filenames in walk(folder): 75 for fname in filenames: 76 os.utime(os.path.join(dirname, fname), None) 77 78 79 def normalize(text): 80 if platform.system() == "Windows": 81 return re.sub("\r?\n", "\r\n", text) 82 else: 83 return text 84 85 86 def md5(content): 87 md5alg = hashlib.md5() 88 if isinstance(content, bytes): 89 tmp = content 90 else: 91 tmp = content.encode("utf-8") 92 md5alg.update(tmp) 93 return md5alg.hexdigest() 94 95 96 def md5sum(file_path): 97 return _generic_algorithm_sum(file_path, "md5") 98 99 100 def sha1sum(file_path): 101 return _generic_algorithm_sum(file_path, "sha1") 102 103 104 def sha256sum(file_path): 105 return _generic_algorithm_sum(file_path, "sha256") 106 107 108 def _generic_algorithm_sum(file_path, algorithm_name): 109 110 with open(file_path, 'rb') as fh: 111 m = hashlib.new(algorithm_name) 112 while True: 113 data = fh.read(8192) 114 if not data: 115 break 116 m.update(data) 117 return m.hexdigest() 118 119 120 def save_append(path, content): 121 try: 122 os.makedirs(os.path.dirname(path)) 123 except: 124 pass 125 126 with open(path, "ab") as handle: 127 handle.write(to_file_bytes(content)) 128 129 130 def save(path, content, only_if_modified=False): 131 """ 132 Saves a file with given content 133 Params: 134 path: path to write file to 135 content: contents to save in the file 136 only_if_modified: file won't be modified if the content hasn't changed 137 """ 138 try: 139 os.makedirs(os.path.dirname(path)) 140 except: 141 pass 142 143 new_content = to_file_bytes(content) 144 145 if only_if_modified and os.path.exists(path): 146 old_content = load(path, binary=True) 147 if old_content == new_content: 148 return 149 150 with open(path, "wb") as handle: 151 handle.write(new_content) 152 153 154 def mkdir_tmp(): 155 return tempfile.mkdtemp(suffix='tmp_conan') 156 157 158 def to_file_bytes(content): 159 if six.PY3: 160 if not isinstance(content, bytes): 161 content = bytes(content, "utf-8") 162 elif isinstance(content, unicode): 163 content = content.encode("utf-8") 164 return content 165 166 167 def save_files(path, files, only_if_modified=False): 168 for name, content in list(files.items()): 169 save(os.path.join(path, name), content, only_if_modified=only_if_modified) 170 171 172 def load(path, binary=False): 173 """ Loads a file content """ 174 with open(path, 'rb') as handle: 175 tmp = handle.read() 176 return tmp if binary else decode_text(tmp) 177 178 179 def relative_dirs(path): 180 """ Walks a dir and return a list with the relative paths """ 181 ret = [] 182 for dirpath, _, fnames in walk(path): 183 for filename in fnames: 184 tmp = os.path.join(dirpath, filename) 185 tmp = tmp[len(path) + 1:] 186 ret.append(tmp) 187 return ret 188 189 190 def get_abs_path(folder, origin): 191 if folder: 192 if os.path.isabs(folder): 193 return folder 194 return os.path.join(origin, folder) 195 return origin 196 197 198 def _change_permissions(func, path, exc_info): 199 if not os.access(path, os.W_OK): 200 os.chmod(path, stat.S_IWUSR) 201 func(path) 202 else: 203 raise OSError("Cannot change permissions for {}! Exception info: {}".format(path, exc_info)) 204 205 206 def rmdir(path): 207 try: 208 shutil.rmtree(path, onerror=_change_permissions) 209 except OSError as err: 210 if err.errno == errno.ENOENT: 211 return 212 raise 213 214 215 def remove(path): 216 try: 217 assert os.path.isfile(path) 218 os.remove(path) 219 except (IOError, OSError) as e: # for py3, handle just PermissionError 220 if e.errno == errno.EPERM or e.errno == errno.EACCES: 221 os.chmod(path, stat.S_IRWXU) 222 os.remove(path) 223 return 224 raise 225 226 227 def mkdir(path): 228 """Recursive mkdir, doesnt fail if already existing""" 229 if os.path.exists(path): 230 return 231 os.makedirs(path) 232 233 234 def path_exists(path, basedir): 235 """Case sensitive, for windows, optional 236 basedir for skip caps check for tmp folders in testing for example (returned always 237 in lowercase for some strange reason)""" 238 exists = os.path.exists(path) 239 if not exists or sys.platform == "linux2": 240 return exists 241 242 path = os.path.normpath(path) 243 path = os.path.relpath(path, basedir) 244 chunks = path.split(os.sep) 245 tmp = basedir 246 247 for chunk in chunks: 248 if chunk and chunk not in os.listdir(tmp): 249 return False 250 tmp = os.path.normpath(tmp + os.sep + chunk) 251 return True 252 253 254 def gzopen_without_timestamps(name, mode="r", fileobj=None, compresslevel=None, **kwargs): 255 """ !! Method overrided by laso to pass mtime=0 (!=None) to avoid time.time() was 256 setted in Gzip file causing md5 to change. Not possible using the 257 previous tarfile open because arguments are not passed to GzipFile constructor 258 """ 259 from tarfile import CompressionError, ReadError 260 261 compresslevel = compresslevel or int(os.getenv("CONAN_COMPRESSION_LEVEL", 9)) 262 263 if mode not in ("r", "w"): 264 raise ValueError("mode must be 'r' or 'w'") 265 266 try: 267 import gzip 268 gzip.GzipFile 269 except (ImportError, AttributeError): 270 raise CompressionError("gzip module is not available") 271 272 try: 273 fileobj = gzip.GzipFile(name, mode, compresslevel, fileobj, mtime=0) 274 except OSError: 275 if fileobj is not None and mode == 'r': 276 raise ReadError("not a gzip file") 277 raise 278 279 try: 280 t = tarfile.TarFile.taropen(name, mode, fileobj, **kwargs) 281 except IOError: 282 fileobj.close() 283 if mode == 'r': 284 raise ReadError("not a gzip file") 285 raise 286 except: 287 fileobj.close() 288 raise 289 t._extfileobj = False 290 return t 291 292 293 def tar_extract(fileobj, destination_dir): 294 """Extract tar file controlling not absolute paths and fixing the routes 295 if the tar was zipped in windows""" 296 def badpath(path, base): 297 # joinpath will ignore base if path is absolute 298 return not realpath(abspath(joinpath(base, path))).startswith(base) 299 300 def safemembers(members): 301 base = realpath(abspath(".")) 302 303 for finfo in members: 304 if badpath(finfo.name, base) or finfo.islnk(): 305 continue 306 else: 307 # Fixes unzip a windows zipped file in linux 308 finfo.name = finfo.name.replace("\\", "/") 309 yield finfo 310 311 the_tar = tarfile.open(fileobj=fileobj) 312 # NOTE: The errorlevel=2 has been removed because it was failing in Win10, it didn't allow to 313 # "could not change modification time", with time=0 314 # the_tar.errorlevel = 2 # raise exception if any error 315 the_tar.extractall(path=destination_dir, members=safemembers(the_tar)) 316 the_tar.close() 317 318 319 def list_folder_subdirs(basedir, level): 320 ret = [] 321 for root, dirs, _ in walk(basedir): 322 rel_path = os.path.relpath(root, basedir) 323 if rel_path == ".": 324 continue 325 dir_split = rel_path.split(os.sep) 326 if len(dir_split) == level: 327 ret.append("/".join(dir_split)) 328 dirs[:] = [] # Stop iterate subdirs 329 return ret 330 331 332 def exception_message_safe(exc): 333 try: 334 return str(exc) 335 except: 336 return decode_text(repr(exc)) 337 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/conans/util/files.py b/conans/util/files.py --- a/conans/util/files.py +++ b/conans/util/files.py @@ -298,7 +298,7 @@ return not realpath(abspath(joinpath(base, path))).startswith(base) def safemembers(members): - base = realpath(abspath(".")) + base = realpath(abspath(destination_dir)) for finfo in members: if badpath(finfo.name, base) or finfo.islnk():
{"golden_diff": "diff --git a/conans/util/files.py b/conans/util/files.py\n--- a/conans/util/files.py\n+++ b/conans/util/files.py\n@@ -298,7 +298,7 @@\n return not realpath(abspath(joinpath(base, path))).startswith(base)\n \n def safemembers(members):\n- base = realpath(abspath(\".\"))\n+ base = realpath(abspath(destination_dir))\n \n for finfo in members:\n if badpath(finfo.name, base) or finfo.islnk():\n", "issue": "tar_extract is affected by symlinks in current directory\nIf a current directory has a symlink with the name matching a directory in the installed conan_package.tgz, then the directory is not extracted to the package directory.\r\n\r\nReproduced on 1.15.0-dev. Notice the `include` directory disappears from the package directory.\r\n```\r\n$ mkdir /tmp/testing && cd /tmp/testing\r\n$ conan new Hello/0.1 -t\r\n$ conan create . demo/testing\r\n$ conan upload Hello/0.1@demo/testing --all\r\n$ conan remove Hello/0.1@demo/testing --force\r\n$ conan install Hello/0.1@demo/testing\r\n$ conan get Hello/0.1@demo/testing:57547fe65fffc300f05aa42ee64b3b02eeabb6d7 .\r\nListing directory '.':\r\n conaninfo.txt\r\n conanmanifest.txt\r\n include\r\n lib\r\n$ ln -s /tmp include\r\n$ conan remove Hello/0.1@demo/testing --force\r\n$ conan install Hello/0.1@demo/testing\r\n$ conan get Hello/0.1@demo/testing:57547fe65fffc300f05aa42ee64b3b02eeabb6d7 .\r\nListing directory '.':\r\n conaninfo.txt\r\n conanmanifest.txt\r\n lib\r\n```\r\n\r\nThe issue is caused by `badpath` check. The `base` is the current directory. Joining current directory and `include`, and resolving the symlink makes it to not to start with the current directory. What is the true intent for the `badpath` method? Is there a good test case to show it's necessary?\r\n```python\r\n 293 def tar_extract(fileobj, destination_dir):\r\n 294 \"\"\"Extract tar file controlling not absolute paths and fixing the routes\r\n 295 if the tar was zipped in windows\"\"\"\r\n 296 def badpath(path, base):\r\n 297 # joinpath will ignore base if path is absolute\r\n 298 return not realpath(abspath(joinpath(base, path))).startswith(base)\r\n 299\r\n 300 def safemembers(members):\r\n 301 base = realpath(abspath(\".\"))\r\n 302\r\n 303 for finfo in members:\r\n 304 if badpath(finfo.name, base) or finfo.islnk():\r\n 305 continue\r\n 306 else:\r\n 307 # Fixes unzip a windows zipped file in linux\r\n 308 finfo.name = finfo.name.replace(\"\\\\\", \"/\")\r\n 309 yield finfo\r\n```\r\n\r\n\r\n\r\n\r\n- [x] I've read the [CONTRIBUTING guide](https://github.com/conan-io/conan/blob/develop/.github/CONTRIBUTING.md).\r\n- [x] I've specified the Conan version, operating system version and any tool that can be relevant.\r\n- [x] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion.\r\n\r\n\n", "before_files": [{"content": "import errno\nimport hashlib\nimport os\nimport platform\nimport re\nimport shutil\nimport stat\nimport sys\nimport tarfile\nimport tempfile\n\nfrom os.path import abspath, join as joinpath, realpath\n\nimport six\n\nfrom conans.util.log import logger\n\n\ndef walk(top, **kwargs):\n if six.PY2:\n # If py2 os.walk receives a unicode object, it will fail if a non-ascii file name is found\n # during the iteration. More info:\n # https://stackoverflow.com/questions/21772271/unicodedecodeerror-when-performing-os-walk\n try:\n top = str(top)\n except UnicodeDecodeError:\n pass\n\n return os.walk(top, **kwargs)\n\n\ndef make_read_only(path):\n for root, _, files in walk(path):\n for f in files:\n full_path = os.path.join(root, f)\n mode = os.stat(full_path).st_mode\n os.chmod(full_path, mode & ~ stat.S_IWRITE)\n\n\n_DIRTY_FOLDER = \".dirty\"\n\n\ndef set_dirty(folder):\n dirty_file = os.path.normpath(folder) + _DIRTY_FOLDER\n save(dirty_file, \"\")\n\n\ndef clean_dirty(folder):\n dirty_file = os.path.normpath(folder) + _DIRTY_FOLDER\n os.remove(dirty_file)\n\n\ndef is_dirty(folder):\n dirty_file = os.path.normpath(folder) + _DIRTY_FOLDER\n return os.path.exists(dirty_file)\n\n\ndef decode_text(text):\n decoders = [\"utf-8\", \"Windows-1252\"]\n for decoder in decoders:\n try:\n return text.decode(decoder)\n except UnicodeDecodeError:\n continue\n logger.warning(\"can't decode %s\" % str(text))\n return text.decode(\"utf-8\", \"ignore\") # Ignore not compatible characters\n\n\ndef touch(fname, times=None):\n os.utime(fname, times)\n\n\ndef touch_folder(folder):\n for dirname, _, filenames in walk(folder):\n for fname in filenames:\n os.utime(os.path.join(dirname, fname), None)\n\n\ndef normalize(text):\n if platform.system() == \"Windows\":\n return re.sub(\"\\r?\\n\", \"\\r\\n\", text)\n else:\n return text\n\n\ndef md5(content):\n md5alg = hashlib.md5()\n if isinstance(content, bytes):\n tmp = content\n else:\n tmp = content.encode(\"utf-8\")\n md5alg.update(tmp)\n return md5alg.hexdigest()\n\n\ndef md5sum(file_path):\n return _generic_algorithm_sum(file_path, \"md5\")\n\n\ndef sha1sum(file_path):\n return _generic_algorithm_sum(file_path, \"sha1\")\n\n\ndef sha256sum(file_path):\n return _generic_algorithm_sum(file_path, \"sha256\")\n\n\ndef _generic_algorithm_sum(file_path, algorithm_name):\n\n with open(file_path, 'rb') as fh:\n m = hashlib.new(algorithm_name)\n while True:\n data = fh.read(8192)\n if not data:\n break\n m.update(data)\n return m.hexdigest()\n\n\ndef save_append(path, content):\n try:\n os.makedirs(os.path.dirname(path))\n except:\n pass\n\n with open(path, \"ab\") as handle:\n handle.write(to_file_bytes(content))\n\n\ndef save(path, content, only_if_modified=False):\n \"\"\"\n Saves a file with given content\n Params:\n path: path to write file to\n content: contents to save in the file\n only_if_modified: file won't be modified if the content hasn't changed\n \"\"\"\n try:\n os.makedirs(os.path.dirname(path))\n except:\n pass\n\n new_content = to_file_bytes(content)\n\n if only_if_modified and os.path.exists(path):\n old_content = load(path, binary=True)\n if old_content == new_content:\n return\n\n with open(path, \"wb\") as handle:\n handle.write(new_content)\n\n\ndef mkdir_tmp():\n return tempfile.mkdtemp(suffix='tmp_conan')\n\n\ndef to_file_bytes(content):\n if six.PY3:\n if not isinstance(content, bytes):\n content = bytes(content, \"utf-8\")\n elif isinstance(content, unicode):\n content = content.encode(\"utf-8\")\n return content\n\n\ndef save_files(path, files, only_if_modified=False):\n for name, content in list(files.items()):\n save(os.path.join(path, name), content, only_if_modified=only_if_modified)\n\n\ndef load(path, binary=False):\n \"\"\" Loads a file content \"\"\"\n with open(path, 'rb') as handle:\n tmp = handle.read()\n return tmp if binary else decode_text(tmp)\n\n\ndef relative_dirs(path):\n \"\"\" Walks a dir and return a list with the relative paths \"\"\"\n ret = []\n for dirpath, _, fnames in walk(path):\n for filename in fnames:\n tmp = os.path.join(dirpath, filename)\n tmp = tmp[len(path) + 1:]\n ret.append(tmp)\n return ret\n\n\ndef get_abs_path(folder, origin):\n if folder:\n if os.path.isabs(folder):\n return folder\n return os.path.join(origin, folder)\n return origin\n\n\ndef _change_permissions(func, path, exc_info):\n if not os.access(path, os.W_OK):\n os.chmod(path, stat.S_IWUSR)\n func(path)\n else:\n raise OSError(\"Cannot change permissions for {}! Exception info: {}\".format(path, exc_info))\n\n\ndef rmdir(path):\n try:\n shutil.rmtree(path, onerror=_change_permissions)\n except OSError as err:\n if err.errno == errno.ENOENT:\n return\n raise\n\n\ndef remove(path):\n try:\n assert os.path.isfile(path)\n os.remove(path)\n except (IOError, OSError) as e: # for py3, handle just PermissionError\n if e.errno == errno.EPERM or e.errno == errno.EACCES:\n os.chmod(path, stat.S_IRWXU)\n os.remove(path)\n return\n raise\n\n\ndef mkdir(path):\n \"\"\"Recursive mkdir, doesnt fail if already existing\"\"\"\n if os.path.exists(path):\n return\n os.makedirs(path)\n\n\ndef path_exists(path, basedir):\n \"\"\"Case sensitive, for windows, optional\n basedir for skip caps check for tmp folders in testing for example (returned always\n in lowercase for some strange reason)\"\"\"\n exists = os.path.exists(path)\n if not exists or sys.platform == \"linux2\":\n return exists\n\n path = os.path.normpath(path)\n path = os.path.relpath(path, basedir)\n chunks = path.split(os.sep)\n tmp = basedir\n\n for chunk in chunks:\n if chunk and chunk not in os.listdir(tmp):\n return False\n tmp = os.path.normpath(tmp + os.sep + chunk)\n return True\n\n\ndef gzopen_without_timestamps(name, mode=\"r\", fileobj=None, compresslevel=None, **kwargs):\n \"\"\" !! Method overrided by laso to pass mtime=0 (!=None) to avoid time.time() was\n setted in Gzip file causing md5 to change. Not possible using the\n previous tarfile open because arguments are not passed to GzipFile constructor\n \"\"\"\n from tarfile import CompressionError, ReadError\n\n compresslevel = compresslevel or int(os.getenv(\"CONAN_COMPRESSION_LEVEL\", 9))\n\n if mode not in (\"r\", \"w\"):\n raise ValueError(\"mode must be 'r' or 'w'\")\n\n try:\n import gzip\n gzip.GzipFile\n except (ImportError, AttributeError):\n raise CompressionError(\"gzip module is not available\")\n\n try:\n fileobj = gzip.GzipFile(name, mode, compresslevel, fileobj, mtime=0)\n except OSError:\n if fileobj is not None and mode == 'r':\n raise ReadError(\"not a gzip file\")\n raise\n\n try:\n t = tarfile.TarFile.taropen(name, mode, fileobj, **kwargs)\n except IOError:\n fileobj.close()\n if mode == 'r':\n raise ReadError(\"not a gzip file\")\n raise\n except:\n fileobj.close()\n raise\n t._extfileobj = False\n return t\n\n\ndef tar_extract(fileobj, destination_dir):\n \"\"\"Extract tar file controlling not absolute paths and fixing the routes\n if the tar was zipped in windows\"\"\"\n def badpath(path, base):\n # joinpath will ignore base if path is absolute\n return not realpath(abspath(joinpath(base, path))).startswith(base)\n\n def safemembers(members):\n base = realpath(abspath(\".\"))\n\n for finfo in members:\n if badpath(finfo.name, base) or finfo.islnk():\n continue\n else:\n # Fixes unzip a windows zipped file in linux\n finfo.name = finfo.name.replace(\"\\\\\", \"/\")\n yield finfo\n\n the_tar = tarfile.open(fileobj=fileobj)\n # NOTE: The errorlevel=2 has been removed because it was failing in Win10, it didn't allow to\n # \"could not change modification time\", with time=0\n # the_tar.errorlevel = 2 # raise exception if any error\n the_tar.extractall(path=destination_dir, members=safemembers(the_tar))\n the_tar.close()\n\n\ndef list_folder_subdirs(basedir, level):\n ret = []\n for root, dirs, _ in walk(basedir):\n rel_path = os.path.relpath(root, basedir)\n if rel_path == \".\":\n continue\n dir_split = rel_path.split(os.sep)\n if len(dir_split) == level:\n ret.append(\"/\".join(dir_split))\n dirs[:] = [] # Stop iterate subdirs\n return ret\n\n\ndef exception_message_safe(exc):\n try:\n return str(exc)\n except:\n return decode_text(repr(exc))\n", "path": "conans/util/files.py"}], "after_files": [{"content": "import errno\nimport hashlib\nimport os\nimport platform\nimport re\nimport shutil\nimport stat\nimport sys\nimport tarfile\nimport tempfile\n\nfrom os.path import abspath, join as joinpath, realpath\n\nimport six\n\nfrom conans.util.log import logger\n\n\ndef walk(top, **kwargs):\n if six.PY2:\n # If py2 os.walk receives a unicode object, it will fail if a non-ascii file name is found\n # during the iteration. More info:\n # https://stackoverflow.com/questions/21772271/unicodedecodeerror-when-performing-os-walk\n try:\n top = str(top)\n except UnicodeDecodeError:\n pass\n\n return os.walk(top, **kwargs)\n\n\ndef make_read_only(path):\n for root, _, files in walk(path):\n for f in files:\n full_path = os.path.join(root, f)\n mode = os.stat(full_path).st_mode\n os.chmod(full_path, mode & ~ stat.S_IWRITE)\n\n\n_DIRTY_FOLDER = \".dirty\"\n\n\ndef set_dirty(folder):\n dirty_file = os.path.normpath(folder) + _DIRTY_FOLDER\n save(dirty_file, \"\")\n\n\ndef clean_dirty(folder):\n dirty_file = os.path.normpath(folder) + _DIRTY_FOLDER\n os.remove(dirty_file)\n\n\ndef is_dirty(folder):\n dirty_file = os.path.normpath(folder) + _DIRTY_FOLDER\n return os.path.exists(dirty_file)\n\n\ndef decode_text(text):\n decoders = [\"utf-8\", \"Windows-1252\"]\n for decoder in decoders:\n try:\n return text.decode(decoder)\n except UnicodeDecodeError:\n continue\n logger.warning(\"can't decode %s\" % str(text))\n return text.decode(\"utf-8\", \"ignore\") # Ignore not compatible characters\n\n\ndef touch(fname, times=None):\n os.utime(fname, times)\n\n\ndef touch_folder(folder):\n for dirname, _, filenames in walk(folder):\n for fname in filenames:\n os.utime(os.path.join(dirname, fname), None)\n\n\ndef normalize(text):\n if platform.system() == \"Windows\":\n return re.sub(\"\\r?\\n\", \"\\r\\n\", text)\n else:\n return text\n\n\ndef md5(content):\n md5alg = hashlib.md5()\n if isinstance(content, bytes):\n tmp = content\n else:\n tmp = content.encode(\"utf-8\")\n md5alg.update(tmp)\n return md5alg.hexdigest()\n\n\ndef md5sum(file_path):\n return _generic_algorithm_sum(file_path, \"md5\")\n\n\ndef sha1sum(file_path):\n return _generic_algorithm_sum(file_path, \"sha1\")\n\n\ndef sha256sum(file_path):\n return _generic_algorithm_sum(file_path, \"sha256\")\n\n\ndef _generic_algorithm_sum(file_path, algorithm_name):\n\n with open(file_path, 'rb') as fh:\n m = hashlib.new(algorithm_name)\n while True:\n data = fh.read(8192)\n if not data:\n break\n m.update(data)\n return m.hexdigest()\n\n\ndef save_append(path, content):\n try:\n os.makedirs(os.path.dirname(path))\n except:\n pass\n\n with open(path, \"ab\") as handle:\n handle.write(to_file_bytes(content))\n\n\ndef save(path, content, only_if_modified=False):\n \"\"\"\n Saves a file with given content\n Params:\n path: path to write file to\n content: contents to save in the file\n only_if_modified: file won't be modified if the content hasn't changed\n \"\"\"\n try:\n os.makedirs(os.path.dirname(path))\n except:\n pass\n\n new_content = to_file_bytes(content)\n\n if only_if_modified and os.path.exists(path):\n old_content = load(path, binary=True)\n if old_content == new_content:\n return\n\n with open(path, \"wb\") as handle:\n handle.write(new_content)\n\n\ndef mkdir_tmp():\n return tempfile.mkdtemp(suffix='tmp_conan')\n\n\ndef to_file_bytes(content):\n if six.PY3:\n if not isinstance(content, bytes):\n content = bytes(content, \"utf-8\")\n elif isinstance(content, unicode):\n content = content.encode(\"utf-8\")\n return content\n\n\ndef save_files(path, files, only_if_modified=False):\n for name, content in list(files.items()):\n save(os.path.join(path, name), content, only_if_modified=only_if_modified)\n\n\ndef load(path, binary=False):\n \"\"\" Loads a file content \"\"\"\n with open(path, 'rb') as handle:\n tmp = handle.read()\n return tmp if binary else decode_text(tmp)\n\n\ndef relative_dirs(path):\n \"\"\" Walks a dir and return a list with the relative paths \"\"\"\n ret = []\n for dirpath, _, fnames in walk(path):\n for filename in fnames:\n tmp = os.path.join(dirpath, filename)\n tmp = tmp[len(path) + 1:]\n ret.append(tmp)\n return ret\n\n\ndef get_abs_path(folder, origin):\n if folder:\n if os.path.isabs(folder):\n return folder\n return os.path.join(origin, folder)\n return origin\n\n\ndef _change_permissions(func, path, exc_info):\n if not os.access(path, os.W_OK):\n os.chmod(path, stat.S_IWUSR)\n func(path)\n else:\n raise OSError(\"Cannot change permissions for {}! Exception info: {}\".format(path, exc_info))\n\n\ndef rmdir(path):\n try:\n shutil.rmtree(path, onerror=_change_permissions)\n except OSError as err:\n if err.errno == errno.ENOENT:\n return\n raise\n\n\ndef remove(path):\n try:\n assert os.path.isfile(path)\n os.remove(path)\n except (IOError, OSError) as e: # for py3, handle just PermissionError\n if e.errno == errno.EPERM or e.errno == errno.EACCES:\n os.chmod(path, stat.S_IRWXU)\n os.remove(path)\n return\n raise\n\n\ndef mkdir(path):\n \"\"\"Recursive mkdir, doesnt fail if already existing\"\"\"\n if os.path.exists(path):\n return\n os.makedirs(path)\n\n\ndef path_exists(path, basedir):\n \"\"\"Case sensitive, for windows, optional\n basedir for skip caps check for tmp folders in testing for example (returned always\n in lowercase for some strange reason)\"\"\"\n exists = os.path.exists(path)\n if not exists or sys.platform == \"linux2\":\n return exists\n\n path = os.path.normpath(path)\n path = os.path.relpath(path, basedir)\n chunks = path.split(os.sep)\n tmp = basedir\n\n for chunk in chunks:\n if chunk and chunk not in os.listdir(tmp):\n return False\n tmp = os.path.normpath(tmp + os.sep + chunk)\n return True\n\n\ndef gzopen_without_timestamps(name, mode=\"r\", fileobj=None, compresslevel=None, **kwargs):\n \"\"\" !! Method overrided by laso to pass mtime=0 (!=None) to avoid time.time() was\n setted in Gzip file causing md5 to change. Not possible using the\n previous tarfile open because arguments are not passed to GzipFile constructor\n \"\"\"\n from tarfile import CompressionError, ReadError\n\n compresslevel = compresslevel or int(os.getenv(\"CONAN_COMPRESSION_LEVEL\", 9))\n\n if mode not in (\"r\", \"w\"):\n raise ValueError(\"mode must be 'r' or 'w'\")\n\n try:\n import gzip\n gzip.GzipFile\n except (ImportError, AttributeError):\n raise CompressionError(\"gzip module is not available\")\n\n try:\n fileobj = gzip.GzipFile(name, mode, compresslevel, fileobj, mtime=0)\n except OSError:\n if fileobj is not None and mode == 'r':\n raise ReadError(\"not a gzip file\")\n raise\n\n try:\n t = tarfile.TarFile.taropen(name, mode, fileobj, **kwargs)\n except IOError:\n fileobj.close()\n if mode == 'r':\n raise ReadError(\"not a gzip file\")\n raise\n except:\n fileobj.close()\n raise\n t._extfileobj = False\n return t\n\n\ndef tar_extract(fileobj, destination_dir):\n \"\"\"Extract tar file controlling not absolute paths and fixing the routes\n if the tar was zipped in windows\"\"\"\n def badpath(path, base):\n # joinpath will ignore base if path is absolute\n return not realpath(abspath(joinpath(base, path))).startswith(base)\n\n def safemembers(members):\n base = realpath(abspath(destination_dir))\n\n for finfo in members:\n if badpath(finfo.name, base) or finfo.islnk():\n continue\n else:\n # Fixes unzip a windows zipped file in linux\n finfo.name = finfo.name.replace(\"\\\\\", \"/\")\n yield finfo\n\n the_tar = tarfile.open(fileobj=fileobj)\n # NOTE: The errorlevel=2 has been removed because it was failing in Win10, it didn't allow to\n # \"could not change modification time\", with time=0\n # the_tar.errorlevel = 2 # raise exception if any error\n the_tar.extractall(path=destination_dir, members=safemembers(the_tar))\n the_tar.close()\n\n\ndef list_folder_subdirs(basedir, level):\n ret = []\n for root, dirs, _ in walk(basedir):\n rel_path = os.path.relpath(root, basedir)\n if rel_path == \".\":\n continue\n dir_split = rel_path.split(os.sep)\n if len(dir_split) == level:\n ret.append(\"/\".join(dir_split))\n dirs[:] = [] # Stop iterate subdirs\n return ret\n\n\ndef exception_message_safe(exc):\n try:\n return str(exc)\n except:\n return decode_text(repr(exc))\n", "path": "conans/util/files.py"}]}
4,071
114
gh_patches_debug_31896
rasdani/github-patches
git_diff
rootpy__rootpy-785
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- basestring Hi there I found the following issue: If I'm using the F1 object from rootpy.plotting and try to access a parameter using [parnr] (the __getitem__) methode, I get the following error: `NameError: name 'basestring' is not defined` I'm using python 3.6 which doesn't has the basestring data type anymore.. https://github.com/rootpy/rootpy/blob/457e074056a916fff848978ef68b7f5107856e47/rootpy/plotting/func.py#L63 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `rootpy/plotting/func.py` Content: ``` 1 from __future__ import absolute_import 2 3 from .. import QROOT 4 from ..decorators import snake_case_methods 5 from .base import Plottable 6 from ..base import NameOnlyObject 7 8 9 __all__ = [ 10 'F1', 11 'F2', 12 'F3', 13 ] 14 15 class BaseFunction(object): 16 class ParProxy(object): 17 def __init__(self, fcn, idx): 18 self.fcn_ = fcn 19 self.idx_ = idx 20 21 @property 22 def index(self): 23 return self.idx_ 24 25 @property 26 def name(self): 27 return self.fcn_.GetParName(self.idx_) 28 29 @name.setter 30 def name(self, val): 31 return self.fcn_.SetParName(self.idx_, val) 32 33 @property 34 def value(self): 35 return self.fcn_.GetParameter(self.idx_) 36 37 @value.setter 38 def value(self, val): 39 self.fcn_.SetParameter(self.idx_, val) 40 41 @property 42 def error(self): 43 return self.fcn_.GetParError(self.idx_) 44 45 @error.setter 46 def error(self, val): 47 return self.fcn_.SetParError(self.idx_, val) 48 49 @property 50 def limits(self): 51 m = QROOT.Double() 52 M = QROOT.Double() 53 self.fcn_.GetParLimits(self.idx_, m, M) 54 return float(m), float(M) 55 56 @limits.setter 57 def limits(self, val): 58 if not hastattr(val, '__len__') and len(val) != 2: 59 raise RuntimeError('Function limits must be a tuple size 2') 60 self.fcn_.SetParLimits(self.idx_, val[0], val[1]) 61 62 def __getitem__(self, value): 63 if isinstance(value, basestring): 64 idx = self.GetParNumber(value) 65 elif isinstance(value, int): 66 idx = value 67 else: 68 raise ValueError('Function index must be a integer or a string') 69 return BaseFunction.ParProxy(self, idx) 70 71 72 @snake_case_methods 73 class F1(Plottable, NameOnlyObject, BaseFunction, QROOT.TF1): 74 _ROOT = QROOT.TF1 75 76 def __init__(self, *args, **kwargs): 77 name = kwargs.pop('name', None) 78 super(F1, self).__init__(*args, name=name) 79 self._post_init(**kwargs) 80 81 82 @snake_case_methods 83 class F2(Plottable, NameOnlyObject, BaseFunction, QROOT.TF2): 84 _ROOT = QROOT.TF2 85 86 def __init__(self, *args, **kwargs): 87 name = kwargs.pop('name', None) 88 super(F2, self).__init__(*args, name=name) 89 self._post_init(**kwargs) 90 91 92 @snake_case_methods 93 class F3(Plottable, NameOnlyObject, BaseFunction, QROOT.TF3): 94 _ROOT = QROOT.TF3 95 96 def __init__(self, *args, **kwargs): 97 name = kwargs.pop('name', None) 98 super(F3, self).__init__(*args, name=name) 99 self._post_init(**kwargs) 100 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/rootpy/plotting/func.py b/rootpy/plotting/func.py --- a/rootpy/plotting/func.py +++ b/rootpy/plotting/func.py @@ -4,7 +4,7 @@ from ..decorators import snake_case_methods from .base import Plottable from ..base import NameOnlyObject - +import six __all__ = [ 'F1', @@ -17,7 +17,7 @@ def __init__(self, fcn, idx): self.fcn_ = fcn self.idx_ = idx - + @property def index(self): return self.idx_ @@ -25,7 +25,7 @@ @property def name(self): return self.fcn_.GetParName(self.idx_) - + @name.setter def name(self, val): return self.fcn_.SetParName(self.idx_, val) @@ -55,14 +55,14 @@ @limits.setter def limits(self, val): - if not hastattr(val, '__len__') and len(val) != 2: + if not hasattr(val, '__len__') and len(val) != 2: raise RuntimeError('Function limits must be a tuple size 2') self.fcn_.SetParLimits(self.idx_, val[0], val[1]) def __getitem__(self, value): - if isinstance(value, basestring): + if isinstance(value, six.string_types): idx = self.GetParNumber(value) - elif isinstance(value, int): + elif isinstance(value, six.integer_types): idx = value else: raise ValueError('Function index must be a integer or a string')
{"golden_diff": "diff --git a/rootpy/plotting/func.py b/rootpy/plotting/func.py\n--- a/rootpy/plotting/func.py\n+++ b/rootpy/plotting/func.py\n@@ -4,7 +4,7 @@\n from ..decorators import snake_case_methods\n from .base import Plottable\n from ..base import NameOnlyObject\n-\n+import six\n \n __all__ = [\n 'F1',\n@@ -17,7 +17,7 @@\n def __init__(self, fcn, idx):\n self.fcn_ = fcn\n self.idx_ = idx\n- \n+\n @property\n def index(self):\n return self.idx_\n@@ -25,7 +25,7 @@\n @property\n def name(self):\n return self.fcn_.GetParName(self.idx_)\n- \n+\n @name.setter\n def name(self, val):\n return self.fcn_.SetParName(self.idx_, val)\n@@ -55,14 +55,14 @@\n \n @limits.setter\n def limits(self, val):\n- if not hastattr(val, '__len__') and len(val) != 2:\n+ if not hasattr(val, '__len__') and len(val) != 2:\n raise RuntimeError('Function limits must be a tuple size 2')\n self.fcn_.SetParLimits(self.idx_, val[0], val[1])\n \n def __getitem__(self, value):\n- if isinstance(value, basestring):\n+ if isinstance(value, six.string_types):\n idx = self.GetParNumber(value)\n- elif isinstance(value, int):\n+ elif isinstance(value, six.integer_types):\n idx = value\n else:\n raise ValueError('Function index must be a integer or a string')\n", "issue": "basestring\nHi there\r\nI found the following issue:\r\nIf I'm using the F1 object from rootpy.plotting and try to access a parameter using [parnr] (the __getitem__) methode, I get the following error:\r\n`NameError: name 'basestring' is not defined`\r\nI'm using python 3.6 which doesn't has the basestring data type anymore..\r\n\r\nhttps://github.com/rootpy/rootpy/blob/457e074056a916fff848978ef68b7f5107856e47/rootpy/plotting/func.py#L63\n", "before_files": [{"content": "from __future__ import absolute_import\n\nfrom .. import QROOT\nfrom ..decorators import snake_case_methods\nfrom .base import Plottable\nfrom ..base import NameOnlyObject\n\n\n__all__ = [\n 'F1',\n 'F2',\n 'F3',\n]\n\nclass BaseFunction(object):\n class ParProxy(object):\n def __init__(self, fcn, idx):\n self.fcn_ = fcn\n self.idx_ = idx\n \n @property\n def index(self):\n return self.idx_\n\n @property\n def name(self):\n return self.fcn_.GetParName(self.idx_)\n \n @name.setter\n def name(self, val):\n return self.fcn_.SetParName(self.idx_, val)\n\n @property\n def value(self):\n return self.fcn_.GetParameter(self.idx_)\n\n @value.setter\n def value(self, val):\n self.fcn_.SetParameter(self.idx_, val)\n\n @property\n def error(self):\n return self.fcn_.GetParError(self.idx_)\n\n @error.setter\n def error(self, val):\n return self.fcn_.SetParError(self.idx_, val)\n\n @property\n def limits(self):\n m = QROOT.Double()\n M = QROOT.Double()\n self.fcn_.GetParLimits(self.idx_, m, M)\n return float(m), float(M)\n\n @limits.setter\n def limits(self, val):\n if not hastattr(val, '__len__') and len(val) != 2:\n raise RuntimeError('Function limits must be a tuple size 2')\n self.fcn_.SetParLimits(self.idx_, val[0], val[1])\n\n def __getitem__(self, value):\n if isinstance(value, basestring):\n idx = self.GetParNumber(value)\n elif isinstance(value, int):\n idx = value\n else:\n raise ValueError('Function index must be a integer or a string')\n return BaseFunction.ParProxy(self, idx)\n\n\n@snake_case_methods\nclass F1(Plottable, NameOnlyObject, BaseFunction, QROOT.TF1):\n _ROOT = QROOT.TF1\n\n def __init__(self, *args, **kwargs):\n name = kwargs.pop('name', None)\n super(F1, self).__init__(*args, name=name)\n self._post_init(**kwargs)\n\n\n@snake_case_methods\nclass F2(Plottable, NameOnlyObject, BaseFunction, QROOT.TF2):\n _ROOT = QROOT.TF2\n\n def __init__(self, *args, **kwargs):\n name = kwargs.pop('name', None)\n super(F2, self).__init__(*args, name=name)\n self._post_init(**kwargs)\n\n\n@snake_case_methods\nclass F3(Plottable, NameOnlyObject, BaseFunction, QROOT.TF3):\n _ROOT = QROOT.TF3\n\n def __init__(self, *args, **kwargs):\n name = kwargs.pop('name', None)\n super(F3, self).__init__(*args, name=name)\n self._post_init(**kwargs)\n", "path": "rootpy/plotting/func.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nfrom .. import QROOT\nfrom ..decorators import snake_case_methods\nfrom .base import Plottable\nfrom ..base import NameOnlyObject\nimport six\n\n__all__ = [\n 'F1',\n 'F2',\n 'F3',\n]\n\nclass BaseFunction(object):\n class ParProxy(object):\n def __init__(self, fcn, idx):\n self.fcn_ = fcn\n self.idx_ = idx\n\n @property\n def index(self):\n return self.idx_\n\n @property\n def name(self):\n return self.fcn_.GetParName(self.idx_)\n\n @name.setter\n def name(self, val):\n return self.fcn_.SetParName(self.idx_, val)\n\n @property\n def value(self):\n return self.fcn_.GetParameter(self.idx_)\n\n @value.setter\n def value(self, val):\n self.fcn_.SetParameter(self.idx_, val)\n\n @property\n def error(self):\n return self.fcn_.GetParError(self.idx_)\n\n @error.setter\n def error(self, val):\n return self.fcn_.SetParError(self.idx_, val)\n\n @property\n def limits(self):\n m = QROOT.Double()\n M = QROOT.Double()\n self.fcn_.GetParLimits(self.idx_, m, M)\n return float(m), float(M)\n\n @limits.setter\n def limits(self, val):\n if not hasattr(val, '__len__') and len(val) != 2:\n raise RuntimeError('Function limits must be a tuple size 2')\n self.fcn_.SetParLimits(self.idx_, val[0], val[1])\n\n def __getitem__(self, value):\n if isinstance(value, six.string_types):\n idx = self.GetParNumber(value)\n elif isinstance(value, six.integer_types):\n idx = value\n else:\n raise ValueError('Function index must be a integer or a string')\n return BaseFunction.ParProxy(self, idx)\n\n\n@snake_case_methods\nclass F1(Plottable, NameOnlyObject, BaseFunction, QROOT.TF1):\n _ROOT = QROOT.TF1\n\n def __init__(self, *args, **kwargs):\n name = kwargs.pop('name', None)\n super(F1, self).__init__(*args, name=name)\n self._post_init(**kwargs)\n\n\n@snake_case_methods\nclass F2(Plottable, NameOnlyObject, BaseFunction, QROOT.TF2):\n _ROOT = QROOT.TF2\n\n def __init__(self, *args, **kwargs):\n name = kwargs.pop('name', None)\n super(F2, self).__init__(*args, name=name)\n self._post_init(**kwargs)\n\n\n@snake_case_methods\nclass F3(Plottable, NameOnlyObject, BaseFunction, QROOT.TF3):\n _ROOT = QROOT.TF3\n\n def __init__(self, *args, **kwargs):\n name = kwargs.pop('name', None)\n super(F3, self).__init__(*args, name=name)\n self._post_init(**kwargs)\n", "path": "rootpy/plotting/func.py"}]}
1,288
389
gh_patches_debug_19990
rasdani/github-patches
git_diff
Parsl__parsl-201
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Passing Files breaks over IPP The new File class contains a dictionary that maps DataFutures for each site to which it is being staged and contains a reference to the DataManager. Neither of these are pickle-able. So if we do something like this :+1: ``` data = File("foo.txt") fu = remote_app(inputs=[data]) fu.result() # <--- We'll get an error from here ``` Here's the relevant piece from the exception traceback : ``` File "/usr/local/lib/python3.5/dist-packages/ipyparallel/serialize/serialize.py", line 112, in serialize_object buffers.insert(0, pickle.dumps(cobj, PICKLE_PROTOCOL)) TypeError: can't pickle _thread.lock objects ``` I believe that the File object is the best place to hold the Future information about itself, and that would give us the opportunity to do smarter file staging in the future. So I propose that we fix this with a custom pickler for the File class. This is blocker for 0.5.0. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `parsl/data_provider/files.py` Content: ``` 1 """Define the File Type. 2 3 The primary purpose of the File object is to track the protocol to be used 4 to transfer the file as well as to give the appropriate filepath depending 5 on where(client-side, remote-side, intermediary-side) the File.filepath is 6 being called from 7 """ 8 9 import os 10 import logging 11 from urllib.parse import urlparse 12 from parsl.data_provider.data_manager import DataManager 13 14 15 logger = logging.getLogger(__name__) 16 17 18 class File(str): 19 """The Parsl File Class. 20 21 This is planned to be a very simple class that simply 22 captures various attributes of a file, and relies on client-side and worker-side 23 systems to enable to appropriate transfer of files. 24 """ 25 26 def __init__(self, url, dman=None, cache=False, caching_dir=".", staging='direct'): 27 """Construct a File object from a url string. 28 29 Args: 30 - url (string) : url string of the file e.g. 31 - 'input.txt' 32 - 'file:///scratch/proj101/input.txt' 33 - 'globus://go#ep1/~/data/input.txt' 34 - 'globus://ddb59aef-6d04-11e5-ba46-22000b92c6ec/home/johndoe/data/input.txt' 35 - dman (DataManager) : data manager 36 """ 37 self.url = url 38 parsed_url = urlparse(self.url) 39 self.scheme = parsed_url.scheme if parsed_url.scheme else 'file' 40 self.netloc = parsed_url.netloc 41 self.path = parsed_url.path 42 self.filename = os.path.basename(self.path) 43 self.dman = dman if dman else DataManager.get_data_manager() 44 self.data_future = {} 45 if self.scheme != 'file': 46 self.dman.add_file(self) 47 48 self.cache = cache 49 self.caching_dir = caching_dir 50 self.staging = staging 51 52 def __str__(self): 53 return self.filepath 54 55 def __repr__(self): 56 return self.__str__() 57 58 def __fspath__(self): 59 return self.filepath 60 61 @property 62 def filepath(self): 63 """Return the resolved filepath on the side where it is called from. 64 65 The appropriate filepath will be returned when called from within 66 an app running remotely as well as regular python on the client side. 67 68 Args: 69 - self 70 Returns: 71 - filepath (string) 72 """ 73 if self.scheme == 'globus': 74 if hasattr(self, 'local_path'): 75 return self.local_path 76 77 if 'exec_site' not in globals() or self.staging == 'direct': 78 # Assume local and direct 79 return self.path 80 else: 81 # Return self.path for now 82 return self.path 83 84 def stage_in(self, site=None): 85 """Transport file from the site of origin to local site.""" 86 return self.dman.stage_in(self, site) 87 88 def stage_out(self): 89 """Transport file from local filesystem to origin site.""" 90 return self.dman.stage_out(self) 91 92 def set_data_future(self, df, site=None): 93 self.data_future[site] = df 94 95 def get_data_future(self, site): 96 return self.data_future.get(site) 97 98 99 if __name__ == '__main__': 100 101 x = File('./files.py') 102 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/parsl/data_provider/files.py b/parsl/data_provider/files.py --- a/parsl/data_provider/files.py +++ b/parsl/data_provider/files.py @@ -95,6 +95,34 @@ def get_data_future(self, site): return self.data_future.get(site) + def __getstate__(self): + """ Overriding the default pickling method. + + The File object get's pickled and transmitted to remote sites during app + execution. This enables pickling while retaining the lockable resources + to the DFK/Client side. + """ + + state = self.__dict__.copy() + + # We have already made a copy of the future objects, they are now no longer + # reliable as means to wait for the staging events + for site in state["data_future"]: + # This is assumed to be safe, since the data_future represents staging to a specific site + # and a site will only have one filepath. + state["data_future"][site] = state["data_future"][site].filepath + + state["dman"] = None + + return state + + def __setstate__(self, state): + """ Overloading the default pickle method to reconstruct a File from serialized form + + This might require knowledge of whethere a DataManager is already present in the context. + """ + self.__dict__.update(state) + if __name__ == '__main__':
{"golden_diff": "diff --git a/parsl/data_provider/files.py b/parsl/data_provider/files.py\n--- a/parsl/data_provider/files.py\n+++ b/parsl/data_provider/files.py\n@@ -95,6 +95,34 @@\n def get_data_future(self, site):\n return self.data_future.get(site)\n \n+ def __getstate__(self):\n+ \"\"\" Overriding the default pickling method.\n+\n+ The File object get's pickled and transmitted to remote sites during app\n+ execution. This enables pickling while retaining the lockable resources\n+ to the DFK/Client side.\n+ \"\"\"\n+\n+ state = self.__dict__.copy()\n+\n+ # We have already made a copy of the future objects, they are now no longer\n+ # reliable as means to wait for the staging events\n+ for site in state[\"data_future\"]:\n+ # This is assumed to be safe, since the data_future represents staging to a specific site\n+ # and a site will only have one filepath.\n+ state[\"data_future\"][site] = state[\"data_future\"][site].filepath\n+\n+ state[\"dman\"] = None\n+\n+ return state\n+\n+ def __setstate__(self, state):\n+ \"\"\" Overloading the default pickle method to reconstruct a File from serialized form\n+\n+ This might require knowledge of whethere a DataManager is already present in the context.\n+ \"\"\"\n+ self.__dict__.update(state)\n+\n \n if __name__ == '__main__':\n", "issue": "Passing Files breaks over IPP\nThe new File class contains a dictionary that maps DataFutures for each site to which it is being staged and contains a reference to the DataManager. Neither of these are pickle-able.\r\n\r\nSo if we do something like this :+1: \r\n```\r\ndata = File(\"foo.txt\")\r\nfu = remote_app(inputs=[data])\r\nfu.result() # <--- We'll get an error from here\r\n```\r\nHere's the relevant piece from the exception traceback :\r\n```\r\nFile \"/usr/local/lib/python3.5/dist-packages/ipyparallel/serialize/serialize.py\", line 112, in serialize_object\r\n buffers.insert(0, pickle.dumps(cobj, PICKLE_PROTOCOL))\r\nTypeError: can't pickle _thread.lock objects\r\n```\r\n\r\nI believe that the File object is the best place to hold the Future information about itself, and that would give us the opportunity to do smarter file staging in the future. So I propose that we fix this with a custom pickler for the File class.\r\n\r\nThis is blocker for 0.5.0.\n", "before_files": [{"content": "\"\"\"Define the File Type.\n\nThe primary purpose of the File object is to track the protocol to be used\nto transfer the file as well as to give the appropriate filepath depending\non where(client-side, remote-side, intermediary-side) the File.filepath is\nbeing called from\n\"\"\"\n\nimport os\nimport logging\nfrom urllib.parse import urlparse\nfrom parsl.data_provider.data_manager import DataManager\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass File(str):\n \"\"\"The Parsl File Class.\n\n This is planned to be a very simple class that simply\n captures various attributes of a file, and relies on client-side and worker-side\n systems to enable to appropriate transfer of files.\n \"\"\"\n\n def __init__(self, url, dman=None, cache=False, caching_dir=\".\", staging='direct'):\n \"\"\"Construct a File object from a url string.\n\n Args:\n - url (string) : url string of the file e.g.\n - 'input.txt'\n - 'file:///scratch/proj101/input.txt'\n - 'globus://go#ep1/~/data/input.txt'\n - 'globus://ddb59aef-6d04-11e5-ba46-22000b92c6ec/home/johndoe/data/input.txt'\n - dman (DataManager) : data manager\n \"\"\"\n self.url = url\n parsed_url = urlparse(self.url)\n self.scheme = parsed_url.scheme if parsed_url.scheme else 'file'\n self.netloc = parsed_url.netloc\n self.path = parsed_url.path\n self.filename = os.path.basename(self.path)\n self.dman = dman if dman else DataManager.get_data_manager()\n self.data_future = {}\n if self.scheme != 'file':\n self.dman.add_file(self)\n\n self.cache = cache\n self.caching_dir = caching_dir\n self.staging = staging\n\n def __str__(self):\n return self.filepath\n\n def __repr__(self):\n return self.__str__()\n\n def __fspath__(self):\n return self.filepath\n\n @property\n def filepath(self):\n \"\"\"Return the resolved filepath on the side where it is called from.\n\n The appropriate filepath will be returned when called from within\n an app running remotely as well as regular python on the client side.\n\n Args:\n - self\n Returns:\n - filepath (string)\n \"\"\"\n if self.scheme == 'globus':\n if hasattr(self, 'local_path'):\n return self.local_path\n\n if 'exec_site' not in globals() or self.staging == 'direct':\n # Assume local and direct\n return self.path\n else:\n # Return self.path for now\n return self.path\n\n def stage_in(self, site=None):\n \"\"\"Transport file from the site of origin to local site.\"\"\"\n return self.dman.stage_in(self, site)\n\n def stage_out(self):\n \"\"\"Transport file from local filesystem to origin site.\"\"\"\n return self.dman.stage_out(self)\n\n def set_data_future(self, df, site=None):\n self.data_future[site] = df\n\n def get_data_future(self, site):\n return self.data_future.get(site)\n\n\nif __name__ == '__main__':\n\n x = File('./files.py')\n", "path": "parsl/data_provider/files.py"}], "after_files": [{"content": "\"\"\"Define the File Type.\n\nThe primary purpose of the File object is to track the protocol to be used\nto transfer the file as well as to give the appropriate filepath depending\non where(client-side, remote-side, intermediary-side) the File.filepath is\nbeing called from\n\"\"\"\n\nimport os\nimport logging\nfrom urllib.parse import urlparse\nfrom parsl.data_provider.data_manager import DataManager\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass File(str):\n \"\"\"The Parsl File Class.\n\n This is planned to be a very simple class that simply\n captures various attributes of a file, and relies on client-side and worker-side\n systems to enable to appropriate transfer of files.\n \"\"\"\n\n def __init__(self, url, dman=None, cache=False, caching_dir=\".\", staging='direct'):\n \"\"\"Construct a File object from a url string.\n\n Args:\n - url (string) : url string of the file e.g.\n - 'input.txt'\n - 'file:///scratch/proj101/input.txt'\n - 'globus://go#ep1/~/data/input.txt'\n - 'globus://ddb59aef-6d04-11e5-ba46-22000b92c6ec/home/johndoe/data/input.txt'\n - dman (DataManager) : data manager\n \"\"\"\n self.url = url\n parsed_url = urlparse(self.url)\n self.scheme = parsed_url.scheme if parsed_url.scheme else 'file'\n self.netloc = parsed_url.netloc\n self.path = parsed_url.path\n self.filename = os.path.basename(self.path)\n self.dman = dman if dman else DataManager.get_data_manager()\n self.data_future = {}\n if self.scheme != 'file':\n self.dman.add_file(self)\n\n self.cache = cache\n self.caching_dir = caching_dir\n self.staging = staging\n\n def __str__(self):\n return self.filepath\n\n def __repr__(self):\n return self.__str__()\n\n def __fspath__(self):\n return self.filepath\n\n @property\n def filepath(self):\n \"\"\"Return the resolved filepath on the side where it is called from.\n\n The appropriate filepath will be returned when called from within\n an app running remotely as well as regular python on the client side.\n\n Args:\n - self\n Returns:\n - filepath (string)\n \"\"\"\n if self.scheme == 'globus':\n if hasattr(self, 'local_path'):\n return self.local_path\n\n if 'exec_site' not in globals() or self.staging == 'direct':\n # Assume local and direct\n return self.path\n else:\n # Return self.path for now\n return self.path\n\n def stage_in(self, site=None):\n \"\"\"Transport file from the site of origin to local site.\"\"\"\n return self.dman.stage_in(self, site)\n\n def stage_out(self):\n \"\"\"Transport file from local filesystem to origin site.\"\"\"\n return self.dman.stage_out(self)\n\n def set_data_future(self, df, site=None):\n self.data_future[site] = df\n\n def get_data_future(self, site):\n return self.data_future.get(site)\n\n def __getstate__(self):\n \"\"\" Overriding the default pickling method.\n\n The File object get's pickled and transmitted to remote sites during app\n execution. This enables pickling while retaining the lockable resources\n to the DFK/Client side.\n \"\"\"\n\n state = self.__dict__.copy()\n\n # We have already made a copy of the future objects, they are now no longer\n # reliable as means to wait for the staging events\n for site in state[\"data_future\"]:\n # This is assumed to be safe, since the data_future represents staging to a specific site\n # and a site will only have one filepath.\n state[\"data_future\"][site] = state[\"data_future\"][site].filepath\n\n state[\"dman\"] = None\n\n return state\n\n def __setstate__(self, state):\n \"\"\" Overloading the default pickle method to reconstruct a File from serialized form\n\n This might require knowledge of whethere a DataManager is already present in the context.\n \"\"\"\n self.__dict__.update(state)\n\n\nif __name__ == '__main__':\n\n x = File('./files.py')\n", "path": "parsl/data_provider/files.py"}]}
1,402
332
gh_patches_debug_26817
rasdani/github-patches
git_diff
akvo__akvo-rsr-4850
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Bug: Locations are reset on some projects ### What were you doing? A client has reported that on some random projects after the location has been set, the locations are reset within a few hours. The occurrence has mostly been observed within 24h of making the modification. ### What should've happened? Locations (and other fields) should magically be reset. ### My environment _No response_ ### Additional context This particular client has a nightly script that pulls information from an external system, of which location is a part. However, the script only modifies locations of newly created projects. Possibilities: - something is deeply wrong with Django and after a project is created it doesn't say it's created (highly unlikely) - the script has nothing to do with it and it's * human error * a bug in the frontend e.g changes are cached and when a user gets access to the network again the changes are pushed (React?) * a side effect in the backend or another script --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `akvo/rsr/models/location.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 # Akvo RSR is covered by the GNU Affero General Public License. 4 # See more details in the license.txt file located at the root folder of the Akvo RSR module. 5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >. 6 7 8 from django.db import models 9 from django.utils.translation import ugettext_lazy as _ 10 11 from ..fields import LatitudeField, LongitudeField, ValidXMLCharField 12 from akvo.codelists.models import (Country, GeographicExactness, GeographicLocationClass, 13 GeographicLocationReach, GeographicVocabulary, LocationType) 14 from akvo.codelists.store.default_codelists import ( 15 COUNTRY, GEOGRAPHIC_EXACTNESS, GEOGRAPHIC_LOCATION_CLASS, GEOGRAPHIC_LOCATION_REACH, 16 GEOGRAPHIC_VOCABULARY, LOCATION_TYPE 17 ) 18 from akvo.utils import codelist_choices, codelist_value 19 20 21 class BaseLocation(models.Model): 22 latitude = LatitudeField( 23 _('latitude'), null=True, blank=True, db_index=True, default=None, 24 help_text=_('Use a period to denote decimals.') 25 ) 26 longitude = LongitudeField( 27 _('longitude'), null=True, blank=True, db_index=True, default=None, 28 help_text=_('Use a period to denote decimals.') 29 ) 30 city = ValidXMLCharField(_('city'), blank=True, max_length=255) 31 state = ValidXMLCharField(_('state'), blank=True, max_length=255) 32 address_1 = ValidXMLCharField(_('address 1'), max_length=255, blank=True) 33 address_2 = ValidXMLCharField(_('address 2'), max_length=255, blank=True) 34 postcode = ValidXMLCharField(_('postal code'), max_length=10, blank=True) 35 country = models.ForeignKey('Country', on_delete=models.SET_NULL, null=True, blank=True, verbose_name=_('country')) 36 37 def __str__(self): 38 return '{0}, {1}, {2}{3}'.format( 39 '{0}: {1}'.format( 40 _('Latitude'), 41 str(self.latitude) if self.latitude else _('No latitude specified')), 42 '{0}: {1}'.format( 43 _('Longitude'), 44 str(self.longitude) if self.longitude else _('No longitude specified')), 45 '{0}: {1}'.format( 46 _('Country'), 47 str(self.country.name) if self.country else _('No country specified')), 48 ' ({0})'.format(self.name) if getattr(self, 'name', None) else '' 49 ) 50 51 def delete(self, *args, **kwargs): 52 super(BaseLocation, self).delete(*args, **kwargs) 53 54 # If location_target has more locations, set the first as primary location 55 location_target = self.location_target 56 other_locations = location_target.locations.all() 57 58 if other_locations.count() > 0: 59 location_target.primary_location = other_locations.first() 60 else: 61 location_target.primary_location = None 62 63 location_target.save() 64 65 def save(self, *args, **kwargs): 66 super(BaseLocation, self).save(*args, **kwargs) 67 68 # Set location as primary location if it is the first location 69 location_target = self.location_target 70 if location_target.primary_location is None or location_target.primary_location.pk > self.pk: 71 location_target.primary_location = self 72 location_target.save() 73 74 def is_valid(self): 75 if (self.latitude is None or self.longitude is None) or \ 76 (self.latitude == 0 and self.longitude == 0) or \ 77 (self.latitude > 90 or self.latitude < -90) or \ 78 (self.longitude > 180 or self.latitude < -180): 79 return False 80 return True 81 82 class Meta: 83 app_label = 'rsr' 84 abstract = True 85 ordering = ['id', ] 86 87 88 class OrganisationLocation(BaseLocation): 89 location_target = models.ForeignKey('Organisation', on_delete=models.CASCADE, related_name='locations') 90 iati_country = ValidXMLCharField( 91 _('country'), blank=True, max_length=2, choices=codelist_choices(COUNTRY, show_code=False), 92 help_text=_('The country in which the organisation is located.') 93 ) 94 95 def iati_country_value(self): 96 return codelist_value(Country, self, 'iati_country') 97 98 def iati_country_value_unicode(self): 99 return str(self.iati_country_value()) 100 101 102 class ProjectLocation(BaseLocation): 103 104 project_relation = 'locations__in' 105 106 location_target = models.ForeignKey('Project', on_delete=models.CASCADE, related_name='locations') 107 108 # Additional IATI fields 109 reference = ValidXMLCharField( 110 _('reference'), blank=True, max_length=50, 111 help_text=_('An internal reference that describes the location in the reporting ' 112 'organisation\'s own system. For reference see: ' 113 '<a href="http://iatistandard.org/202/activity-standard/iati-activities/' 114 'iati-activity/location/#attributes" target="_blank">' 115 'http://iatistandard.org/202/activity-standard/iati-activities/iati-activity/' 116 'location/#attributes</a>.') 117 ) 118 location_code = ValidXMLCharField( 119 _('code'), blank=True, max_length=25, 120 help_text=_('Enter a code to identify the region. Codes are based on DAC region codes. ' 121 'Where an activity is considered global, the code 998 can be used. For ' 122 'reference: <a href="http://www.oecd.org/dac/stats/dacandcrscodelists.htm" ' 123 'target="_blank">http://www.oecd.org/dac/stats/dacandcrscodelists.htm</a>.') 124 ) 125 vocabulary = ValidXMLCharField(_('vocabulary'), blank=True, max_length=2, 126 choices=codelist_choices(GEOGRAPHIC_VOCABULARY)) 127 name = ValidXMLCharField( 128 _('name'), blank=True, max_length=100, 129 help_text=_('The human-readable name for the location.') 130 ) 131 description = ValidXMLCharField( 132 _('location description'), blank=True, max_length=2000, 133 help_text=_('This provides free text space for providing an additional description, if ' 134 'needed, of the actual target of the activity. A description that qualifies ' 135 'the location, not the activity.') 136 ) 137 activity_description = ValidXMLCharField( 138 _('activity description'), blank=True, max_length=2000, 139 help_text=_('A description that qualifies the activity taking place at the location. ' 140 'This should not duplicate information provided in the main activity ' 141 'description, and should typically be used to distinguish between activities ' 142 'at multiple locations within a single iati-activity record.') 143 ) 144 exactness = ValidXMLCharField( 145 _('location precision'), blank=True, max_length=1, 146 choices=codelist_choices(GEOGRAPHIC_EXACTNESS), 147 help_text=_('Defines whether the location represents the most distinct point reasonably ' 148 'possible for this type of activity or is an approximation due to lack of ' 149 'more detailed information.') 150 ) 151 location_reach = ValidXMLCharField( 152 _('reach'), blank=True, max_length=1, choices=codelist_choices(GEOGRAPHIC_LOCATION_REACH), 153 help_text=_('Does this location describe where the activity takes place or where the ' 154 'intended beneficiaries reside?') 155 ) 156 location_class = ValidXMLCharField( 157 _('class'), blank=True, max_length=1, choices=codelist_choices(GEOGRAPHIC_LOCATION_CLASS), 158 help_text=_('Does the location refer to a physical structure such as a building, a ' 159 'populated place (e.g. city or village), an administrative division, or ' 160 'another topological feature (e.g. river, nature reserve)? For reference: ' 161 '<a href="http://iatistandard.org/202/codelists/GeographicLocationClass/" ' 162 'target="_blank">http://iatistandard.org/202/codelists/' 163 'GeographicLocationClass/</a>.') 164 ) 165 feature_designation = ValidXMLCharField( 166 _('feature designation'), blank=True, max_length=5, 167 choices=codelist_choices(LOCATION_TYPE), 168 help_text=_('A more refined coded classification of the type of feature referred to by ' 169 'this location. For reference: <a href="http://iatistandard.org/202/codelists/' 170 'LocationType/" target="_blank">http://iatistandard.org/202/codelists/' 171 'LocationType/</a>.') 172 ) 173 174 def iati_country(self): 175 return codelist_value(Country, self, 'country') 176 177 def iati_country_unicode(self): 178 return str(self.iati_country()) 179 180 def iati_vocabulary(self): 181 return codelist_value(GeographicVocabulary, self, 'vocabulary') 182 183 def iati_vocabulary_unicode(self): 184 return str(self.iati_vocabulary()) 185 186 def iati_exactness(self): 187 return codelist_value(GeographicExactness, self, 'exactness') 188 189 def iati_exactness_unicode(self): 190 return str(self.iati_exactness()) 191 192 def iati_reach(self): 193 return codelist_value(GeographicLocationReach, self, 'location_reach') 194 195 def iati_reach_unicode(self): 196 return str(self.iati_reach()) 197 198 def iati_class(self): 199 return codelist_value(GeographicLocationClass, self, 'location_class') 200 201 def iati_class_unicode(self): 202 return str(self.iati_class()) 203 204 def iati_designation(self): 205 return codelist_value(LocationType, self, 'feature_designation') 206 207 def iati_designation_unicode(self): 208 return str(self.iati_designation()) 209 210 211 # Over-riding fields doesn't work in Django < 1.10, and hence this hack. 212 ProjectLocation._meta.get_field('country').help_text = _( 213 'The country or countries that benefit(s) from the activity.' 214 ) 215 216 217 class AdministrativeLocation(models.Model): 218 219 project_relation = 'locations__administratives__in' 220 221 location = models.ForeignKey( 222 'ProjectLocation', on_delete=models.CASCADE, verbose_name=_('location'), related_name='administratives' 223 ) 224 code = ValidXMLCharField( 225 _('administrative code'), blank=True, max_length=25, 226 help_text=_('Coded identification of national and sub-national divisions according to ' 227 'recognised administrative boundary repositories. Multiple levels may be ' 228 'reported.') 229 ) 230 vocabulary = ValidXMLCharField( 231 _('administrative vocabulary'), blank=True, max_length=2, 232 choices=codelist_choices(GEOGRAPHIC_VOCABULARY), 233 help_text=_('For reference: <a href="http://iatistandard.org/202/codelists/' 234 'GeographicVocabulary/" target="_blank">http://iatistandard.org/202/codelists/' 235 'GeographicVocabulary/</a>.') 236 ) 237 238 level = models.PositiveSmallIntegerField(_('administrative level'), blank=True, null=True) 239 240 def __str__(self): 241 return str(self.code) if self.code else '%s' % _('No code specified') 242 243 def iati_vocabulary(self): 244 return codelist_value(GeographicVocabulary, self, 'vocabulary') 245 246 def iati_vocabulary_unicode(self): 247 return str(self.iati_vocabulary()) 248 249 class Meta: 250 app_label = 'rsr' 251 verbose_name = _('location administrative') 252 verbose_name_plural = _('location administratives') 253 ordering = ('pk',) 254 255 256 class ProjectUpdateLocation(BaseLocation): 257 location_target = models.ForeignKey('ProjectUpdate', on_delete=models.CASCADE, related_name='locations') 258 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/akvo/rsr/models/location.py b/akvo/rsr/models/location.py --- a/akvo/rsr/models/location.py +++ b/akvo/rsr/models/location.py @@ -3,9 +3,11 @@ # Akvo RSR is covered by the GNU Affero General Public License. # See more details in the license.txt file located at the root folder of the Akvo RSR module. # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >. - +import logging from django.db import models +from django.db.models.signals import pre_delete +from django.dispatch import receiver from django.utils.translation import ugettext_lazy as _ from ..fields import LatitudeField, LongitudeField, ValidXMLCharField @@ -213,9 +215,20 @@ 'The country or countries that benefit(s) from the activity.' ) +logger = logging.getLogger(__name__) + + +@receiver(pre_delete, sender=ProjectLocation) +def on_projectlocation_delete(sender, instance: ProjectLocation, using, **kwargs): + logger.warning( + "About to delete ProjectLocation(%s) %s of project(%s) %s", + instance.id, instance, + instance.location_target.id, instance.location_target, + stack_info=True + ) -class AdministrativeLocation(models.Model): +class AdministrativeLocation(models.Model): project_relation = 'locations__administratives__in' location = models.ForeignKey(
{"golden_diff": "diff --git a/akvo/rsr/models/location.py b/akvo/rsr/models/location.py\n--- a/akvo/rsr/models/location.py\n+++ b/akvo/rsr/models/location.py\n@@ -3,9 +3,11 @@\n # Akvo RSR is covered by the GNU Affero General Public License.\n # See more details in the license.txt file located at the root folder of the Akvo RSR module.\n # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n-\n+import logging\n \n from django.db import models\n+from django.db.models.signals import pre_delete\n+from django.dispatch import receiver\n from django.utils.translation import ugettext_lazy as _\n \n from ..fields import LatitudeField, LongitudeField, ValidXMLCharField\n@@ -213,9 +215,20 @@\n 'The country or countries that benefit(s) from the activity.'\n )\n \n+logger = logging.getLogger(__name__)\n+\n+\n+@receiver(pre_delete, sender=ProjectLocation)\n+def on_projectlocation_delete(sender, instance: ProjectLocation, using, **kwargs):\n+ logger.warning(\n+ \"About to delete ProjectLocation(%s) %s of project(%s) %s\",\n+ instance.id, instance,\n+ instance.location_target.id, instance.location_target,\n+ stack_info=True\n+ )\n \n-class AdministrativeLocation(models.Model):\n \n+class AdministrativeLocation(models.Model):\n project_relation = 'locations__administratives__in'\n \n location = models.ForeignKey(\n", "issue": "Bug: Locations are reset on some projects\n### What were you doing?\n\nA client has reported that on some random projects after the location has been set, the locations are reset within a few hours. The occurrence has mostly been observed within 24h of making the modification.\n\n### What should've happened?\n\nLocations (and other fields) should magically be reset.\n\n### My environment\n\n_No response_\n\n### Additional context\n\nThis particular client has a nightly script that pulls information from an external system, of which location is a part. However, the script only modifies locations of newly created projects.\r\n\r\nPossibilities:\r\n\r\n - something is deeply wrong with Django and after a project is created it doesn't say it's created (highly unlikely)\r\n - the script has nothing to do with it and it's\r\n * human error\r\n * a bug in the frontend e.g changes are cached and when a user gets access to the network again the changes are pushed (React?)\r\n * a side effect in the backend or another script\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom ..fields import LatitudeField, LongitudeField, ValidXMLCharField\nfrom akvo.codelists.models import (Country, GeographicExactness, GeographicLocationClass,\n GeographicLocationReach, GeographicVocabulary, LocationType)\nfrom akvo.codelists.store.default_codelists import (\n COUNTRY, GEOGRAPHIC_EXACTNESS, GEOGRAPHIC_LOCATION_CLASS, GEOGRAPHIC_LOCATION_REACH,\n GEOGRAPHIC_VOCABULARY, LOCATION_TYPE\n)\nfrom akvo.utils import codelist_choices, codelist_value\n\n\nclass BaseLocation(models.Model):\n latitude = LatitudeField(\n _('latitude'), null=True, blank=True, db_index=True, default=None,\n help_text=_('Use a period to denote decimals.')\n )\n longitude = LongitudeField(\n _('longitude'), null=True, blank=True, db_index=True, default=None,\n help_text=_('Use a period to denote decimals.')\n )\n city = ValidXMLCharField(_('city'), blank=True, max_length=255)\n state = ValidXMLCharField(_('state'), blank=True, max_length=255)\n address_1 = ValidXMLCharField(_('address 1'), max_length=255, blank=True)\n address_2 = ValidXMLCharField(_('address 2'), max_length=255, blank=True)\n postcode = ValidXMLCharField(_('postal code'), max_length=10, blank=True)\n country = models.ForeignKey('Country', on_delete=models.SET_NULL, null=True, blank=True, verbose_name=_('country'))\n\n def __str__(self):\n return '{0}, {1}, {2}{3}'.format(\n '{0}: {1}'.format(\n _('Latitude'),\n str(self.latitude) if self.latitude else _('No latitude specified')),\n '{0}: {1}'.format(\n _('Longitude'),\n str(self.longitude) if self.longitude else _('No longitude specified')),\n '{0}: {1}'.format(\n _('Country'),\n str(self.country.name) if self.country else _('No country specified')),\n ' ({0})'.format(self.name) if getattr(self, 'name', None) else ''\n )\n\n def delete(self, *args, **kwargs):\n super(BaseLocation, self).delete(*args, **kwargs)\n\n # If location_target has more locations, set the first as primary location\n location_target = self.location_target\n other_locations = location_target.locations.all()\n\n if other_locations.count() > 0:\n location_target.primary_location = other_locations.first()\n else:\n location_target.primary_location = None\n\n location_target.save()\n\n def save(self, *args, **kwargs):\n super(BaseLocation, self).save(*args, **kwargs)\n\n # Set location as primary location if it is the first location\n location_target = self.location_target\n if location_target.primary_location is None or location_target.primary_location.pk > self.pk:\n location_target.primary_location = self\n location_target.save()\n\n def is_valid(self):\n if (self.latitude is None or self.longitude is None) or \\\n (self.latitude == 0 and self.longitude == 0) or \\\n (self.latitude > 90 or self.latitude < -90) or \\\n (self.longitude > 180 or self.latitude < -180):\n return False\n return True\n\n class Meta:\n app_label = 'rsr'\n abstract = True\n ordering = ['id', ]\n\n\nclass OrganisationLocation(BaseLocation):\n location_target = models.ForeignKey('Organisation', on_delete=models.CASCADE, related_name='locations')\n iati_country = ValidXMLCharField(\n _('country'), blank=True, max_length=2, choices=codelist_choices(COUNTRY, show_code=False),\n help_text=_('The country in which the organisation is located.')\n )\n\n def iati_country_value(self):\n return codelist_value(Country, self, 'iati_country')\n\n def iati_country_value_unicode(self):\n return str(self.iati_country_value())\n\n\nclass ProjectLocation(BaseLocation):\n\n project_relation = 'locations__in'\n\n location_target = models.ForeignKey('Project', on_delete=models.CASCADE, related_name='locations')\n\n # Additional IATI fields\n reference = ValidXMLCharField(\n _('reference'), blank=True, max_length=50,\n help_text=_('An internal reference that describes the location in the reporting '\n 'organisation\\'s own system. For reference see: '\n '<a href=\"http://iatistandard.org/202/activity-standard/iati-activities/'\n 'iati-activity/location/#attributes\" target=\"_blank\">'\n 'http://iatistandard.org/202/activity-standard/iati-activities/iati-activity/'\n 'location/#attributes</a>.')\n )\n location_code = ValidXMLCharField(\n _('code'), blank=True, max_length=25,\n help_text=_('Enter a code to identify the region. Codes are based on DAC region codes. '\n 'Where an activity is considered global, the code 998 can be used. For '\n 'reference: <a href=\"http://www.oecd.org/dac/stats/dacandcrscodelists.htm\" '\n 'target=\"_blank\">http://www.oecd.org/dac/stats/dacandcrscodelists.htm</a>.')\n )\n vocabulary = ValidXMLCharField(_('vocabulary'), blank=True, max_length=2,\n choices=codelist_choices(GEOGRAPHIC_VOCABULARY))\n name = ValidXMLCharField(\n _('name'), blank=True, max_length=100,\n help_text=_('The human-readable name for the location.')\n )\n description = ValidXMLCharField(\n _('location description'), blank=True, max_length=2000,\n help_text=_('This provides free text space for providing an additional description, if '\n 'needed, of the actual target of the activity. A description that qualifies '\n 'the location, not the activity.')\n )\n activity_description = ValidXMLCharField(\n _('activity description'), blank=True, max_length=2000,\n help_text=_('A description that qualifies the activity taking place at the location. '\n 'This should not duplicate information provided in the main activity '\n 'description, and should typically be used to distinguish between activities '\n 'at multiple locations within a single iati-activity record.')\n )\n exactness = ValidXMLCharField(\n _('location precision'), blank=True, max_length=1,\n choices=codelist_choices(GEOGRAPHIC_EXACTNESS),\n help_text=_('Defines whether the location represents the most distinct point reasonably '\n 'possible for this type of activity or is an approximation due to lack of '\n 'more detailed information.')\n )\n location_reach = ValidXMLCharField(\n _('reach'), blank=True, max_length=1, choices=codelist_choices(GEOGRAPHIC_LOCATION_REACH),\n help_text=_('Does this location describe where the activity takes place or where the '\n 'intended beneficiaries reside?')\n )\n location_class = ValidXMLCharField(\n _('class'), blank=True, max_length=1, choices=codelist_choices(GEOGRAPHIC_LOCATION_CLASS),\n help_text=_('Does the location refer to a physical structure such as a building, a '\n 'populated place (e.g. city or village), an administrative division, or '\n 'another topological feature (e.g. river, nature reserve)? For reference: '\n '<a href=\"http://iatistandard.org/202/codelists/GeographicLocationClass/\" '\n 'target=\"_blank\">http://iatistandard.org/202/codelists/'\n 'GeographicLocationClass/</a>.')\n )\n feature_designation = ValidXMLCharField(\n _('feature designation'), blank=True, max_length=5,\n choices=codelist_choices(LOCATION_TYPE),\n help_text=_('A more refined coded classification of the type of feature referred to by '\n 'this location. For reference: <a href=\"http://iatistandard.org/202/codelists/'\n 'LocationType/\" target=\"_blank\">http://iatistandard.org/202/codelists/'\n 'LocationType/</a>.')\n )\n\n def iati_country(self):\n return codelist_value(Country, self, 'country')\n\n def iati_country_unicode(self):\n return str(self.iati_country())\n\n def iati_vocabulary(self):\n return codelist_value(GeographicVocabulary, self, 'vocabulary')\n\n def iati_vocabulary_unicode(self):\n return str(self.iati_vocabulary())\n\n def iati_exactness(self):\n return codelist_value(GeographicExactness, self, 'exactness')\n\n def iati_exactness_unicode(self):\n return str(self.iati_exactness())\n\n def iati_reach(self):\n return codelist_value(GeographicLocationReach, self, 'location_reach')\n\n def iati_reach_unicode(self):\n return str(self.iati_reach())\n\n def iati_class(self):\n return codelist_value(GeographicLocationClass, self, 'location_class')\n\n def iati_class_unicode(self):\n return str(self.iati_class())\n\n def iati_designation(self):\n return codelist_value(LocationType, self, 'feature_designation')\n\n def iati_designation_unicode(self):\n return str(self.iati_designation())\n\n\n# Over-riding fields doesn't work in Django < 1.10, and hence this hack.\nProjectLocation._meta.get_field('country').help_text = _(\n 'The country or countries that benefit(s) from the activity.'\n)\n\n\nclass AdministrativeLocation(models.Model):\n\n project_relation = 'locations__administratives__in'\n\n location = models.ForeignKey(\n 'ProjectLocation', on_delete=models.CASCADE, verbose_name=_('location'), related_name='administratives'\n )\n code = ValidXMLCharField(\n _('administrative code'), blank=True, max_length=25,\n help_text=_('Coded identification of national and sub-national divisions according to '\n 'recognised administrative boundary repositories. Multiple levels may be '\n 'reported.')\n )\n vocabulary = ValidXMLCharField(\n _('administrative vocabulary'), blank=True, max_length=2,\n choices=codelist_choices(GEOGRAPHIC_VOCABULARY),\n help_text=_('For reference: <a href=\"http://iatistandard.org/202/codelists/'\n 'GeographicVocabulary/\" target=\"_blank\">http://iatistandard.org/202/codelists/'\n 'GeographicVocabulary/</a>.')\n )\n\n level = models.PositiveSmallIntegerField(_('administrative level'), blank=True, null=True)\n\n def __str__(self):\n return str(self.code) if self.code else '%s' % _('No code specified')\n\n def iati_vocabulary(self):\n return codelist_value(GeographicVocabulary, self, 'vocabulary')\n\n def iati_vocabulary_unicode(self):\n return str(self.iati_vocabulary())\n\n class Meta:\n app_label = 'rsr'\n verbose_name = _('location administrative')\n verbose_name_plural = _('location administratives')\n ordering = ('pk',)\n\n\nclass ProjectUpdateLocation(BaseLocation):\n location_target = models.ForeignKey('ProjectUpdate', on_delete=models.CASCADE, related_name='locations')\n", "path": "akvo/rsr/models/location.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\nimport logging\n\nfrom django.db import models\nfrom django.db.models.signals import pre_delete\nfrom django.dispatch import receiver\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom ..fields import LatitudeField, LongitudeField, ValidXMLCharField\nfrom akvo.codelists.models import (Country, GeographicExactness, GeographicLocationClass,\n GeographicLocationReach, GeographicVocabulary, LocationType)\nfrom akvo.codelists.store.default_codelists import (\n COUNTRY, GEOGRAPHIC_EXACTNESS, GEOGRAPHIC_LOCATION_CLASS, GEOGRAPHIC_LOCATION_REACH,\n GEOGRAPHIC_VOCABULARY, LOCATION_TYPE\n)\nfrom akvo.utils import codelist_choices, codelist_value\n\n\nclass BaseLocation(models.Model):\n latitude = LatitudeField(\n _('latitude'), null=True, blank=True, db_index=True, default=None,\n help_text=_('Use a period to denote decimals.')\n )\n longitude = LongitudeField(\n _('longitude'), null=True, blank=True, db_index=True, default=None,\n help_text=_('Use a period to denote decimals.')\n )\n city = ValidXMLCharField(_('city'), blank=True, max_length=255)\n state = ValidXMLCharField(_('state'), blank=True, max_length=255)\n address_1 = ValidXMLCharField(_('address 1'), max_length=255, blank=True)\n address_2 = ValidXMLCharField(_('address 2'), max_length=255, blank=True)\n postcode = ValidXMLCharField(_('postal code'), max_length=10, blank=True)\n country = models.ForeignKey('Country', on_delete=models.SET_NULL, null=True, blank=True, verbose_name=_('country'))\n\n def __str__(self):\n return '{0}, {1}, {2}{3}'.format(\n '{0}: {1}'.format(\n _('Latitude'),\n str(self.latitude) if self.latitude else _('No latitude specified')),\n '{0}: {1}'.format(\n _('Longitude'),\n str(self.longitude) if self.longitude else _('No longitude specified')),\n '{0}: {1}'.format(\n _('Country'),\n str(self.country.name) if self.country else _('No country specified')),\n ' ({0})'.format(self.name) if getattr(self, 'name', None) else ''\n )\n\n def delete(self, *args, **kwargs):\n super(BaseLocation, self).delete(*args, **kwargs)\n\n # If location_target has more locations, set the first as primary location\n location_target = self.location_target\n other_locations = location_target.locations.all()\n\n if other_locations.count() > 0:\n location_target.primary_location = other_locations.first()\n else:\n location_target.primary_location = None\n\n location_target.save()\n\n def save(self, *args, **kwargs):\n super(BaseLocation, self).save(*args, **kwargs)\n\n # Set location as primary location if it is the first location\n location_target = self.location_target\n if location_target.primary_location is None or location_target.primary_location.pk > self.pk:\n location_target.primary_location = self\n location_target.save()\n\n def is_valid(self):\n if (self.latitude is None or self.longitude is None) or \\\n (self.latitude == 0 and self.longitude == 0) or \\\n (self.latitude > 90 or self.latitude < -90) or \\\n (self.longitude > 180 or self.latitude < -180):\n return False\n return True\n\n class Meta:\n app_label = 'rsr'\n abstract = True\n ordering = ['id', ]\n\n\nclass OrganisationLocation(BaseLocation):\n location_target = models.ForeignKey('Organisation', on_delete=models.CASCADE, related_name='locations')\n iati_country = ValidXMLCharField(\n _('country'), blank=True, max_length=2, choices=codelist_choices(COUNTRY, show_code=False),\n help_text=_('The country in which the organisation is located.')\n )\n\n def iati_country_value(self):\n return codelist_value(Country, self, 'iati_country')\n\n def iati_country_value_unicode(self):\n return str(self.iati_country_value())\n\n\nclass ProjectLocation(BaseLocation):\n\n project_relation = 'locations__in'\n\n location_target = models.ForeignKey('Project', on_delete=models.CASCADE, related_name='locations')\n\n # Additional IATI fields\n reference = ValidXMLCharField(\n _('reference'), blank=True, max_length=50,\n help_text=_('An internal reference that describes the location in the reporting '\n 'organisation\\'s own system. For reference see: '\n '<a href=\"http://iatistandard.org/202/activity-standard/iati-activities/'\n 'iati-activity/location/#attributes\" target=\"_blank\">'\n 'http://iatistandard.org/202/activity-standard/iati-activities/iati-activity/'\n 'location/#attributes</a>.')\n )\n location_code = ValidXMLCharField(\n _('code'), blank=True, max_length=25,\n help_text=_('Enter a code to identify the region. Codes are based on DAC region codes. '\n 'Where an activity is considered global, the code 998 can be used. For '\n 'reference: <a href=\"http://www.oecd.org/dac/stats/dacandcrscodelists.htm\" '\n 'target=\"_blank\">http://www.oecd.org/dac/stats/dacandcrscodelists.htm</a>.')\n )\n vocabulary = ValidXMLCharField(_('vocabulary'), blank=True, max_length=2,\n choices=codelist_choices(GEOGRAPHIC_VOCABULARY))\n name = ValidXMLCharField(\n _('name'), blank=True, max_length=100,\n help_text=_('The human-readable name for the location.')\n )\n description = ValidXMLCharField(\n _('location description'), blank=True, max_length=2000,\n help_text=_('This provides free text space for providing an additional description, if '\n 'needed, of the actual target of the activity. A description that qualifies '\n 'the location, not the activity.')\n )\n activity_description = ValidXMLCharField(\n _('activity description'), blank=True, max_length=2000,\n help_text=_('A description that qualifies the activity taking place at the location. '\n 'This should not duplicate information provided in the main activity '\n 'description, and should typically be used to distinguish between activities '\n 'at multiple locations within a single iati-activity record.')\n )\n exactness = ValidXMLCharField(\n _('location precision'), blank=True, max_length=1,\n choices=codelist_choices(GEOGRAPHIC_EXACTNESS),\n help_text=_('Defines whether the location represents the most distinct point reasonably '\n 'possible for this type of activity or is an approximation due to lack of '\n 'more detailed information.')\n )\n location_reach = ValidXMLCharField(\n _('reach'), blank=True, max_length=1, choices=codelist_choices(GEOGRAPHIC_LOCATION_REACH),\n help_text=_('Does this location describe where the activity takes place or where the '\n 'intended beneficiaries reside?')\n )\n location_class = ValidXMLCharField(\n _('class'), blank=True, max_length=1, choices=codelist_choices(GEOGRAPHIC_LOCATION_CLASS),\n help_text=_('Does the location refer to a physical structure such as a building, a '\n 'populated place (e.g. city or village), an administrative division, or '\n 'another topological feature (e.g. river, nature reserve)? For reference: '\n '<a href=\"http://iatistandard.org/202/codelists/GeographicLocationClass/\" '\n 'target=\"_blank\">http://iatistandard.org/202/codelists/'\n 'GeographicLocationClass/</a>.')\n )\n feature_designation = ValidXMLCharField(\n _('feature designation'), blank=True, max_length=5,\n choices=codelist_choices(LOCATION_TYPE),\n help_text=_('A more refined coded classification of the type of feature referred to by '\n 'this location. For reference: <a href=\"http://iatistandard.org/202/codelists/'\n 'LocationType/\" target=\"_blank\">http://iatistandard.org/202/codelists/'\n 'LocationType/</a>.')\n )\n\n def iati_country(self):\n return codelist_value(Country, self, 'country')\n\n def iati_country_unicode(self):\n return str(self.iati_country())\n\n def iati_vocabulary(self):\n return codelist_value(GeographicVocabulary, self, 'vocabulary')\n\n def iati_vocabulary_unicode(self):\n return str(self.iati_vocabulary())\n\n def iati_exactness(self):\n return codelist_value(GeographicExactness, self, 'exactness')\n\n def iati_exactness_unicode(self):\n return str(self.iati_exactness())\n\n def iati_reach(self):\n return codelist_value(GeographicLocationReach, self, 'location_reach')\n\n def iati_reach_unicode(self):\n return str(self.iati_reach())\n\n def iati_class(self):\n return codelist_value(GeographicLocationClass, self, 'location_class')\n\n def iati_class_unicode(self):\n return str(self.iati_class())\n\n def iati_designation(self):\n return codelist_value(LocationType, self, 'feature_designation')\n\n def iati_designation_unicode(self):\n return str(self.iati_designation())\n\n\n# Over-riding fields doesn't work in Django < 1.10, and hence this hack.\nProjectLocation._meta.get_field('country').help_text = _(\n 'The country or countries that benefit(s) from the activity.'\n)\n\nlogger = logging.getLogger(__name__)\n\n\n@receiver(pre_delete, sender=ProjectLocation)\ndef on_projectlocation_delete(sender, instance: ProjectLocation, using, **kwargs):\n logger.warning(\n \"About to delete ProjectLocation(%s) %s of project(%s) %s\",\n instance.id, instance,\n instance.location_target.id, instance.location_target,\n stack_info=True\n )\n\n\nclass AdministrativeLocation(models.Model):\n project_relation = 'locations__administratives__in'\n\n location = models.ForeignKey(\n 'ProjectLocation', on_delete=models.CASCADE, verbose_name=_('location'), related_name='administratives'\n )\n code = ValidXMLCharField(\n _('administrative code'), blank=True, max_length=25,\n help_text=_('Coded identification of national and sub-national divisions according to '\n 'recognised administrative boundary repositories. Multiple levels may be '\n 'reported.')\n )\n vocabulary = ValidXMLCharField(\n _('administrative vocabulary'), blank=True, max_length=2,\n choices=codelist_choices(GEOGRAPHIC_VOCABULARY),\n help_text=_('For reference: <a href=\"http://iatistandard.org/202/codelists/'\n 'GeographicVocabulary/\" target=\"_blank\">http://iatistandard.org/202/codelists/'\n 'GeographicVocabulary/</a>.')\n )\n\n level = models.PositiveSmallIntegerField(_('administrative level'), blank=True, null=True)\n\n def __str__(self):\n return str(self.code) if self.code else '%s' % _('No code specified')\n\n def iati_vocabulary(self):\n return codelist_value(GeographicVocabulary, self, 'vocabulary')\n\n def iati_vocabulary_unicode(self):\n return str(self.iati_vocabulary())\n\n class Meta:\n app_label = 'rsr'\n verbose_name = _('location administrative')\n verbose_name_plural = _('location administratives')\n ordering = ('pk',)\n\n\nclass ProjectUpdateLocation(BaseLocation):\n location_target = models.ForeignKey('ProjectUpdate', on_delete=models.CASCADE, related_name='locations')\n", "path": "akvo/rsr/models/location.py"}]}
3,656
325
gh_patches_debug_64419
rasdani/github-patches
git_diff
pwndbg__pwndbg-584
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- r2: 'NoneType' object has no attribute 'cast' (<class 'AttributeError'>) ### Description This happens when i initiate r2 after loading a binary in pwndbg I have tested both in wsl and a 64bit ubuntu machine same behavior sorta ### Steps to reproduce 1. Load a binary 2. Run r2 Exception occured: r2: 'NoneType' object has no attribute 'cast' (<class 'AttributeError'>) Traceback (most recent call last): File "/root/reverse/pwndbg/pwndbg/commands/__init__.py", line 135, in __call__ return self.function(*args, **kwargs) File "/root/reverse/pwndbg/pwndbg/commands/__init__.py", line 215, in _OnlyWithFile return function(*a, **kw) File "/root/reverse/pwndbg/pwndbg/commands/radare2.py", line 28, in r2 addr = pwndbg.regs.pc File "/root/reverse/pwndbg/pwndbg/memoize.py", line 48, in __call__ value = self.func(*args, **kwargs) File "/root/reverse/pwndbg/pwndbg/regs.py", line 280, in __getattr__ value = value.cast(pwndbg.typeinfo.ptrdiff) AttributeError: 'NoneType' object has no attribute 'cast' ### My setup Gdb: 7.11.1 Python: 3.5.2 (default, Nov 12 2018, 13:43:14) [GCC 5.4.0 20160609] Pwndbg: 1.1.0 build: 054f209 Capstone: 4.0.1024 Unicorn: 1.0.1 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pwndbg/commands/radare2.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 from __future__ import absolute_import 4 from __future__ import division 5 from __future__ import print_function 6 from __future__ import unicode_literals 7 8 import argparse 9 import subprocess 10 11 import pwndbg.commands 12 13 parser = argparse.ArgumentParser(description='Launches radare2', 14 epilog="Example: r2 -- -S -AA") 15 parser.add_argument('--no-seek', action='store_true', 16 help='Do not seek to current pc') 17 parser.add_argument('arguments', nargs='*', type=str, 18 help='Arguments to pass to radare') 19 20 21 @pwndbg.commands.ArgparsedCommand(parser) 22 @pwndbg.commands.OnlyWithFile 23 def r2(arguments, no_seek=False): 24 filename = pwndbg.file.get_file(pwndbg.proc.exe) 25 26 # Build up the command line to run 27 cmd = ['radare2', filename] 28 addr = pwndbg.regs.pc 29 if pwndbg.elf.get_elf_info(filename).is_pie: 30 addr -= pwndbg.elf.exe().address 31 if not no_seek and pwndbg.proc.alive: 32 cmd.extend(['-s', hex(addr)]) 33 cmd += arguments 34 35 try: 36 subprocess.call(cmd) 37 except Exception: 38 print("Could not run radare2. Please ensure it's installed and in $PATH.") 39 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pwndbg/commands/radare2.py b/pwndbg/commands/radare2.py --- a/pwndbg/commands/radare2.py +++ b/pwndbg/commands/radare2.py @@ -25,11 +25,12 @@ # Build up the command line to run cmd = ['radare2', filename] - addr = pwndbg.regs.pc - if pwndbg.elf.get_elf_info(filename).is_pie: - addr -= pwndbg.elf.exe().address - if not no_seek and pwndbg.proc.alive: - cmd.extend(['-s', hex(addr)]) + if pwndbg.proc.alive: + addr = pwndbg.regs.pc + if pwndbg.elf.get_elf_info(filename).is_pie: + addr -= pwndbg.elf.exe().address + if not no_seek: + cmd.extend(['-s', hex(addr)]) cmd += arguments try:
{"golden_diff": "diff --git a/pwndbg/commands/radare2.py b/pwndbg/commands/radare2.py\n--- a/pwndbg/commands/radare2.py\n+++ b/pwndbg/commands/radare2.py\n@@ -25,11 +25,12 @@\n \n # Build up the command line to run\n cmd = ['radare2', filename]\n- addr = pwndbg.regs.pc\n- if pwndbg.elf.get_elf_info(filename).is_pie:\n- addr -= pwndbg.elf.exe().address\n- if not no_seek and pwndbg.proc.alive:\n- cmd.extend(['-s', hex(addr)])\n+ if pwndbg.proc.alive:\n+ addr = pwndbg.regs.pc\n+ if pwndbg.elf.get_elf_info(filename).is_pie:\n+ addr -= pwndbg.elf.exe().address\n+ if not no_seek:\n+ cmd.extend(['-s', hex(addr)])\n cmd += arguments\n \n try:\n", "issue": "r2: 'NoneType' object has no attribute 'cast' (<class 'AttributeError'>)\n### Description\r\n\r\n\r\nThis happens when i initiate r2 after loading a binary in pwndbg \r\nI have tested both in wsl and a 64bit ubuntu machine same behavior sorta \r\n\r\n\r\n### Steps to reproduce\r\n\r\n\r\n1. Load a binary \r\n2. Run r2 \r\nException occured: r2: 'NoneType' object has no attribute 'cast' (<class 'AttributeError'>)\r\nTraceback (most recent call last):\r\n File \"/root/reverse/pwndbg/pwndbg/commands/__init__.py\", line 135, in __call__\r\n return self.function(*args, **kwargs)\r\n File \"/root/reverse/pwndbg/pwndbg/commands/__init__.py\", line 215, in _OnlyWithFile\r\n return function(*a, **kw)\r\n File \"/root/reverse/pwndbg/pwndbg/commands/radare2.py\", line 28, in r2\r\n addr = pwndbg.regs.pc\r\n File \"/root/reverse/pwndbg/pwndbg/memoize.py\", line 48, in __call__\r\n value = self.func(*args, **kwargs)\r\n File \"/root/reverse/pwndbg/pwndbg/regs.py\", line 280, in __getattr__\r\n value = value.cast(pwndbg.typeinfo.ptrdiff)\r\nAttributeError: 'NoneType' object has no attribute 'cast'\r\n\r\n\r\n\r\n\r\n\r\n### My setup\r\n\r\n\r\nGdb: 7.11.1\r\nPython: 3.5.2 (default, Nov 12 2018, 13:43:14) [GCC 5.4.0 20160609]\r\nPwndbg: 1.1.0 build: 054f209\r\nCapstone: 4.0.1024\r\nUnicorn: 1.0.1\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport argparse\nimport subprocess\n\nimport pwndbg.commands\n\nparser = argparse.ArgumentParser(description='Launches radare2',\n epilog=\"Example: r2 -- -S -AA\")\nparser.add_argument('--no-seek', action='store_true',\n help='Do not seek to current pc')\nparser.add_argument('arguments', nargs='*', type=str,\n help='Arguments to pass to radare')\n\n\[email protected](parser)\[email protected]\ndef r2(arguments, no_seek=False):\n filename = pwndbg.file.get_file(pwndbg.proc.exe)\n\n # Build up the command line to run\n cmd = ['radare2', filename]\n addr = pwndbg.regs.pc\n if pwndbg.elf.get_elf_info(filename).is_pie:\n addr -= pwndbg.elf.exe().address\n if not no_seek and pwndbg.proc.alive:\n cmd.extend(['-s', hex(addr)])\n cmd += arguments\n\n try:\n subprocess.call(cmd)\n except Exception:\n print(\"Could not run radare2. Please ensure it's installed and in $PATH.\")\n", "path": "pwndbg/commands/radare2.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport argparse\nimport subprocess\n\nimport pwndbg.commands\n\nparser = argparse.ArgumentParser(description='Launches radare2',\n epilog=\"Example: r2 -- -S -AA\")\nparser.add_argument('--no-seek', action='store_true',\n help='Do not seek to current pc')\nparser.add_argument('arguments', nargs='*', type=str,\n help='Arguments to pass to radare')\n\n\[email protected](parser)\[email protected]\ndef r2(arguments, no_seek=False):\n filename = pwndbg.file.get_file(pwndbg.proc.exe)\n\n # Build up the command line to run\n cmd = ['radare2', filename]\n if pwndbg.proc.alive:\n addr = pwndbg.regs.pc\n if pwndbg.elf.get_elf_info(filename).is_pie:\n addr -= pwndbg.elf.exe().address\n if not no_seek:\n cmd.extend(['-s', hex(addr)])\n cmd += arguments\n\n try:\n subprocess.call(cmd)\n except Exception:\n print(\"Could not run radare2. Please ensure it's installed and in $PATH.\")\n", "path": "pwndbg/commands/radare2.py"}]}
1,067
232
gh_patches_debug_19134
rasdani/github-patches
git_diff
pre-commit__pre-commit-1521
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Executable `prettier` not found hello hello! As discussed on discord, I'm having an issue running prettier via pre-commit: ```bash $ pre-commit --version pre-commit 2.5.1 $ cat .pre-commit-config.yaml repos: - repo: https://github.com/prettier/prettier rev: 2.0.5 hooks: - id: prettier $ pre-commit clean Cleaned /home/rkm/.cache/pre-commit. > pre-commit run prettier --files README.md [INFO] Initializing environment for https://github.com/prettier/prettier. [INFO] Installing environment for https://github.com/prettier/prettier. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... prettier.................................................................Failed - hook id: prettier - exit code: 1 Executable `prettier` not found ``` it seems like prettier is installed correctly, but the symlink to it is not created: ```bash $ find ~/.cache/pre-commit/ -name prettier.js /home/rkm/.cache/pre-commit/repoes79dg4v/bin/prettier.js $ ls -l $(find ~/.cache/pre-commit/ -name node_env-default)/bin total 70376 -rwxr-xr-x. 1 rkm rkm 3702 Jun 17 17:30 activate -rwxr-xr-x. 1 rkm rkm 3964 Jun 17 17:30 activate.fish -rwxr-xr-x. 1 rkm rkm 72052312 Jun 2 14:33 node lrwxrwxrwx. 1 rkm rkm 4 Jun 17 17:30 nodejs -> node lrwxrwxrwx. 1 rkm rkm 38 Jun 17 17:30 npm -> ../lib/node_modules/npm/bin/npm-cli.js lrwxrwxrwx. 1 rkm rkm 38 Jun 17 17:30 npx -> ../lib/node_modules/npm/bin/npx-cli.js -rwxr-xr-x. 1 rkm rkm 355 Jun 17 17:30 shim ``` (doing the same in a docker container results in a `prettier` symlink being created there). I suspect my VM may be borked somehow, but not sure how to debug this further. Any thoughts? Thanks! --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pre_commit/languages/node.py` Content: ``` 1 import contextlib 2 import functools 3 import os 4 import sys 5 from typing import Generator 6 from typing import Sequence 7 from typing import Tuple 8 9 import pre_commit.constants as C 10 from pre_commit import parse_shebang 11 from pre_commit.envcontext import envcontext 12 from pre_commit.envcontext import PatchesT 13 from pre_commit.envcontext import Var 14 from pre_commit.hook import Hook 15 from pre_commit.languages import helpers 16 from pre_commit.languages.python import bin_dir 17 from pre_commit.prefix import Prefix 18 from pre_commit.util import clean_path_on_failure 19 from pre_commit.util import cmd_output 20 from pre_commit.util import cmd_output_b 21 22 ENVIRONMENT_DIR = 'node_env' 23 healthy = helpers.basic_healthy 24 25 26 @functools.lru_cache(maxsize=1) 27 def get_default_version() -> str: 28 # nodeenv does not yet support `-n system` on windows 29 if sys.platform == 'win32': 30 return C.DEFAULT 31 # if node is already installed, we can save a bunch of setup time by 32 # using the installed version 33 elif all(parse_shebang.find_executable(exe) for exe in ('node', 'npm')): 34 return 'system' 35 else: 36 return C.DEFAULT 37 38 39 def _envdir(prefix: Prefix, version: str) -> str: 40 directory = helpers.environment_dir(ENVIRONMENT_DIR, version) 41 return prefix.path(directory) 42 43 44 def get_env_patch(venv: str) -> PatchesT: 45 if sys.platform == 'cygwin': # pragma: no cover 46 _, win_venv, _ = cmd_output('cygpath', '-w', venv) 47 install_prefix = fr'{win_venv.strip()}\bin' 48 lib_dir = 'lib' 49 elif sys.platform == 'win32': # pragma: no cover 50 install_prefix = bin_dir(venv) 51 lib_dir = 'Scripts' 52 else: # pragma: win32 no cover 53 install_prefix = venv 54 lib_dir = 'lib' 55 return ( 56 ('NODE_VIRTUAL_ENV', venv), 57 ('NPM_CONFIG_PREFIX', install_prefix), 58 ('npm_config_prefix', install_prefix), 59 ('NODE_PATH', os.path.join(venv, lib_dir, 'node_modules')), 60 ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))), 61 ) 62 63 64 @contextlib.contextmanager 65 def in_env( 66 prefix: Prefix, 67 language_version: str, 68 ) -> Generator[None, None, None]: 69 with envcontext(get_env_patch(_envdir(prefix, language_version))): 70 yield 71 72 73 def install_environment( 74 prefix: Prefix, version: str, additional_dependencies: Sequence[str], 75 ) -> None: 76 additional_dependencies = tuple(additional_dependencies) 77 assert prefix.exists('package.json') 78 envdir = _envdir(prefix, version) 79 80 # https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx?f=255&MSPPError=-2147217396#maxpath 81 if sys.platform == 'win32': # pragma: no cover 82 envdir = fr'\\?\{os.path.normpath(envdir)}' 83 with clean_path_on_failure(envdir): 84 cmd = [ 85 sys.executable, '-mnodeenv', '--prebuilt', '--clean-src', envdir, 86 ] 87 if version != C.DEFAULT: 88 cmd.extend(['-n', version]) 89 cmd_output_b(*cmd) 90 91 with in_env(prefix, version): 92 # https://npm.community/t/npm-install-g-git-vs-git-clone-cd-npm-install-g/5449 93 # install as if we installed from git 94 helpers.run_setup_cmd(prefix, ('npm', 'install')) 95 helpers.run_setup_cmd( 96 prefix, 97 ('npm', 'install', '-g', '.', *additional_dependencies), 98 ) 99 100 101 def run_hook( 102 hook: Hook, 103 file_args: Sequence[str], 104 color: bool, 105 ) -> Tuple[int, bytes]: 106 with in_env(hook.prefix, hook.language_version): 107 return helpers.run_xargs(hook, hook.cmd, file_args, color=color) 108 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pre_commit/languages/node.py b/pre_commit/languages/node.py --- a/pre_commit/languages/node.py +++ b/pre_commit/languages/node.py @@ -10,6 +10,7 @@ from pre_commit import parse_shebang from pre_commit.envcontext import envcontext from pre_commit.envcontext import PatchesT +from pre_commit.envcontext import UNSET from pre_commit.envcontext import Var from pre_commit.hook import Hook from pre_commit.languages import helpers @@ -56,6 +57,8 @@ ('NODE_VIRTUAL_ENV', venv), ('NPM_CONFIG_PREFIX', install_prefix), ('npm_config_prefix', install_prefix), + ('NPM_CONFIG_USERCONFIG', UNSET), + ('npm_config_userconfig', UNSET), ('NODE_PATH', os.path.join(venv, lib_dir, 'node_modules')), ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))), )
{"golden_diff": "diff --git a/pre_commit/languages/node.py b/pre_commit/languages/node.py\n--- a/pre_commit/languages/node.py\n+++ b/pre_commit/languages/node.py\n@@ -10,6 +10,7 @@\n from pre_commit import parse_shebang\n from pre_commit.envcontext import envcontext\n from pre_commit.envcontext import PatchesT\n+from pre_commit.envcontext import UNSET\n from pre_commit.envcontext import Var\n from pre_commit.hook import Hook\n from pre_commit.languages import helpers\n@@ -56,6 +57,8 @@\n ('NODE_VIRTUAL_ENV', venv),\n ('NPM_CONFIG_PREFIX', install_prefix),\n ('npm_config_prefix', install_prefix),\n+ ('NPM_CONFIG_USERCONFIG', UNSET),\n+ ('npm_config_userconfig', UNSET),\n ('NODE_PATH', os.path.join(venv, lib_dir, 'node_modules')),\n ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),\n )\n", "issue": "Executable `prettier` not found\nhello hello!\r\n\r\nAs discussed on discord, I'm having an issue running prettier via pre-commit:\r\n\r\n```bash\r\n$ pre-commit --version\r\npre-commit 2.5.1\r\n\r\n$ cat .pre-commit-config.yaml\r\nrepos:\r\n - repo: https://github.com/prettier/prettier\r\n rev: 2.0.5\r\n hooks:\r\n - id: prettier\r\n\r\n$ pre-commit clean\r\nCleaned /home/rkm/.cache/pre-commit.\r\n\r\n> pre-commit run prettier --files README.md\r\n[INFO] Initializing environment for https://github.com/prettier/prettier.\r\n[INFO] Installing environment for https://github.com/prettier/prettier.\r\n[INFO] Once installed this environment will be reused.\r\n[INFO] This may take a few minutes...\r\nprettier.................................................................Failed\r\n- hook id: prettier\r\n- exit code: 1\r\n\r\nExecutable `prettier` not found\r\n```\r\n\r\nit seems like prettier is installed correctly, but the symlink to it is not created:\r\n\r\n```bash\r\n$ find ~/.cache/pre-commit/ -name prettier.js\r\n/home/rkm/.cache/pre-commit/repoes79dg4v/bin/prettier.js\r\n\r\n$ ls -l $(find ~/.cache/pre-commit/ -name node_env-default)/bin\r\ntotal 70376\r\n-rwxr-xr-x. 1 rkm rkm 3702 Jun 17 17:30 activate\r\n-rwxr-xr-x. 1 rkm rkm 3964 Jun 17 17:30 activate.fish\r\n-rwxr-xr-x. 1 rkm rkm 72052312 Jun 2 14:33 node\r\nlrwxrwxrwx. 1 rkm rkm 4 Jun 17 17:30 nodejs -> node\r\nlrwxrwxrwx. 1 rkm rkm 38 Jun 17 17:30 npm -> ../lib/node_modules/npm/bin/npm-cli.js\r\nlrwxrwxrwx. 1 rkm rkm 38 Jun 17 17:30 npx -> ../lib/node_modules/npm/bin/npx-cli.js\r\n-rwxr-xr-x. 1 rkm rkm 355 Jun 17 17:30 shim \r\n```\r\n\r\n(doing the same in a docker container results in a `prettier` symlink being created there).\r\n\r\nI suspect my VM may be borked somehow, but not sure how to debug this further. Any thoughts? Thanks!\r\n\n", "before_files": [{"content": "import contextlib\nimport functools\nimport os\nimport sys\nfrom typing import Generator\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit import parse_shebang\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import Var\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages import helpers\nfrom pre_commit.languages.python import bin_dir\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cmd_output_b\n\nENVIRONMENT_DIR = 'node_env'\nhealthy = helpers.basic_healthy\n\n\[email protected]_cache(maxsize=1)\ndef get_default_version() -> str:\n # nodeenv does not yet support `-n system` on windows\n if sys.platform == 'win32':\n return C.DEFAULT\n # if node is already installed, we can save a bunch of setup time by\n # using the installed version\n elif all(parse_shebang.find_executable(exe) for exe in ('node', 'npm')):\n return 'system'\n else:\n return C.DEFAULT\n\n\ndef _envdir(prefix: Prefix, version: str) -> str:\n directory = helpers.environment_dir(ENVIRONMENT_DIR, version)\n return prefix.path(directory)\n\n\ndef get_env_patch(venv: str) -> PatchesT:\n if sys.platform == 'cygwin': # pragma: no cover\n _, win_venv, _ = cmd_output('cygpath', '-w', venv)\n install_prefix = fr'{win_venv.strip()}\\bin'\n lib_dir = 'lib'\n elif sys.platform == 'win32': # pragma: no cover\n install_prefix = bin_dir(venv)\n lib_dir = 'Scripts'\n else: # pragma: win32 no cover\n install_prefix = venv\n lib_dir = 'lib'\n return (\n ('NODE_VIRTUAL_ENV', venv),\n ('NPM_CONFIG_PREFIX', install_prefix),\n ('npm_config_prefix', install_prefix),\n ('NODE_PATH', os.path.join(venv, lib_dir, 'node_modules')),\n ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),\n )\n\n\[email protected]\ndef in_env(\n prefix: Prefix,\n language_version: str,\n) -> Generator[None, None, None]:\n with envcontext(get_env_patch(_envdir(prefix, language_version))):\n yield\n\n\ndef install_environment(\n prefix: Prefix, version: str, additional_dependencies: Sequence[str],\n) -> None:\n additional_dependencies = tuple(additional_dependencies)\n assert prefix.exists('package.json')\n envdir = _envdir(prefix, version)\n\n # https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx?f=255&MSPPError=-2147217396#maxpath\n if sys.platform == 'win32': # pragma: no cover\n envdir = fr'\\\\?\\{os.path.normpath(envdir)}'\n with clean_path_on_failure(envdir):\n cmd = [\n sys.executable, '-mnodeenv', '--prebuilt', '--clean-src', envdir,\n ]\n if version != C.DEFAULT:\n cmd.extend(['-n', version])\n cmd_output_b(*cmd)\n\n with in_env(prefix, version):\n # https://npm.community/t/npm-install-g-git-vs-git-clone-cd-npm-install-g/5449\n # install as if we installed from git\n helpers.run_setup_cmd(prefix, ('npm', 'install'))\n helpers.run_setup_cmd(\n prefix,\n ('npm', 'install', '-g', '.', *additional_dependencies),\n )\n\n\ndef run_hook(\n hook: Hook,\n file_args: Sequence[str],\n color: bool,\n) -> Tuple[int, bytes]:\n with in_env(hook.prefix, hook.language_version):\n return helpers.run_xargs(hook, hook.cmd, file_args, color=color)\n", "path": "pre_commit/languages/node.py"}], "after_files": [{"content": "import contextlib\nimport functools\nimport os\nimport sys\nfrom typing import Generator\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit import parse_shebang\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import UNSET\nfrom pre_commit.envcontext import Var\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages import helpers\nfrom pre_commit.languages.python import bin_dir\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cmd_output_b\n\nENVIRONMENT_DIR = 'node_env'\nhealthy = helpers.basic_healthy\n\n\[email protected]_cache(maxsize=1)\ndef get_default_version() -> str:\n # nodeenv does not yet support `-n system` on windows\n if sys.platform == 'win32':\n return C.DEFAULT\n # if node is already installed, we can save a bunch of setup time by\n # using the installed version\n elif all(parse_shebang.find_executable(exe) for exe in ('node', 'npm')):\n return 'system'\n else:\n return C.DEFAULT\n\n\ndef _envdir(prefix: Prefix, version: str) -> str:\n directory = helpers.environment_dir(ENVIRONMENT_DIR, version)\n return prefix.path(directory)\n\n\ndef get_env_patch(venv: str) -> PatchesT:\n if sys.platform == 'cygwin': # pragma: no cover\n _, win_venv, _ = cmd_output('cygpath', '-w', venv)\n install_prefix = fr'{win_venv.strip()}\\bin'\n lib_dir = 'lib'\n elif sys.platform == 'win32': # pragma: no cover\n install_prefix = bin_dir(venv)\n lib_dir = 'Scripts'\n else: # pragma: win32 no cover\n install_prefix = venv\n lib_dir = 'lib'\n return (\n ('NODE_VIRTUAL_ENV', venv),\n ('NPM_CONFIG_PREFIX', install_prefix),\n ('npm_config_prefix', install_prefix),\n ('NPM_CONFIG_USERCONFIG', UNSET),\n ('npm_config_userconfig', UNSET),\n ('NODE_PATH', os.path.join(venv, lib_dir, 'node_modules')),\n ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),\n )\n\n\[email protected]\ndef in_env(\n prefix: Prefix,\n language_version: str,\n) -> Generator[None, None, None]:\n with envcontext(get_env_patch(_envdir(prefix, language_version))):\n yield\n\n\ndef install_environment(\n prefix: Prefix, version: str, additional_dependencies: Sequence[str],\n) -> None:\n additional_dependencies = tuple(additional_dependencies)\n assert prefix.exists('package.json')\n envdir = _envdir(prefix, version)\n\n # https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx?f=255&MSPPError=-2147217396#maxpath\n if sys.platform == 'win32': # pragma: no cover\n envdir = fr'\\\\?\\{os.path.normpath(envdir)}'\n with clean_path_on_failure(envdir):\n cmd = [\n sys.executable, '-mnodeenv', '--prebuilt', '--clean-src', envdir,\n ]\n if version != C.DEFAULT:\n cmd.extend(['-n', version])\n cmd_output_b(*cmd)\n\n with in_env(prefix, version):\n # https://npm.community/t/npm-install-g-git-vs-git-clone-cd-npm-install-g/5449\n # install as if we installed from git\n helpers.run_setup_cmd(prefix, ('npm', 'install'))\n helpers.run_setup_cmd(\n prefix,\n ('npm', 'install', '-g', '.', *additional_dependencies),\n )\n\n\ndef run_hook(\n hook: Hook,\n file_args: Sequence[str],\n color: bool,\n) -> Tuple[int, bytes]:\n with in_env(hook.prefix, hook.language_version):\n return helpers.run_xargs(hook, hook.cmd, file_args, color=color)\n", "path": "pre_commit/languages/node.py"}]}
1,958
213
gh_patches_debug_4989
rasdani/github-patches
git_diff
getmoto__moto-1701
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- SSM `get_parameter_by_path` should return root parameters when path is just '/' When you have a parameter with out any path in its name ie: `Name='foo', Description='A test parameter', Value='bar', Type='String'` and you run call get_parameters_by_path and pass in a Path of `'/'` it will return all parameters that don't have a path. However in Moto it returns only an empty list. Example Test File: ```python import boto3 from moto import mock_ssm ssm = boto3.client('ssm') parameter = ssm.put_parameter( Name='foo', Description='a test parameter', Value='bar', Type='String' ) parameters = ssm.get_parameters_by_path( Path='/' ) print(parameters) # {'Parameters': [{'Name': 'foo', 'Type': 'String', 'Value': 'bar', 'Version': 1}], 'ResponseMetadata': {}} with mock_ssm(): client = boto3.client('ssm') params = client.get_parameters_by_path(Path='/') print(params) # {'Parameters': [], 'ResponseMetadata': {}} ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `moto/ssm/models.py` Content: ``` 1 from __future__ import unicode_literals 2 3 from collections import defaultdict 4 5 from moto.core import BaseBackend, BaseModel 6 from moto.ec2 import ec2_backends 7 8 import datetime 9 import time 10 import uuid 11 12 13 class Parameter(BaseModel): 14 def __init__(self, name, value, type, description, keyid, last_modified_date, version): 15 self.name = name 16 self.type = type 17 self.description = description 18 self.keyid = keyid 19 self.last_modified_date = last_modified_date 20 self.version = version 21 22 if self.type == 'SecureString': 23 self.value = self.encrypt(value) 24 else: 25 self.value = value 26 27 def encrypt(self, value): 28 return 'kms:{}:'.format(self.keyid or 'default') + value 29 30 def decrypt(self, value): 31 if self.type != 'SecureString': 32 return value 33 34 prefix = 'kms:{}:'.format(self.keyid or 'default') 35 if value.startswith(prefix): 36 return value[len(prefix):] 37 38 def response_object(self, decrypt=False): 39 r = { 40 'Name': self.name, 41 'Type': self.type, 42 'Value': self.decrypt(self.value) if decrypt else self.value, 43 'Version': self.version, 44 } 45 46 return r 47 48 def describe_response_object(self, decrypt=False): 49 r = self.response_object(decrypt) 50 r['LastModifiedDate'] = int(self.last_modified_date) 51 r['LastModifiedUser'] = 'N/A' 52 53 if self.description: 54 r['Description'] = self.description 55 56 if self.keyid: 57 r['KeyId'] = self.keyid 58 return r 59 60 61 class SimpleSystemManagerBackend(BaseBackend): 62 63 def __init__(self): 64 self._parameters = {} 65 self._resource_tags = defaultdict(lambda: defaultdict(dict)) 66 67 def delete_parameter(self, name): 68 try: 69 del self._parameters[name] 70 except KeyError: 71 pass 72 73 def delete_parameters(self, names): 74 result = [] 75 for name in names: 76 try: 77 del self._parameters[name] 78 result.append(name) 79 except KeyError: 80 pass 81 return result 82 83 def get_all_parameters(self): 84 result = [] 85 for k, _ in self._parameters.items(): 86 result.append(self._parameters[k]) 87 return result 88 89 def get_parameters(self, names, with_decryption): 90 result = [] 91 for name in names: 92 if name in self._parameters: 93 result.append(self._parameters[name]) 94 return result 95 96 def get_parameters_by_path(self, path, with_decryption, recursive, filters=None): 97 """Implement the get-parameters-by-path-API in the backend.""" 98 result = [] 99 # path could be with or without a trailing /. we handle this 100 # difference here. 101 path = path.rstrip('/') + '/' 102 for param in self._parameters: 103 if not param.startswith(path): 104 continue 105 if '/' in param[len(path) + 1:] and not recursive: 106 continue 107 if not self._match_filters(self._parameters[param], filters): 108 continue 109 result.append(self._parameters[param]) 110 111 return result 112 113 @staticmethod 114 def _match_filters(parameter, filters=None): 115 """Return True if the given parameter matches all the filters""" 116 for filter_obj in (filters or []): 117 key = filter_obj['Key'] 118 option = filter_obj.get('Option', 'Equals') 119 values = filter_obj.get('Values', []) 120 121 what = None 122 if key == 'Type': 123 what = parameter.type 124 elif key == 'KeyId': 125 what = parameter.keyid 126 127 if option == 'Equals'\ 128 and not any(what == value for value in values): 129 return False 130 elif option == 'BeginsWith'\ 131 and not any(what.startswith(value) for value in values): 132 return False 133 # True if no false match (or no filters at all) 134 return True 135 136 def get_parameter(self, name, with_decryption): 137 if name in self._parameters: 138 return self._parameters[name] 139 return None 140 141 def put_parameter(self, name, description, value, type, keyid, overwrite): 142 previous_parameter = self._parameters.get(name) 143 version = 1 144 145 if previous_parameter: 146 version = previous_parameter.version + 1 147 148 if not overwrite: 149 return 150 151 last_modified_date = time.time() 152 self._parameters[name] = Parameter( 153 name, value, type, description, keyid, last_modified_date, version) 154 return version 155 156 def add_tags_to_resource(self, resource_type, resource_id, tags): 157 for key, value in tags.items(): 158 self._resource_tags[resource_type][resource_id][key] = value 159 160 def remove_tags_from_resource(self, resource_type, resource_id, keys): 161 tags = self._resource_tags[resource_type][resource_id] 162 for key in keys: 163 if key in tags: 164 del tags[key] 165 166 def list_tags_for_resource(self, resource_type, resource_id): 167 return self._resource_tags[resource_type][resource_id] 168 169 def send_command(self, **kwargs): 170 instances = kwargs.get('InstanceIds', []) 171 now = datetime.datetime.now() 172 expires_after = now + datetime.timedelta(0, int(kwargs.get('TimeoutSeconds', 3600))) 173 return { 174 'Command': { 175 'CommandId': str(uuid.uuid4()), 176 'DocumentName': kwargs['DocumentName'], 177 'Comment': kwargs.get('Comment'), 178 'ExpiresAfter': expires_after.isoformat(), 179 'Parameters': kwargs['Parameters'], 180 'InstanceIds': kwargs['InstanceIds'], 181 'Targets': kwargs.get('targets'), 182 'RequestedDateTime': now.isoformat(), 183 'Status': 'Success', 184 'StatusDetails': 'string', 185 'OutputS3Region': kwargs.get('OutputS3Region'), 186 'OutputS3BucketName': kwargs.get('OutputS3BucketName'), 187 'OutputS3KeyPrefix': kwargs.get('OutputS3KeyPrefix'), 188 'MaxConcurrency': 'string', 189 'MaxErrors': 'string', 190 'TargetCount': len(instances), 191 'CompletedCount': len(instances), 192 'ErrorCount': 0, 193 'ServiceRole': kwargs.get('ServiceRoleArn'), 194 'NotificationConfig': { 195 'NotificationArn': 'string', 196 'NotificationEvents': ['Success'], 197 'NotificationType': 'Command' 198 } 199 } 200 } 201 202 203 ssm_backends = {} 204 for region, ec2_backend in ec2_backends.items(): 205 ssm_backends[region] = SimpleSystemManagerBackend() 206 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/moto/ssm/models.py b/moto/ssm/models.py --- a/moto/ssm/models.py +++ b/moto/ssm/models.py @@ -100,7 +100,7 @@ # difference here. path = path.rstrip('/') + '/' for param in self._parameters: - if not param.startswith(path): + if path != '/' and not param.startswith(path): continue if '/' in param[len(path) + 1:] and not recursive: continue
{"golden_diff": "diff --git a/moto/ssm/models.py b/moto/ssm/models.py\n--- a/moto/ssm/models.py\n+++ b/moto/ssm/models.py\n@@ -100,7 +100,7 @@\n # difference here.\n path = path.rstrip('/') + '/'\n for param in self._parameters:\n- if not param.startswith(path):\n+ if path != '/' and not param.startswith(path):\n continue\n if '/' in param[len(path) + 1:] and not recursive:\n continue\n", "issue": "SSM `get_parameter_by_path` should return root parameters when path is just '/'\nWhen you have a parameter with out any path in its name ie: `Name='foo', Description='A test parameter', Value='bar', Type='String'` and you run call get_parameters_by_path and pass in a Path of `'/'` it will return all parameters that don't have a path.\r\n\r\nHowever in Moto it returns only an empty list.\r\n\r\nExample Test File:\r\n```python\r\nimport boto3\r\nfrom moto import mock_ssm\r\n\r\nssm = boto3.client('ssm')\r\n\r\nparameter = ssm.put_parameter(\r\n Name='foo',\r\n Description='a test parameter',\r\n Value='bar',\r\n Type='String'\r\n)\r\n\r\nparameters = ssm.get_parameters_by_path(\r\n Path='/'\r\n)\r\n\r\nprint(parameters)\r\n# {'Parameters': [{'Name': 'foo', 'Type': 'String', 'Value': 'bar', 'Version': 1}], 'ResponseMetadata': {}}\r\n\r\nwith mock_ssm():\r\n client = boto3.client('ssm')\r\n params = client.get_parameters_by_path(Path='/')\r\n print(params)\r\n # {'Parameters': [], 'ResponseMetadata': {}}\r\n```\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom collections import defaultdict\n\nfrom moto.core import BaseBackend, BaseModel\nfrom moto.ec2 import ec2_backends\n\nimport datetime\nimport time\nimport uuid\n\n\nclass Parameter(BaseModel):\n def __init__(self, name, value, type, description, keyid, last_modified_date, version):\n self.name = name\n self.type = type\n self.description = description\n self.keyid = keyid\n self.last_modified_date = last_modified_date\n self.version = version\n\n if self.type == 'SecureString':\n self.value = self.encrypt(value)\n else:\n self.value = value\n\n def encrypt(self, value):\n return 'kms:{}:'.format(self.keyid or 'default') + value\n\n def decrypt(self, value):\n if self.type != 'SecureString':\n return value\n\n prefix = 'kms:{}:'.format(self.keyid or 'default')\n if value.startswith(prefix):\n return value[len(prefix):]\n\n def response_object(self, decrypt=False):\n r = {\n 'Name': self.name,\n 'Type': self.type,\n 'Value': self.decrypt(self.value) if decrypt else self.value,\n 'Version': self.version,\n }\n\n return r\n\n def describe_response_object(self, decrypt=False):\n r = self.response_object(decrypt)\n r['LastModifiedDate'] = int(self.last_modified_date)\n r['LastModifiedUser'] = 'N/A'\n\n if self.description:\n r['Description'] = self.description\n\n if self.keyid:\n r['KeyId'] = self.keyid\n return r\n\n\nclass SimpleSystemManagerBackend(BaseBackend):\n\n def __init__(self):\n self._parameters = {}\n self._resource_tags = defaultdict(lambda: defaultdict(dict))\n\n def delete_parameter(self, name):\n try:\n del self._parameters[name]\n except KeyError:\n pass\n\n def delete_parameters(self, names):\n result = []\n for name in names:\n try:\n del self._parameters[name]\n result.append(name)\n except KeyError:\n pass\n return result\n\n def get_all_parameters(self):\n result = []\n for k, _ in self._parameters.items():\n result.append(self._parameters[k])\n return result\n\n def get_parameters(self, names, with_decryption):\n result = []\n for name in names:\n if name in self._parameters:\n result.append(self._parameters[name])\n return result\n\n def get_parameters_by_path(self, path, with_decryption, recursive, filters=None):\n \"\"\"Implement the get-parameters-by-path-API in the backend.\"\"\"\n result = []\n # path could be with or without a trailing /. we handle this\n # difference here.\n path = path.rstrip('/') + '/'\n for param in self._parameters:\n if not param.startswith(path):\n continue\n if '/' in param[len(path) + 1:] and not recursive:\n continue\n if not self._match_filters(self._parameters[param], filters):\n continue\n result.append(self._parameters[param])\n\n return result\n\n @staticmethod\n def _match_filters(parameter, filters=None):\n \"\"\"Return True if the given parameter matches all the filters\"\"\"\n for filter_obj in (filters or []):\n key = filter_obj['Key']\n option = filter_obj.get('Option', 'Equals')\n values = filter_obj.get('Values', [])\n\n what = None\n if key == 'Type':\n what = parameter.type\n elif key == 'KeyId':\n what = parameter.keyid\n\n if option == 'Equals'\\\n and not any(what == value for value in values):\n return False\n elif option == 'BeginsWith'\\\n and not any(what.startswith(value) for value in values):\n return False\n # True if no false match (or no filters at all)\n return True\n\n def get_parameter(self, name, with_decryption):\n if name in self._parameters:\n return self._parameters[name]\n return None\n\n def put_parameter(self, name, description, value, type, keyid, overwrite):\n previous_parameter = self._parameters.get(name)\n version = 1\n\n if previous_parameter:\n version = previous_parameter.version + 1\n\n if not overwrite:\n return\n\n last_modified_date = time.time()\n self._parameters[name] = Parameter(\n name, value, type, description, keyid, last_modified_date, version)\n return version\n\n def add_tags_to_resource(self, resource_type, resource_id, tags):\n for key, value in tags.items():\n self._resource_tags[resource_type][resource_id][key] = value\n\n def remove_tags_from_resource(self, resource_type, resource_id, keys):\n tags = self._resource_tags[resource_type][resource_id]\n for key in keys:\n if key in tags:\n del tags[key]\n\n def list_tags_for_resource(self, resource_type, resource_id):\n return self._resource_tags[resource_type][resource_id]\n\n def send_command(self, **kwargs):\n instances = kwargs.get('InstanceIds', [])\n now = datetime.datetime.now()\n expires_after = now + datetime.timedelta(0, int(kwargs.get('TimeoutSeconds', 3600)))\n return {\n 'Command': {\n 'CommandId': str(uuid.uuid4()),\n 'DocumentName': kwargs['DocumentName'],\n 'Comment': kwargs.get('Comment'),\n 'ExpiresAfter': expires_after.isoformat(),\n 'Parameters': kwargs['Parameters'],\n 'InstanceIds': kwargs['InstanceIds'],\n 'Targets': kwargs.get('targets'),\n 'RequestedDateTime': now.isoformat(),\n 'Status': 'Success',\n 'StatusDetails': 'string',\n 'OutputS3Region': kwargs.get('OutputS3Region'),\n 'OutputS3BucketName': kwargs.get('OutputS3BucketName'),\n 'OutputS3KeyPrefix': kwargs.get('OutputS3KeyPrefix'),\n 'MaxConcurrency': 'string',\n 'MaxErrors': 'string',\n 'TargetCount': len(instances),\n 'CompletedCount': len(instances),\n 'ErrorCount': 0,\n 'ServiceRole': kwargs.get('ServiceRoleArn'),\n 'NotificationConfig': {\n 'NotificationArn': 'string',\n 'NotificationEvents': ['Success'],\n 'NotificationType': 'Command'\n }\n }\n }\n\n\nssm_backends = {}\nfor region, ec2_backend in ec2_backends.items():\n ssm_backends[region] = SimpleSystemManagerBackend()\n", "path": "moto/ssm/models.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nfrom collections import defaultdict\n\nfrom moto.core import BaseBackend, BaseModel\nfrom moto.ec2 import ec2_backends\n\nimport datetime\nimport time\nimport uuid\n\n\nclass Parameter(BaseModel):\n def __init__(self, name, value, type, description, keyid, last_modified_date, version):\n self.name = name\n self.type = type\n self.description = description\n self.keyid = keyid\n self.last_modified_date = last_modified_date\n self.version = version\n\n if self.type == 'SecureString':\n self.value = self.encrypt(value)\n else:\n self.value = value\n\n def encrypt(self, value):\n return 'kms:{}:'.format(self.keyid or 'default') + value\n\n def decrypt(self, value):\n if self.type != 'SecureString':\n return value\n\n prefix = 'kms:{}:'.format(self.keyid or 'default')\n if value.startswith(prefix):\n return value[len(prefix):]\n\n def response_object(self, decrypt=False):\n r = {\n 'Name': self.name,\n 'Type': self.type,\n 'Value': self.decrypt(self.value) if decrypt else self.value,\n 'Version': self.version,\n }\n\n return r\n\n def describe_response_object(self, decrypt=False):\n r = self.response_object(decrypt)\n r['LastModifiedDate'] = int(self.last_modified_date)\n r['LastModifiedUser'] = 'N/A'\n\n if self.description:\n r['Description'] = self.description\n\n if self.keyid:\n r['KeyId'] = self.keyid\n return r\n\n\nclass SimpleSystemManagerBackend(BaseBackend):\n\n def __init__(self):\n self._parameters = {}\n self._resource_tags = defaultdict(lambda: defaultdict(dict))\n\n def delete_parameter(self, name):\n try:\n del self._parameters[name]\n except KeyError:\n pass\n\n def delete_parameters(self, names):\n result = []\n for name in names:\n try:\n del self._parameters[name]\n result.append(name)\n except KeyError:\n pass\n return result\n\n def get_all_parameters(self):\n result = []\n for k, _ in self._parameters.items():\n result.append(self._parameters[k])\n return result\n\n def get_parameters(self, names, with_decryption):\n result = []\n for name in names:\n if name in self._parameters:\n result.append(self._parameters[name])\n return result\n\n def get_parameters_by_path(self, path, with_decryption, recursive, filters=None):\n \"\"\"Implement the get-parameters-by-path-API in the backend.\"\"\"\n result = []\n # path could be with or without a trailing /. we handle this\n # difference here.\n path = path.rstrip('/') + '/'\n for param in self._parameters:\n if path != '/' and not param.startswith(path):\n continue\n if '/' in param[len(path) + 1:] and not recursive:\n continue\n if not self._match_filters(self._parameters[param], filters):\n continue\n result.append(self._parameters[param])\n\n return result\n\n @staticmethod\n def _match_filters(parameter, filters=None):\n \"\"\"Return True if the given parameter matches all the filters\"\"\"\n for filter_obj in (filters or []):\n key = filter_obj['Key']\n option = filter_obj.get('Option', 'Equals')\n values = filter_obj.get('Values', [])\n\n what = None\n if key == 'Type':\n what = parameter.type\n elif key == 'KeyId':\n what = parameter.keyid\n\n if option == 'Equals'\\\n and not any(what == value for value in values):\n return False\n elif option == 'BeginsWith'\\\n and not any(what.startswith(value) for value in values):\n return False\n # True if no false match (or no filters at all)\n return True\n\n def get_parameter(self, name, with_decryption):\n if name in self._parameters:\n return self._parameters[name]\n return None\n\n def put_parameter(self, name, description, value, type, keyid, overwrite):\n previous_parameter = self._parameters.get(name)\n version = 1\n\n if previous_parameter:\n version = previous_parameter.version + 1\n\n if not overwrite:\n return\n\n last_modified_date = time.time()\n self._parameters[name] = Parameter(\n name, value, type, description, keyid, last_modified_date, version)\n return version\n\n def add_tags_to_resource(self, resource_type, resource_id, tags):\n for key, value in tags.items():\n self._resource_tags[resource_type][resource_id][key] = value\n\n def remove_tags_from_resource(self, resource_type, resource_id, keys):\n tags = self._resource_tags[resource_type][resource_id]\n for key in keys:\n if key in tags:\n del tags[key]\n\n def list_tags_for_resource(self, resource_type, resource_id):\n return self._resource_tags[resource_type][resource_id]\n\n def send_command(self, **kwargs):\n instances = kwargs.get('InstanceIds', [])\n now = datetime.datetime.now()\n expires_after = now + datetime.timedelta(0, int(kwargs.get('TimeoutSeconds', 3600)))\n return {\n 'Command': {\n 'CommandId': str(uuid.uuid4()),\n 'DocumentName': kwargs['DocumentName'],\n 'Comment': kwargs.get('Comment'),\n 'ExpiresAfter': expires_after.isoformat(),\n 'Parameters': kwargs['Parameters'],\n 'InstanceIds': kwargs['InstanceIds'],\n 'Targets': kwargs.get('targets'),\n 'RequestedDateTime': now.isoformat(),\n 'Status': 'Success',\n 'StatusDetails': 'string',\n 'OutputS3Region': kwargs.get('OutputS3Region'),\n 'OutputS3BucketName': kwargs.get('OutputS3BucketName'),\n 'OutputS3KeyPrefix': kwargs.get('OutputS3KeyPrefix'),\n 'MaxConcurrency': 'string',\n 'MaxErrors': 'string',\n 'TargetCount': len(instances),\n 'CompletedCount': len(instances),\n 'ErrorCount': 0,\n 'ServiceRole': kwargs.get('ServiceRoleArn'),\n 'NotificationConfig': {\n 'NotificationArn': 'string',\n 'NotificationEvents': ['Success'],\n 'NotificationType': 'Command'\n }\n }\n }\n\n\nssm_backends = {}\nfor region, ec2_backend in ec2_backends.items():\n ssm_backends[region] = SimpleSystemManagerBackend()\n", "path": "moto/ssm/models.py"}]}
2,441
113
gh_patches_debug_648
rasdani/github-patches
git_diff
pex-tool__pex-2000
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Release 2.1.117 On the docket: + [x] Published pex on github no longer works with PyPy since 2.1.109 #1995 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pex/version.py` Content: ``` 1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 __version__ = "2.1.116" 5 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pex/version.py b/pex/version.py --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = "2.1.116" +__version__ = "2.1.117"
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.116\"\n+__version__ = \"2.1.117\"\n", "issue": "Release 2.1.117\nOn the docket:\r\n+ [x] Published pex on github no longer works with PyPy since 2.1.109 #1995\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.116\"\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.117\"\n", "path": "pex/version.py"}]}
353
98
gh_patches_debug_16737
rasdani/github-patches
git_diff
ansible__awx-13913
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- awx.awx.group option preserve hosts fails when there are no hosts. ### Please confirm the following - [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html). - [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates. - [X] I understand that AWX is open source software provided for free and that I might not receive a timely response. ### Summary when using awx.awx.groups the preserve hosts/groups options fails if there are no groups/hosts. ### AWX version 19.4.0 ### Select the relevant components - [ ] UI - [ ] API - [ ] Docs ### Installation method minikube ### Modifications no ### Ansible version _No response_ ### Operating system _No response_ ### Web browser _No response_ ### Steps to reproduce ``` - name: Add host to temporary group for new vms awx.awx.group: name: new_vms inventory: "Temporary Inventory" hosts: - "{{ name }}" validate_certs: false preserve_existing_hosts: True when: - use_aap_inventory - vm is succeeded ``` ### Expected results It runs and adds the host to the empty group ### Actual results ``` TypeError: 'NoneType' object is not subscriptabl ``` ### Additional information Because [This line](https://github.com/ansible/awx/blob/593eebf062cd1f73c117502e0491f3c8532695a3/awx_collection/plugins/modules/group.py#L174) returns NoneType when there are no hosts in the group. module needs to be updated to fix this issue. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `awx_collection/plugins/modules/group.py` Content: ``` 1 #!/usr/bin/python 2 # coding: utf-8 -*- 3 4 # (c) 2017, Wayne Witzel III <[email protected]> 5 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) 6 7 from __future__ import absolute_import, division, print_function 8 9 __metaclass__ = type 10 11 12 ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} 13 14 15 DOCUMENTATION = ''' 16 --- 17 module: group 18 author: "Wayne Witzel III (@wwitzel3)" 19 short_description: create, update, or destroy Automation Platform Controller group. 20 description: 21 - Create, update, or destroy Automation Platform Controller groups. See 22 U(https://www.ansible.com/tower) for an overview. 23 options: 24 name: 25 description: 26 - The name to use for the group. 27 required: True 28 type: str 29 description: 30 description: 31 - The description to use for the group. 32 type: str 33 inventory: 34 description: 35 - Inventory the group should be made a member of. 36 required: True 37 type: str 38 variables: 39 description: 40 - Variables to use for the group. 41 type: dict 42 hosts: 43 description: 44 - List of hosts that should be put in this group. 45 type: list 46 elements: str 47 children: 48 description: 49 - List of groups that should be nested inside in this group. 50 type: list 51 elements: str 52 aliases: 53 - groups 54 preserve_existing_hosts: 55 description: 56 - Provide option (False by default) to preserves existing hosts in an existing group. 57 default: False 58 type: bool 59 preserve_existing_children: 60 description: 61 - Provide option (False by default) to preserves existing children in an existing group. 62 default: False 63 type: bool 64 aliases: 65 - preserve_existing_groups 66 state: 67 description: 68 - Desired state of the resource. 69 default: "present" 70 choices: ["present", "absent", "exists"] 71 type: str 72 new_name: 73 description: 74 - A new name for this group (for renaming) 75 type: str 76 extends_documentation_fragment: awx.awx.auth 77 ''' 78 79 80 EXAMPLES = ''' 81 - name: Add group 82 group: 83 name: localhost 84 description: "Local Host Group" 85 inventory: "Local Inventory" 86 state: present 87 controller_config_file: "~/tower_cli.cfg" 88 89 - name: Add group 90 group: 91 name: Cities 92 description: "Local Host Group" 93 inventory: Default Inventory 94 hosts: 95 - fda 96 children: 97 - NewYork 98 preserve_existing_hosts: True 99 preserve_existing_children: True 100 ''' 101 102 from ..module_utils.controller_api import ControllerAPIModule 103 import json 104 105 106 def main(): 107 # Any additional arguments that are not fields of the item can be added here 108 argument_spec = dict( 109 name=dict(required=True), 110 new_name=dict(), 111 description=dict(), 112 inventory=dict(required=True), 113 variables=dict(type='dict'), 114 hosts=dict(type='list', elements='str'), 115 children=dict(type='list', elements='str', aliases=['groups']), 116 preserve_existing_hosts=dict(type='bool', default=False), 117 preserve_existing_children=dict(type='bool', default=False, aliases=['preserve_existing_groups']), 118 state=dict(choices=['present', 'absent', 'exists'], default='present'), 119 ) 120 121 # Create a module for ourselves 122 module = ControllerAPIModule(argument_spec=argument_spec) 123 124 # Extract our parameters 125 name = module.params.get('name') 126 new_name = module.params.get('new_name') 127 inventory = module.params.get('inventory') 128 description = module.params.get('description') 129 state = module.params.pop('state') 130 preserve_existing_hosts = module.params.get('preserve_existing_hosts') 131 preserve_existing_children = module.params.get('preserve_existing_children') 132 variables = module.params.get('variables') 133 134 # Attempt to look up the related items the user specified (these will fail the module if not found) 135 inventory_id = module.resolve_name_to_id('inventories', inventory) 136 137 # Attempt to look up the object based on the provided name and inventory ID 138 group = module.get_one('groups', name_or_id=name, check_exists=(state == 'exists'), **{'data': {'inventory': inventory_id}}) 139 140 if state == 'absent': 141 # If the state was absent we can let the module delete it if needed, the module will handle exiting from this 142 module.delete_if_needed(group) 143 144 # Create the data that gets sent for create and update 145 group_fields = { 146 'name': new_name if new_name else (module.get_item_name(group) if group else name), 147 'inventory': inventory_id, 148 } 149 if description is not None: 150 group_fields['description'] = description 151 if variables is not None: 152 group_fields['variables'] = json.dumps(variables) 153 154 association_fields = {} 155 for resource, relationship in (('hosts', 'hosts'), ('groups', 'children')): 156 name_list = module.params.get(relationship) 157 if name_list is None: 158 continue 159 id_list = [] 160 for sub_name in name_list: 161 sub_obj = module.get_one( 162 resource, 163 name_or_id=sub_name, 164 **{ 165 'data': {'inventory': inventory_id}, 166 } 167 ) 168 if sub_obj is None: 169 module.fail_json(msg='Could not find {0} with name {1}'.format(resource, sub_name)) 170 id_list.append(sub_obj['id']) 171 # Preserve existing objects 172 if (preserve_existing_hosts and relationship == 'hosts') or (preserve_existing_children and relationship == 'children'): 173 preserve_existing_check = module.get_all_endpoint(group['related'][relationship]) 174 for sub_obj in preserve_existing_check['json']['results']: 175 id_list.append(sub_obj['id']) 176 if id_list: 177 association_fields[relationship] = id_list 178 179 # If the state was present we can let the module build or update the existing group, this will return on its own 180 module.create_or_update_if_needed(group, group_fields, endpoint='groups', item_type='group', associations=association_fields) 181 182 183 if __name__ == '__main__': 184 main() 185 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/awx_collection/plugins/modules/group.py b/awx_collection/plugins/modules/group.py --- a/awx_collection/plugins/modules/group.py +++ b/awx_collection/plugins/modules/group.py @@ -170,9 +170,11 @@ id_list.append(sub_obj['id']) # Preserve existing objects if (preserve_existing_hosts and relationship == 'hosts') or (preserve_existing_children and relationship == 'children'): - preserve_existing_check = module.get_all_endpoint(group['related'][relationship]) - for sub_obj in preserve_existing_check['json']['results']: - id_list.append(sub_obj['id']) + if group: + preserve_existing_check = module.get_all_endpoint(group['related'][relationship]) + for sub_obj in preserve_existing_check['json']['results']: + if 'id' in sub_obj: + id_list.append(sub_obj['id']) if id_list: association_fields[relationship] = id_list
{"golden_diff": "diff --git a/awx_collection/plugins/modules/group.py b/awx_collection/plugins/modules/group.py\n--- a/awx_collection/plugins/modules/group.py\n+++ b/awx_collection/plugins/modules/group.py\n@@ -170,9 +170,11 @@\n id_list.append(sub_obj['id'])\n # Preserve existing objects\n if (preserve_existing_hosts and relationship == 'hosts') or (preserve_existing_children and relationship == 'children'):\n- preserve_existing_check = module.get_all_endpoint(group['related'][relationship])\n- for sub_obj in preserve_existing_check['json']['results']:\n- id_list.append(sub_obj['id'])\n+ if group:\n+ preserve_existing_check = module.get_all_endpoint(group['related'][relationship])\n+ for sub_obj in preserve_existing_check['json']['results']:\n+ if 'id' in sub_obj:\n+ id_list.append(sub_obj['id'])\n if id_list:\n association_fields[relationship] = id_list\n", "issue": "awx.awx.group option preserve hosts fails when there are no hosts.\n### Please confirm the following\n\n- [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).\n- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.\n- [X] I understand that AWX is open source software provided for free and that I might not receive a timely response.\n\n### Summary\n\nwhen using awx.awx.groups the preserve hosts/groups options fails if there are no groups/hosts.\r\n\r\n\r\n\n\n### AWX version\n\n19.4.0\n\n### Select the relevant components\n\n- [ ] UI\n- [ ] API\n- [ ] Docs\n\n### Installation method\n\nminikube\n\n### Modifications\n\nno\n\n### Ansible version\n\n_No response_\n\n### Operating system\n\n_No response_\n\n### Web browser\n\n_No response_\n\n### Steps to reproduce\n\n```\r\n - name: Add host to temporary group for new vms\r\n awx.awx.group:\r\n name: new_vms\r\n inventory: \"Temporary Inventory\"\r\n hosts:\r\n - \"{{ name }}\"\r\n validate_certs: false\r\n preserve_existing_hosts: True\r\n when:\r\n - use_aap_inventory\r\n - vm is succeeded\r\n```\n\n### Expected results\n\nIt runs and adds the host to the empty group\n\n### Actual results\n\n```\r\nTypeError: 'NoneType' object is not subscriptabl\r\n```\n\n### Additional information\n\nBecause [This line](https://github.com/ansible/awx/blob/593eebf062cd1f73c117502e0491f3c8532695a3/awx_collection/plugins/modules/group.py#L174) returns NoneType when there are no hosts in the group.\r\n\r\nmodule needs to be updated to fix this issue.\n", "before_files": [{"content": "#!/usr/bin/python\n# coding: utf-8 -*-\n\n# (c) 2017, Wayne Witzel III <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'}\n\n\nDOCUMENTATION = '''\n---\nmodule: group\nauthor: \"Wayne Witzel III (@wwitzel3)\"\nshort_description: create, update, or destroy Automation Platform Controller group.\ndescription:\n - Create, update, or destroy Automation Platform Controller groups. See\n U(https://www.ansible.com/tower) for an overview.\noptions:\n name:\n description:\n - The name to use for the group.\n required: True\n type: str\n description:\n description:\n - The description to use for the group.\n type: str\n inventory:\n description:\n - Inventory the group should be made a member of.\n required: True\n type: str\n variables:\n description:\n - Variables to use for the group.\n type: dict\n hosts:\n description:\n - List of hosts that should be put in this group.\n type: list\n elements: str\n children:\n description:\n - List of groups that should be nested inside in this group.\n type: list\n elements: str\n aliases:\n - groups\n preserve_existing_hosts:\n description:\n - Provide option (False by default) to preserves existing hosts in an existing group.\n default: False\n type: bool\n preserve_existing_children:\n description:\n - Provide option (False by default) to preserves existing children in an existing group.\n default: False\n type: bool\n aliases:\n - preserve_existing_groups\n state:\n description:\n - Desired state of the resource.\n default: \"present\"\n choices: [\"present\", \"absent\", \"exists\"]\n type: str\n new_name:\n description:\n - A new name for this group (for renaming)\n type: str\nextends_documentation_fragment: awx.awx.auth\n'''\n\n\nEXAMPLES = '''\n- name: Add group\n group:\n name: localhost\n description: \"Local Host Group\"\n inventory: \"Local Inventory\"\n state: present\n controller_config_file: \"~/tower_cli.cfg\"\n\n- name: Add group\n group:\n name: Cities\n description: \"Local Host Group\"\n inventory: Default Inventory\n hosts:\n - fda\n children:\n - NewYork\n preserve_existing_hosts: True\n preserve_existing_children: True\n'''\n\nfrom ..module_utils.controller_api import ControllerAPIModule\nimport json\n\n\ndef main():\n # Any additional arguments that are not fields of the item can be added here\n argument_spec = dict(\n name=dict(required=True),\n new_name=dict(),\n description=dict(),\n inventory=dict(required=True),\n variables=dict(type='dict'),\n hosts=dict(type='list', elements='str'),\n children=dict(type='list', elements='str', aliases=['groups']),\n preserve_existing_hosts=dict(type='bool', default=False),\n preserve_existing_children=dict(type='bool', default=False, aliases=['preserve_existing_groups']),\n state=dict(choices=['present', 'absent', 'exists'], default='present'),\n )\n\n # Create a module for ourselves\n module = ControllerAPIModule(argument_spec=argument_spec)\n\n # Extract our parameters\n name = module.params.get('name')\n new_name = module.params.get('new_name')\n inventory = module.params.get('inventory')\n description = module.params.get('description')\n state = module.params.pop('state')\n preserve_existing_hosts = module.params.get('preserve_existing_hosts')\n preserve_existing_children = module.params.get('preserve_existing_children')\n variables = module.params.get('variables')\n\n # Attempt to look up the related items the user specified (these will fail the module if not found)\n inventory_id = module.resolve_name_to_id('inventories', inventory)\n\n # Attempt to look up the object based on the provided name and inventory ID\n group = module.get_one('groups', name_or_id=name, check_exists=(state == 'exists'), **{'data': {'inventory': inventory_id}})\n\n if state == 'absent':\n # If the state was absent we can let the module delete it if needed, the module will handle exiting from this\n module.delete_if_needed(group)\n\n # Create the data that gets sent for create and update\n group_fields = {\n 'name': new_name if new_name else (module.get_item_name(group) if group else name),\n 'inventory': inventory_id,\n }\n if description is not None:\n group_fields['description'] = description\n if variables is not None:\n group_fields['variables'] = json.dumps(variables)\n\n association_fields = {}\n for resource, relationship in (('hosts', 'hosts'), ('groups', 'children')):\n name_list = module.params.get(relationship)\n if name_list is None:\n continue\n id_list = []\n for sub_name in name_list:\n sub_obj = module.get_one(\n resource,\n name_or_id=sub_name,\n **{\n 'data': {'inventory': inventory_id},\n }\n )\n if sub_obj is None:\n module.fail_json(msg='Could not find {0} with name {1}'.format(resource, sub_name))\n id_list.append(sub_obj['id'])\n # Preserve existing objects\n if (preserve_existing_hosts and relationship == 'hosts') or (preserve_existing_children and relationship == 'children'):\n preserve_existing_check = module.get_all_endpoint(group['related'][relationship])\n for sub_obj in preserve_existing_check['json']['results']:\n id_list.append(sub_obj['id'])\n if id_list:\n association_fields[relationship] = id_list\n\n # If the state was present we can let the module build or update the existing group, this will return on its own\n module.create_or_update_if_needed(group, group_fields, endpoint='groups', item_type='group', associations=association_fields)\n\n\nif __name__ == '__main__':\n main()\n", "path": "awx_collection/plugins/modules/group.py"}], "after_files": [{"content": "#!/usr/bin/python\n# coding: utf-8 -*-\n\n# (c) 2017, Wayne Witzel III <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'}\n\n\nDOCUMENTATION = '''\n---\nmodule: group\nauthor: \"Wayne Witzel III (@wwitzel3)\"\nshort_description: create, update, or destroy Automation Platform Controller group.\ndescription:\n - Create, update, or destroy Automation Platform Controller groups. See\n U(https://www.ansible.com/tower) for an overview.\noptions:\n name:\n description:\n - The name to use for the group.\n required: True\n type: str\n description:\n description:\n - The description to use for the group.\n type: str\n inventory:\n description:\n - Inventory the group should be made a member of.\n required: True\n type: str\n variables:\n description:\n - Variables to use for the group.\n type: dict\n hosts:\n description:\n - List of hosts that should be put in this group.\n type: list\n elements: str\n children:\n description:\n - List of groups that should be nested inside in this group.\n type: list\n elements: str\n aliases:\n - groups\n preserve_existing_hosts:\n description:\n - Provide option (False by default) to preserves existing hosts in an existing group.\n default: False\n type: bool\n preserve_existing_children:\n description:\n - Provide option (False by default) to preserves existing children in an existing group.\n default: False\n type: bool\n aliases:\n - preserve_existing_groups\n state:\n description:\n - Desired state of the resource.\n default: \"present\"\n choices: [\"present\", \"absent\", \"exists\"]\n type: str\n new_name:\n description:\n - A new name for this group (for renaming)\n type: str\nextends_documentation_fragment: awx.awx.auth\n'''\n\n\nEXAMPLES = '''\n- name: Add group\n group:\n name: localhost\n description: \"Local Host Group\"\n inventory: \"Local Inventory\"\n state: present\n controller_config_file: \"~/tower_cli.cfg\"\n\n- name: Add group\n group:\n name: Cities\n description: \"Local Host Group\"\n inventory: Default Inventory\n hosts:\n - fda\n children:\n - NewYork\n preserve_existing_hosts: True\n preserve_existing_children: True\n'''\n\nfrom ..module_utils.controller_api import ControllerAPIModule\nimport json\n\n\ndef main():\n # Any additional arguments that are not fields of the item can be added here\n argument_spec = dict(\n name=dict(required=True),\n new_name=dict(),\n description=dict(),\n inventory=dict(required=True),\n variables=dict(type='dict'),\n hosts=dict(type='list', elements='str'),\n children=dict(type='list', elements='str', aliases=['groups']),\n preserve_existing_hosts=dict(type='bool', default=False),\n preserve_existing_children=dict(type='bool', default=False, aliases=['preserve_existing_groups']),\n state=dict(choices=['present', 'absent', 'exists'], default='present'),\n )\n\n # Create a module for ourselves\n module = ControllerAPIModule(argument_spec=argument_spec)\n\n # Extract our parameters\n name = module.params.get('name')\n new_name = module.params.get('new_name')\n inventory = module.params.get('inventory')\n description = module.params.get('description')\n state = module.params.pop('state')\n preserve_existing_hosts = module.params.get('preserve_existing_hosts')\n preserve_existing_children = module.params.get('preserve_existing_children')\n variables = module.params.get('variables')\n\n # Attempt to look up the related items the user specified (these will fail the module if not found)\n inventory_id = module.resolve_name_to_id('inventories', inventory)\n\n # Attempt to look up the object based on the provided name and inventory ID\n group = module.get_one('groups', name_or_id=name, check_exists=(state == 'exists'), **{'data': {'inventory': inventory_id}})\n\n if state == 'absent':\n # If the state was absent we can let the module delete it if needed, the module will handle exiting from this\n module.delete_if_needed(group)\n\n # Create the data that gets sent for create and update\n group_fields = {\n 'name': new_name if new_name else (module.get_item_name(group) if group else name),\n 'inventory': inventory_id,\n }\n if description is not None:\n group_fields['description'] = description\n if variables is not None:\n group_fields['variables'] = json.dumps(variables)\n\n association_fields = {}\n for resource, relationship in (('hosts', 'hosts'), ('groups', 'children')):\n name_list = module.params.get(relationship)\n if name_list is None:\n continue\n id_list = []\n for sub_name in name_list:\n sub_obj = module.get_one(\n resource,\n name_or_id=sub_name,\n **{\n 'data': {'inventory': inventory_id},\n }\n )\n if sub_obj is None:\n module.fail_json(msg='Could not find {0} with name {1}'.format(resource, sub_name))\n id_list.append(sub_obj['id'])\n # Preserve existing objects\n if (preserve_existing_hosts and relationship == 'hosts') or (preserve_existing_children and relationship == 'children'):\n if group:\n preserve_existing_check = module.get_all_endpoint(group['related'][relationship])\n for sub_obj in preserve_existing_check['json']['results']:\n if 'id' in sub_obj:\n id_list.append(sub_obj['id'])\n if id_list:\n association_fields[relationship] = id_list\n\n # If the state was present we can let the module build or update the existing group, this will return on its own\n module.create_or_update_if_needed(group, group_fields, endpoint='groups', item_type='group', associations=association_fields)\n\n\nif __name__ == '__main__':\n main()\n", "path": "awx_collection/plugins/modules/group.py"}]}
2,486
207
gh_patches_debug_1820
rasdani/github-patches
git_diff
scrapy__scrapy-4585
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Downloadable documentation is missing for versions 2.0 and 2.1 on readthedocs.org For some reason downloadable documentation on https://readthedocs.org/projects/scrapy/downloads/ is available only up to version 1.8. That's a minor issue, but I think that I'm not the only one who prefers to read technical papers in the pdf format (to be able to take notes). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docs/conf.py` Content: ``` 1 # Scrapy documentation build configuration file, created by 2 # sphinx-quickstart on Mon Nov 24 12:02:52 2008. 3 # 4 # This file is execfile()d with the current directory set to its containing dir. 5 # 6 # The contents of this file are pickled, so don't put values in the namespace 7 # that aren't pickleable (module imports are okay, they're removed automatically). 8 # 9 # All configuration values have a default; values that are commented out 10 # serve to show the default. 11 12 import sys 13 from datetime import datetime 14 from os import path 15 16 # If your extensions are in another directory, add it here. If the directory 17 # is relative to the documentation root, use os.path.abspath to make it 18 # absolute, like shown here. 19 sys.path.append(path.join(path.dirname(__file__), "_ext")) 20 sys.path.insert(0, path.dirname(path.dirname(__file__))) 21 22 23 # General configuration 24 # --------------------- 25 26 # Add any Sphinx extension module names here, as strings. They can be extensions 27 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 28 extensions = [ 29 'hoverxref.extension', 30 'notfound.extension', 31 'scrapydocs', 32 'sphinx.ext.autodoc', 33 'sphinx.ext.coverage', 34 'sphinx.ext.intersphinx', 35 'sphinx.ext.viewcode', 36 ] 37 38 # Add any paths that contain templates here, relative to this directory. 39 templates_path = ['_templates'] 40 41 # The suffix of source filenames. 42 source_suffix = '.rst' 43 44 # The encoding of source files. 45 #source_encoding = 'utf-8' 46 47 # The master toctree document. 48 master_doc = 'index' 49 50 # General information about the project. 51 project = 'Scrapy' 52 copyright = '2008–{}, Scrapy developers'.format(datetime.now().year) 53 54 # The version info for the project you're documenting, acts as replacement for 55 # |version| and |release|, also used in various other places throughout the 56 # built documents. 57 # 58 # The short X.Y version. 59 try: 60 import scrapy 61 version = '.'.join(map(str, scrapy.version_info[:2])) 62 release = scrapy.__version__ 63 except ImportError: 64 version = '' 65 release = '' 66 67 # The language for content autogenerated by Sphinx. Refer to documentation 68 # for a list of supported languages. 69 language = 'en' 70 71 # There are two options for replacing |today|: either, you set today to some 72 # non-false value, then it is used: 73 #today = '' 74 # Else, today_fmt is used as the format for a strftime call. 75 #today_fmt = '%B %d, %Y' 76 77 # List of documents that shouldn't be included in the build. 78 #unused_docs = [] 79 80 exclude_patterns = ['build'] 81 82 # List of directories, relative to source directory, that shouldn't be searched 83 # for source files. 84 exclude_trees = ['.build'] 85 86 # The reST default role (used for this markup: `text`) to use for all documents. 87 #default_role = None 88 89 # If true, '()' will be appended to :func: etc. cross-reference text. 90 #add_function_parentheses = True 91 92 # If true, the current module name will be prepended to all description 93 # unit titles (such as .. function::). 94 #add_module_names = True 95 96 # If true, sectionauthor and moduleauthor directives will be shown in the 97 # output. They are ignored by default. 98 #show_authors = False 99 100 # The name of the Pygments (syntax highlighting) style to use. 101 pygments_style = 'sphinx' 102 103 104 # Options for HTML output 105 # ----------------------- 106 107 # The theme to use for HTML and HTML Help pages. See the documentation for 108 # a list of builtin themes. 109 html_theme = 'sphinx_rtd_theme' 110 111 # Theme options are theme-specific and customize the look and feel of a theme 112 # further. For a list of options available for each theme, see the 113 # documentation. 114 #html_theme_options = {} 115 116 # Add any paths that contain custom themes here, relative to this directory. 117 # Add path to the RTD explicitly to robustify builds (otherwise might 118 # fail in a clean Debian build env) 119 import sphinx_rtd_theme 120 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] 121 122 123 # The style sheet to use for HTML and HTML Help pages. A file of that name 124 # must exist either in Sphinx' static/ path, or in one of the custom paths 125 # given in html_static_path. 126 # html_style = 'scrapydoc.css' 127 128 # The name for this set of Sphinx documents. If None, it defaults to 129 # "<project> v<release> documentation". 130 #html_title = None 131 132 # A shorter title for the navigation bar. Default is the same as html_title. 133 #html_short_title = None 134 135 # The name of an image file (relative to this directory) to place at the top 136 # of the sidebar. 137 #html_logo = None 138 139 # The name of an image file (within the static path) to use as favicon of the 140 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 141 # pixels large. 142 #html_favicon = None 143 144 # Add any paths that contain custom static files (such as style sheets) here, 145 # relative to this directory. They are copied after the builtin static files, 146 # so a file named "default.css" will overwrite the builtin "default.css". 147 html_static_path = ['_static'] 148 149 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 150 # using the given strftime format. 151 html_last_updated_fmt = '%b %d, %Y' 152 153 # Custom sidebar templates, maps document names to template names. 154 #html_sidebars = {} 155 156 # Additional templates that should be rendered to pages, maps page names to 157 # template names. 158 #html_additional_pages = {} 159 160 # If false, no module index is generated. 161 #html_use_modindex = True 162 163 # If false, no index is generated. 164 #html_use_index = True 165 166 # If true, the index is split into individual pages for each letter. 167 #html_split_index = False 168 169 # If true, the reST sources are included in the HTML build as _sources/<name>. 170 html_copy_source = True 171 172 # If true, an OpenSearch description file will be output, and all pages will 173 # contain a <link> tag referring to it. The value of this option must be the 174 # base URL from which the finished HTML is served. 175 #html_use_opensearch = '' 176 177 # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). 178 #html_file_suffix = '' 179 180 # Output file base name for HTML help builder. 181 htmlhelp_basename = 'Scrapydoc' 182 183 184 # Options for LaTeX output 185 # ------------------------ 186 187 # The paper size ('letter' or 'a4'). 188 #latex_paper_size = 'letter' 189 190 # The font size ('10pt', '11pt' or '12pt'). 191 #latex_font_size = '10pt' 192 193 # Grouping the document tree into LaTeX files. List of tuples 194 # (source start file, target name, title, author, document class [howto/manual]). 195 latex_documents = [ 196 ('index', 'Scrapy.tex', 'Scrapy Documentation', 197 'Scrapy developers', 'manual'), 198 ] 199 200 # The name of an image file (relative to this directory) to place at the top of 201 # the title page. 202 #latex_logo = None 203 204 # For "manual" documents, if this is true, then toplevel headings are parts, 205 # not chapters. 206 #latex_use_parts = False 207 208 # Additional stuff for the LaTeX preamble. 209 #latex_preamble = '' 210 211 # Documents to append as an appendix to all manuals. 212 #latex_appendices = [] 213 214 # If false, no module index is generated. 215 #latex_use_modindex = True 216 217 218 # Options for the linkcheck builder 219 # --------------------------------- 220 221 # A list of regular expressions that match URIs that should not be checked when 222 # doing a linkcheck build. 223 linkcheck_ignore = [ 224 'http://localhost:\d+', 'http://hg.scrapy.org', 225 'http://directory.google.com/' 226 ] 227 228 229 # Options for the Coverage extension 230 # ---------------------------------- 231 coverage_ignore_pyobjects = [ 232 # Contract’s add_pre_hook and add_post_hook are not documented because 233 # they should be transparent to contract developers, for whom pre_hook and 234 # post_hook should be the actual concern. 235 r'\bContract\.add_(pre|post)_hook$', 236 237 # ContractsManager is an internal class, developers are not expected to 238 # interact with it directly in any way. 239 r'\bContractsManager\b$', 240 241 # For default contracts we only want to document their general purpose in 242 # their __init__ method, the methods they reimplement to achieve that purpose 243 # should be irrelevant to developers using those contracts. 244 r'\w+Contract\.(adjust_request_args|(pre|post)_process)$', 245 246 # Methods of downloader middlewares are not documented, only the classes 247 # themselves, since downloader middlewares are controlled through Scrapy 248 # settings. 249 r'^scrapy\.downloadermiddlewares\.\w*?\.(\w*?Middleware|DownloaderStats)\.', 250 251 # Base classes of downloader middlewares are implementation details that 252 # are not meant for users. 253 r'^scrapy\.downloadermiddlewares\.\w*?\.Base\w*?Middleware', 254 255 # Private exception used by the command-line interface implementation. 256 r'^scrapy\.exceptions\.UsageError', 257 258 # Methods of BaseItemExporter subclasses are only documented in 259 # BaseItemExporter. 260 r'^scrapy\.exporters\.(?!BaseItemExporter\b)\w*?\.', 261 262 # Extension behavior is only modified through settings. Methods of 263 # extension classes, as well as helper functions, are implementation 264 # details that are not documented. 265 r'^scrapy\.extensions\.[a-z]\w*?\.[A-Z]\w*?\.', # methods 266 r'^scrapy\.extensions\.[a-z]\w*?\.[a-z]', # helper functions 267 268 # Never documented before, and deprecated now. 269 r'^scrapy\.item\.DictItem$', 270 r'^scrapy\.linkextractors\.FilteringLinkExtractor$', 271 272 # Implementation detail of LxmlLinkExtractor 273 r'^scrapy\.linkextractors\.lxmlhtml\.LxmlParserLinkExtractor', 274 ] 275 276 277 # Options for the InterSphinx extension 278 # ------------------------------------- 279 280 intersphinx_mapping = { 281 'coverage': ('https://coverage.readthedocs.io/en/stable', None), 282 'cssselect': ('https://cssselect.readthedocs.io/en/latest', None), 283 'pytest': ('https://docs.pytest.org/en/latest', None), 284 'python': ('https://docs.python.org/3', None), 285 'sphinx': ('https://www.sphinx-doc.org/en/master', None), 286 'tox': ('https://tox.readthedocs.io/en/latest', None), 287 'twisted': ('https://twistedmatrix.com/documents/current', None), 288 'twistedapi': ('https://twistedmatrix.com/documents/current/api', None), 289 } 290 291 292 # Options for sphinx-hoverxref options 293 # ------------------------------------ 294 295 hoverxref_auto_ref = True 296 hoverxref_role_types = { 297 "class": "tooltip", 298 "confval": "tooltip", 299 "hoverxref": "tooltip", 300 "mod": "tooltip", 301 "ref": "tooltip", 302 } 303 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -100,6 +100,9 @@ # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' +# List of Sphinx warnings that will not be raised +suppress_warnings = ['epub.unknown_project_files'] + # Options for HTML output # -----------------------
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -100,6 +100,9 @@\n # The name of the Pygments (syntax highlighting) style to use.\n pygments_style = 'sphinx'\n \n+# List of Sphinx warnings that will not be raised\n+suppress_warnings = ['epub.unknown_project_files']\n+\n \n # Options for HTML output\n # -----------------------\n", "issue": "Downloadable documentation is missing for versions 2.0 and 2.1 on readthedocs.org\nFor some reason downloadable documentation on https://readthedocs.org/projects/scrapy/downloads/ is available only up to version 1.8.\r\n\r\nThat's a minor issue, but I think that I'm not the only one who prefers to read technical papers in the pdf format (to be able to take notes).\r\n\n", "before_files": [{"content": "# Scrapy documentation build configuration file, created by\n# sphinx-quickstart on Mon Nov 24 12:02:52 2008.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# The contents of this file are pickled, so don't put values in the namespace\n# that aren't pickleable (module imports are okay, they're removed automatically).\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nfrom datetime import datetime\nfrom os import path\n\n# If your extensions are in another directory, add it here. If the directory\n# is relative to the documentation root, use os.path.abspath to make it\n# absolute, like shown here.\nsys.path.append(path.join(path.dirname(__file__), \"_ext\"))\nsys.path.insert(0, path.dirname(path.dirname(__file__)))\n\n\n# General configuration\n# ---------------------\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n 'hoverxref.extension',\n 'notfound.extension',\n 'scrapydocs',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.coverage',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.viewcode',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Scrapy'\ncopyright = '2008\u2013{}, Scrapy developers'.format(datetime.now().year)\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\ntry:\n import scrapy\n version = '.'.join(map(str, scrapy.version_info[:2]))\n release = scrapy.__version__\nexcept ImportError:\n version = ''\n release = ''\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\nlanguage = 'en'\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of documents that shouldn't be included in the build.\n#unused_docs = []\n\nexclude_patterns = ['build']\n\n# List of directories, relative to source directory, that shouldn't be searched\n# for source files.\nexclude_trees = ['.build']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n\n# Options for HTML output\n# -----------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n# Add path to the RTD explicitly to robustify builds (otherwise might\n# fail in a clean Debian build env)\nimport sphinx_rtd_theme\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n\n# The style sheet to use for HTML and HTML Help pages. A file of that name\n# must exist either in Sphinx' static/ path, or in one of the custom paths\n# given in html_static_path.\n# html_style = 'scrapydoc.css'\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\nhtml_last_updated_fmt = '%b %d, %Y'\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_use_modindex = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, the reST sources are included in the HTML build as _sources/<name>.\nhtml_copy_source = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# If nonempty, this is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = ''\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Scrapydoc'\n\n\n# Options for LaTeX output\n# ------------------------\n\n# The paper size ('letter' or 'a4').\n#latex_paper_size = 'letter'\n\n# The font size ('10pt', '11pt' or '12pt').\n#latex_font_size = '10pt'\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, document class [howto/manual]).\nlatex_documents = [\n ('index', 'Scrapy.tex', 'Scrapy Documentation',\n 'Scrapy developers', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# Additional stuff for the LaTeX preamble.\n#latex_preamble = ''\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_use_modindex = True\n\n\n# Options for the linkcheck builder\n# ---------------------------------\n\n# A list of regular expressions that match URIs that should not be checked when\n# doing a linkcheck build.\nlinkcheck_ignore = [\n 'http://localhost:\\d+', 'http://hg.scrapy.org',\n 'http://directory.google.com/'\n]\n\n\n# Options for the Coverage extension\n# ----------------------------------\ncoverage_ignore_pyobjects = [\n # Contract\u2019s add_pre_hook and add_post_hook are not documented because\n # they should be transparent to contract developers, for whom pre_hook and\n # post_hook should be the actual concern.\n r'\\bContract\\.add_(pre|post)_hook$',\n\n # ContractsManager is an internal class, developers are not expected to\n # interact with it directly in any way.\n r'\\bContractsManager\\b$',\n\n # For default contracts we only want to document their general purpose in\n # their __init__ method, the methods they reimplement to achieve that purpose\n # should be irrelevant to developers using those contracts.\n r'\\w+Contract\\.(adjust_request_args|(pre|post)_process)$',\n\n # Methods of downloader middlewares are not documented, only the classes\n # themselves, since downloader middlewares are controlled through Scrapy\n # settings.\n r'^scrapy\\.downloadermiddlewares\\.\\w*?\\.(\\w*?Middleware|DownloaderStats)\\.',\n\n # Base classes of downloader middlewares are implementation details that\n # are not meant for users.\n r'^scrapy\\.downloadermiddlewares\\.\\w*?\\.Base\\w*?Middleware',\n\n # Private exception used by the command-line interface implementation.\n r'^scrapy\\.exceptions\\.UsageError',\n\n # Methods of BaseItemExporter subclasses are only documented in\n # BaseItemExporter.\n r'^scrapy\\.exporters\\.(?!BaseItemExporter\\b)\\w*?\\.',\n\n # Extension behavior is only modified through settings. Methods of\n # extension classes, as well as helper functions, are implementation\n # details that are not documented.\n r'^scrapy\\.extensions\\.[a-z]\\w*?\\.[A-Z]\\w*?\\.', # methods\n r'^scrapy\\.extensions\\.[a-z]\\w*?\\.[a-z]', # helper functions\n\n # Never documented before, and deprecated now.\n r'^scrapy\\.item\\.DictItem$',\n r'^scrapy\\.linkextractors\\.FilteringLinkExtractor$',\n\n # Implementation detail of LxmlLinkExtractor\n r'^scrapy\\.linkextractors\\.lxmlhtml\\.LxmlParserLinkExtractor',\n]\n\n\n# Options for the InterSphinx extension\n# -------------------------------------\n\nintersphinx_mapping = {\n 'coverage': ('https://coverage.readthedocs.io/en/stable', None),\n 'cssselect': ('https://cssselect.readthedocs.io/en/latest', None),\n 'pytest': ('https://docs.pytest.org/en/latest', None),\n 'python': ('https://docs.python.org/3', None),\n 'sphinx': ('https://www.sphinx-doc.org/en/master', None),\n 'tox': ('https://tox.readthedocs.io/en/latest', None),\n 'twisted': ('https://twistedmatrix.com/documents/current', None),\n 'twistedapi': ('https://twistedmatrix.com/documents/current/api', None),\n}\n\n\n# Options for sphinx-hoverxref options\n# ------------------------------------\n\nhoverxref_auto_ref = True\nhoverxref_role_types = {\n \"class\": \"tooltip\",\n \"confval\": \"tooltip\",\n \"hoverxref\": \"tooltip\",\n \"mod\": \"tooltip\",\n \"ref\": \"tooltip\",\n}\n", "path": "docs/conf.py"}], "after_files": [{"content": "# Scrapy documentation build configuration file, created by\n# sphinx-quickstart on Mon Nov 24 12:02:52 2008.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# The contents of this file are pickled, so don't put values in the namespace\n# that aren't pickleable (module imports are okay, they're removed automatically).\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nfrom datetime import datetime\nfrom os import path\n\n# If your extensions are in another directory, add it here. If the directory\n# is relative to the documentation root, use os.path.abspath to make it\n# absolute, like shown here.\nsys.path.append(path.join(path.dirname(__file__), \"_ext\"))\nsys.path.insert(0, path.dirname(path.dirname(__file__)))\n\n\n# General configuration\n# ---------------------\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n 'hoverxref.extension',\n 'notfound.extension',\n 'scrapydocs',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.coverage',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.viewcode',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Scrapy'\ncopyright = '2008\u2013{}, Scrapy developers'.format(datetime.now().year)\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\ntry:\n import scrapy\n version = '.'.join(map(str, scrapy.version_info[:2]))\n release = scrapy.__version__\nexcept ImportError:\n version = ''\n release = ''\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\nlanguage = 'en'\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of documents that shouldn't be included in the build.\n#unused_docs = []\n\nexclude_patterns = ['build']\n\n# List of directories, relative to source directory, that shouldn't be searched\n# for source files.\nexclude_trees = ['.build']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# List of Sphinx warnings that will not be raised\nsuppress_warnings = ['epub.unknown_project_files']\n\n\n# Options for HTML output\n# -----------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n# Add path to the RTD explicitly to robustify builds (otherwise might\n# fail in a clean Debian build env)\nimport sphinx_rtd_theme\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n\n# The style sheet to use for HTML and HTML Help pages. A file of that name\n# must exist either in Sphinx' static/ path, or in one of the custom paths\n# given in html_static_path.\n# html_style = 'scrapydoc.css'\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\nhtml_last_updated_fmt = '%b %d, %Y'\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_use_modindex = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, the reST sources are included in the HTML build as _sources/<name>.\nhtml_copy_source = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# If nonempty, this is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = ''\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Scrapydoc'\n\n\n# Options for LaTeX output\n# ------------------------\n\n# The paper size ('letter' or 'a4').\n#latex_paper_size = 'letter'\n\n# The font size ('10pt', '11pt' or '12pt').\n#latex_font_size = '10pt'\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, document class [howto/manual]).\nlatex_documents = [\n ('index', 'Scrapy.tex', 'Scrapy Documentation',\n 'Scrapy developers', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# Additional stuff for the LaTeX preamble.\n#latex_preamble = ''\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_use_modindex = True\n\n\n# Options for the linkcheck builder\n# ---------------------------------\n\n# A list of regular expressions that match URIs that should not be checked when\n# doing a linkcheck build.\nlinkcheck_ignore = [\n 'http://localhost:\\d+', 'http://hg.scrapy.org',\n 'http://directory.google.com/'\n]\n\n\n# Options for the Coverage extension\n# ----------------------------------\ncoverage_ignore_pyobjects = [\n # Contract\u2019s add_pre_hook and add_post_hook are not documented because\n # they should be transparent to contract developers, for whom pre_hook and\n # post_hook should be the actual concern.\n r'\\bContract\\.add_(pre|post)_hook$',\n\n # ContractsManager is an internal class, developers are not expected to\n # interact with it directly in any way.\n r'\\bContractsManager\\b$',\n\n # For default contracts we only want to document their general purpose in\n # their __init__ method, the methods they reimplement to achieve that purpose\n # should be irrelevant to developers using those contracts.\n r'\\w+Contract\\.(adjust_request_args|(pre|post)_process)$',\n\n # Methods of downloader middlewares are not documented, only the classes\n # themselves, since downloader middlewares are controlled through Scrapy\n # settings.\n r'^scrapy\\.downloadermiddlewares\\.\\w*?\\.(\\w*?Middleware|DownloaderStats)\\.',\n\n # Base classes of downloader middlewares are implementation details that\n # are not meant for users.\n r'^scrapy\\.downloadermiddlewares\\.\\w*?\\.Base\\w*?Middleware',\n\n # Private exception used by the command-line interface implementation.\n r'^scrapy\\.exceptions\\.UsageError',\n\n # Methods of BaseItemExporter subclasses are only documented in\n # BaseItemExporter.\n r'^scrapy\\.exporters\\.(?!BaseItemExporter\\b)\\w*?\\.',\n\n # Extension behavior is only modified through settings. Methods of\n # extension classes, as well as helper functions, are implementation\n # details that are not documented.\n r'^scrapy\\.extensions\\.[a-z]\\w*?\\.[A-Z]\\w*?\\.', # methods\n r'^scrapy\\.extensions\\.[a-z]\\w*?\\.[a-z]', # helper functions\n\n # Never documented before, and deprecated now.\n r'^scrapy\\.item\\.DictItem$',\n r'^scrapy\\.linkextractors\\.FilteringLinkExtractor$',\n\n # Implementation detail of LxmlLinkExtractor\n r'^scrapy\\.linkextractors\\.lxmlhtml\\.LxmlParserLinkExtractor',\n]\n\n\n# Options for the InterSphinx extension\n# -------------------------------------\n\nintersphinx_mapping = {\n 'coverage': ('https://coverage.readthedocs.io/en/stable', None),\n 'cssselect': ('https://cssselect.readthedocs.io/en/latest', None),\n 'pytest': ('https://docs.pytest.org/en/latest', None),\n 'python': ('https://docs.python.org/3', None),\n 'sphinx': ('https://www.sphinx-doc.org/en/master', None),\n 'tox': ('https://tox.readthedocs.io/en/latest', None),\n 'twisted': ('https://twistedmatrix.com/documents/current', None),\n 'twistedapi': ('https://twistedmatrix.com/documents/current/api', None),\n}\n\n\n# Options for sphinx-hoverxref options\n# ------------------------------------\n\nhoverxref_auto_ref = True\nhoverxref_role_types = {\n \"class\": \"tooltip\",\n \"confval\": \"tooltip\",\n \"hoverxref\": \"tooltip\",\n \"mod\": \"tooltip\",\n \"ref\": \"tooltip\",\n}\n", "path": "docs/conf.py"}]}
3,661
95
gh_patches_debug_10025
rasdani/github-patches
git_diff
bridgecrewio__checkov-5170
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- CKV_DOCKER_11 false positive when `--platform` is used **Describe the issue** CKV_DOCKER_11 false positive when `--platform` is used (possibly other arguments as well) For reference: _"CKV_DOCKER_11: "Ensure From Alias are unique for multistage builds."_ In other words, make sure you add `as myAlias` at the end of your `FROM` line **Examples** This will PASS as expected: `FROM node:16 as build` Now, add `--platform` and it will FAIL: `FROM --platform=linux/amd64 node:16 as build` **Version (please complete the following information):** ``` > checkov -v 2.3.240 ``` **Additional context** Add any other context about the problem here. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `checkov/dockerfile/checks/AliasIsUnique.py` Content: ``` 1 from __future__ import annotations 2 3 from typing import TYPE_CHECKING 4 5 from checkov.common.models.enums import CheckCategories, CheckResult 6 from checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck 7 8 if TYPE_CHECKING: 9 from dockerfile_parse.parser import _Instruction 10 11 12 class AliasIsUnique(BaseDockerfileCheck): 13 def __init__(self) -> None: 14 """ 15 Ensure From Alias are unique for multistage builds. 16 """ 17 name = "Ensure From Alias are unique for multistage builds." 18 id = "CKV_DOCKER_11" 19 supported_instructions = ("FROM",) 20 categories = (CheckCategories.CONVENTION,) 21 super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions) 22 23 def scan_resource_conf(self, conf: list[_Instruction]) -> tuple[CheckResult, list[_Instruction] | None]: 24 alias = [] 25 for instruction in conf: 26 if " as " in instruction["value"]: 27 temp = instruction["value"].split() 28 alias += [temp[2]] 29 30 if len(alias) == len(set(alias)): 31 return CheckResult.PASSED, None 32 else: 33 return CheckResult.FAILED, [conf[0]] 34 35 36 check = AliasIsUnique() 37 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/checkov/dockerfile/checks/AliasIsUnique.py b/checkov/dockerfile/checks/AliasIsUnique.py --- a/checkov/dockerfile/checks/AliasIsUnique.py +++ b/checkov/dockerfile/checks/AliasIsUnique.py @@ -24,13 +24,12 @@ alias = [] for instruction in conf: if " as " in instruction["value"]: - temp = instruction["value"].split() - alias += [temp[2]] + alias.append(instruction["value"].rsplit(maxsplit=1)[-1]) if len(alias) == len(set(alias)): return CheckResult.PASSED, None - else: - return CheckResult.FAILED, [conf[0]] + + return CheckResult.FAILED, [conf[0]] check = AliasIsUnique()
{"golden_diff": "diff --git a/checkov/dockerfile/checks/AliasIsUnique.py b/checkov/dockerfile/checks/AliasIsUnique.py\n--- a/checkov/dockerfile/checks/AliasIsUnique.py\n+++ b/checkov/dockerfile/checks/AliasIsUnique.py\n@@ -24,13 +24,12 @@\n alias = []\n for instruction in conf:\n if \" as \" in instruction[\"value\"]:\n- temp = instruction[\"value\"].split()\n- alias += [temp[2]]\n+ alias.append(instruction[\"value\"].rsplit(maxsplit=1)[-1])\n \n if len(alias) == len(set(alias)):\n return CheckResult.PASSED, None\n- else:\n- return CheckResult.FAILED, [conf[0]]\n+\n+ return CheckResult.FAILED, [conf[0]]\n \n \n check = AliasIsUnique()\n", "issue": "CKV_DOCKER_11 false positive when `--platform` is used\n**Describe the issue**\r\n\r\nCKV_DOCKER_11 false positive when `--platform` is used (possibly other arguments as well)\r\n\r\nFor reference: _\"CKV_DOCKER_11: \"Ensure From Alias are unique for multistage builds.\"_ In other words, make sure you add `as myAlias` at the end of your `FROM` line\r\n\r\n**Examples**\r\n\r\nThis will PASS as expected:\r\n`FROM node:16 as build`\r\n\r\nNow, add `--platform` and it will FAIL:\r\n`FROM --platform=linux/amd64 node:16 as build`\r\n\r\n**Version (please complete the following information):**\r\n```\r\n> checkov -v \r\n2.3.240\r\n```\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck\n\nif TYPE_CHECKING:\n from dockerfile_parse.parser import _Instruction\n\n\nclass AliasIsUnique(BaseDockerfileCheck):\n def __init__(self) -> None:\n \"\"\"\n Ensure From Alias are unique for multistage builds.\n \"\"\"\n name = \"Ensure From Alias are unique for multistage builds.\"\n id = \"CKV_DOCKER_11\"\n supported_instructions = (\"FROM\",)\n categories = (CheckCategories.CONVENTION,)\n super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)\n\n def scan_resource_conf(self, conf: list[_Instruction]) -> tuple[CheckResult, list[_Instruction] | None]:\n alias = []\n for instruction in conf:\n if \" as \" in instruction[\"value\"]:\n temp = instruction[\"value\"].split()\n alias += [temp[2]]\n\n if len(alias) == len(set(alias)):\n return CheckResult.PASSED, None\n else:\n return CheckResult.FAILED, [conf[0]]\n\n\ncheck = AliasIsUnique()\n", "path": "checkov/dockerfile/checks/AliasIsUnique.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck\n\nif TYPE_CHECKING:\n from dockerfile_parse.parser import _Instruction\n\n\nclass AliasIsUnique(BaseDockerfileCheck):\n def __init__(self) -> None:\n \"\"\"\n Ensure From Alias are unique for multistage builds.\n \"\"\"\n name = \"Ensure From Alias are unique for multistage builds.\"\n id = \"CKV_DOCKER_11\"\n supported_instructions = (\"FROM\",)\n categories = (CheckCategories.CONVENTION,)\n super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)\n\n def scan_resource_conf(self, conf: list[_Instruction]) -> tuple[CheckResult, list[_Instruction] | None]:\n alias = []\n for instruction in conf:\n if \" as \" in instruction[\"value\"]:\n alias.append(instruction[\"value\"].rsplit(maxsplit=1)[-1])\n\n if len(alias) == len(set(alias)):\n return CheckResult.PASSED, None\n\n return CheckResult.FAILED, [conf[0]]\n\n\ncheck = AliasIsUnique()\n", "path": "checkov/dockerfile/checks/AliasIsUnique.py"}]}
797
187
gh_patches_debug_2837
rasdani/github-patches
git_diff
instadeepai__Mava-654
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [TEST] Jax MAPPO System Integration ### What do you want to test? Jax MAPPO full integration test ### Outline of test structure * Unit tests (if possible) * Test component interactions * Ensure not crashing during standard executor and trainer steps ### Definition of done Passing checks, cover all basic component interactions, edge cases considered ### Mandatory checklist before making a PR * [ ] The success criteria laid down in “Definition of done” are met. * [ ] Test code is documented - docstrings for methods and classes, static types for arguments. * [ ] Documentation is updated - README, CONTRIBUTING, or other documentation. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mava/systems/jax/launcher.py` Content: ``` 1 # python3 2 # Copyright 2021 InstaDeep Ltd. All rights reserved. 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 16 """General launcher for systems""" 17 from typing import Any, Dict, List, Optional, Union 18 19 import launchpad as lp 20 import reverb 21 22 from mava.utils import lp_utils 23 from mava.utils.builder_utils import copy_node_fn 24 25 26 class NodeType: 27 """Specify launchpad node types that systems can use.""" 28 29 reverb = lp.ReverbNode 30 courier = lp.CourierNode 31 32 33 class Launcher: 34 """This mava launcher can be used to launch multi-node systems using either single \ 35 or distributed computation.""" 36 37 def __init__( 38 self, 39 multi_process: bool, 40 nodes_on_gpu: List = [], 41 single_process_trainer_period: int = 1, 42 single_process_evaluator_period: int = 10, 43 single_process_max_episodes: Optional[int] = None, 44 name: str = "System", 45 terminal: str = "current_terminal", 46 lp_launch_type: Union[ 47 str, lp.LaunchType 48 ] = lp.LaunchType.LOCAL_MULTI_PROCESSING, 49 ) -> None: 50 """Initialise the launcher. 51 52 If multi-process, set up the launchpad program. 53 Otherwise, create a dictionary for the nodes in the system. 54 55 Args: 56 multi_process : whether to use launchpad to run nodes on separate processes. 57 nodes_on_gpu : which nodes should be run on the GPU. 58 single_process_trainer_period : number of episodes between single process 59 trainer steps. 60 single_process_evaluator_period : num episodes between single process 61 evaluator steps. 62 single_process_max_episodes: maximum number of episodes to run 63 before termination. 64 name : launchpad program name. 65 terminal : terminal for launchpad processes to be shown on. 66 lp_launch_type: launchpad launch type. 67 """ 68 self._multi_process = multi_process 69 self._name = name 70 self._single_process_trainer_period = single_process_trainer_period 71 self._single_process_evaluator_period = single_process_evaluator_period 72 self._single_process_max_episodes = single_process_max_episodes 73 self._terminal = terminal 74 self._lp_launch_type = lp_launch_type 75 if multi_process: 76 self._program = lp.Program(name=name) 77 self._nodes_on_gpu = nodes_on_gpu 78 else: 79 self._nodes: List = [] 80 self._node_dict: Dict = { 81 "data_server": None, 82 "parameter_server": None, 83 "executor": None, 84 "evaluator": None, 85 "trainer": None, 86 } 87 88 def add( 89 self, 90 node_fn: Any, 91 arguments: Any = [], 92 node_type: Union[lp.ReverbNode, lp.CourierNode] = NodeType.courier, 93 name: str = "Node", 94 ) -> Any: 95 """Add a node to the system. 96 97 If multi-processing, add a node to the existing launchpad program, 98 grouped under the given name. 99 This means that when multi-processing, 100 you can have multiple nodes of the same name (e.g. executor). 101 If system is single-process, only one node per name is allowed in the system. 102 103 Args: 104 node_fn : Function returning the system process that will run on the node. 105 arguments : Arguments used when initialising the system process. 106 node_type : Type of launchpad node to use. 107 name : Node name (e.g. executor). 108 109 Raises: 110 ValueError: if single-process and node name is not supported. 111 ValueError: if single-process and trying to init a node more than once. 112 113 Returns: 114 The system process or launchpad node. 115 """ 116 # Create a list of arguments 117 if type(arguments) is not list: 118 arguments = [arguments] 119 120 if self._multi_process: 121 with self._program.group(name): 122 node = self._program.add_node(node_type(node_fn, *arguments)) 123 return node 124 else: 125 if name not in self._node_dict: 126 raise ValueError( 127 f"{name} is not a valid node name." 128 + "Single process currently only supports " 129 + "nodes named: {list(self._node_dict.keys())}" 130 ) 131 elif self._node_dict[name] is not None: 132 raise ValueError( 133 f"Node named {name} initialised more than once." 134 + "Single process currently only supports one node per type." 135 ) 136 137 node_fn = copy_node_fn(node_fn) 138 process = node_fn(*arguments) 139 if node_type == lp.ReverbNode: 140 # Assigning server to self to keep it alive. 141 self._replay_server = reverb.Server(process, port=None) 142 process = reverb.Client(f"localhost:{self._replay_server.port}") 143 self._nodes.append(process) 144 self._node_dict[name] = process 145 return process 146 147 def get_nodes(self) -> List[Any]: 148 """Get the nodes of a single-process system. 149 150 Raises: 151 ValueError: if system is multi-process. 152 153 Returns: 154 System nodes. 155 """ 156 if self._multi_process: 157 raise ValueError("Get nodes only implemented for single process setups.") 158 159 return self._nodes 160 161 def launch(self) -> None: 162 """Launch the launchpad program or start the single-process system loop. 163 164 Returns: 165 None. 166 """ 167 if self._multi_process: 168 local_resources = lp_utils.to_device( 169 program_nodes=self._program.groups.keys(), 170 nodes_on_gpu=self._nodes_on_gpu, 171 ) 172 173 lp.launch( 174 self._program, 175 launch_type=self._lp_launch_type, 176 terminal=self._terminal, 177 local_resources=local_resources, 178 ) 179 else: 180 episode = 1 181 step = 1 182 executor_steps = 0 183 184 data_server = self._node_dict["data_server"] 185 _ = self._node_dict["parameter_server"] 186 executor = self._node_dict["executor"] 187 evaluator = self._node_dict["evaluator"] 188 trainer = self._node_dict["trainer"] 189 190 # getting the maximum queue size 191 queue_threshold = data_server.server_info()["trainer"].max_size 192 193 while ( 194 self._single_process_max_episodes is None 195 or episode <= self._single_process_max_episodes 196 ): 197 # if the queue is too full we skip the executor to ensure that the 198 # executor won't hang when trying to push experience 199 if data_server.server_info()["trainer"].current_size < int( 200 queue_threshold * 0.75 201 ): 202 executor_stats = executor.run_episode_and_log() 203 executor_steps += executor_stats["episode_length"] 204 205 print(f"Episode {episode} completed.") 206 episode += 1 207 208 # if the queue has less than sample_batch_size samples in it we skip 209 # the trainer to ensure that the trainer won't hang 210 if ( 211 data_server.server_info()["trainer"].current_size 212 >= trainer.store.global_config.sample_batch_size 213 and step % self._single_process_trainer_period == 0 214 ): 215 _ = trainer.step() # logging done in trainer 216 print("Performed trainer step.") 217 if step % self._single_process_evaluator_period == 0: 218 _ = evaluator.run_episode_and_log() 219 print("Performed evaluator run.") 220 221 step += 1 222 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mava/systems/jax/launcher.py b/mava/systems/jax/launcher.py --- a/mava/systems/jax/launcher.py +++ b/mava/systems/jax/launcher.py @@ -119,6 +119,7 @@ if self._multi_process: with self._program.group(name): + node_fn = copy_node_fn(node_fn) node = self._program.add_node(node_type(node_fn, *arguments)) return node else:
{"golden_diff": "diff --git a/mava/systems/jax/launcher.py b/mava/systems/jax/launcher.py\n--- a/mava/systems/jax/launcher.py\n+++ b/mava/systems/jax/launcher.py\n@@ -119,6 +119,7 @@\n \n if self._multi_process:\n with self._program.group(name):\n+ node_fn = copy_node_fn(node_fn)\n node = self._program.add_node(node_type(node_fn, *arguments))\n return node\n else:\n", "issue": "[TEST] Jax MAPPO System Integration\n### What do you want to test?\r\nJax MAPPO full integration test\r\n\r\n### Outline of test structure\r\n* Unit tests (if possible)\r\n* Test component interactions\r\n* Ensure not crashing during standard executor and trainer steps\r\n\r\n### Definition of done\r\nPassing checks, cover all basic component interactions, edge cases considered\r\n\r\n### Mandatory checklist before making a PR\r\n* [ ] The success criteria laid down in \u201cDefinition of done\u201d are met.\r\n* [ ] Test code is documented - docstrings for methods and classes, static types for arguments.\r\n* [ ] Documentation is updated - README, CONTRIBUTING, or other documentation.\n", "before_files": [{"content": "# python3\n# Copyright 2021 InstaDeep Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"General launcher for systems\"\"\"\nfrom typing import Any, Dict, List, Optional, Union\n\nimport launchpad as lp\nimport reverb\n\nfrom mava.utils import lp_utils\nfrom mava.utils.builder_utils import copy_node_fn\n\n\nclass NodeType:\n \"\"\"Specify launchpad node types that systems can use.\"\"\"\n\n reverb = lp.ReverbNode\n courier = lp.CourierNode\n\n\nclass Launcher:\n \"\"\"This mava launcher can be used to launch multi-node systems using either single \\\n or distributed computation.\"\"\"\n\n def __init__(\n self,\n multi_process: bool,\n nodes_on_gpu: List = [],\n single_process_trainer_period: int = 1,\n single_process_evaluator_period: int = 10,\n single_process_max_episodes: Optional[int] = None,\n name: str = \"System\",\n terminal: str = \"current_terminal\",\n lp_launch_type: Union[\n str, lp.LaunchType\n ] = lp.LaunchType.LOCAL_MULTI_PROCESSING,\n ) -> None:\n \"\"\"Initialise the launcher.\n\n If multi-process, set up the launchpad program.\n Otherwise, create a dictionary for the nodes in the system.\n\n Args:\n multi_process : whether to use launchpad to run nodes on separate processes.\n nodes_on_gpu : which nodes should be run on the GPU.\n single_process_trainer_period : number of episodes between single process\n trainer steps.\n single_process_evaluator_period : num episodes between single process\n evaluator steps.\n single_process_max_episodes: maximum number of episodes to run\n before termination.\n name : launchpad program name.\n terminal : terminal for launchpad processes to be shown on.\n lp_launch_type: launchpad launch type.\n \"\"\"\n self._multi_process = multi_process\n self._name = name\n self._single_process_trainer_period = single_process_trainer_period\n self._single_process_evaluator_period = single_process_evaluator_period\n self._single_process_max_episodes = single_process_max_episodes\n self._terminal = terminal\n self._lp_launch_type = lp_launch_type\n if multi_process:\n self._program = lp.Program(name=name)\n self._nodes_on_gpu = nodes_on_gpu\n else:\n self._nodes: List = []\n self._node_dict: Dict = {\n \"data_server\": None,\n \"parameter_server\": None,\n \"executor\": None,\n \"evaluator\": None,\n \"trainer\": None,\n }\n\n def add(\n self,\n node_fn: Any,\n arguments: Any = [],\n node_type: Union[lp.ReverbNode, lp.CourierNode] = NodeType.courier,\n name: str = \"Node\",\n ) -> Any:\n \"\"\"Add a node to the system.\n\n If multi-processing, add a node to the existing launchpad program,\n grouped under the given name.\n This means that when multi-processing,\n you can have multiple nodes of the same name (e.g. executor).\n If system is single-process, only one node per name is allowed in the system.\n\n Args:\n node_fn : Function returning the system process that will run on the node.\n arguments : Arguments used when initialising the system process.\n node_type : Type of launchpad node to use.\n name : Node name (e.g. executor).\n\n Raises:\n ValueError: if single-process and node name is not supported.\n ValueError: if single-process and trying to init a node more than once.\n\n Returns:\n The system process or launchpad node.\n \"\"\"\n # Create a list of arguments\n if type(arguments) is not list:\n arguments = [arguments]\n\n if self._multi_process:\n with self._program.group(name):\n node = self._program.add_node(node_type(node_fn, *arguments))\n return node\n else:\n if name not in self._node_dict:\n raise ValueError(\n f\"{name} is not a valid node name.\"\n + \"Single process currently only supports \"\n + \"nodes named: {list(self._node_dict.keys())}\"\n )\n elif self._node_dict[name] is not None:\n raise ValueError(\n f\"Node named {name} initialised more than once.\"\n + \"Single process currently only supports one node per type.\"\n )\n\n node_fn = copy_node_fn(node_fn)\n process = node_fn(*arguments)\n if node_type == lp.ReverbNode:\n # Assigning server to self to keep it alive.\n self._replay_server = reverb.Server(process, port=None)\n process = reverb.Client(f\"localhost:{self._replay_server.port}\")\n self._nodes.append(process)\n self._node_dict[name] = process\n return process\n\n def get_nodes(self) -> List[Any]:\n \"\"\"Get the nodes of a single-process system.\n\n Raises:\n ValueError: if system is multi-process.\n\n Returns:\n System nodes.\n \"\"\"\n if self._multi_process:\n raise ValueError(\"Get nodes only implemented for single process setups.\")\n\n return self._nodes\n\n def launch(self) -> None:\n \"\"\"Launch the launchpad program or start the single-process system loop.\n\n Returns:\n None.\n \"\"\"\n if self._multi_process:\n local_resources = lp_utils.to_device(\n program_nodes=self._program.groups.keys(),\n nodes_on_gpu=self._nodes_on_gpu,\n )\n\n lp.launch(\n self._program,\n launch_type=self._lp_launch_type,\n terminal=self._terminal,\n local_resources=local_resources,\n )\n else:\n episode = 1\n step = 1\n executor_steps = 0\n\n data_server = self._node_dict[\"data_server\"]\n _ = self._node_dict[\"parameter_server\"]\n executor = self._node_dict[\"executor\"]\n evaluator = self._node_dict[\"evaluator\"]\n trainer = self._node_dict[\"trainer\"]\n\n # getting the maximum queue size\n queue_threshold = data_server.server_info()[\"trainer\"].max_size\n\n while (\n self._single_process_max_episodes is None\n or episode <= self._single_process_max_episodes\n ):\n # if the queue is too full we skip the executor to ensure that the\n # executor won't hang when trying to push experience\n if data_server.server_info()[\"trainer\"].current_size < int(\n queue_threshold * 0.75\n ):\n executor_stats = executor.run_episode_and_log()\n executor_steps += executor_stats[\"episode_length\"]\n\n print(f\"Episode {episode} completed.\")\n episode += 1\n\n # if the queue has less than sample_batch_size samples in it we skip\n # the trainer to ensure that the trainer won't hang\n if (\n data_server.server_info()[\"trainer\"].current_size\n >= trainer.store.global_config.sample_batch_size\n and step % self._single_process_trainer_period == 0\n ):\n _ = trainer.step() # logging done in trainer\n print(\"Performed trainer step.\")\n if step % self._single_process_evaluator_period == 0:\n _ = evaluator.run_episode_and_log()\n print(\"Performed evaluator run.\")\n\n step += 1\n", "path": "mava/systems/jax/launcher.py"}], "after_files": [{"content": "# python3\n# Copyright 2021 InstaDeep Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"General launcher for systems\"\"\"\nfrom typing import Any, Dict, List, Optional, Union\n\nimport launchpad as lp\nimport reverb\n\nfrom mava.utils import lp_utils\nfrom mava.utils.builder_utils import copy_node_fn\n\n\nclass NodeType:\n \"\"\"Specify launchpad node types that systems can use.\"\"\"\n\n reverb = lp.ReverbNode\n courier = lp.CourierNode\n\n\nclass Launcher:\n \"\"\"This mava launcher can be used to launch multi-node systems using either single \\\n or distributed computation.\"\"\"\n\n def __init__(\n self,\n multi_process: bool,\n nodes_on_gpu: List = [],\n single_process_trainer_period: int = 1,\n single_process_evaluator_period: int = 10,\n single_process_max_episodes: Optional[int] = None,\n name: str = \"System\",\n terminal: str = \"current_terminal\",\n lp_launch_type: Union[\n str, lp.LaunchType\n ] = lp.LaunchType.LOCAL_MULTI_PROCESSING,\n ) -> None:\n \"\"\"Initialise the launcher.\n\n If multi-process, set up the launchpad program.\n Otherwise, create a dictionary for the nodes in the system.\n\n Args:\n multi_process : whether to use launchpad to run nodes on separate processes.\n nodes_on_gpu : which nodes should be run on the GPU.\n single_process_trainer_period : number of episodes between single process\n trainer steps.\n single_process_evaluator_period : num episodes between single process\n evaluator steps.\n single_process_max_episodes: maximum number of episodes to run\n before termination.\n name : launchpad program name.\n terminal : terminal for launchpad processes to be shown on.\n lp_launch_type: launchpad launch type.\n \"\"\"\n self._multi_process = multi_process\n self._name = name\n self._single_process_trainer_period = single_process_trainer_period\n self._single_process_evaluator_period = single_process_evaluator_period\n self._single_process_max_episodes = single_process_max_episodes\n self._terminal = terminal\n self._lp_launch_type = lp_launch_type\n if multi_process:\n self._program = lp.Program(name=name)\n self._nodes_on_gpu = nodes_on_gpu\n else:\n self._nodes: List = []\n self._node_dict: Dict = {\n \"data_server\": None,\n \"parameter_server\": None,\n \"executor\": None,\n \"evaluator\": None,\n \"trainer\": None,\n }\n\n def add(\n self,\n node_fn: Any,\n arguments: Any = [],\n node_type: Union[lp.ReverbNode, lp.CourierNode] = NodeType.courier,\n name: str = \"Node\",\n ) -> Any:\n \"\"\"Add a node to the system.\n\n If multi-processing, add a node to the existing launchpad program,\n grouped under the given name.\n This means that when multi-processing,\n you can have multiple nodes of the same name (e.g. executor).\n If system is single-process, only one node per name is allowed in the system.\n\n Args:\n node_fn : Function returning the system process that will run on the node.\n arguments : Arguments used when initialising the system process.\n node_type : Type of launchpad node to use.\n name : Node name (e.g. executor).\n\n Raises:\n ValueError: if single-process and node name is not supported.\n ValueError: if single-process and trying to init a node more than once.\n\n Returns:\n The system process or launchpad node.\n \"\"\"\n # Create a list of arguments\n if type(arguments) is not list:\n arguments = [arguments]\n\n if self._multi_process:\n with self._program.group(name):\n node_fn = copy_node_fn(node_fn)\n node = self._program.add_node(node_type(node_fn, *arguments))\n return node\n else:\n if name not in self._node_dict:\n raise ValueError(\n f\"{name} is not a valid node name.\"\n + \"Single process currently only supports \"\n + \"nodes named: {list(self._node_dict.keys())}\"\n )\n elif self._node_dict[name] is not None:\n raise ValueError(\n f\"Node named {name} initialised more than once.\"\n + \"Single process currently only supports one node per type.\"\n )\n\n node_fn = copy_node_fn(node_fn)\n process = node_fn(*arguments)\n if node_type == lp.ReverbNode:\n # Assigning server to self to keep it alive.\n self._replay_server = reverb.Server(process, port=None)\n process = reverb.Client(f\"localhost:{self._replay_server.port}\")\n self._nodes.append(process)\n self._node_dict[name] = process\n return process\n\n def get_nodes(self) -> List[Any]:\n \"\"\"Get the nodes of a single-process system.\n\n Raises:\n ValueError: if system is multi-process.\n\n Returns:\n System nodes.\n \"\"\"\n if self._multi_process:\n raise ValueError(\"Get nodes only implemented for single process setups.\")\n\n return self._nodes\n\n def launch(self) -> None:\n \"\"\"Launch the launchpad program or start the single-process system loop.\n\n Returns:\n None.\n \"\"\"\n if self._multi_process:\n local_resources = lp_utils.to_device(\n program_nodes=self._program.groups.keys(),\n nodes_on_gpu=self._nodes_on_gpu,\n )\n\n lp.launch(\n self._program,\n launch_type=self._lp_launch_type,\n terminal=self._terminal,\n local_resources=local_resources,\n )\n else:\n episode = 1\n step = 1\n executor_steps = 0\n\n data_server = self._node_dict[\"data_server\"]\n _ = self._node_dict[\"parameter_server\"]\n executor = self._node_dict[\"executor\"]\n evaluator = self._node_dict[\"evaluator\"]\n trainer = self._node_dict[\"trainer\"]\n\n # getting the maximum queue size\n queue_threshold = data_server.server_info()[\"trainer\"].max_size\n\n while (\n self._single_process_max_episodes is None\n or episode <= self._single_process_max_episodes\n ):\n # if the queue is too full we skip the executor to ensure that the\n # executor won't hang when trying to push experience\n if data_server.server_info()[\"trainer\"].current_size < int(\n queue_threshold * 0.75\n ):\n executor_stats = executor.run_episode_and_log()\n executor_steps += executor_stats[\"episode_length\"]\n\n print(f\"Episode {episode} completed.\")\n episode += 1\n\n # if the queue has less than sample_batch_size samples in it we skip\n # the trainer to ensure that the trainer won't hang\n if (\n data_server.server_info()[\"trainer\"].current_size\n >= trainer.store.global_config.sample_batch_size\n and step % self._single_process_trainer_period == 0\n ):\n _ = trainer.step() # logging done in trainer\n print(\"Performed trainer step.\")\n if step % self._single_process_evaluator_period == 0:\n _ = evaluator.run_episode_and_log()\n print(\"Performed evaluator run.\")\n\n step += 1\n", "path": "mava/systems/jax/launcher.py"}]}
2,641
112
gh_patches_debug_39155
rasdani/github-patches
git_diff
openstates__openstates-scrapers-2311
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- OH failing since at least 2018-04-18 OH has been failing since 2018-04-18 Based on automated runs it appears that OH has not run successfully in 2 days (2018-04-18). ``` 02:46:44 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/bills/hb602/disapprove 02:46:45 INFO pupa: save bill HB 602 in 132 as bill_cb814bda-43a5-11e8-a12a-020dd716e63e.json 02:46:45 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/bills/hb603/ 02:46:46 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/bills/hb603/actions 02:46:47 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/bills/hb603/votes 02:46:48 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/bills/hb603/cmtevotes 02:46:49 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/bills/hb603/veto 02:46:50 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/bills/hb603/disapprove 02:46:51 INFO pupa: save bill HB 603 in 132 as bill_cf14d0f0-43a5-11e8-a12a-020dd716e63e.json 02:46:51 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/bills/hb604/ 02:46:52 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/bills/hb604/actions 02:46:53 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/bills/hb604/votes 02:46:54 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/bills/hb604/cmtevotes 02:46:55 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/bills/hb604/veto 02:46:56 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/bills/hb604/disapprove 02:46:57 INFO pupa: save bill HB 604 in 132 as bill_d2b4f550-43a5-11e8-a12a-020dd716e63e.json 02:46:57 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/bills/hb605/ 02:46:58 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/bills/hb605/actions 02:46:59 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/bills/hb605/votes 02:47:00 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/bills/hb605/cmtevotes 02:47:01 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/bills/hb605/veto 02:47:02 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/bills/hb605/disapprove 02:47:03 INFO pupa: save bill HB 605 in 132 as bill_d6465452-43a5-11e8-a12a-020dd716e63e.json 02:47:03 INFO scrapelib: GET - http://www.ohiosenate.gov/members/senate-directory loaded Open States pupa settings... oh (scrape, import) bills: {} people: {} Traceback (most recent call last): File "/opt/openstates/venv-pupa//bin/pupa", line 11, in <module> load_entry_point('pupa', 'console_scripts', 'pupa')() File "/opt/openstates/venv-pupa/src/pupa/pupa/cli/__main__.py", line 68, in main subcommands[args.subcommand].handle(args, other) File "/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py", line 260, in handle return self.do_handle(args, other, juris) File "/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py", line 305, in do_handle report['scrape'] = self.do_scrape(juris, args, scrapers) File "/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py", line 173, in do_scrape report[scraper_name] = scraper.do_scrape(**scrape_args) File "/opt/openstates/venv-pupa/src/pupa/pupa/scrape/base.py", line 111, in do_scrape for obj in self.scrape(**kwargs) or []: File "/opt/openstates/openstates/openstates/oh/people.py", line 48, in scrape yield from self.scrape_page('upper', CHAMBER_URLS['upper']) File "/opt/openstates/openstates/openstates/oh/people.py", line 131, in scrape_page page = self.get(url).text File "/opt/openstates/venv-pupa/lib/python3.5/site-packages/requests/sessions.py", line 521, in get return self.request('GET', url, **kwargs) File "/opt/openstates/venv-pupa/lib/python3.5/site-packages/scrapelib/__init__.py", line 292, in request raise HTTPError(resp) scrapelib.HTTPError: 404 while retrieving http://www.ohiosenate.gov/members/senate-directory ``` Visit http://bobsled.openstates.org for more info. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `openstates/oh/people.py` Content: ``` 1 import re 2 3 from pupa.scrape import Person, Scraper, Organization 4 5 import lxml.html 6 7 8 JOINT_COMMITTEE_OVERRIDE = [ # without Joint" in the name. 9 "State Controlling Board", 10 "Legislative Service Commission", 11 "Correctional Institution Inspection Committee" 12 ] 13 14 SUBCOMMITTEES = { 15 # The Senate only has Finance subcommittees 16 "Finance - Corrections Subcommittee": "Finance", 17 "Finance - Education Subcommittee": "Finance", 18 "Finance - General Government Subcommittee": "Finance", 19 "Finance - Higher Ed Subcommittee": "Finance", 20 "Finance - Workforce Subcommittee": "Finance", 21 22 # The House has mostly Finance, but also one more 23 "Community and Family Advancement Subcommittee on Minority Affairs": 24 "Community and Family Advancement", 25 "Finance Subcommittee on Agriculture Development and Natural Resources": 26 "Finance", 27 "Finance Subcommittee on Health and Human Services": "Finance", 28 "Finance Subcommittee on Higher Education": "Finance", 29 "Finance Subcommittee on Primary and Secondary Education": "Finance", 30 "Finance Subcommittee on Transportation": "Finance", 31 "Finance Subcommittee on State Government and Agency Review": "Finance" 32 } 33 34 CHAMBER_URLS = { 35 'upper': "http://www.ohiosenate.gov/members/senate-directory", 36 'lower': "http://www.ohiohouse.gov/members/member-directory" 37 } 38 39 committee_cache = {} 40 41 42 class OHLegislatorScraper(Scraper): 43 def scrape(self, chamber=None): 44 self.committees = {} 45 if chamber: 46 yield from self.scrape_page(chamber, CHAMBER_URLS[chamber]) 47 else: 48 yield from self.scrape_page('upper', CHAMBER_URLS['upper']) 49 yield from self.scrape_page('lower', CHAMBER_URLS['lower']) 50 yield from self.committees.values() 51 52 def fetch_committee_positions(self, a): 53 page = self.get(a.attrib['href']).text 54 page = lxml.html.fromstring(page) 55 page.make_links_absolute(a.attrib['href']) 56 ret = {} 57 for entry in page.xpath("//div[@class='committeeMembers']//td//a"): 58 person = re.sub( 59 "\s+", " ", re.sub("\(.*\)", "", entry.text or "")).strip() 60 61 if person == "": 62 continue 63 64 title = entry.xpath(".//div[@class='title']/text()") or None 65 66 if title: 67 title = title[0] 68 ret[person] = title 69 70 return ret 71 72 def scrape_homepage(self, leg, chamber, homepage): 73 page = self.get(homepage).text 74 page = lxml.html.fromstring(page) 75 page.make_links_absolute(homepage) 76 bio = page.xpath( 77 "//div[@class='biography']//div[@class='right']//p/text()") 78 if bio != []: 79 bio = bio[0] 80 leg.extras['biography'] = bio 81 82 fax_line = [ 83 x.strip() for x in 84 page.xpath( 85 "//div[@class='contactModule']/div[@class='data']/text()" 86 ) 87 if "Fax" in x 88 ] 89 if fax_line: 90 fax_number = re.search( 91 r'(\(\d{3}\)\s\d{3}\-\d{4})', fax_line[0] 92 ).group(1) 93 leg.add_contact_detail(type='fax', value=fax_number, note='Capitol Office') 94 95 ctties = page.xpath("//div[@class='committeeList']//a") 96 for a in ctties: 97 entry = a.text_content() 98 99 if entry in committee_cache: 100 committee_positions = committee_cache[entry] 101 else: 102 committee_positions = self.fetch_committee_positions(a) 103 committee_cache[entry] = committee_positions 104 105 chmbr = "legislature" if "joint" in entry.lower() else chamber 106 if entry in JOINT_COMMITTEE_OVERRIDE: 107 chmbr = "legislature" 108 109 kwargs = {} 110 111 if "subcommittee" in entry.lower(): 112 if entry in SUBCOMMITTEES: 113 kwargs['subcommittee'] = entry 114 entry = SUBCOMMITTEES[entry] 115 else: 116 self.warning("No subcommittee known: '%s'" % (entry)) 117 raise Exception 118 if (chmbr, entry) not in self.committees: 119 org = Organization( 120 name=entry, 121 chamber=chmbr, 122 classification='committee', 123 ) 124 self.committees[(chmbr, entry)] = org 125 else: 126 org = self.committees[(chmbr, entry)] 127 org.add_source(homepage) 128 leg.add_membership(org) 129 130 def scrape_page(self, chamber, url): 131 page = self.get(url).text 132 page = lxml.html.fromstring(page) 133 page.make_links_absolute(url) 134 135 for legislator in page.xpath( 136 "//div[contains(concat(' ', normalize-space(@class), ' '), " 137 "' memberModule ')]" 138 ): 139 img = legislator.xpath( 140 ".//div[@class='thumbnail']//img")[0].attrib['src'] 141 data = legislator.xpath(".//div[@class='data']")[0] 142 homepage = data.xpath(".//a[@class='black']")[0] 143 full_name = homepage.text_content() 144 145 if "Vacant" in full_name: 146 continue 147 148 homepage = homepage.attrib['href'] 149 party = data.xpath( 150 ".//span[@class='partyLetter']")[0].text_content() 151 party = {"R": "Republican", "D": "Democratic"}[party] 152 office_lines = data.xpath("child::text()") 153 phone = office_lines.pop(-1) 154 office = "\n".join(office_lines) 155 h3 = data.xpath("./h3") 156 if len(h3): 157 h3 = h3[0] 158 district = h3.xpath("./br")[0].tail.replace("District", "" 159 ).strip() 160 else: 161 district = re.findall( 162 "\d+\.png", 163 legislator.attrib['style'] 164 )[-1].split(".", 1)[0] 165 166 full_name = re.sub("\s+", " ", full_name).strip() 167 email = ( 168 'rep{0:0{width}}@ohiohouse.gov' 169 if chamber == 'lower' else 170 'sd{0:0{width}}@ohiosenate.gov' 171 ).format(int(district), width=2) 172 173 leg = Person(name=full_name, district=district, 174 party=party, primary_org=chamber, 175 image=img) 176 177 leg.add_contact_detail(type='address', value=office, note='Capitol Office') 178 leg.add_contact_detail(type='voice', value=phone, note='Capitol Office') 179 leg.add_contact_detail(type='email', value=email, note='Capitol Office') 180 181 self.scrape_homepage(leg, chamber, homepage) 182 183 leg.add_source(url) 184 leg.add_link(homepage) 185 yield leg 186 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/openstates/oh/people.py b/openstates/oh/people.py --- a/openstates/oh/people.py +++ b/openstates/oh/people.py @@ -32,7 +32,7 @@ } CHAMBER_URLS = { - 'upper': "http://www.ohiosenate.gov/members/senate-directory", + 'upper': "http://www.ohiosenate.gov/senators", 'lower': "http://www.ohiohouse.gov/members/member-directory" } @@ -45,8 +45,8 @@ if chamber: yield from self.scrape_page(chamber, CHAMBER_URLS[chamber]) else: - yield from self.scrape_page('upper', CHAMBER_URLS['upper']) - yield from self.scrape_page('lower', CHAMBER_URLS['lower']) + yield from self.scrape_senator_page('upper', CHAMBER_URLS['upper']) + yield from self.scrape_member_page('lower', CHAMBER_URLS['lower']) yield from self.committees.values() def fetch_committee_positions(self, a): @@ -127,7 +127,7 @@ org.add_source(homepage) leg.add_membership(org) - def scrape_page(self, chamber, url): + def scrape_member_page(self, chamber, url): page = self.get(url).text page = lxml.html.fromstring(page) page.make_links_absolute(url) @@ -183,3 +183,44 @@ leg.add_source(url) leg.add_link(homepage) yield leg + + def scrape_senator_page(self, chamber, url): + page = self.get(url).text + page = lxml.html.fromstring(page) + page.make_links_absolute(url) + + for legislator in page.xpath("//div[contains(concat(' ', normalize-space(@class), ' '), " + "' portraitContainer ')]"): + img = legislator.xpath(".//div[@class='profileThumbnailBoundingBox']/@style")[0] + img = img[img.find('(')+1:img.find(')')] + full_name = legislator.xpath(".//div[@class='profileName']/a/text()")[0] + homepage_url = legislator.xpath(".//a[@class='profileImageLink']")[0].attrib['href'] + district = legislator.xpath(".//div[@class='profileDistrict']" + "/a/text()")[0].split("#")[1] + + if "Vacant" in full_name: + continue + + homepage = self.get(homepage_url).text + page = lxml.html.fromstring(homepage) + phone = page.xpath("//div[@class='phone']/span/text()")[0] + + address_lines = page.xpath("//div[@class='address']/span/text()") + address = "\n".join(address_lines) + + email = ( + 'rep{0:0{width}}@ohiohouse.gov' + if chamber == 'lower' else + 'sd{0:0{width}}@ohiosenate.gov' + ).format(int(district), width=2) + + leg = Person(name=full_name, district=district, + primary_org=chamber, image=img) + + leg.add_contact_detail(type='address', value=address, note='Capitol Office') + leg.add_contact_detail(type='voice', value=phone, note='Capitol Office') + leg.add_contact_detail(type='email', value=email, note='Capitol Office') + + leg.add_source(url) + leg.add_link(homepage_url) + yield leg
{"golden_diff": "diff --git a/openstates/oh/people.py b/openstates/oh/people.py\n--- a/openstates/oh/people.py\n+++ b/openstates/oh/people.py\n@@ -32,7 +32,7 @@\n }\n \n CHAMBER_URLS = {\n- 'upper': \"http://www.ohiosenate.gov/members/senate-directory\",\n+ 'upper': \"http://www.ohiosenate.gov/senators\",\n 'lower': \"http://www.ohiohouse.gov/members/member-directory\"\n }\n \n@@ -45,8 +45,8 @@\n if chamber:\n yield from self.scrape_page(chamber, CHAMBER_URLS[chamber])\n else:\n- yield from self.scrape_page('upper', CHAMBER_URLS['upper'])\n- yield from self.scrape_page('lower', CHAMBER_URLS['lower'])\n+ yield from self.scrape_senator_page('upper', CHAMBER_URLS['upper'])\n+ yield from self.scrape_member_page('lower', CHAMBER_URLS['lower'])\n yield from self.committees.values()\n \n def fetch_committee_positions(self, a):\n@@ -127,7 +127,7 @@\n org.add_source(homepage)\n leg.add_membership(org)\n \n- def scrape_page(self, chamber, url):\n+ def scrape_member_page(self, chamber, url):\n page = self.get(url).text\n page = lxml.html.fromstring(page)\n page.make_links_absolute(url)\n@@ -183,3 +183,44 @@\n leg.add_source(url)\n leg.add_link(homepage)\n yield leg\n+\n+ def scrape_senator_page(self, chamber, url):\n+ page = self.get(url).text\n+ page = lxml.html.fromstring(page)\n+ page.make_links_absolute(url)\n+\n+ for legislator in page.xpath(\"//div[contains(concat(' ', normalize-space(@class), ' '), \"\n+ \"' portraitContainer ')]\"):\n+ img = legislator.xpath(\".//div[@class='profileThumbnailBoundingBox']/@style\")[0]\n+ img = img[img.find('(')+1:img.find(')')]\n+ full_name = legislator.xpath(\".//div[@class='profileName']/a/text()\")[0]\n+ homepage_url = legislator.xpath(\".//a[@class='profileImageLink']\")[0].attrib['href']\n+ district = legislator.xpath(\".//div[@class='profileDistrict']\"\n+ \"/a/text()\")[0].split(\"#\")[1]\n+\n+ if \"Vacant\" in full_name:\n+ continue\n+\n+ homepage = self.get(homepage_url).text\n+ page = lxml.html.fromstring(homepage)\n+ phone = page.xpath(\"//div[@class='phone']/span/text()\")[0]\n+\n+ address_lines = page.xpath(\"//div[@class='address']/span/text()\")\n+ address = \"\\n\".join(address_lines)\n+\n+ email = (\n+ 'rep{0:0{width}}@ohiohouse.gov'\n+ if chamber == 'lower' else\n+ 'sd{0:0{width}}@ohiosenate.gov'\n+ ).format(int(district), width=2)\n+\n+ leg = Person(name=full_name, district=district,\n+ primary_org=chamber, image=img)\n+\n+ leg.add_contact_detail(type='address', value=address, note='Capitol Office')\n+ leg.add_contact_detail(type='voice', value=phone, note='Capitol Office')\n+ leg.add_contact_detail(type='email', value=email, note='Capitol Office')\n+\n+ leg.add_source(url)\n+ leg.add_link(homepage_url)\n+ yield leg\n", "issue": "OH failing since at least 2018-04-18\nOH has been failing since 2018-04-18\n\nBased on automated runs it appears that OH has not run successfully in 2 days (2018-04-18).\n\n\n```\n 02:46:44 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/bills/hb602/disapprove\n02:46:45 INFO pupa: save bill HB 602 in 132 as bill_cb814bda-43a5-11e8-a12a-020dd716e63e.json\n02:46:45 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/bills/hb603/\n02:46:46 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/bills/hb603/actions\n02:46:47 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/bills/hb603/votes\n02:46:48 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/bills/hb603/cmtevotes\n02:46:49 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/bills/hb603/veto\n02:46:50 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/bills/hb603/disapprove\n02:46:51 INFO pupa: save bill HB 603 in 132 as bill_cf14d0f0-43a5-11e8-a12a-020dd716e63e.json\n02:46:51 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/bills/hb604/\n02:46:52 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/bills/hb604/actions\n02:46:53 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/bills/hb604/votes\n02:46:54 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/bills/hb604/cmtevotes\n02:46:55 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/bills/hb604/veto\n02:46:56 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/bills/hb604/disapprove\n02:46:57 INFO pupa: save bill HB 604 in 132 as bill_d2b4f550-43a5-11e8-a12a-020dd716e63e.json\n02:46:57 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/bills/hb605/\n02:46:58 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/bills/hb605/actions\n02:46:59 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/bills/hb605/votes\n02:47:00 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/bills/hb605/cmtevotes\n02:47:01 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/bills/hb605/veto\n02:47:02 INFO scrapelib: GET - http://search-prod.lis.state.oh.us/solarapi/v1/general_assembly_132/bills/hb605/disapprove\n02:47:03 INFO pupa: save bill HB 605 in 132 as bill_d6465452-43a5-11e8-a12a-020dd716e63e.json\n02:47:03 INFO scrapelib: GET - http://www.ohiosenate.gov/members/senate-directory\nloaded Open States pupa settings...\noh (scrape, import)\n bills: {}\n people: {}\nTraceback (most recent call last):\n File \"/opt/openstates/venv-pupa//bin/pupa\", line 11, in <module>\n load_entry_point('pupa', 'console_scripts', 'pupa')()\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/cli/__main__.py\", line 68, in main\n subcommands[args.subcommand].handle(args, other)\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py\", line 260, in handle\n return self.do_handle(args, other, juris)\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py\", line 305, in do_handle\n report['scrape'] = self.do_scrape(juris, args, scrapers)\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py\", line 173, in do_scrape\n report[scraper_name] = scraper.do_scrape(**scrape_args)\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/scrape/base.py\", line 111, in do_scrape\n for obj in self.scrape(**kwargs) or []:\n File \"/opt/openstates/openstates/openstates/oh/people.py\", line 48, in scrape\n yield from self.scrape_page('upper', CHAMBER_URLS['upper'])\n File \"/opt/openstates/openstates/openstates/oh/people.py\", line 131, in scrape_page\n page = self.get(url).text\n File \"/opt/openstates/venv-pupa/lib/python3.5/site-packages/requests/sessions.py\", line 521, in get\n return self.request('GET', url, **kwargs)\n File \"/opt/openstates/venv-pupa/lib/python3.5/site-packages/scrapelib/__init__.py\", line 292, in request\n raise HTTPError(resp)\nscrapelib.HTTPError: 404 while retrieving http://www.ohiosenate.gov/members/senate-directory\n```\n\nVisit http://bobsled.openstates.org for more info.\n\n", "before_files": [{"content": "import re\n\nfrom pupa.scrape import Person, Scraper, Organization\n\nimport lxml.html\n\n\nJOINT_COMMITTEE_OVERRIDE = [ # without Joint\" in the name.\n \"State Controlling Board\",\n \"Legislative Service Commission\",\n \"Correctional Institution Inspection Committee\"\n]\n\nSUBCOMMITTEES = {\n # The Senate only has Finance subcommittees\n \"Finance - Corrections Subcommittee\": \"Finance\",\n \"Finance - Education Subcommittee\": \"Finance\",\n \"Finance - General Government Subcommittee\": \"Finance\",\n \"Finance - Higher Ed Subcommittee\": \"Finance\",\n \"Finance - Workforce Subcommittee\": \"Finance\",\n\n # The House has mostly Finance, but also one more\n \"Community and Family Advancement Subcommittee on Minority Affairs\":\n \"Community and Family Advancement\",\n \"Finance Subcommittee on Agriculture Development and Natural Resources\":\n \"Finance\",\n \"Finance Subcommittee on Health and Human Services\": \"Finance\",\n \"Finance Subcommittee on Higher Education\": \"Finance\",\n \"Finance Subcommittee on Primary and Secondary Education\": \"Finance\",\n \"Finance Subcommittee on Transportation\": \"Finance\",\n \"Finance Subcommittee on State Government and Agency Review\": \"Finance\"\n}\n\nCHAMBER_URLS = {\n 'upper': \"http://www.ohiosenate.gov/members/senate-directory\",\n 'lower': \"http://www.ohiohouse.gov/members/member-directory\"\n}\n\ncommittee_cache = {}\n\n\nclass OHLegislatorScraper(Scraper):\n def scrape(self, chamber=None):\n self.committees = {}\n if chamber:\n yield from self.scrape_page(chamber, CHAMBER_URLS[chamber])\n else:\n yield from self.scrape_page('upper', CHAMBER_URLS['upper'])\n yield from self.scrape_page('lower', CHAMBER_URLS['lower'])\n yield from self.committees.values()\n\n def fetch_committee_positions(self, a):\n page = self.get(a.attrib['href']).text\n page = lxml.html.fromstring(page)\n page.make_links_absolute(a.attrib['href'])\n ret = {}\n for entry in page.xpath(\"//div[@class='committeeMembers']//td//a\"):\n person = re.sub(\n \"\\s+\", \" \", re.sub(\"\\(.*\\)\", \"\", entry.text or \"\")).strip()\n\n if person == \"\":\n continue\n\n title = entry.xpath(\".//div[@class='title']/text()\") or None\n\n if title:\n title = title[0]\n ret[person] = title\n\n return ret\n\n def scrape_homepage(self, leg, chamber, homepage):\n page = self.get(homepage).text\n page = lxml.html.fromstring(page)\n page.make_links_absolute(homepage)\n bio = page.xpath(\n \"//div[@class='biography']//div[@class='right']//p/text()\")\n if bio != []:\n bio = bio[0]\n leg.extras['biography'] = bio\n\n fax_line = [\n x.strip() for x in\n page.xpath(\n \"//div[@class='contactModule']/div[@class='data']/text()\"\n )\n if \"Fax\" in x\n ]\n if fax_line:\n fax_number = re.search(\n r'(\\(\\d{3}\\)\\s\\d{3}\\-\\d{4})', fax_line[0]\n ).group(1)\n leg.add_contact_detail(type='fax', value=fax_number, note='Capitol Office')\n\n ctties = page.xpath(\"//div[@class='committeeList']//a\")\n for a in ctties:\n entry = a.text_content()\n\n if entry in committee_cache:\n committee_positions = committee_cache[entry]\n else:\n committee_positions = self.fetch_committee_positions(a)\n committee_cache[entry] = committee_positions\n\n chmbr = \"legislature\" if \"joint\" in entry.lower() else chamber\n if entry in JOINT_COMMITTEE_OVERRIDE:\n chmbr = \"legislature\"\n\n kwargs = {}\n\n if \"subcommittee\" in entry.lower():\n if entry in SUBCOMMITTEES:\n kwargs['subcommittee'] = entry\n entry = SUBCOMMITTEES[entry]\n else:\n self.warning(\"No subcommittee known: '%s'\" % (entry))\n raise Exception\n if (chmbr, entry) not in self.committees:\n org = Organization(\n name=entry,\n chamber=chmbr,\n classification='committee',\n )\n self.committees[(chmbr, entry)] = org\n else:\n org = self.committees[(chmbr, entry)]\n org.add_source(homepage)\n leg.add_membership(org)\n\n def scrape_page(self, chamber, url):\n page = self.get(url).text\n page = lxml.html.fromstring(page)\n page.make_links_absolute(url)\n\n for legislator in page.xpath(\n \"//div[contains(concat(' ', normalize-space(@class), ' '), \"\n \"' memberModule ')]\"\n ):\n img = legislator.xpath(\n \".//div[@class='thumbnail']//img\")[0].attrib['src']\n data = legislator.xpath(\".//div[@class='data']\")[0]\n homepage = data.xpath(\".//a[@class='black']\")[0]\n full_name = homepage.text_content()\n\n if \"Vacant\" in full_name:\n continue\n\n homepage = homepage.attrib['href']\n party = data.xpath(\n \".//span[@class='partyLetter']\")[0].text_content()\n party = {\"R\": \"Republican\", \"D\": \"Democratic\"}[party]\n office_lines = data.xpath(\"child::text()\")\n phone = office_lines.pop(-1)\n office = \"\\n\".join(office_lines)\n h3 = data.xpath(\"./h3\")\n if len(h3):\n h3 = h3[0]\n district = h3.xpath(\"./br\")[0].tail.replace(\"District\", \"\"\n ).strip()\n else:\n district = re.findall(\n \"\\d+\\.png\",\n legislator.attrib['style']\n )[-1].split(\".\", 1)[0]\n\n full_name = re.sub(\"\\s+\", \" \", full_name).strip()\n email = (\n 'rep{0:0{width}}@ohiohouse.gov'\n if chamber == 'lower' else\n 'sd{0:0{width}}@ohiosenate.gov'\n ).format(int(district), width=2)\n\n leg = Person(name=full_name, district=district,\n party=party, primary_org=chamber,\n image=img)\n\n leg.add_contact_detail(type='address', value=office, note='Capitol Office')\n leg.add_contact_detail(type='voice', value=phone, note='Capitol Office')\n leg.add_contact_detail(type='email', value=email, note='Capitol Office')\n\n self.scrape_homepage(leg, chamber, homepage)\n\n leg.add_source(url)\n leg.add_link(homepage)\n yield leg\n", "path": "openstates/oh/people.py"}], "after_files": [{"content": "import re\n\nfrom pupa.scrape import Person, Scraper, Organization\n\nimport lxml.html\n\n\nJOINT_COMMITTEE_OVERRIDE = [ # without Joint\" in the name.\n \"State Controlling Board\",\n \"Legislative Service Commission\",\n \"Correctional Institution Inspection Committee\"\n]\n\nSUBCOMMITTEES = {\n # The Senate only has Finance subcommittees\n \"Finance - Corrections Subcommittee\": \"Finance\",\n \"Finance - Education Subcommittee\": \"Finance\",\n \"Finance - General Government Subcommittee\": \"Finance\",\n \"Finance - Higher Ed Subcommittee\": \"Finance\",\n \"Finance - Workforce Subcommittee\": \"Finance\",\n\n # The House has mostly Finance, but also one more\n \"Community and Family Advancement Subcommittee on Minority Affairs\":\n \"Community and Family Advancement\",\n \"Finance Subcommittee on Agriculture Development and Natural Resources\":\n \"Finance\",\n \"Finance Subcommittee on Health and Human Services\": \"Finance\",\n \"Finance Subcommittee on Higher Education\": \"Finance\",\n \"Finance Subcommittee on Primary and Secondary Education\": \"Finance\",\n \"Finance Subcommittee on Transportation\": \"Finance\",\n \"Finance Subcommittee on State Government and Agency Review\": \"Finance\"\n}\n\nCHAMBER_URLS = {\n 'upper': \"http://www.ohiosenate.gov/senators\",\n 'lower': \"http://www.ohiohouse.gov/members/member-directory\"\n}\n\ncommittee_cache = {}\n\n\nclass OHLegislatorScraper(Scraper):\n def scrape(self, chamber=None):\n self.committees = {}\n if chamber:\n yield from self.scrape_page(chamber, CHAMBER_URLS[chamber])\n else:\n yield from self.scrape_senator_page('upper', CHAMBER_URLS['upper'])\n yield from self.scrape_member_page('lower', CHAMBER_URLS['lower'])\n yield from self.committees.values()\n\n def fetch_committee_positions(self, a):\n page = self.get(a.attrib['href']).text\n page = lxml.html.fromstring(page)\n page.make_links_absolute(a.attrib['href'])\n ret = {}\n for entry in page.xpath(\"//div[@class='committeeMembers']//td//a\"):\n person = re.sub(\n \"\\s+\", \" \", re.sub(\"\\(.*\\)\", \"\", entry.text or \"\")).strip()\n\n if person == \"\":\n continue\n\n title = entry.xpath(\".//div[@class='title']/text()\") or None\n\n if title:\n title = title[0]\n ret[person] = title\n\n return ret\n\n def scrape_homepage(self, leg, chamber, homepage):\n page = self.get(homepage).text\n page = lxml.html.fromstring(page)\n page.make_links_absolute(homepage)\n bio = page.xpath(\n \"//div[@class='biography']//div[@class='right']//p/text()\")\n if bio != []:\n bio = bio[0]\n leg.extras['biography'] = bio\n\n fax_line = [\n x.strip() for x in\n page.xpath(\n \"//div[@class='contactModule']/div[@class='data']/text()\"\n )\n if \"Fax\" in x\n ]\n if fax_line:\n fax_number = re.search(\n r'(\\(\\d{3}\\)\\s\\d{3}\\-\\d{4})', fax_line[0]\n ).group(1)\n leg.add_contact_detail(type='fax', value=fax_number, note='Capitol Office')\n\n ctties = page.xpath(\"//div[@class='committeeList']//a\")\n for a in ctties:\n entry = a.text_content()\n\n if entry in committee_cache:\n committee_positions = committee_cache[entry]\n else:\n committee_positions = self.fetch_committee_positions(a)\n committee_cache[entry] = committee_positions\n\n chmbr = \"legislature\" if \"joint\" in entry.lower() else chamber\n if entry in JOINT_COMMITTEE_OVERRIDE:\n chmbr = \"legislature\"\n\n kwargs = {}\n\n if \"subcommittee\" in entry.lower():\n if entry in SUBCOMMITTEES:\n kwargs['subcommittee'] = entry\n entry = SUBCOMMITTEES[entry]\n else:\n self.warning(\"No subcommittee known: '%s'\" % (entry))\n raise Exception\n if (chmbr, entry) not in self.committees:\n org = Organization(\n name=entry,\n chamber=chmbr,\n classification='committee',\n )\n self.committees[(chmbr, entry)] = org\n else:\n org = self.committees[(chmbr, entry)]\n org.add_source(homepage)\n leg.add_membership(org)\n\n def scrape_member_page(self, chamber, url):\n page = self.get(url).text\n page = lxml.html.fromstring(page)\n page.make_links_absolute(url)\n\n for legislator in page.xpath(\n \"//div[contains(concat(' ', normalize-space(@class), ' '), \"\n \"' memberModule ')]\"\n ):\n img = legislator.xpath(\n \".//div[@class='thumbnail']//img\")[0].attrib['src']\n data = legislator.xpath(\".//div[@class='data']\")[0]\n homepage = data.xpath(\".//a[@class='black']\")[0]\n full_name = homepage.text_content()\n\n if \"Vacant\" in full_name:\n continue\n\n homepage = homepage.attrib['href']\n party = data.xpath(\n \".//span[@class='partyLetter']\")[0].text_content()\n party = {\"R\": \"Republican\", \"D\": \"Democratic\"}[party]\n office_lines = data.xpath(\"child::text()\")\n phone = office_lines.pop(-1)\n office = \"\\n\".join(office_lines)\n h3 = data.xpath(\"./h3\")\n if len(h3):\n h3 = h3[0]\n district = h3.xpath(\"./br\")[0].tail.replace(\"District\", \"\"\n ).strip()\n else:\n district = re.findall(\n \"\\d+\\.png\",\n legislator.attrib['style']\n )[-1].split(\".\", 1)[0]\n\n full_name = re.sub(\"\\s+\", \" \", full_name).strip()\n email = (\n 'rep{0:0{width}}@ohiohouse.gov'\n if chamber == 'lower' else\n 'sd{0:0{width}}@ohiosenate.gov'\n ).format(int(district), width=2)\n\n leg = Person(name=full_name, district=district,\n party=party, primary_org=chamber,\n image=img)\n\n leg.add_contact_detail(type='address', value=office, note='Capitol Office')\n leg.add_contact_detail(type='voice', value=phone, note='Capitol Office')\n leg.add_contact_detail(type='email', value=email, note='Capitol Office')\n\n self.scrape_homepage(leg, chamber, homepage)\n\n leg.add_source(url)\n leg.add_link(homepage)\n yield leg\n\n def scrape_senator_page(self, chamber, url):\n page = self.get(url).text\n page = lxml.html.fromstring(page)\n page.make_links_absolute(url)\n\n for legislator in page.xpath(\"//div[contains(concat(' ', normalize-space(@class), ' '), \"\n \"' portraitContainer ')]\"):\n img = legislator.xpath(\".//div[@class='profileThumbnailBoundingBox']/@style\")[0]\n img = img[img.find('(')+1:img.find(')')]\n full_name = legislator.xpath(\".//div[@class='profileName']/a/text()\")[0]\n homepage_url = legislator.xpath(\".//a[@class='profileImageLink']\")[0].attrib['href']\n district = legislator.xpath(\".//div[@class='profileDistrict']\"\n \"/a/text()\")[0].split(\"#\")[1]\n\n if \"Vacant\" in full_name:\n continue\n\n homepage = self.get(homepage_url).text\n page = lxml.html.fromstring(homepage)\n phone = page.xpath(\"//div[@class='phone']/span/text()\")[0]\n\n address_lines = page.xpath(\"//div[@class='address']/span/text()\")\n address = \"\\n\".join(address_lines)\n\n email = (\n 'rep{0:0{width}}@ohiohouse.gov'\n if chamber == 'lower' else\n 'sd{0:0{width}}@ohiosenate.gov'\n ).format(int(district), width=2)\n\n leg = Person(name=full_name, district=district,\n primary_org=chamber, image=img)\n\n leg.add_contact_detail(type='address', value=address, note='Capitol Office')\n leg.add_contact_detail(type='voice', value=phone, note='Capitol Office')\n leg.add_contact_detail(type='email', value=email, note='Capitol Office')\n\n leg.add_source(url)\n leg.add_link(homepage_url)\n yield leg\n", "path": "openstates/oh/people.py"}]}
3,950
810
gh_patches_debug_37821
rasdani/github-patches
git_diff
sunpy__sunpy-3056
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- AIA FITS headers have inaccurate HGS coordinates AIA FITS headers apparently have inaccurate Heliographic Stonyhurst (HGS) coordinates (`HGLN_OBS` and `HGLT_OBS`). For example, the distance from Earth center does not match the orbit radius. We currently use these keywords to generate the `observer_coordinate` for an AIA map, so we provide an inaccurate observer location. The headers also have Heliographic Aries Ecliptic (HAE) coordinates (`HAEX_OBS`, `HAEY_OBS`, and `HAEZ_OBS`), and the HAE coordinates are inconsistent with the HGS coordinates in the same header. We have previously verified the accuracy of SunPy's transformation from HAE to HGS (e.g., https://github.com/sunpy/sunpy/issues/2445#issuecomment-364531159). The HAE coordinates appear to be credible, and likely should be trusted over the HGS coordinates. My recommendation is for `AIAMap` to override the generation of `observer_coordinate` to use the HAE coordinates. Discuss. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sunpy/map/sources/sdo.py` Content: ``` 1 """SDO Map subclass definitions""" 2 #pylint: disable=W0221,W0222,E1101,E1121 3 4 __author__ = "Keith Hughitt" 5 __email__ = "[email protected]" 6 7 import matplotlib.pyplot as plt 8 9 from astropy.visualization.mpl_normalize import ImageNormalize 10 from astropy.visualization import AsinhStretch 11 12 from sunpy.map import GenericMap 13 from sunpy.map.sources.source_type import source_stretch 14 15 __all__ = ['AIAMap', 'HMIMap'] 16 17 18 class AIAMap(GenericMap): 19 """AIA Image Map. 20 21 The Atmospheric Imaging Assembly is a set of four telescopes that employ 22 normal-incidence, multi-layer coated optics to provide narrow-band imaging 23 of the Sun. It provides high resolution full-disk images of the corona and 24 transition region up to 0.5 solar radii above the solar limb with 1.5 25 arcsecond angular resolution and 12-second temporal resolution. It observes 26 the Sun in the following seven extreme ultraviolet bandpasses: 94 A 27 (Fe XVIII), 131 A (Fe VIII, XXI), 171 A (Fe IX), 193 A (Fe XII, XXIV), 28 211 A (Fe XIV), 304 A (He II), 335 A (Fe XVI). One telescope observes 29 in the visible 1600 A (C IV) and the nearby continuun (1700 A). 30 31 References 32 ---------- 33 * `SDO Mission Page <https://sdo.gsfc.nasa.gov/>`_ 34 * `Instrument Page <https://aia.lmsal.com>`_ 35 * `Fits Header keywords <http://jsoc.stanford.edu/doc/keywords/AIA/AIA02840_A_AIA-SDO_FITS_Keyword_Documents.pdf>`_ 36 * `Analysis Guide <https://www.lmsal.com/sdodocs/doc/dcur/SDOD0060.zip/zip/entry/>`_ 37 * `Instrument Paper <https://doi.org/10.1007/s11207-011-9776-8>`_ 38 * `wavelengths and temperature response reference <https://www.lmsal.com/sdodocs/doc/dcur/SDOD0060.zip/zip/entry/figures/aia_tel_resp.png>`_ 39 """ 40 41 def __init__(self, data, header, **kwargs): 42 GenericMap.__init__(self, data, header, **kwargs) 43 44 # Fill in some missing info 45 self.meta['detector'] = "AIA" 46 self._nickname = self.detector 47 self.plot_settings['cmap'] = plt.get_cmap(self._get_cmap_name()) 48 self.plot_settings['norm'] = ImageNormalize(stretch=source_stretch(self.meta, AsinhStretch(0.01))) 49 50 @property 51 def observatory(self): 52 """ 53 Returns the observatory. 54 """ 55 return self.meta['telescop'].split('/')[0] 56 57 @classmethod 58 def is_datasource_for(cls, data, header, **kwargs): 59 """Determines if header corresponds to an AIA image""" 60 return header.get('instrume', '').startswith('AIA') 61 62 63 class HMIMap(GenericMap): 64 """HMI Image Map. 65 66 HMI consists of a refracting telescope, a polarization selector, 67 an image stabilization system, a narrow band tunable filter 68 and two 4096 pixel CCD cameras. It observes the full solar disk in the Fe I 69 absorption line at 6173 Angstrom with a resolution of 1 arc-second. 70 HMI takes images in a sequence of tuning and polarizations at a 4-second 71 cadence for each camera. One camera is dedicated to a 45 s Doppler and 72 line-of-sight field sequence while the other to a 90 s vector field 73 sequence. 74 75 References 76 ---------- 77 * `SDO Mission Page <https://sdo.gsfc.nasa.gov/>`_ 78 * `Instrument Page <http://hmi.stanford.edu>`_ 79 * `Analysis Guide <http://hmi.stanford.edu/doc/magnetic/guide.pdf>`_ 80 """ 81 def __init__(self, data, header, **kwargs): 82 83 GenericMap.__init__(self, data, header, **kwargs) 84 85 self.meta['detector'] = "HMI" 86 # self.meta['instrme'] = "HMI" 87 # self.meta['obsrvtry'] = "SDO" 88 self._nickname = self.detector 89 90 @property 91 def measurement(self): 92 """ 93 Returns the measurement type. 94 """ 95 return self.meta['content'].split(" ")[0].lower() 96 97 @property 98 def observatory(self): 99 """ 100 Returns the observatory. 101 """ 102 return self.meta['telescop'].split('/')[0] 103 104 @classmethod 105 def is_datasource_for(cls, data, header, **kwargs): 106 """Determines if header corresponds to an HMI image""" 107 return header.get('instrume', '').startswith('HMI') 108 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sunpy/map/sources/sdo.py b/sunpy/map/sources/sdo.py --- a/sunpy/map/sources/sdo.py +++ b/sunpy/map/sources/sdo.py @@ -6,6 +6,14 @@ import matplotlib.pyplot as plt +from astropy.coordinates import CartesianRepresentation, SkyCoord +# Versions of Astropy that do not have HeliocentricMeanEcliptic have the same frame +# with the incorrect name HeliocentricTrueEcliptic +try: + from astropy.coordinates import HeliocentricMeanEcliptic +except ImportError: + from astropy.coordinates import HeliocentricTrueEcliptic as HeliocentricMeanEcliptic +import astropy.units as u from astropy.visualization.mpl_normalize import ImageNormalize from astropy.visualization import AsinhStretch @@ -28,6 +36,13 @@ 211 A (Fe XIV), 304 A (He II), 335 A (Fe XVI). One telescope observes in the visible 1600 A (C IV) and the nearby continuun (1700 A). + Notes + ----- + Observer location: The standard AIA FITS header provides the spacecraft location in multiple + coordinate systems, including Heliocentric Aries Ecliptic (HAE) and Heliographic Stonyhurst + (HGS). SunPy uses the provided HAE coordinates due to accuracy concerns with the provided + HGS coordinates, but other software packages may make different choices. + References ---------- * `SDO Mission Page <https://sdo.gsfc.nasa.gov/>`_ @@ -47,6 +62,45 @@ self.plot_settings['cmap'] = plt.get_cmap(self._get_cmap_name()) self.plot_settings['norm'] = ImageNormalize(stretch=source_stretch(self.meta, AsinhStretch(0.01))) + @property + def observer_coordinate(self): + """ + The Heliographic Stonyhurst Coordinate of the observer. + + This coordinate is determined using the Heliocentric Aries Ecliptic (HAE) coordinates + in the header. + """ + vector = CartesianRepresentation(self.meta['haex_obs'], + self.meta['haey_obs'], + self.meta['haez_obs']) + coord = SkyCoord(vector * u.m, frame=HeliocentricMeanEcliptic, obstime=self.date) + return coord.heliographic_stonyhurst + + @property + def heliographic_latitude(self): + """Heliographic latitude.""" + return self.observer_coordinate.lat + + @property + def heliographic_longitude(self): + """Heliographic longitude.""" + return self.observer_coordinate.lon + + @property + def carrington_latitude(self): + """Carrington latitude.""" + return self.observer_coordinate.heliographic_carrington.lat + + @property + def carrington_longitude(self): + """Carrington longitude.""" + return self.observer_coordinate.heliographic_carrington.lon + + @property + def dsun(self): + """The observer distance from the Sun.""" + return self.observer_coordinate.radius.to('m') + @property def observatory(self): """
{"golden_diff": "diff --git a/sunpy/map/sources/sdo.py b/sunpy/map/sources/sdo.py\n--- a/sunpy/map/sources/sdo.py\n+++ b/sunpy/map/sources/sdo.py\n@@ -6,6 +6,14 @@\n \n import matplotlib.pyplot as plt\n \n+from astropy.coordinates import CartesianRepresentation, SkyCoord\n+# Versions of Astropy that do not have HeliocentricMeanEcliptic have the same frame\n+# with the incorrect name HeliocentricTrueEcliptic\n+try:\n+ from astropy.coordinates import HeliocentricMeanEcliptic\n+except ImportError:\n+ from astropy.coordinates import HeliocentricTrueEcliptic as HeliocentricMeanEcliptic\n+import astropy.units as u\n from astropy.visualization.mpl_normalize import ImageNormalize\n from astropy.visualization import AsinhStretch\n \n@@ -28,6 +36,13 @@\n 211 A (Fe XIV), 304 A (He II), 335 A (Fe XVI). One telescope observes\n in the visible 1600 A (C IV) and the nearby continuun (1700 A).\n \n+ Notes\n+ -----\n+ Observer location: The standard AIA FITS header provides the spacecraft location in multiple\n+ coordinate systems, including Heliocentric Aries Ecliptic (HAE) and Heliographic Stonyhurst\n+ (HGS). SunPy uses the provided HAE coordinates due to accuracy concerns with the provided\n+ HGS coordinates, but other software packages may make different choices.\n+\n References\n ----------\n * `SDO Mission Page <https://sdo.gsfc.nasa.gov/>`_\n@@ -47,6 +62,45 @@\n self.plot_settings['cmap'] = plt.get_cmap(self._get_cmap_name())\n self.plot_settings['norm'] = ImageNormalize(stretch=source_stretch(self.meta, AsinhStretch(0.01)))\n \n+ @property\n+ def observer_coordinate(self):\n+ \"\"\"\n+ The Heliographic Stonyhurst Coordinate of the observer.\n+\n+ This coordinate is determined using the Heliocentric Aries Ecliptic (HAE) coordinates\n+ in the header.\n+ \"\"\"\n+ vector = CartesianRepresentation(self.meta['haex_obs'],\n+ self.meta['haey_obs'],\n+ self.meta['haez_obs'])\n+ coord = SkyCoord(vector * u.m, frame=HeliocentricMeanEcliptic, obstime=self.date)\n+ return coord.heliographic_stonyhurst\n+\n+ @property\n+ def heliographic_latitude(self):\n+ \"\"\"Heliographic latitude.\"\"\"\n+ return self.observer_coordinate.lat\n+\n+ @property\n+ def heliographic_longitude(self):\n+ \"\"\"Heliographic longitude.\"\"\"\n+ return self.observer_coordinate.lon\n+\n+ @property\n+ def carrington_latitude(self):\n+ \"\"\"Carrington latitude.\"\"\"\n+ return self.observer_coordinate.heliographic_carrington.lat\n+\n+ @property\n+ def carrington_longitude(self):\n+ \"\"\"Carrington longitude.\"\"\"\n+ return self.observer_coordinate.heliographic_carrington.lon\n+\n+ @property\n+ def dsun(self):\n+ \"\"\"The observer distance from the Sun.\"\"\"\n+ return self.observer_coordinate.radius.to('m')\n+\n @property\n def observatory(self):\n \"\"\"\n", "issue": "AIA FITS headers have inaccurate HGS coordinates\nAIA FITS headers apparently have inaccurate Heliographic Stonyhurst (HGS) coordinates (`HGLN_OBS` and `HGLT_OBS`). For example, the distance from Earth center does not match the orbit radius. We currently use these keywords to generate the `observer_coordinate` for an AIA map, so we provide an inaccurate observer location.\r\n\r\nThe headers also have Heliographic Aries Ecliptic (HAE) coordinates (`HAEX_OBS`, `HAEY_OBS`, and `HAEZ_OBS`), and the HAE coordinates are inconsistent with the HGS coordinates in the same header. We have previously verified the accuracy of SunPy's transformation from HAE to HGS (e.g., https://github.com/sunpy/sunpy/issues/2445#issuecomment-364531159). The HAE coordinates appear to be credible, and likely should be trusted over the HGS coordinates.\r\n\r\nMy recommendation is for `AIAMap` to override the generation of `observer_coordinate` to use the HAE coordinates. Discuss.\n", "before_files": [{"content": "\"\"\"SDO Map subclass definitions\"\"\"\n#pylint: disable=W0221,W0222,E1101,E1121\n\n__author__ = \"Keith Hughitt\"\n__email__ = \"[email protected]\"\n\nimport matplotlib.pyplot as plt\n\nfrom astropy.visualization.mpl_normalize import ImageNormalize\nfrom astropy.visualization import AsinhStretch\n\nfrom sunpy.map import GenericMap\nfrom sunpy.map.sources.source_type import source_stretch\n\n__all__ = ['AIAMap', 'HMIMap']\n\n\nclass AIAMap(GenericMap):\n \"\"\"AIA Image Map.\n\n The Atmospheric Imaging Assembly is a set of four telescopes that employ\n normal-incidence, multi-layer coated optics to provide narrow-band imaging\n of the Sun. It provides high resolution full-disk images of the corona and\n transition region up to 0.5 solar radii above the solar limb with 1.5\n arcsecond angular resolution and 12-second temporal resolution. It observes\n the Sun in the following seven extreme ultraviolet bandpasses: 94 A\n (Fe XVIII), 131 A (Fe VIII, XXI), 171 A (Fe IX), 193 A (Fe XII, XXIV),\n 211 A (Fe XIV), 304 A (He II), 335 A (Fe XVI). One telescope observes\n in the visible 1600 A (C IV) and the nearby continuun (1700 A).\n\n References\n ----------\n * `SDO Mission Page <https://sdo.gsfc.nasa.gov/>`_\n * `Instrument Page <https://aia.lmsal.com>`_\n * `Fits Header keywords <http://jsoc.stanford.edu/doc/keywords/AIA/AIA02840_A_AIA-SDO_FITS_Keyword_Documents.pdf>`_\n * `Analysis Guide <https://www.lmsal.com/sdodocs/doc/dcur/SDOD0060.zip/zip/entry/>`_\n * `Instrument Paper <https://doi.org/10.1007/s11207-011-9776-8>`_\n * `wavelengths and temperature response reference <https://www.lmsal.com/sdodocs/doc/dcur/SDOD0060.zip/zip/entry/figures/aia_tel_resp.png>`_\n \"\"\"\n\n def __init__(self, data, header, **kwargs):\n GenericMap.__init__(self, data, header, **kwargs)\n\n # Fill in some missing info\n self.meta['detector'] = \"AIA\"\n self._nickname = self.detector\n self.plot_settings['cmap'] = plt.get_cmap(self._get_cmap_name())\n self.plot_settings['norm'] = ImageNormalize(stretch=source_stretch(self.meta, AsinhStretch(0.01)))\n\n @property\n def observatory(self):\n \"\"\"\n Returns the observatory.\n \"\"\"\n return self.meta['telescop'].split('/')[0]\n\n @classmethod\n def is_datasource_for(cls, data, header, **kwargs):\n \"\"\"Determines if header corresponds to an AIA image\"\"\"\n return header.get('instrume', '').startswith('AIA')\n\n\nclass HMIMap(GenericMap):\n \"\"\"HMI Image Map.\n\n HMI consists of a refracting telescope, a polarization selector,\n an image stabilization system, a narrow band tunable filter\n and two 4096 pixel CCD cameras. It observes the full solar disk in the Fe I\n absorption line at 6173 Angstrom with a resolution of 1 arc-second.\n HMI takes images in a sequence of tuning and polarizations at a 4-second\n cadence for each camera. One camera is dedicated to a 45 s Doppler and\n line-of-sight field sequence while the other to a 90 s vector field\n sequence.\n\n References\n ----------\n * `SDO Mission Page <https://sdo.gsfc.nasa.gov/>`_\n * `Instrument Page <http://hmi.stanford.edu>`_\n * `Analysis Guide <http://hmi.stanford.edu/doc/magnetic/guide.pdf>`_\n \"\"\"\n def __init__(self, data, header, **kwargs):\n\n GenericMap.__init__(self, data, header, **kwargs)\n\n self.meta['detector'] = \"HMI\"\n# self.meta['instrme'] = \"HMI\"\n# self.meta['obsrvtry'] = \"SDO\"\n self._nickname = self.detector\n\n @property\n def measurement(self):\n \"\"\"\n Returns the measurement type.\n \"\"\"\n return self.meta['content'].split(\" \")[0].lower()\n\n @property\n def observatory(self):\n \"\"\"\n Returns the observatory.\n \"\"\"\n return self.meta['telescop'].split('/')[0]\n\n @classmethod\n def is_datasource_for(cls, data, header, **kwargs):\n \"\"\"Determines if header corresponds to an HMI image\"\"\"\n return header.get('instrume', '').startswith('HMI')\n", "path": "sunpy/map/sources/sdo.py"}], "after_files": [{"content": "\"\"\"SDO Map subclass definitions\"\"\"\n#pylint: disable=W0221,W0222,E1101,E1121\n\n__author__ = \"Keith Hughitt\"\n__email__ = \"[email protected]\"\n\nimport matplotlib.pyplot as plt\n\nfrom astropy.coordinates import CartesianRepresentation, SkyCoord\n# Versions of Astropy that do not have HeliocentricMeanEcliptic have the same frame\n# with the incorrect name HeliocentricTrueEcliptic\ntry:\n from astropy.coordinates import HeliocentricMeanEcliptic\nexcept ImportError:\n from astropy.coordinates import HeliocentricTrueEcliptic as HeliocentricMeanEcliptic\nimport astropy.units as u\nfrom astropy.visualization.mpl_normalize import ImageNormalize\nfrom astropy.visualization import AsinhStretch\n\nfrom sunpy.map import GenericMap\nfrom sunpy.map.sources.source_type import source_stretch\n\n__all__ = ['AIAMap', 'HMIMap']\n\n\nclass AIAMap(GenericMap):\n \"\"\"AIA Image Map.\n\n The Atmospheric Imaging Assembly is a set of four telescopes that employ\n normal-incidence, multi-layer coated optics to provide narrow-band imaging\n of the Sun. It provides high resolution full-disk images of the corona and\n transition region up to 0.5 solar radii above the solar limb with 1.5\n arcsecond angular resolution and 12-second temporal resolution. It observes\n the Sun in the following seven extreme ultraviolet bandpasses: 94 A\n (Fe XVIII), 131 A (Fe VIII, XXI), 171 A (Fe IX), 193 A (Fe XII, XXIV),\n 211 A (Fe XIV), 304 A (He II), 335 A (Fe XVI). One telescope observes\n in the visible 1600 A (C IV) and the nearby continuun (1700 A).\n\n Notes\n -----\n Observer location: The standard AIA FITS header provides the spacecraft location in multiple\n coordinate systems, including Heliocentric Aries Ecliptic (HAE) and Heliographic Stonyhurst\n (HGS). SunPy uses the provided HAE coordinates due to accuracy concerns with the provided\n HGS coordinates, but other software packages may make different choices.\n\n References\n ----------\n * `SDO Mission Page <https://sdo.gsfc.nasa.gov/>`_\n * `Instrument Page <https://aia.lmsal.com>`_\n * `Fits Header keywords <http://jsoc.stanford.edu/doc/keywords/AIA/AIA02840_A_AIA-SDO_FITS_Keyword_Documents.pdf>`_\n * `Analysis Guide <https://www.lmsal.com/sdodocs/doc/dcur/SDOD0060.zip/zip/entry/>`_\n * `Instrument Paper <https://doi.org/10.1007/s11207-011-9776-8>`_\n * `wavelengths and temperature response reference <https://www.lmsal.com/sdodocs/doc/dcur/SDOD0060.zip/zip/entry/figures/aia_tel_resp.png>`_\n \"\"\"\n\n def __init__(self, data, header, **kwargs):\n GenericMap.__init__(self, data, header, **kwargs)\n\n # Fill in some missing info\n self.meta['detector'] = \"AIA\"\n self._nickname = self.detector\n self.plot_settings['cmap'] = plt.get_cmap(self._get_cmap_name())\n self.plot_settings['norm'] = ImageNormalize(stretch=source_stretch(self.meta, AsinhStretch(0.01)))\n\n @property\n def observer_coordinate(self):\n \"\"\"\n The Heliographic Stonyhurst Coordinate of the observer.\n\n This coordinate is determined using the Heliocentric Aries Ecliptic (HAE) coordinates\n in the header.\n \"\"\"\n vector = CartesianRepresentation(self.meta['haex_obs'],\n self.meta['haey_obs'],\n self.meta['haez_obs'])\n coord = SkyCoord(vector * u.m, frame=HeliocentricMeanEcliptic, obstime=self.date)\n return coord.heliographic_stonyhurst\n\n @property\n def heliographic_latitude(self):\n \"\"\"Heliographic latitude.\"\"\"\n return self.observer_coordinate.lat\n\n @property\n def heliographic_longitude(self):\n \"\"\"Heliographic longitude.\"\"\"\n return self.observer_coordinate.lon\n\n @property\n def carrington_latitude(self):\n \"\"\"Carrington latitude.\"\"\"\n return self.observer_coordinate.heliographic_carrington.lat\n\n @property\n def carrington_longitude(self):\n \"\"\"Carrington longitude.\"\"\"\n return self.observer_coordinate.heliographic_carrington.lon\n\n @property\n def dsun(self):\n \"\"\"The observer distance from the Sun.\"\"\"\n return self.observer_coordinate.radius.to('m')\n\n @property\n def observatory(self):\n \"\"\"\n Returns the observatory.\n \"\"\"\n return self.meta['telescop'].split('/')[0]\n\n @classmethod\n def is_datasource_for(cls, data, header, **kwargs):\n \"\"\"Determines if header corresponds to an AIA image\"\"\"\n return header.get('instrume', '').startswith('AIA')\n\n\nclass HMIMap(GenericMap):\n \"\"\"HMI Image Map.\n\n HMI consists of a refracting telescope, a polarization selector,\n an image stabilization system, a narrow band tunable filter\n and two 4096 pixel CCD cameras. It observes the full solar disk in the Fe I\n absorption line at 6173 Angstrom with a resolution of 1 arc-second.\n HMI takes images in a sequence of tuning and polarizations at a 4-second\n cadence for each camera. One camera is dedicated to a 45 s Doppler and\n line-of-sight field sequence while the other to a 90 s vector field\n sequence.\n\n References\n ----------\n * `SDO Mission Page <https://sdo.gsfc.nasa.gov/>`_\n * `Instrument Page <http://hmi.stanford.edu>`_\n * `Analysis Guide <http://hmi.stanford.edu/doc/magnetic/guide.pdf>`_\n \"\"\"\n def __init__(self, data, header, **kwargs):\n\n GenericMap.__init__(self, data, header, **kwargs)\n\n self.meta['detector'] = \"HMI\"\n# self.meta['instrme'] = \"HMI\"\n# self.meta['obsrvtry'] = \"SDO\"\n self._nickname = self.detector\n\n @property\n def measurement(self):\n \"\"\"\n Returns the measurement type.\n \"\"\"\n return self.meta['content'].split(\" \")[0].lower()\n\n @property\n def observatory(self):\n \"\"\"\n Returns the observatory.\n \"\"\"\n return self.meta['telescop'].split('/')[0]\n\n @classmethod\n def is_datasource_for(cls, data, header, **kwargs):\n \"\"\"Determines if header corresponds to an HMI image\"\"\"\n return header.get('instrume', '').startswith('HMI')\n", "path": "sunpy/map/sources/sdo.py"}]}
1,858
761
gh_patches_debug_35532
rasdani/github-patches
git_diff
getsentry__sentry-53789
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Slack alert message doesn't match up with why alert was actually triggered ### Environment SaaS (https://sentry.io/) ### Steps to Reproduce Hi Team, A customer reported to us that the messages in their Slack alerts doesn't match up with the real reason the alert was triggered (displayed correctly once you click into the alert and go to the alert page). I was able to replicate this by setting up a similar type of alert. 1. Set up a metric alert to trigger when `Number of errors is X% higher in Y minutes compared to same time one week ago` 2. Alert action is to be notified via Slack, specifically a Slack channel 3. Receive Slack alert <img width="1269" alt="Screenshot 2023-05-04 at 3 45 03 PM" src="https://user-images.githubusercontent.com/98921471/236560084-f64cf7cf-7856-4921-8c3d-ca70433afda8.png"> ### Expected Result When the alert triggers, I expect the message in Slack to say: "157 events in the last 10 minutes". This is verified by the alert details of why the alert triggered. Alternatively, it can also be expected to say: "Number of errors is 89.16% higher in 10 minutes compared to the same time a week ago". <img width="1277" alt="Screenshot 2023-05-04 at 3 45 14 PM" src="https://user-images.githubusercontent.com/98921471/236560515-38e4361c-0c8e-4c53-8986-fcd5a67f40fe.png"> ### Actual Result The Slack alert message says "168.478260.. events in the last 10 minutes". This doesn't match up with the 157 events that I see in the alert details. For the customer, it said "400.0 events in the last 60 minutes" when it should've said "4 events in the last 60 minutes", which is 2 orders of magnitudes off. <img width="544" alt="Screenshot 2023-05-04 at 3 44 30 PM" src="https://user-images.githubusercontent.com/98921471/236560823-894e8da5-6918-416c-8f65-f182456424d7.png"> ### Product Area Alerts ### Link _No response_ ### DSN _No response_ ### Version _No response_ ┆Issue is synchronized with this [Jira Improvement](https://getsentry.atlassian.net/browse/FEEDBACK-1825) by [Unito](https://www.unito.io) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/sentry/integrations/metric_alerts.py` Content: ``` 1 from datetime import timedelta 2 from typing import Optional 3 from urllib import parse 4 5 from django.db.models import Max 6 from django.urls import reverse 7 from django.utils.translation import gettext as _ 8 9 from sentry.constants import CRASH_RATE_ALERT_AGGREGATE_ALIAS 10 from sentry.incidents.logic import get_incident_aggregates 11 from sentry.incidents.models import ( 12 INCIDENT_STATUS, 13 AlertRule, 14 Incident, 15 IncidentStatus, 16 IncidentTrigger, 17 ) 18 from sentry.utils.assets import get_asset_url 19 from sentry.utils.http import absolute_uri 20 21 QUERY_AGGREGATION_DISPLAY = { 22 "count()": "events", 23 "count_unique(tags[sentry:user])": "users affected", 24 "percentage(sessions_crashed, sessions)": "% sessions crash free rate", 25 "percentage(users_crashed, users)": "% users crash free rate", 26 } 27 LOGO_URL = absolute_uri(get_asset_url("sentry", "images/sentry-email-avatar.png")) 28 29 30 def get_metric_count_from_incident(incident: Incident) -> str: 31 """Returns the current or last count of an incident aggregate.""" 32 incident_trigger = ( 33 IncidentTrigger.objects.filter(incident=incident).order_by("-date_modified").first() 34 ) 35 if incident_trigger: 36 alert_rule_trigger = incident_trigger.alert_rule_trigger 37 # TODO: If we're relying on this and expecting possible delays between a 38 # trigger fired and this function running, then this could actually be 39 # incorrect if they changed the trigger's time window in this time period. 40 # Should we store it? 41 start = incident_trigger.date_modified - timedelta( 42 seconds=alert_rule_trigger.alert_rule.snuba_query.time_window 43 ) 44 end = incident_trigger.date_modified 45 else: 46 start, end = None, None 47 48 return get_incident_aggregates(incident=incident, start=start, end=end).get("count") 49 50 51 def get_incident_status_text(alert_rule: AlertRule, metric_value: str) -> str: 52 """Returns a human readable current status of an incident""" 53 agg_display_key = alert_rule.snuba_query.aggregate 54 55 if CRASH_RATE_ALERT_AGGREGATE_ALIAS in alert_rule.snuba_query.aggregate: 56 agg_display_key = agg_display_key.split(f"AS {CRASH_RATE_ALERT_AGGREGATE_ALIAS}")[0].strip() 57 58 agg_text = QUERY_AGGREGATION_DISPLAY.get(agg_display_key, alert_rule.snuba_query.aggregate) 59 60 if agg_text.startswith("%"): 61 if metric_value is not None: 62 metric_and_agg_text = f"{metric_value}{agg_text}" 63 else: 64 metric_and_agg_text = f"No{agg_text[1:]}" 65 else: 66 metric_and_agg_text = f"{metric_value} {agg_text}" 67 68 time_window = alert_rule.snuba_query.time_window // 60 69 interval = "minute" if time_window == 1 else "minutes" 70 text = _("%(metric_and_agg_text)s in the last %(time_window)d %(interval)s") % { 71 "metric_and_agg_text": metric_and_agg_text, 72 "time_window": time_window, 73 "interval": interval, 74 } 75 76 return text 77 78 79 def incident_attachment_info(incident, new_status: IncidentStatus, metric_value=None): 80 alert_rule = incident.alert_rule 81 82 status = INCIDENT_STATUS[new_status] 83 84 if metric_value is None: 85 metric_value = get_metric_count_from_incident(incident) 86 87 text = get_incident_status_text(alert_rule, metric_value) 88 title = f"{status}: {alert_rule.name}" 89 90 title_link = alert_rule.organization.absolute_url( 91 reverse( 92 "sentry-metric-alert-details", 93 kwargs={ 94 "organization_slug": alert_rule.organization.slug, 95 "alert_rule_id": alert_rule.id, 96 }, 97 ), 98 query=parse.urlencode({"alert": str(incident.identifier)}), 99 ) 100 101 return { 102 "title": title, 103 "text": text, 104 "logo_url": LOGO_URL, 105 "status": status, 106 "ts": incident.date_started, 107 "title_link": title_link, 108 } 109 110 111 def metric_alert_attachment_info( 112 alert_rule: AlertRule, 113 selected_incident: Optional[Incident] = None, 114 new_status: Optional[IncidentStatus] = None, 115 metric_value: Optional[str] = None, 116 ): 117 latest_incident = None 118 if selected_incident is None: 119 try: 120 # Use .get() instead of .first() to avoid sorting table by id 121 latest_incident = Incident.objects.filter( 122 id__in=Incident.objects.filter(alert_rule=alert_rule) 123 .values("alert_rule_id") 124 .annotate(incident_id=Max("id")) 125 .values("incident_id") 126 ).get() 127 except Incident.DoesNotExist: 128 latest_incident = None 129 130 if new_status: 131 status = INCIDENT_STATUS[new_status] 132 elif selected_incident: 133 status = INCIDENT_STATUS[IncidentStatus(selected_incident.status)] 134 elif latest_incident: 135 status = INCIDENT_STATUS[IncidentStatus(latest_incident.status)] 136 else: 137 status = INCIDENT_STATUS[IncidentStatus.CLOSED] 138 139 query = None 140 if selected_incident: 141 query = parse.urlencode({"alert": str(selected_incident.identifier)}) 142 title = f"{status}: {alert_rule.name}" 143 title_link = alert_rule.organization.absolute_url( 144 reverse( 145 "sentry-metric-alert-details", 146 kwargs={ 147 "organization_slug": alert_rule.organization.slug, 148 "alert_rule_id": alert_rule.id, 149 }, 150 ), 151 query=query, 152 ) 153 154 if metric_value is None: 155 if ( 156 selected_incident is None 157 and latest_incident 158 and latest_incident.status != IncidentStatus.CLOSED 159 ): 160 # Without a selected incident, use latest incident if it is not resolved 161 incident_info = latest_incident 162 else: 163 incident_info = selected_incident 164 165 if incident_info: 166 metric_value = get_metric_count_from_incident(incident_info) 167 168 text = "" 169 if metric_value is not None and status != INCIDENT_STATUS[IncidentStatus.CLOSED]: 170 text = get_incident_status_text(alert_rule, metric_value) 171 172 date_started = None 173 if selected_incident: 174 date_started = selected_incident.date_started 175 176 last_triggered_date = None 177 if latest_incident: 178 last_triggered_date = latest_incident.date_started 179 180 return { 181 "title": title, 182 "text": text, 183 "logo_url": LOGO_URL, 184 "status": status, 185 "date_started": date_started, 186 "last_triggered_date": last_triggered_date, 187 "title_link": title_link, 188 } 189 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/sentry/integrations/metric_alerts.py b/src/sentry/integrations/metric_alerts.py --- a/src/sentry/integrations/metric_alerts.py +++ b/src/sentry/integrations/metric_alerts.py @@ -11,6 +11,7 @@ from sentry.incidents.models import ( INCIDENT_STATUS, AlertRule, + AlertRuleThresholdType, Incident, IncidentStatus, IncidentTrigger, @@ -25,6 +26,16 @@ "percentage(users_crashed, users)": "% users crash free rate", } LOGO_URL = absolute_uri(get_asset_url("sentry", "images/sentry-email-avatar.png")) +# These should be the same as the options in the frontend +# COMPARISON_DELTA_OPTIONS +TEXT_COMPARISON_DELTA = { + 5: ("same time 5 minutes ago"), # 5 minutes + 15: ("same time 15 minutes ago"), # 15 minutes + 60: ("same time one hour ago"), # one hour + 1440: ("same time one day ago"), # one day + 10080: ("same time one week ago"), # one week + 43200: ("same time one month ago"), # 30 days +} def get_metric_count_from_incident(incident: Incident) -> str: @@ -67,14 +78,27 @@ time_window = alert_rule.snuba_query.time_window // 60 interval = "minute" if time_window == 1 else "minutes" - text = _("%(metric_and_agg_text)s in the last %(time_window)d %(interval)s") % { + # % change alerts have a comparison delta + if alert_rule.comparison_delta: + metric_and_agg_text = f"{agg_text.capitalize()} {int(metric_value)}%" + higher_or_lower = ( + "higher" if alert_rule.threshold_type == AlertRuleThresholdType.ABOVE.value else "lower" + ) + comparison_delta_minutes = alert_rule.comparison_delta // 60 + comparison_string = TEXT_COMPARISON_DELTA.get( + comparison_delta_minutes, f"same time {comparison_delta_minutes} minutes ago" + ) + return _( + f"{metric_and_agg_text} {higher_or_lower} in the last {time_window} {interval} " + f"compared to the {comparison_string}" + ) + + return _("%(metric_and_agg_text)s in the last %(time_window)d %(interval)s") % { "metric_and_agg_text": metric_and_agg_text, "time_window": time_window, "interval": interval, } - return text - def incident_attachment_info(incident, new_status: IncidentStatus, metric_value=None): alert_rule = incident.alert_rule
{"golden_diff": "diff --git a/src/sentry/integrations/metric_alerts.py b/src/sentry/integrations/metric_alerts.py\n--- a/src/sentry/integrations/metric_alerts.py\n+++ b/src/sentry/integrations/metric_alerts.py\n@@ -11,6 +11,7 @@\n from sentry.incidents.models import (\n INCIDENT_STATUS,\n AlertRule,\n+ AlertRuleThresholdType,\n Incident,\n IncidentStatus,\n IncidentTrigger,\n@@ -25,6 +26,16 @@\n \"percentage(users_crashed, users)\": \"% users crash free rate\",\n }\n LOGO_URL = absolute_uri(get_asset_url(\"sentry\", \"images/sentry-email-avatar.png\"))\n+# These should be the same as the options in the frontend\n+# COMPARISON_DELTA_OPTIONS\n+TEXT_COMPARISON_DELTA = {\n+ 5: (\"same time 5 minutes ago\"), # 5 minutes\n+ 15: (\"same time 15 minutes ago\"), # 15 minutes\n+ 60: (\"same time one hour ago\"), # one hour\n+ 1440: (\"same time one day ago\"), # one day\n+ 10080: (\"same time one week ago\"), # one week\n+ 43200: (\"same time one month ago\"), # 30 days\n+}\n \n \n def get_metric_count_from_incident(incident: Incident) -> str:\n@@ -67,14 +78,27 @@\n \n time_window = alert_rule.snuba_query.time_window // 60\n interval = \"minute\" if time_window == 1 else \"minutes\"\n- text = _(\"%(metric_and_agg_text)s in the last %(time_window)d %(interval)s\") % {\n+ # % change alerts have a comparison delta\n+ if alert_rule.comparison_delta:\n+ metric_and_agg_text = f\"{agg_text.capitalize()} {int(metric_value)}%\"\n+ higher_or_lower = (\n+ \"higher\" if alert_rule.threshold_type == AlertRuleThresholdType.ABOVE.value else \"lower\"\n+ )\n+ comparison_delta_minutes = alert_rule.comparison_delta // 60\n+ comparison_string = TEXT_COMPARISON_DELTA.get(\n+ comparison_delta_minutes, f\"same time {comparison_delta_minutes} minutes ago\"\n+ )\n+ return _(\n+ f\"{metric_and_agg_text} {higher_or_lower} in the last {time_window} {interval} \"\n+ f\"compared to the {comparison_string}\"\n+ )\n+\n+ return _(\"%(metric_and_agg_text)s in the last %(time_window)d %(interval)s\") % {\n \"metric_and_agg_text\": metric_and_agg_text,\n \"time_window\": time_window,\n \"interval\": interval,\n }\n \n- return text\n-\n \n def incident_attachment_info(incident, new_status: IncidentStatus, metric_value=None):\n alert_rule = incident.alert_rule\n", "issue": "Slack alert message doesn't match up with why alert was actually triggered\n### Environment\n\nSaaS (https://sentry.io/)\n\n### Steps to Reproduce\n\nHi Team,\n\nA customer reported to us that the messages in their Slack alerts doesn't match up with the real reason the alert was triggered (displayed correctly once you click into the alert and go to the alert page). I was able to replicate this by setting up a similar type of alert.\n1. Set up a metric alert to trigger when `Number of errors is X% higher in Y minutes compared to same time one week ago`\n2. Alert action is to be notified via Slack, specifically a Slack channel\n3. Receive Slack alert\n<img width=\"1269\" alt=\"Screenshot 2023-05-04 at 3 45 03 PM\" src=\"https://user-images.githubusercontent.com/98921471/236560084-f64cf7cf-7856-4921-8c3d-ca70433afda8.png\">\n\n\n### Expected Result\n\nWhen the alert triggers, I expect the message in Slack to say: \"157 events in the last 10 minutes\". This is verified by the alert details of why the alert triggered.\n\nAlternatively, it can also be expected to say: \"Number of errors is 89.16% higher in 10 minutes compared to the same time a week ago\".\n<img width=\"1277\" alt=\"Screenshot 2023-05-04 at 3 45 14 PM\" src=\"https://user-images.githubusercontent.com/98921471/236560515-38e4361c-0c8e-4c53-8986-fcd5a67f40fe.png\">\n\n\n### Actual Result\n\nThe Slack alert message says \"168.478260.. events in the last 10 minutes\". This doesn't match up with the 157 events that I see in the alert details.\n\nFor the customer, it said \"400.0 events in the last 60 minutes\" when it should've said \"4 events in the last 60 minutes\", which is 2 orders of magnitudes off.\n<img width=\"544\" alt=\"Screenshot 2023-05-04 at 3 44 30 PM\" src=\"https://user-images.githubusercontent.com/98921471/236560823-894e8da5-6918-416c-8f65-f182456424d7.png\">\n\n\n### Product Area\n\nAlerts\n\n### Link\n\n_No response_\n\n### DSN\n\n_No response_\n\n### Version\n\n_No response_\n\n\u2506Issue is synchronized with this [Jira Improvement](https://getsentry.atlassian.net/browse/FEEDBACK-1825) by [Unito](https://www.unito.io)\n\n", "before_files": [{"content": "from datetime import timedelta\nfrom typing import Optional\nfrom urllib import parse\n\nfrom django.db.models import Max\nfrom django.urls import reverse\nfrom django.utils.translation import gettext as _\n\nfrom sentry.constants import CRASH_RATE_ALERT_AGGREGATE_ALIAS\nfrom sentry.incidents.logic import get_incident_aggregates\nfrom sentry.incidents.models import (\n INCIDENT_STATUS,\n AlertRule,\n Incident,\n IncidentStatus,\n IncidentTrigger,\n)\nfrom sentry.utils.assets import get_asset_url\nfrom sentry.utils.http import absolute_uri\n\nQUERY_AGGREGATION_DISPLAY = {\n \"count()\": \"events\",\n \"count_unique(tags[sentry:user])\": \"users affected\",\n \"percentage(sessions_crashed, sessions)\": \"% sessions crash free rate\",\n \"percentage(users_crashed, users)\": \"% users crash free rate\",\n}\nLOGO_URL = absolute_uri(get_asset_url(\"sentry\", \"images/sentry-email-avatar.png\"))\n\n\ndef get_metric_count_from_incident(incident: Incident) -> str:\n \"\"\"Returns the current or last count of an incident aggregate.\"\"\"\n incident_trigger = (\n IncidentTrigger.objects.filter(incident=incident).order_by(\"-date_modified\").first()\n )\n if incident_trigger:\n alert_rule_trigger = incident_trigger.alert_rule_trigger\n # TODO: If we're relying on this and expecting possible delays between a\n # trigger fired and this function running, then this could actually be\n # incorrect if they changed the trigger's time window in this time period.\n # Should we store it?\n start = incident_trigger.date_modified - timedelta(\n seconds=alert_rule_trigger.alert_rule.snuba_query.time_window\n )\n end = incident_trigger.date_modified\n else:\n start, end = None, None\n\n return get_incident_aggregates(incident=incident, start=start, end=end).get(\"count\")\n\n\ndef get_incident_status_text(alert_rule: AlertRule, metric_value: str) -> str:\n \"\"\"Returns a human readable current status of an incident\"\"\"\n agg_display_key = alert_rule.snuba_query.aggregate\n\n if CRASH_RATE_ALERT_AGGREGATE_ALIAS in alert_rule.snuba_query.aggregate:\n agg_display_key = agg_display_key.split(f\"AS {CRASH_RATE_ALERT_AGGREGATE_ALIAS}\")[0].strip()\n\n agg_text = QUERY_AGGREGATION_DISPLAY.get(agg_display_key, alert_rule.snuba_query.aggregate)\n\n if agg_text.startswith(\"%\"):\n if metric_value is not None:\n metric_and_agg_text = f\"{metric_value}{agg_text}\"\n else:\n metric_and_agg_text = f\"No{agg_text[1:]}\"\n else:\n metric_and_agg_text = f\"{metric_value} {agg_text}\"\n\n time_window = alert_rule.snuba_query.time_window // 60\n interval = \"minute\" if time_window == 1 else \"minutes\"\n text = _(\"%(metric_and_agg_text)s in the last %(time_window)d %(interval)s\") % {\n \"metric_and_agg_text\": metric_and_agg_text,\n \"time_window\": time_window,\n \"interval\": interval,\n }\n\n return text\n\n\ndef incident_attachment_info(incident, new_status: IncidentStatus, metric_value=None):\n alert_rule = incident.alert_rule\n\n status = INCIDENT_STATUS[new_status]\n\n if metric_value is None:\n metric_value = get_metric_count_from_incident(incident)\n\n text = get_incident_status_text(alert_rule, metric_value)\n title = f\"{status}: {alert_rule.name}\"\n\n title_link = alert_rule.organization.absolute_url(\n reverse(\n \"sentry-metric-alert-details\",\n kwargs={\n \"organization_slug\": alert_rule.organization.slug,\n \"alert_rule_id\": alert_rule.id,\n },\n ),\n query=parse.urlencode({\"alert\": str(incident.identifier)}),\n )\n\n return {\n \"title\": title,\n \"text\": text,\n \"logo_url\": LOGO_URL,\n \"status\": status,\n \"ts\": incident.date_started,\n \"title_link\": title_link,\n }\n\n\ndef metric_alert_attachment_info(\n alert_rule: AlertRule,\n selected_incident: Optional[Incident] = None,\n new_status: Optional[IncidentStatus] = None,\n metric_value: Optional[str] = None,\n):\n latest_incident = None\n if selected_incident is None:\n try:\n # Use .get() instead of .first() to avoid sorting table by id\n latest_incident = Incident.objects.filter(\n id__in=Incident.objects.filter(alert_rule=alert_rule)\n .values(\"alert_rule_id\")\n .annotate(incident_id=Max(\"id\"))\n .values(\"incident_id\")\n ).get()\n except Incident.DoesNotExist:\n latest_incident = None\n\n if new_status:\n status = INCIDENT_STATUS[new_status]\n elif selected_incident:\n status = INCIDENT_STATUS[IncidentStatus(selected_incident.status)]\n elif latest_incident:\n status = INCIDENT_STATUS[IncidentStatus(latest_incident.status)]\n else:\n status = INCIDENT_STATUS[IncidentStatus.CLOSED]\n\n query = None\n if selected_incident:\n query = parse.urlencode({\"alert\": str(selected_incident.identifier)})\n title = f\"{status}: {alert_rule.name}\"\n title_link = alert_rule.organization.absolute_url(\n reverse(\n \"sentry-metric-alert-details\",\n kwargs={\n \"organization_slug\": alert_rule.organization.slug,\n \"alert_rule_id\": alert_rule.id,\n },\n ),\n query=query,\n )\n\n if metric_value is None:\n if (\n selected_incident is None\n and latest_incident\n and latest_incident.status != IncidentStatus.CLOSED\n ):\n # Without a selected incident, use latest incident if it is not resolved\n incident_info = latest_incident\n else:\n incident_info = selected_incident\n\n if incident_info:\n metric_value = get_metric_count_from_incident(incident_info)\n\n text = \"\"\n if metric_value is not None and status != INCIDENT_STATUS[IncidentStatus.CLOSED]:\n text = get_incident_status_text(alert_rule, metric_value)\n\n date_started = None\n if selected_incident:\n date_started = selected_incident.date_started\n\n last_triggered_date = None\n if latest_incident:\n last_triggered_date = latest_incident.date_started\n\n return {\n \"title\": title,\n \"text\": text,\n \"logo_url\": LOGO_URL,\n \"status\": status,\n \"date_started\": date_started,\n \"last_triggered_date\": last_triggered_date,\n \"title_link\": title_link,\n }\n", "path": "src/sentry/integrations/metric_alerts.py"}], "after_files": [{"content": "from datetime import timedelta\nfrom typing import Optional\nfrom urllib import parse\n\nfrom django.db.models import Max\nfrom django.urls import reverse\nfrom django.utils.translation import gettext as _\n\nfrom sentry.constants import CRASH_RATE_ALERT_AGGREGATE_ALIAS\nfrom sentry.incidents.logic import get_incident_aggregates\nfrom sentry.incidents.models import (\n INCIDENT_STATUS,\n AlertRule,\n AlertRuleThresholdType,\n Incident,\n IncidentStatus,\n IncidentTrigger,\n)\nfrom sentry.utils.assets import get_asset_url\nfrom sentry.utils.http import absolute_uri\n\nQUERY_AGGREGATION_DISPLAY = {\n \"count()\": \"events\",\n \"count_unique(tags[sentry:user])\": \"users affected\",\n \"percentage(sessions_crashed, sessions)\": \"% sessions crash free rate\",\n \"percentage(users_crashed, users)\": \"% users crash free rate\",\n}\nLOGO_URL = absolute_uri(get_asset_url(\"sentry\", \"images/sentry-email-avatar.png\"))\n# These should be the same as the options in the frontend\n# COMPARISON_DELTA_OPTIONS\nTEXT_COMPARISON_DELTA = {\n 5: (\"same time 5 minutes ago\"), # 5 minutes\n 15: (\"same time 15 minutes ago\"), # 15 minutes\n 60: (\"same time one hour ago\"), # one hour\n 1440: (\"same time one day ago\"), # one day\n 10080: (\"same time one week ago\"), # one week\n 43200: (\"same time one month ago\"), # 30 days\n}\n\n\ndef get_metric_count_from_incident(incident: Incident) -> str:\n \"\"\"Returns the current or last count of an incident aggregate.\"\"\"\n incident_trigger = (\n IncidentTrigger.objects.filter(incident=incident).order_by(\"-date_modified\").first()\n )\n if incident_trigger:\n alert_rule_trigger = incident_trigger.alert_rule_trigger\n # TODO: If we're relying on this and expecting possible delays between a\n # trigger fired and this function running, then this could actually be\n # incorrect if they changed the trigger's time window in this time period.\n # Should we store it?\n start = incident_trigger.date_modified - timedelta(\n seconds=alert_rule_trigger.alert_rule.snuba_query.time_window\n )\n end = incident_trigger.date_modified\n else:\n start, end = None, None\n\n return get_incident_aggregates(incident=incident, start=start, end=end).get(\"count\")\n\n\ndef get_incident_status_text(alert_rule: AlertRule, metric_value: str) -> str:\n \"\"\"Returns a human readable current status of an incident\"\"\"\n agg_display_key = alert_rule.snuba_query.aggregate\n\n if CRASH_RATE_ALERT_AGGREGATE_ALIAS in alert_rule.snuba_query.aggregate:\n agg_display_key = agg_display_key.split(f\"AS {CRASH_RATE_ALERT_AGGREGATE_ALIAS}\")[0].strip()\n\n agg_text = QUERY_AGGREGATION_DISPLAY.get(agg_display_key, alert_rule.snuba_query.aggregate)\n\n if agg_text.startswith(\"%\"):\n if metric_value is not None:\n metric_and_agg_text = f\"{metric_value}{agg_text}\"\n else:\n metric_and_agg_text = f\"No{agg_text[1:]}\"\n else:\n metric_and_agg_text = f\"{metric_value} {agg_text}\"\n\n time_window = alert_rule.snuba_query.time_window // 60\n interval = \"minute\" if time_window == 1 else \"minutes\"\n # % change alerts have a comparison delta\n if alert_rule.comparison_delta:\n metric_and_agg_text = f\"{agg_text.capitalize()} {int(metric_value)}%\"\n higher_or_lower = (\n \"higher\" if alert_rule.threshold_type == AlertRuleThresholdType.ABOVE.value else \"lower\"\n )\n comparison_delta_minutes = alert_rule.comparison_delta // 60\n comparison_string = TEXT_COMPARISON_DELTA.get(\n comparison_delta_minutes, f\"same time {comparison_delta_minutes} minutes ago\"\n )\n return _(\n f\"{metric_and_agg_text} {higher_or_lower} in the last {time_window} {interval} \"\n f\"compared to the {comparison_string}\"\n )\n\n return _(\"%(metric_and_agg_text)s in the last %(time_window)d %(interval)s\") % {\n \"metric_and_agg_text\": metric_and_agg_text,\n \"time_window\": time_window,\n \"interval\": interval,\n }\n\n\ndef incident_attachment_info(incident, new_status: IncidentStatus, metric_value=None):\n alert_rule = incident.alert_rule\n\n status = INCIDENT_STATUS[new_status]\n\n if metric_value is None:\n metric_value = get_metric_count_from_incident(incident)\n\n text = get_incident_status_text(alert_rule, metric_value)\n title = f\"{status}: {alert_rule.name}\"\n\n title_link = alert_rule.organization.absolute_url(\n reverse(\n \"sentry-metric-alert-details\",\n kwargs={\n \"organization_slug\": alert_rule.organization.slug,\n \"alert_rule_id\": alert_rule.id,\n },\n ),\n query=parse.urlencode({\"alert\": str(incident.identifier)}),\n )\n\n return {\n \"title\": title,\n \"text\": text,\n \"logo_url\": LOGO_URL,\n \"status\": status,\n \"ts\": incident.date_started,\n \"title_link\": title_link,\n }\n\n\ndef metric_alert_attachment_info(\n alert_rule: AlertRule,\n selected_incident: Optional[Incident] = None,\n new_status: Optional[IncidentStatus] = None,\n metric_value: Optional[str] = None,\n):\n latest_incident = None\n if selected_incident is None:\n try:\n # Use .get() instead of .first() to avoid sorting table by id\n latest_incident = Incident.objects.filter(\n id__in=Incident.objects.filter(alert_rule=alert_rule)\n .values(\"alert_rule_id\")\n .annotate(incident_id=Max(\"id\"))\n .values(\"incident_id\")\n ).get()\n except Incident.DoesNotExist:\n latest_incident = None\n\n if new_status:\n status = INCIDENT_STATUS[new_status]\n elif selected_incident:\n status = INCIDENT_STATUS[IncidentStatus(selected_incident.status)]\n elif latest_incident:\n status = INCIDENT_STATUS[IncidentStatus(latest_incident.status)]\n else:\n status = INCIDENT_STATUS[IncidentStatus.CLOSED]\n\n query = None\n if selected_incident:\n query = parse.urlencode({\"alert\": str(selected_incident.identifier)})\n title = f\"{status}: {alert_rule.name}\"\n title_link = alert_rule.organization.absolute_url(\n reverse(\n \"sentry-metric-alert-details\",\n kwargs={\n \"organization_slug\": alert_rule.organization.slug,\n \"alert_rule_id\": alert_rule.id,\n },\n ),\n query=query,\n )\n\n if metric_value is None:\n if (\n selected_incident is None\n and latest_incident\n and latest_incident.status != IncidentStatus.CLOSED\n ):\n # Without a selected incident, use latest incident if it is not resolved\n incident_info = latest_incident\n else:\n incident_info = selected_incident\n\n if incident_info:\n metric_value = get_metric_count_from_incident(incident_info)\n\n text = \"\"\n if metric_value is not None and status != INCIDENT_STATUS[IncidentStatus.CLOSED]:\n text = get_incident_status_text(alert_rule, metric_value)\n\n date_started = None\n if selected_incident:\n date_started = selected_incident.date_started\n\n last_triggered_date = None\n if latest_incident:\n last_triggered_date = latest_incident.date_started\n\n return {\n \"title\": title,\n \"text\": text,\n \"logo_url\": LOGO_URL,\n \"status\": status,\n \"date_started\": date_started,\n \"last_triggered_date\": last_triggered_date,\n \"title_link\": title_link,\n }\n", "path": "src/sentry/integrations/metric_alerts.py"}]}
2,825
644
gh_patches_debug_17639
rasdani/github-patches
git_diff
wagtail__wagtail-715
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- search fields can't be overridden In the past, you were able to override a search field of a parent class by redefining it. This functionality appears to be broken in Wagtail 0.7 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `wagtail/wagtailsearch/index.py` Content: ``` 1 import warnings 2 3 from six import string_types 4 5 from django.db import models 6 7 8 class Indexed(object): 9 @classmethod 10 def indexed_get_parent(cls, require_model=True): 11 for base in cls.__bases__: 12 if issubclass(base, Indexed) and (issubclass(base, models.Model) or require_model is False): 13 return base 14 15 @classmethod 16 def indexed_get_content_type(cls): 17 # Work out content type 18 content_type = (cls._meta.app_label + '_' + cls.__name__).lower() 19 20 # Get parent content type 21 parent = cls.indexed_get_parent() 22 if parent: 23 parent_content_type = parent.indexed_get_content_type() 24 return parent_content_type + '_' + content_type 25 else: 26 return content_type 27 28 @classmethod 29 def indexed_get_toplevel_content_type(cls): 30 # Get parent content type 31 parent = cls.indexed_get_parent() 32 if parent: 33 return parent.indexed_get_content_type() 34 else: 35 # At toplevel, return this content type 36 return (cls._meta.app_label + '_' + cls.__name__).lower() 37 38 @classmethod 39 def get_search_fields(cls): 40 return cls.search_fields 41 42 @classmethod 43 def get_searchable_search_fields(cls): 44 return filter(lambda field: isinstance(field, SearchField), cls.get_search_fields()) 45 46 @classmethod 47 def get_filterable_search_fields(cls): 48 return filter(lambda field: isinstance(field, FilterField), cls.get_search_fields()) 49 50 @classmethod 51 def get_indexed_objects(cls): 52 return cls.objects.all() 53 54 search_fields = () 55 56 57 class BaseField(object): 58 suffix = '' 59 60 def __init__(self, field_name, **kwargs): 61 self.field_name = field_name 62 self.kwargs = kwargs 63 64 def get_field(self, cls): 65 return cls._meta.get_field_by_name(self.field_name)[0] 66 67 def get_attname(self, cls): 68 try: 69 field = self.get_field(cls) 70 return field.attname 71 except models.fields.FieldDoesNotExist: 72 return self.field_name 73 74 def get_index_name(self, cls): 75 return self.get_attname(cls) + self.suffix 76 77 def get_type(self, cls): 78 if 'type' in self.kwargs: 79 return self.kwargs['type'] 80 81 try: 82 field = self.get_field(cls) 83 return field.get_internal_type() 84 except models.fields.FieldDoesNotExist: 85 return 'CharField' 86 87 def get_value(self, obj): 88 try: 89 field = self.get_field(obj.__class__) 90 return field._get_val_from_obj(obj) 91 except models.fields.FieldDoesNotExist: 92 value = getattr(obj, self.field_name, None) 93 if hasattr(value, '__call__'): 94 value = value() 95 return value 96 97 def __repr__(self): 98 return '<%s: %s>' % (self.__class__.__name__, self.field_name) 99 100 101 class SearchField(BaseField): 102 def __init__(self, field_name, boost=None, partial_match=False, **kwargs): 103 super(SearchField, self).__init__(field_name, **kwargs) 104 self.boost = boost 105 self.partial_match = partial_match 106 107 108 class FilterField(BaseField): 109 suffix = '_filter' 110 111 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/wagtail/wagtailsearch/index.py b/wagtail/wagtailsearch/index.py --- a/wagtail/wagtailsearch/index.py +++ b/wagtail/wagtailsearch/index.py @@ -37,15 +37,26 @@ @classmethod def get_search_fields(cls): - return cls.search_fields + search_fields = {} + + for field in cls.search_fields: + search_fields[(type(field), field.field_name)] = field + + return list(search_fields.values()) @classmethod def get_searchable_search_fields(cls): - return filter(lambda field: isinstance(field, SearchField), cls.get_search_fields()) + return [ + field for field in cls.get_search_fields() + if isinstance(field, SearchField) + ] @classmethod def get_filterable_search_fields(cls): - return filter(lambda field: isinstance(field, FilterField), cls.get_search_fields()) + return [ + field for field in cls.get_search_fields() + if isinstance(field, FilterField) + ] @classmethod def get_indexed_objects(cls):
{"golden_diff": "diff --git a/wagtail/wagtailsearch/index.py b/wagtail/wagtailsearch/index.py\n--- a/wagtail/wagtailsearch/index.py\n+++ b/wagtail/wagtailsearch/index.py\n@@ -37,15 +37,26 @@\n \n @classmethod\n def get_search_fields(cls):\n- return cls.search_fields\n+ search_fields = {}\n+\n+ for field in cls.search_fields:\n+ search_fields[(type(field), field.field_name)] = field\n+\n+ return list(search_fields.values())\n \n @classmethod\n def get_searchable_search_fields(cls):\n- return filter(lambda field: isinstance(field, SearchField), cls.get_search_fields())\n+ return [\n+ field for field in cls.get_search_fields()\n+ if isinstance(field, SearchField)\n+ ]\n \n @classmethod\n def get_filterable_search_fields(cls):\n- return filter(lambda field: isinstance(field, FilterField), cls.get_search_fields())\n+ return [\n+ field for field in cls.get_search_fields()\n+ if isinstance(field, FilterField)\n+ ]\n \n @classmethod\n def get_indexed_objects(cls):\n", "issue": "search fields can't be overridden\nIn the past, you were able to override a search field of a parent class by redefining it. This functionality appears to be broken in Wagtail 0.7\n\n", "before_files": [{"content": "import warnings\n\nfrom six import string_types\n\nfrom django.db import models\n\n\nclass Indexed(object):\n @classmethod\n def indexed_get_parent(cls, require_model=True):\n for base in cls.__bases__:\n if issubclass(base, Indexed) and (issubclass(base, models.Model) or require_model is False):\n return base\n\n @classmethod\n def indexed_get_content_type(cls):\n # Work out content type\n content_type = (cls._meta.app_label + '_' + cls.__name__).lower()\n\n # Get parent content type\n parent = cls.indexed_get_parent()\n if parent:\n parent_content_type = parent.indexed_get_content_type()\n return parent_content_type + '_' + content_type\n else:\n return content_type\n\n @classmethod\n def indexed_get_toplevel_content_type(cls):\n # Get parent content type\n parent = cls.indexed_get_parent()\n if parent:\n return parent.indexed_get_content_type()\n else:\n # At toplevel, return this content type\n return (cls._meta.app_label + '_' + cls.__name__).lower()\n\n @classmethod\n def get_search_fields(cls):\n return cls.search_fields\n\n @classmethod\n def get_searchable_search_fields(cls):\n return filter(lambda field: isinstance(field, SearchField), cls.get_search_fields())\n\n @classmethod\n def get_filterable_search_fields(cls):\n return filter(lambda field: isinstance(field, FilterField), cls.get_search_fields())\n\n @classmethod\n def get_indexed_objects(cls):\n return cls.objects.all()\n\n search_fields = ()\n\n\nclass BaseField(object):\n suffix = ''\n\n def __init__(self, field_name, **kwargs):\n self.field_name = field_name\n self.kwargs = kwargs\n\n def get_field(self, cls):\n return cls._meta.get_field_by_name(self.field_name)[0]\n\n def get_attname(self, cls):\n try:\n field = self.get_field(cls)\n return field.attname\n except models.fields.FieldDoesNotExist:\n return self.field_name\n\n def get_index_name(self, cls):\n return self.get_attname(cls) + self.suffix\n\n def get_type(self, cls):\n if 'type' in self.kwargs:\n return self.kwargs['type']\n\n try:\n field = self.get_field(cls)\n return field.get_internal_type()\n except models.fields.FieldDoesNotExist:\n return 'CharField'\n\n def get_value(self, obj):\n try:\n field = self.get_field(obj.__class__)\n return field._get_val_from_obj(obj)\n except models.fields.FieldDoesNotExist:\n value = getattr(obj, self.field_name, None)\n if hasattr(value, '__call__'):\n value = value()\n return value\n\n def __repr__(self):\n return '<%s: %s>' % (self.__class__.__name__, self.field_name)\n\n\nclass SearchField(BaseField):\n def __init__(self, field_name, boost=None, partial_match=False, **kwargs):\n super(SearchField, self).__init__(field_name, **kwargs)\n self.boost = boost\n self.partial_match = partial_match\n\n\nclass FilterField(BaseField):\n suffix = '_filter'\n\n", "path": "wagtail/wagtailsearch/index.py"}], "after_files": [{"content": "import warnings\n\nfrom six import string_types\n\nfrom django.db import models\n\n\nclass Indexed(object):\n @classmethod\n def indexed_get_parent(cls, require_model=True):\n for base in cls.__bases__:\n if issubclass(base, Indexed) and (issubclass(base, models.Model) or require_model is False):\n return base\n\n @classmethod\n def indexed_get_content_type(cls):\n # Work out content type\n content_type = (cls._meta.app_label + '_' + cls.__name__).lower()\n\n # Get parent content type\n parent = cls.indexed_get_parent()\n if parent:\n parent_content_type = parent.indexed_get_content_type()\n return parent_content_type + '_' + content_type\n else:\n return content_type\n\n @classmethod\n def indexed_get_toplevel_content_type(cls):\n # Get parent content type\n parent = cls.indexed_get_parent()\n if parent:\n return parent.indexed_get_content_type()\n else:\n # At toplevel, return this content type\n return (cls._meta.app_label + '_' + cls.__name__).lower()\n\n @classmethod\n def get_search_fields(cls):\n search_fields = {}\n\n for field in cls.search_fields:\n search_fields[(type(field), field.field_name)] = field\n\n return list(search_fields.values())\n\n @classmethod\n def get_searchable_search_fields(cls):\n return [\n field for field in cls.get_search_fields()\n if isinstance(field, SearchField)\n ]\n\n @classmethod\n def get_filterable_search_fields(cls):\n return [\n field for field in cls.get_search_fields()\n if isinstance(field, FilterField)\n ]\n\n @classmethod\n def get_indexed_objects(cls):\n return cls.objects.all()\n\n search_fields = ()\n\n\nclass BaseField(object):\n suffix = ''\n\n def __init__(self, field_name, **kwargs):\n self.field_name = field_name\n self.kwargs = kwargs\n\n def get_field(self, cls):\n return cls._meta.get_field_by_name(self.field_name)[0]\n\n def get_attname(self, cls):\n try:\n field = self.get_field(cls)\n return field.attname\n except models.fields.FieldDoesNotExist:\n return self.field_name\n\n def get_index_name(self, cls):\n return self.get_attname(cls) + self.suffix\n\n def get_type(self, cls):\n if 'type' in self.kwargs:\n return self.kwargs['type']\n\n try:\n field = self.get_field(cls)\n return field.get_internal_type()\n except models.fields.FieldDoesNotExist:\n return 'CharField'\n\n def get_value(self, obj):\n try:\n field = self.get_field(obj.__class__)\n return field._get_val_from_obj(obj)\n except models.fields.FieldDoesNotExist:\n value = getattr(obj, self.field_name, None)\n if hasattr(value, '__call__'):\n value = value()\n return value\n\n def __repr__(self):\n return '<%s: %s>' % (self.__class__.__name__, self.field_name)\n\n\nclass SearchField(BaseField):\n def __init__(self, field_name, boost=None, partial_match=False, **kwargs):\n super(SearchField, self).__init__(field_name, **kwargs)\n self.boost = boost\n self.partial_match = partial_match\n\n\nclass FilterField(BaseField):\n suffix = '_filter'\n\n", "path": "wagtail/wagtailsearch/index.py"}]}
1,233
252
gh_patches_debug_20508
rasdani/github-patches
git_diff
oobabooga__text-generation-webui-5794
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Save UI defaults to settings.yaml Not working ### Describe the bug When I try to activate few options and try using Save UI defaults to settings.yaml it save empty settings.yaml ### Is there an existing issue for this? - [X] I have searched the existing issues ### Reproduction I check few options like in the image below: ![image](https://github.com/oobabooga/text-generation-webui/assets/4099839/1aee6ff0-abb3-4052-8831-a01fa9ef8e33) and press Save UI defaults to settings.yaml close the termenal and start again and it goes back as if I did not set those options Add to that when I check settings.yaml it is totally empty file ### Screenshot _No response_ ### Logs ```shell it does not show any log ``` ### System Info ```shell Windows 11 I9 13900 Nvidia 4090 128GB RAM ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `modules/ui.py` Content: ``` 1 import copy 2 from pathlib import Path 3 4 import gradio as gr 5 import torch 6 import yaml 7 from transformers import is_torch_xpu_available 8 9 import extensions 10 from modules import shared 11 12 with open(Path(__file__).resolve().parent / '../css/NotoSans/stylesheet.css', 'r') as f: 13 css = f.read() 14 with open(Path(__file__).resolve().parent / '../css/main.css', 'r') as f: 15 css += f.read() 16 with open(Path(__file__).resolve().parent / '../js/main.js', 'r') as f: 17 js = f.read() 18 with open(Path(__file__).resolve().parent / '../js/save_files.js', 'r') as f: 19 save_files_js = f.read() 20 with open(Path(__file__).resolve().parent / '../js/switch_tabs.js', 'r') as f: 21 switch_tabs_js = f.read() 22 with open(Path(__file__).resolve().parent / '../js/show_controls.js', 'r') as f: 23 show_controls_js = f.read() 24 with open(Path(__file__).resolve().parent / '../js/update_big_picture.js', 'r') as f: 25 update_big_picture_js = f.read() 26 27 refresh_symbol = '🔄' 28 delete_symbol = '🗑️' 29 save_symbol = '💾' 30 31 theme = gr.themes.Default( 32 font=['Noto Sans', 'Helvetica', 'ui-sans-serif', 'system-ui', 'sans-serif'], 33 font_mono=['IBM Plex Mono', 'ui-monospace', 'Consolas', 'monospace'], 34 ).set( 35 border_color_primary='#c5c5d2', 36 button_large_padding='6px 12px', 37 body_text_color_subdued='#484848', 38 background_fill_secondary='#eaeaea', 39 background_fill_primary='#fafafa', 40 ) 41 42 if Path("notification.mp3").exists(): 43 audio_notification_js = "document.querySelector('#audio_notification audio')?.play();" 44 else: 45 audio_notification_js = "" 46 47 48 def list_model_elements(): 49 elements = [ 50 'loader', 51 'filter_by_loader', 52 'cpu_memory', 53 'auto_devices', 54 'disk', 55 'cpu', 56 'bf16', 57 'load_in_8bit', 58 'trust_remote_code', 59 'no_use_fast', 60 'use_flash_attention_2', 61 'load_in_4bit', 62 'compute_dtype', 63 'quant_type', 64 'use_double_quant', 65 'wbits', 66 'groupsize', 67 'model_type', 68 'pre_layer', 69 'triton', 70 'desc_act', 71 'no_inject_fused_attention', 72 'no_inject_fused_mlp', 73 'no_use_cuda_fp16', 74 'disable_exllama', 75 'disable_exllamav2', 76 'cfg_cache', 77 'no_flash_attn', 78 'num_experts_per_token', 79 'cache_8bit', 80 'cache_4bit', 81 'autosplit', 82 'threads', 83 'threads_batch', 84 'n_batch', 85 'no_mmap', 86 'mlock', 87 'no_mul_mat_q', 88 'n_gpu_layers', 89 'tensor_split', 90 'n_ctx', 91 'gpu_split', 92 'max_seq_len', 93 'compress_pos_emb', 94 'alpha_value', 95 'rope_freq_base', 96 'numa', 97 'logits_all', 98 'no_offload_kqv', 99 'row_split', 100 'tensorcores', 101 'streaming_llm', 102 'attention_sink_size', 103 'hqq_backend', 104 ] 105 if is_torch_xpu_available(): 106 for i in range(torch.xpu.device_count()): 107 elements.append(f'gpu_memory_{i}') 108 else: 109 for i in range(torch.cuda.device_count()): 110 elements.append(f'gpu_memory_{i}') 111 112 return elements 113 114 115 def list_interface_input_elements(): 116 elements = [ 117 'max_new_tokens', 118 'auto_max_new_tokens', 119 'max_tokens_second', 120 'prompt_lookup_num_tokens', 121 'seed', 122 'temperature', 123 'temperature_last', 124 'dynamic_temperature', 125 'dynatemp_low', 126 'dynatemp_high', 127 'dynatemp_exponent', 128 'smoothing_factor', 129 'smoothing_curve', 130 'top_p', 131 'min_p', 132 'top_k', 133 'typical_p', 134 'epsilon_cutoff', 135 'eta_cutoff', 136 'repetition_penalty', 137 'presence_penalty', 138 'frequency_penalty', 139 'repetition_penalty_range', 140 'encoder_repetition_penalty', 141 'no_repeat_ngram_size', 142 'do_sample', 143 'penalty_alpha', 144 'mirostat_mode', 145 'mirostat_tau', 146 'mirostat_eta', 147 'grammar_string', 148 'negative_prompt', 149 'guidance_scale', 150 'add_bos_token', 151 'ban_eos_token', 152 'custom_token_bans', 153 'sampler_priority', 154 'truncation_length', 155 'custom_stopping_strings', 156 'skip_special_tokens', 157 'stream', 158 'tfs', 159 'top_a', 160 ] 161 162 # Chat elements 163 elements += [ 164 'textbox', 165 'start_with', 166 'character_menu', 167 'history', 168 'name1', 169 'user_bio', 170 'name2', 171 'greeting', 172 'context', 173 'mode', 174 'custom_system_message', 175 'instruction_template_str', 176 'chat_template_str', 177 'chat_style', 178 'chat-instruct_command', 179 ] 180 181 # Notebook/default elements 182 elements += [ 183 'textbox-notebook', 184 'textbox-default', 185 'output_textbox', 186 'prompt_menu-default', 187 'prompt_menu-notebook', 188 ] 189 190 # Model elements 191 elements += list_model_elements() 192 193 return elements 194 195 196 def gather_interface_values(*args): 197 output = {} 198 for i, element in enumerate(list_interface_input_elements()): 199 output[element] = args[i] 200 201 if not shared.args.multi_user: 202 shared.persistent_interface_state = output 203 204 return output 205 206 207 def apply_interface_values(state, use_persistent=False): 208 if use_persistent: 209 state = shared.persistent_interface_state 210 211 elements = list_interface_input_elements() 212 if len(state) == 0: 213 return [gr.update() for k in elements] # Dummy, do nothing 214 else: 215 return [state[k] if k in state else gr.update() for k in elements] 216 217 218 def save_settings(state, preset, extensions_list, show_controls, theme_state): 219 output = copy.deepcopy(shared.settings) 220 exclude = ['name2', 'greeting', 'context', 'turn_template', 'truncation_length'] 221 for k in state: 222 if k in shared.settings and k not in exclude: 223 output[k] = state[k] 224 225 output['preset'] = preset 226 output['prompt-default'] = state['prompt_menu-default'] 227 output['prompt-notebook'] = state['prompt_menu-notebook'] 228 output['character'] = state['character_menu'] 229 output['default_extensions'] = extensions_list 230 output['seed'] = int(output['seed']) 231 output['show_controls'] = show_controls 232 output['dark_theme'] = True if theme_state == 'dark' else False 233 234 # Save extension values in the UI 235 for extension_name in extensions_list: 236 extension = getattr(extensions, extension_name).script 237 if hasattr(extension, 'params'): 238 params = getattr(extension, 'params') 239 for param in params: 240 _id = f"{extension_name}-{param}" 241 # Only save if different from default value 242 if param not in shared.default_settings or params[param] != shared.default_settings[param]: 243 output[_id] = params[param] 244 245 # Do not save unchanged settings 246 for key in list(output.keys()): 247 if key in shared.default_settings and output[key] == shared.default_settings[key]: 248 output.pop(key) 249 250 return yaml.dump(output, sort_keys=False, width=float("inf")) 251 252 253 def create_refresh_button(refresh_component, refresh_method, refreshed_args, elem_class, interactive=True): 254 """ 255 Copied from https://github.com/AUTOMATIC1111/stable-diffusion-webui 256 """ 257 def refresh(): 258 refresh_method() 259 args = refreshed_args() if callable(refreshed_args) else refreshed_args 260 261 return gr.update(**(args or {})) 262 263 refresh_button = gr.Button(refresh_symbol, elem_classes=elem_class, interactive=interactive) 264 refresh_button.click( 265 fn=lambda: {k: tuple(v) if type(k) is list else v for k, v in refresh().items()}, 266 inputs=[], 267 outputs=[refresh_component] 268 ) 269 270 return refresh_button 271 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/modules/ui.py b/modules/ui.py --- a/modules/ui.py +++ b/modules/ui.py @@ -233,14 +233,16 @@ # Save extension values in the UI for extension_name in extensions_list: - extension = getattr(extensions, extension_name).script - if hasattr(extension, 'params'): - params = getattr(extension, 'params') - for param in params: - _id = f"{extension_name}-{param}" - # Only save if different from default value - if param not in shared.default_settings or params[param] != shared.default_settings[param]: - output[_id] = params[param] + extension = getattr(extensions, extension_name, None) + if extension: + extension = extension.script + if hasattr(extension, 'params'): + params = getattr(extension, 'params') + for param in params: + _id = f"{extension_name}-{param}" + # Only save if different from default value + if param not in shared.default_settings or params[param] != shared.default_settings[param]: + output[_id] = params[param] # Do not save unchanged settings for key in list(output.keys()):
{"golden_diff": "diff --git a/modules/ui.py b/modules/ui.py\n--- a/modules/ui.py\n+++ b/modules/ui.py\n@@ -233,14 +233,16 @@\n \n # Save extension values in the UI\n for extension_name in extensions_list:\n- extension = getattr(extensions, extension_name).script\n- if hasattr(extension, 'params'):\n- params = getattr(extension, 'params')\n- for param in params:\n- _id = f\"{extension_name}-{param}\"\n- # Only save if different from default value\n- if param not in shared.default_settings or params[param] != shared.default_settings[param]:\n- output[_id] = params[param]\n+ extension = getattr(extensions, extension_name, None)\n+ if extension:\n+ extension = extension.script\n+ if hasattr(extension, 'params'):\n+ params = getattr(extension, 'params')\n+ for param in params:\n+ _id = f\"{extension_name}-{param}\"\n+ # Only save if different from default value\n+ if param not in shared.default_settings or params[param] != shared.default_settings[param]:\n+ output[_id] = params[param]\n \n # Do not save unchanged settings\n for key in list(output.keys()):\n", "issue": "Save UI defaults to settings.yaml Not working\n### Describe the bug\n\nWhen I try to activate few options and try using Save UI defaults to settings.yaml it save empty settings.yaml\r\n\r\n\n\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues\n\n### Reproduction\n\nI check few options like in the image below:\r\n\r\n![image](https://github.com/oobabooga/text-generation-webui/assets/4099839/1aee6ff0-abb3-4052-8831-a01fa9ef8e33)\r\n\r\nand press Save UI defaults to settings.yaml\r\n\r\nclose the termenal and start again and it goes back as if I did not set those options\r\nAdd to that when I check settings.yaml it is totally empty file\r\n\n\n### Screenshot\n\n_No response_\n\n### Logs\n\n```shell\nit does not show any log\n```\n\n\n### System Info\n\n```shell\nWindows 11\r\nI9 13900\r\nNvidia 4090\r\n128GB RAM\n```\n\n", "before_files": [{"content": "import copy\nfrom pathlib import Path\n\nimport gradio as gr\nimport torch\nimport yaml\nfrom transformers import is_torch_xpu_available\n\nimport extensions\nfrom modules import shared\n\nwith open(Path(__file__).resolve().parent / '../css/NotoSans/stylesheet.css', 'r') as f:\n css = f.read()\nwith open(Path(__file__).resolve().parent / '../css/main.css', 'r') as f:\n css += f.read()\nwith open(Path(__file__).resolve().parent / '../js/main.js', 'r') as f:\n js = f.read()\nwith open(Path(__file__).resolve().parent / '../js/save_files.js', 'r') as f:\n save_files_js = f.read()\nwith open(Path(__file__).resolve().parent / '../js/switch_tabs.js', 'r') as f:\n switch_tabs_js = f.read()\nwith open(Path(__file__).resolve().parent / '../js/show_controls.js', 'r') as f:\n show_controls_js = f.read()\nwith open(Path(__file__).resolve().parent / '../js/update_big_picture.js', 'r') as f:\n update_big_picture_js = f.read()\n\nrefresh_symbol = '\ud83d\udd04'\ndelete_symbol = '\ud83d\uddd1\ufe0f'\nsave_symbol = '\ud83d\udcbe'\n\ntheme = gr.themes.Default(\n font=['Noto Sans', 'Helvetica', 'ui-sans-serif', 'system-ui', 'sans-serif'],\n font_mono=['IBM Plex Mono', 'ui-monospace', 'Consolas', 'monospace'],\n).set(\n border_color_primary='#c5c5d2',\n button_large_padding='6px 12px',\n body_text_color_subdued='#484848',\n background_fill_secondary='#eaeaea',\n background_fill_primary='#fafafa',\n)\n\nif Path(\"notification.mp3\").exists():\n audio_notification_js = \"document.querySelector('#audio_notification audio')?.play();\"\nelse:\n audio_notification_js = \"\"\n\n\ndef list_model_elements():\n elements = [\n 'loader',\n 'filter_by_loader',\n 'cpu_memory',\n 'auto_devices',\n 'disk',\n 'cpu',\n 'bf16',\n 'load_in_8bit',\n 'trust_remote_code',\n 'no_use_fast',\n 'use_flash_attention_2',\n 'load_in_4bit',\n 'compute_dtype',\n 'quant_type',\n 'use_double_quant',\n 'wbits',\n 'groupsize',\n 'model_type',\n 'pre_layer',\n 'triton',\n 'desc_act',\n 'no_inject_fused_attention',\n 'no_inject_fused_mlp',\n 'no_use_cuda_fp16',\n 'disable_exllama',\n 'disable_exllamav2',\n 'cfg_cache',\n 'no_flash_attn',\n 'num_experts_per_token',\n 'cache_8bit',\n 'cache_4bit',\n 'autosplit',\n 'threads',\n 'threads_batch',\n 'n_batch',\n 'no_mmap',\n 'mlock',\n 'no_mul_mat_q',\n 'n_gpu_layers',\n 'tensor_split',\n 'n_ctx',\n 'gpu_split',\n 'max_seq_len',\n 'compress_pos_emb',\n 'alpha_value',\n 'rope_freq_base',\n 'numa',\n 'logits_all',\n 'no_offload_kqv',\n 'row_split',\n 'tensorcores',\n 'streaming_llm',\n 'attention_sink_size',\n 'hqq_backend',\n ]\n if is_torch_xpu_available():\n for i in range(torch.xpu.device_count()):\n elements.append(f'gpu_memory_{i}')\n else:\n for i in range(torch.cuda.device_count()):\n elements.append(f'gpu_memory_{i}')\n\n return elements\n\n\ndef list_interface_input_elements():\n elements = [\n 'max_new_tokens',\n 'auto_max_new_tokens',\n 'max_tokens_second',\n 'prompt_lookup_num_tokens',\n 'seed',\n 'temperature',\n 'temperature_last',\n 'dynamic_temperature',\n 'dynatemp_low',\n 'dynatemp_high',\n 'dynatemp_exponent',\n 'smoothing_factor',\n 'smoothing_curve',\n 'top_p',\n 'min_p',\n 'top_k',\n 'typical_p',\n 'epsilon_cutoff',\n 'eta_cutoff',\n 'repetition_penalty',\n 'presence_penalty',\n 'frequency_penalty',\n 'repetition_penalty_range',\n 'encoder_repetition_penalty',\n 'no_repeat_ngram_size',\n 'do_sample',\n 'penalty_alpha',\n 'mirostat_mode',\n 'mirostat_tau',\n 'mirostat_eta',\n 'grammar_string',\n 'negative_prompt',\n 'guidance_scale',\n 'add_bos_token',\n 'ban_eos_token',\n 'custom_token_bans',\n 'sampler_priority',\n 'truncation_length',\n 'custom_stopping_strings',\n 'skip_special_tokens',\n 'stream',\n 'tfs',\n 'top_a',\n ]\n\n # Chat elements\n elements += [\n 'textbox',\n 'start_with',\n 'character_menu',\n 'history',\n 'name1',\n 'user_bio',\n 'name2',\n 'greeting',\n 'context',\n 'mode',\n 'custom_system_message',\n 'instruction_template_str',\n 'chat_template_str',\n 'chat_style',\n 'chat-instruct_command',\n ]\n\n # Notebook/default elements\n elements += [\n 'textbox-notebook',\n 'textbox-default',\n 'output_textbox',\n 'prompt_menu-default',\n 'prompt_menu-notebook',\n ]\n\n # Model elements\n elements += list_model_elements()\n\n return elements\n\n\ndef gather_interface_values(*args):\n output = {}\n for i, element in enumerate(list_interface_input_elements()):\n output[element] = args[i]\n\n if not shared.args.multi_user:\n shared.persistent_interface_state = output\n\n return output\n\n\ndef apply_interface_values(state, use_persistent=False):\n if use_persistent:\n state = shared.persistent_interface_state\n\n elements = list_interface_input_elements()\n if len(state) == 0:\n return [gr.update() for k in elements] # Dummy, do nothing\n else:\n return [state[k] if k in state else gr.update() for k in elements]\n\n\ndef save_settings(state, preset, extensions_list, show_controls, theme_state):\n output = copy.deepcopy(shared.settings)\n exclude = ['name2', 'greeting', 'context', 'turn_template', 'truncation_length']\n for k in state:\n if k in shared.settings and k not in exclude:\n output[k] = state[k]\n\n output['preset'] = preset\n output['prompt-default'] = state['prompt_menu-default']\n output['prompt-notebook'] = state['prompt_menu-notebook']\n output['character'] = state['character_menu']\n output['default_extensions'] = extensions_list\n output['seed'] = int(output['seed'])\n output['show_controls'] = show_controls\n output['dark_theme'] = True if theme_state == 'dark' else False\n\n # Save extension values in the UI\n for extension_name in extensions_list:\n extension = getattr(extensions, extension_name).script\n if hasattr(extension, 'params'):\n params = getattr(extension, 'params')\n for param in params:\n _id = f\"{extension_name}-{param}\"\n # Only save if different from default value\n if param not in shared.default_settings or params[param] != shared.default_settings[param]:\n output[_id] = params[param]\n\n # Do not save unchanged settings\n for key in list(output.keys()):\n if key in shared.default_settings and output[key] == shared.default_settings[key]:\n output.pop(key)\n\n return yaml.dump(output, sort_keys=False, width=float(\"inf\"))\n\n\ndef create_refresh_button(refresh_component, refresh_method, refreshed_args, elem_class, interactive=True):\n \"\"\"\n Copied from https://github.com/AUTOMATIC1111/stable-diffusion-webui\n \"\"\"\n def refresh():\n refresh_method()\n args = refreshed_args() if callable(refreshed_args) else refreshed_args\n\n return gr.update(**(args or {}))\n\n refresh_button = gr.Button(refresh_symbol, elem_classes=elem_class, interactive=interactive)\n refresh_button.click(\n fn=lambda: {k: tuple(v) if type(k) is list else v for k, v in refresh().items()},\n inputs=[],\n outputs=[refresh_component]\n )\n\n return refresh_button\n", "path": "modules/ui.py"}], "after_files": [{"content": "import copy\nfrom pathlib import Path\n\nimport gradio as gr\nimport torch\nimport yaml\nfrom transformers import is_torch_xpu_available\n\nimport extensions\nfrom modules import shared\n\nwith open(Path(__file__).resolve().parent / '../css/NotoSans/stylesheet.css', 'r') as f:\n css = f.read()\nwith open(Path(__file__).resolve().parent / '../css/main.css', 'r') as f:\n css += f.read()\nwith open(Path(__file__).resolve().parent / '../js/main.js', 'r') as f:\n js = f.read()\nwith open(Path(__file__).resolve().parent / '../js/save_files.js', 'r') as f:\n save_files_js = f.read()\nwith open(Path(__file__).resolve().parent / '../js/switch_tabs.js', 'r') as f:\n switch_tabs_js = f.read()\nwith open(Path(__file__).resolve().parent / '../js/show_controls.js', 'r') as f:\n show_controls_js = f.read()\nwith open(Path(__file__).resolve().parent / '../js/update_big_picture.js', 'r') as f:\n update_big_picture_js = f.read()\n\nrefresh_symbol = '\ud83d\udd04'\ndelete_symbol = '\ud83d\uddd1\ufe0f'\nsave_symbol = '\ud83d\udcbe'\n\ntheme = gr.themes.Default(\n font=['Noto Sans', 'Helvetica', 'ui-sans-serif', 'system-ui', 'sans-serif'],\n font_mono=['IBM Plex Mono', 'ui-monospace', 'Consolas', 'monospace'],\n).set(\n border_color_primary='#c5c5d2',\n button_large_padding='6px 12px',\n body_text_color_subdued='#484848',\n background_fill_secondary='#eaeaea',\n background_fill_primary='#fafafa',\n)\n\nif Path(\"notification.mp3\").exists():\n audio_notification_js = \"document.querySelector('#audio_notification audio')?.play();\"\nelse:\n audio_notification_js = \"\"\n\n\ndef list_model_elements():\n elements = [\n 'loader',\n 'filter_by_loader',\n 'cpu_memory',\n 'auto_devices',\n 'disk',\n 'cpu',\n 'bf16',\n 'load_in_8bit',\n 'trust_remote_code',\n 'no_use_fast',\n 'use_flash_attention_2',\n 'load_in_4bit',\n 'compute_dtype',\n 'quant_type',\n 'use_double_quant',\n 'wbits',\n 'groupsize',\n 'model_type',\n 'pre_layer',\n 'triton',\n 'desc_act',\n 'no_inject_fused_attention',\n 'no_inject_fused_mlp',\n 'no_use_cuda_fp16',\n 'disable_exllama',\n 'disable_exllamav2',\n 'cfg_cache',\n 'no_flash_attn',\n 'num_experts_per_token',\n 'cache_8bit',\n 'cache_4bit',\n 'autosplit',\n 'threads',\n 'threads_batch',\n 'n_batch',\n 'no_mmap',\n 'mlock',\n 'no_mul_mat_q',\n 'n_gpu_layers',\n 'tensor_split',\n 'n_ctx',\n 'gpu_split',\n 'max_seq_len',\n 'compress_pos_emb',\n 'alpha_value',\n 'rope_freq_base',\n 'numa',\n 'logits_all',\n 'no_offload_kqv',\n 'row_split',\n 'tensorcores',\n 'streaming_llm',\n 'attention_sink_size',\n 'hqq_backend',\n ]\n if is_torch_xpu_available():\n for i in range(torch.xpu.device_count()):\n elements.append(f'gpu_memory_{i}')\n else:\n for i in range(torch.cuda.device_count()):\n elements.append(f'gpu_memory_{i}')\n\n return elements\n\n\ndef list_interface_input_elements():\n elements = [\n 'max_new_tokens',\n 'auto_max_new_tokens',\n 'max_tokens_second',\n 'prompt_lookup_num_tokens',\n 'seed',\n 'temperature',\n 'temperature_last',\n 'dynamic_temperature',\n 'dynatemp_low',\n 'dynatemp_high',\n 'dynatemp_exponent',\n 'smoothing_factor',\n 'smoothing_curve',\n 'top_p',\n 'min_p',\n 'top_k',\n 'typical_p',\n 'epsilon_cutoff',\n 'eta_cutoff',\n 'repetition_penalty',\n 'presence_penalty',\n 'frequency_penalty',\n 'repetition_penalty_range',\n 'encoder_repetition_penalty',\n 'no_repeat_ngram_size',\n 'do_sample',\n 'penalty_alpha',\n 'mirostat_mode',\n 'mirostat_tau',\n 'mirostat_eta',\n 'grammar_string',\n 'negative_prompt',\n 'guidance_scale',\n 'add_bos_token',\n 'ban_eos_token',\n 'custom_token_bans',\n 'sampler_priority',\n 'truncation_length',\n 'custom_stopping_strings',\n 'skip_special_tokens',\n 'stream',\n 'tfs',\n 'top_a',\n ]\n\n # Chat elements\n elements += [\n 'textbox',\n 'start_with',\n 'character_menu',\n 'history',\n 'name1',\n 'user_bio',\n 'name2',\n 'greeting',\n 'context',\n 'mode',\n 'custom_system_message',\n 'instruction_template_str',\n 'chat_template_str',\n 'chat_style',\n 'chat-instruct_command',\n ]\n\n # Notebook/default elements\n elements += [\n 'textbox-notebook',\n 'textbox-default',\n 'output_textbox',\n 'prompt_menu-default',\n 'prompt_menu-notebook',\n ]\n\n # Model elements\n elements += list_model_elements()\n\n return elements\n\n\ndef gather_interface_values(*args):\n output = {}\n for i, element in enumerate(list_interface_input_elements()):\n output[element] = args[i]\n\n if not shared.args.multi_user:\n shared.persistent_interface_state = output\n\n return output\n\n\ndef apply_interface_values(state, use_persistent=False):\n if use_persistent:\n state = shared.persistent_interface_state\n\n elements = list_interface_input_elements()\n if len(state) == 0:\n return [gr.update() for k in elements] # Dummy, do nothing\n else:\n return [state[k] if k in state else gr.update() for k in elements]\n\n\ndef save_settings(state, preset, extensions_list, show_controls, theme_state):\n output = copy.deepcopy(shared.settings)\n exclude = ['name2', 'greeting', 'context', 'turn_template', 'truncation_length']\n for k in state:\n if k in shared.settings and k not in exclude:\n output[k] = state[k]\n\n output['preset'] = preset\n output['prompt-default'] = state['prompt_menu-default']\n output['prompt-notebook'] = state['prompt_menu-notebook']\n output['character'] = state['character_menu']\n output['default_extensions'] = extensions_list\n output['seed'] = int(output['seed'])\n output['show_controls'] = show_controls\n output['dark_theme'] = True if theme_state == 'dark' else False\n\n # Save extension values in the UI\n for extension_name in extensions_list:\n extension = getattr(extensions, extension_name, None)\n if extension:\n extension = extension.script\n if hasattr(extension, 'params'):\n params = getattr(extension, 'params')\n for param in params:\n _id = f\"{extension_name}-{param}\"\n # Only save if different from default value\n if param not in shared.default_settings or params[param] != shared.default_settings[param]:\n output[_id] = params[param]\n\n # Do not save unchanged settings\n for key in list(output.keys()):\n if key in shared.default_settings and output[key] == shared.default_settings[key]:\n output.pop(key)\n\n return yaml.dump(output, sort_keys=False, width=float(\"inf\"))\n\n\ndef create_refresh_button(refresh_component, refresh_method, refreshed_args, elem_class, interactive=True):\n \"\"\"\n Copied from https://github.com/AUTOMATIC1111/stable-diffusion-webui\n \"\"\"\n def refresh():\n refresh_method()\n args = refreshed_args() if callable(refreshed_args) else refreshed_args\n\n return gr.update(**(args or {}))\n\n refresh_button = gr.Button(refresh_symbol, elem_classes=elem_class, interactive=interactive)\n refresh_button.click(\n fn=lambda: {k: tuple(v) if type(k) is list else v for k, v in refresh().items()},\n inputs=[],\n outputs=[refresh_component]\n )\n\n return refresh_button\n", "path": "modules/ui.py"}]}
3,050
269
gh_patches_debug_60812
rasdani/github-patches
git_diff
huggingface__peft-1046
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- add_weighted_adapter() is unusable, throws error: "Invalid type <class 'list'> found in target_modules" ### System Info - `transformers` version: 4.34.0 - Platform: Linux-6.5.6-arch2-1-x86_64-with-glibc2.35 - Python version: 3.10.12 - Huggingface_hub version: 0.17.3 - Safetensors version: 0.4.0 - Accelerate version: 0.23.0 - Accelerate config: not found - PyTorch version (GPU?): 2.1.0+cu121 (True) - Tensorflow version (GPU?): not installed (NA) - Flax version (CPU?/GPU?/TPU?): not installed (NA) - Jax version: not installed - JaxLib version: not installed - Using GPU in script?: false - Using distributed or parallel set-up in script?: false ### Who can help? @pacman100 @you ### Information - [ ] The official example scripts - [X] My own modified scripts ### Tasks - [ ] An officially supported task in the `examples` folder - [X] My own task or dataset (give details below) ### Reproduction Load the model: ``` model = AutoModelForCausalLM.from_pretrained( "RWKV/rwkv-4-169m-pile" device_map="auto" ) ``` Convert it into a PeftModel (to make the `add_weighted_adapter()` method available): ``` model = PeftModel.from_pretrained( model, f"{adapter_dir}/{adapter}", adapter_name='adapter_1' ) ``` Load the 2nd adapter: ``` model.load_adapter( f"{adapter_dir}/{adapter}", adapter_name='adapter_2' ) ``` Attempt to merge the two adapters: ``` model.add_weighted_adapter( adapters=['adapter_1', 'adapter_2'], weights=[0.5, 0.5], adapter_name="combined", combination_type="svd", ) ``` This will result in the error message: ``` Invalid type <class 'list'> found in target_modules ``` ### Expected behavior I have 2 fully trained LoRA adapters. The only configuration difference between them is the rank and alpha used for each. I would like to merge the two adapters into a new adapter, and set that adapter to the "active" one. However, trying to use the `add_weighted_adapter()` method always results in the following error: ``` Invalid type <class 'list'> found in target_modules ``` I would expect the `add_weighted_adapter()` method to accept a list, for the "target_modules" argument. If you can provide any advice, I would greatly appreciate it. I suspect that this is either unsupported and/or not fully-implemented; or, it has something to do with the way I'm attaching adapters. I've tried a bunch of alternate configurations, but I'm not having luck. Thanks in advance for any help you might provide. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/peft/config.py` Content: ``` 1 # coding=utf-8 2 # Copyright 2023-present the HuggingFace Inc. team. 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 import inspect 16 import json 17 import os 18 from dataclasses import asdict, dataclass, field 19 from typing import Dict, Optional, Union 20 21 from huggingface_hub import hf_hub_download 22 from transformers.utils import PushToHubMixin 23 24 from .utils import CONFIG_NAME, PeftType, TaskType 25 26 27 @dataclass 28 class PeftConfigMixin(PushToHubMixin): 29 r""" 30 This is the base configuration class for PEFT adapter models. It contains all the methods that are common to all 31 PEFT adapter models. This class inherits from [`~transformers.utils.PushToHubMixin`] which contains the methods to 32 push your model to the Hub. The method `save_pretrained` will save the configuration of your adapter model in a 33 directory. The method `from_pretrained` will load the configuration of your adapter model from a directory. 34 35 Args: 36 peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use. 37 """ 38 peft_type: Optional[PeftType] = field(default=None, metadata={"help": "The type of PEFT model."}) 39 auto_mapping: Optional[dict] = field( 40 default=None, metadata={"help": "An auto mapping dict to help retrieve the base model class if needed."} 41 ) 42 43 def to_dict(self) -> Dict: 44 return asdict(self) 45 46 def save_pretrained(self, save_directory: str, **kwargs) -> None: 47 r""" 48 This method saves the configuration of your adapter model in a directory. 49 50 Args: 51 save_directory (`str`): 52 The directory where the configuration will be saved. 53 kwargs (additional keyword arguments, *optional*): 54 Additional keyword arguments passed along to the [`~transformers.utils.PushToHubMixin.push_to_hub`] 55 method. 56 """ 57 if os.path.isfile(save_directory): 58 raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file") 59 60 os.makedirs(save_directory, exist_ok=True) 61 auto_mapping_dict = kwargs.pop("auto_mapping_dict", None) 62 63 output_dict = asdict(self) 64 # converting set type to list 65 for key, value in output_dict.items(): 66 if isinstance(value, set): 67 output_dict[key] = list(value) 68 69 output_path = os.path.join(save_directory, CONFIG_NAME) 70 71 # Add auto mapping details for custom models. 72 if auto_mapping_dict is not None: 73 output_dict["auto_mapping"] = auto_mapping_dict 74 75 # save it 76 with open(output_path, "w") as writer: 77 writer.write(json.dumps(output_dict, indent=2, sort_keys=True)) 78 79 @classmethod 80 def from_pretrained(cls, pretrained_model_name_or_path: str, subfolder: Optional[str] = None, **kwargs): 81 r""" 82 This method loads the configuration of your adapter model from a directory. 83 84 Args: 85 pretrained_model_name_or_path (`str`): 86 The directory or the Hub repository id where the configuration is saved. 87 kwargs (additional keyword arguments, *optional*): 88 Additional keyword arguments passed along to the child class initialization. 89 """ 90 # Avoid circular dependency .. TODO: fix this with a larger refactor 91 from peft.mapping import PEFT_TYPE_TO_CONFIG_MAPPING 92 93 path = ( 94 os.path.join(pretrained_model_name_or_path, subfolder) 95 if subfolder is not None 96 else pretrained_model_name_or_path 97 ) 98 99 hf_hub_download_kwargs, class_kwargs, _ = cls._split_kwargs(kwargs) 100 101 if os.path.isfile(os.path.join(path, CONFIG_NAME)): 102 config_file = os.path.join(path, CONFIG_NAME) 103 else: 104 try: 105 config_file = hf_hub_download( 106 pretrained_model_name_or_path, CONFIG_NAME, subfolder=subfolder, **hf_hub_download_kwargs 107 ) 108 except Exception: 109 raise ValueError(f"Can't find '{CONFIG_NAME}' at '{pretrained_model_name_or_path}'") 110 111 loaded_attributes = cls.from_json_file(config_file) 112 113 # TODO: this hack is needed to fix the following issue (on commit 702f937): 114 # if someone saves a default config and loads it back with `PeftConfig` class it yields to 115 # not loading the correct config class. 116 117 # from peft import AdaLoraConfig, PeftConfig 118 # peft_config = AdaLoraConfig() 119 # print(peft_config) 120 # >>> AdaLoraConfig(peft_type=<PeftType.ADALORA: 'ADALORA'>, auto_mapping=None, base_model_name_or_path=None, 121 # revision=None, task_type=None, inference_mode=False, r=8, target_modules=None, lora_alpha=8, lora_dropout=0.0, ... 122 # 123 # peft_config.save_pretrained("./test_config") 124 # peft_config = PeftConfig.from_pretrained("./test_config") 125 # print(peft_config) 126 # >>> PeftConfig(peft_type='ADALORA', auto_mapping=None, base_model_name_or_path=None, revision=None, task_type=None, inference_mode=False) 127 if "peft_type" in loaded_attributes: 128 peft_type = loaded_attributes["peft_type"] 129 config_cls = PEFT_TYPE_TO_CONFIG_MAPPING[peft_type] 130 else: 131 config_cls = cls 132 133 config = config_cls(**class_kwargs) 134 135 for key, value in loaded_attributes.items(): 136 if hasattr(config, key): 137 setattr(config, key, value) 138 139 return config 140 141 @classmethod 142 def from_json_file(cls, path_json_file: str, **kwargs): 143 r""" 144 Loads a configuration file from a json file. 145 146 Args: 147 path_json_file (`str`): 148 The path to the json file. 149 """ 150 with open(path_json_file, "r") as file: 151 json_object = json.load(file) 152 153 return json_object 154 155 @classmethod 156 def _split_kwargs(cls, kwargs): 157 hf_hub_download_kwargs = {} 158 class_kwargs = {} 159 other_kwargs = {} 160 161 for key, value in kwargs.items(): 162 if key in inspect.signature(hf_hub_download).parameters: 163 hf_hub_download_kwargs[key] = value 164 elif key in list(cls.__annotations__): 165 class_kwargs[key] = value 166 else: 167 other_kwargs[key] = value 168 169 return hf_hub_download_kwargs, class_kwargs, other_kwargs 170 171 @classmethod 172 def _get_peft_type( 173 cls, 174 model_id: str, 175 **hf_hub_download_kwargs, 176 ): 177 subfolder = hf_hub_download_kwargs.get("subfolder", None) 178 179 path = os.path.join(model_id, subfolder) if subfolder is not None else model_id 180 181 if os.path.isfile(os.path.join(path, CONFIG_NAME)): 182 config_file = os.path.join(path, CONFIG_NAME) 183 else: 184 try: 185 config_file = hf_hub_download( 186 model_id, 187 CONFIG_NAME, 188 **hf_hub_download_kwargs, 189 ) 190 except Exception: 191 raise ValueError(f"Can't find '{CONFIG_NAME}' at '{model_id}'") 192 193 loaded_attributes = cls.from_json_file(config_file) 194 return loaded_attributes["peft_type"] 195 196 @property 197 def is_prompt_learning(self) -> bool: 198 r""" 199 Utility method to check if the configuration is for prompt learning. 200 """ 201 return False 202 203 @property 204 def is_adaption_prompt(self) -> bool: 205 """Return True if this is an adaption prompt config.""" 206 return False 207 208 209 @dataclass 210 class PeftConfig(PeftConfigMixin): 211 """ 212 This is the base configuration class to store the configuration of a [`PeftModel`]. 213 214 Args: 215 peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use. 216 task_type (Union[[`~peft.utils.config.TaskType`], `str`]): The type of task to perform. 217 inference_mode (`bool`, defaults to `False`): Whether to use the Peft model in inference mode. 218 """ 219 220 base_model_name_or_path: str = field(default=None, metadata={"help": "The name of the base model to use."}) 221 revision: str = field(default=None, metadata={"help": "The specific model version to use."}) 222 peft_type: Union[str, PeftType] = field(default=None, metadata={"help": "Peft type"}) 223 task_type: Union[str, TaskType] = field(default=None, metadata={"help": "Task type"}) 224 inference_mode: bool = field(default=False, metadata={"help": "Whether to use inference mode"}) 225 226 227 @dataclass 228 class PromptLearningConfig(PeftConfig): 229 """ 230 This is the base configuration class to store the configuration of [`PrefixTuning`], [`PromptEncoder`], or 231 [`PromptTuning`]. 232 233 Args: 234 num_virtual_tokens (`int`): The number of virtual tokens to use. 235 token_dim (`int`): The hidden embedding dimension of the base transformer model. 236 num_transformer_submodules (`int`): The number of transformer submodules in the base transformer model. 237 num_attention_heads (`int`): The number of attention heads in the base transformer model. 238 num_layers (`int`): The number of layers in the base transformer model. 239 """ 240 241 num_virtual_tokens: int = field(default=None, metadata={"help": "Number of virtual tokens"}) 242 token_dim: int = field( 243 default=None, metadata={"help": "The hidden embedding dimension of the base transformer model"} 244 ) 245 num_transformer_submodules: Optional[int] = field( 246 default=None, metadata={"help": "Number of transformer submodules"} 247 ) 248 num_attention_heads: Optional[int] = field(default=None, metadata={"help": "Number of attention heads"}) 249 num_layers: Optional[int] = field(default=None, metadata={"help": "Number of transformer layers"}) 250 251 @property 252 def is_prompt_learning(self) -> bool: 253 r""" 254 Utility method to check if the configuration is for prompt learning. 255 """ 256 return True 257 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/peft/config.py b/src/peft/config.py --- a/src/peft/config.py +++ b/src/peft/config.py @@ -130,12 +130,8 @@ else: config_cls = cls - config = config_cls(**class_kwargs) - - for key, value in loaded_attributes.items(): - if hasattr(config, key): - setattr(config, key, value) - + kwargs = {**class_kwargs, **loaded_attributes} + config = config_cls(**kwargs) return config @classmethod
{"golden_diff": "diff --git a/src/peft/config.py b/src/peft/config.py\n--- a/src/peft/config.py\n+++ b/src/peft/config.py\n@@ -130,12 +130,8 @@\n else:\n config_cls = cls\n \n- config = config_cls(**class_kwargs)\n-\n- for key, value in loaded_attributes.items():\n- if hasattr(config, key):\n- setattr(config, key, value)\n-\n+ kwargs = {**class_kwargs, **loaded_attributes}\n+ config = config_cls(**kwargs)\n return config\n \n @classmethod\n", "issue": "add_weighted_adapter() is unusable, throws error: \"Invalid type <class 'list'> found in target_modules\"\n### System Info\n\n- `transformers` version: 4.34.0\r\n- Platform: Linux-6.5.6-arch2-1-x86_64-with-glibc2.35\r\n- Python version: 3.10.12\r\n- Huggingface_hub version: 0.17.3\r\n- Safetensors version: 0.4.0\r\n- Accelerate version: 0.23.0\r\n- Accelerate config: not found\r\n- PyTorch version (GPU?): 2.1.0+cu121 (True)\r\n- Tensorflow version (GPU?): not installed (NA)\r\n- Flax version (CPU?/GPU?/TPU?): not installed (NA)\r\n- Jax version: not installed\r\n- JaxLib version: not installed\r\n- Using GPU in script?: false\r\n- Using distributed or parallel set-up in script?: false\n\n### Who can help?\n\n@pacman100 @you\n\n### Information\n\n- [ ] The official example scripts\n- [X] My own modified scripts\n\n### Tasks\n\n- [ ] An officially supported task in the `examples` folder\n- [X] My own task or dataset (give details below)\n\n### Reproduction\n\nLoad the model:\r\n```\r\nmodel = AutoModelForCausalLM.from_pretrained(\r\n \"RWKV/rwkv-4-169m-pile\"\r\n device_map=\"auto\"\r\n )\r\n```\r\nConvert it into a PeftModel (to make the `add_weighted_adapter()` method available):\r\n```\r\nmodel = PeftModel.from_pretrained(\r\n model, f\"{adapter_dir}/{adapter}\", adapter_name='adapter_1'\r\n )\r\n```\r\nLoad the 2nd adapter:\r\n```\r\nmodel.load_adapter(\r\n f\"{adapter_dir}/{adapter}\", adapter_name='adapter_2'\r\n )\r\n```\r\nAttempt to merge the two adapters:\r\n```\r\nmodel.add_weighted_adapter(\r\n adapters=['adapter_1', 'adapter_2'],\r\n weights=[0.5, 0.5],\r\n adapter_name=\"combined\",\r\n combination_type=\"svd\",\r\n )\r\n```\r\nThis will result in the error message:\r\n```\r\nInvalid type <class 'list'> found in target_modules\r\n```\n\n### Expected behavior\n\nI have 2 fully trained LoRA adapters. The only configuration difference between them is the rank and alpha used for each.\r\n\r\nI would like to merge the two adapters into a new adapter, and set that adapter to the \"active\" one.\r\n\r\nHowever, trying to use the `add_weighted_adapter()` method always results in the following error:\r\n```\r\nInvalid type <class 'list'> found in target_modules\r\n```\r\n\r\nI would expect the `add_weighted_adapter()` method to accept a list, for the \"target_modules\" argument.\r\n\r\nIf you can provide any advice, I would greatly appreciate it. I suspect that this is either unsupported and/or not fully-implemented; or, it has something to do with the way I'm attaching adapters. I've tried a bunch of alternate configurations, but I'm not having luck.\r\n\r\nThanks in advance for any help you might provide.\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2023-present the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport inspect\nimport json\nimport os\nfrom dataclasses import asdict, dataclass, field\nfrom typing import Dict, Optional, Union\n\nfrom huggingface_hub import hf_hub_download\nfrom transformers.utils import PushToHubMixin\n\nfrom .utils import CONFIG_NAME, PeftType, TaskType\n\n\n@dataclass\nclass PeftConfigMixin(PushToHubMixin):\n r\"\"\"\n This is the base configuration class for PEFT adapter models. It contains all the methods that are common to all\n PEFT adapter models. This class inherits from [`~transformers.utils.PushToHubMixin`] which contains the methods to\n push your model to the Hub. The method `save_pretrained` will save the configuration of your adapter model in a\n directory. The method `from_pretrained` will load the configuration of your adapter model from a directory.\n\n Args:\n peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use.\n \"\"\"\n peft_type: Optional[PeftType] = field(default=None, metadata={\"help\": \"The type of PEFT model.\"})\n auto_mapping: Optional[dict] = field(\n default=None, metadata={\"help\": \"An auto mapping dict to help retrieve the base model class if needed.\"}\n )\n\n def to_dict(self) -> Dict:\n return asdict(self)\n\n def save_pretrained(self, save_directory: str, **kwargs) -> None:\n r\"\"\"\n This method saves the configuration of your adapter model in a directory.\n\n Args:\n save_directory (`str`):\n The directory where the configuration will be saved.\n kwargs (additional keyword arguments, *optional*):\n Additional keyword arguments passed along to the [`~transformers.utils.PushToHubMixin.push_to_hub`]\n method.\n \"\"\"\n if os.path.isfile(save_directory):\n raise AssertionError(f\"Provided path ({save_directory}) should be a directory, not a file\")\n\n os.makedirs(save_directory, exist_ok=True)\n auto_mapping_dict = kwargs.pop(\"auto_mapping_dict\", None)\n\n output_dict = asdict(self)\n # converting set type to list\n for key, value in output_dict.items():\n if isinstance(value, set):\n output_dict[key] = list(value)\n\n output_path = os.path.join(save_directory, CONFIG_NAME)\n\n # Add auto mapping details for custom models.\n if auto_mapping_dict is not None:\n output_dict[\"auto_mapping\"] = auto_mapping_dict\n\n # save it\n with open(output_path, \"w\") as writer:\n writer.write(json.dumps(output_dict, indent=2, sort_keys=True))\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path: str, subfolder: Optional[str] = None, **kwargs):\n r\"\"\"\n This method loads the configuration of your adapter model from a directory.\n\n Args:\n pretrained_model_name_or_path (`str`):\n The directory or the Hub repository id where the configuration is saved.\n kwargs (additional keyword arguments, *optional*):\n Additional keyword arguments passed along to the child class initialization.\n \"\"\"\n # Avoid circular dependency .. TODO: fix this with a larger refactor\n from peft.mapping import PEFT_TYPE_TO_CONFIG_MAPPING\n\n path = (\n os.path.join(pretrained_model_name_or_path, subfolder)\n if subfolder is not None\n else pretrained_model_name_or_path\n )\n\n hf_hub_download_kwargs, class_kwargs, _ = cls._split_kwargs(kwargs)\n\n if os.path.isfile(os.path.join(path, CONFIG_NAME)):\n config_file = os.path.join(path, CONFIG_NAME)\n else:\n try:\n config_file = hf_hub_download(\n pretrained_model_name_or_path, CONFIG_NAME, subfolder=subfolder, **hf_hub_download_kwargs\n )\n except Exception:\n raise ValueError(f\"Can't find '{CONFIG_NAME}' at '{pretrained_model_name_or_path}'\")\n\n loaded_attributes = cls.from_json_file(config_file)\n\n # TODO: this hack is needed to fix the following issue (on commit 702f937):\n # if someone saves a default config and loads it back with `PeftConfig` class it yields to\n # not loading the correct config class.\n\n # from peft import AdaLoraConfig, PeftConfig\n # peft_config = AdaLoraConfig()\n # print(peft_config)\n # >>> AdaLoraConfig(peft_type=<PeftType.ADALORA: 'ADALORA'>, auto_mapping=None, base_model_name_or_path=None,\n # revision=None, task_type=None, inference_mode=False, r=8, target_modules=None, lora_alpha=8, lora_dropout=0.0, ...\n #\n # peft_config.save_pretrained(\"./test_config\")\n # peft_config = PeftConfig.from_pretrained(\"./test_config\")\n # print(peft_config)\n # >>> PeftConfig(peft_type='ADALORA', auto_mapping=None, base_model_name_or_path=None, revision=None, task_type=None, inference_mode=False)\n if \"peft_type\" in loaded_attributes:\n peft_type = loaded_attributes[\"peft_type\"]\n config_cls = PEFT_TYPE_TO_CONFIG_MAPPING[peft_type]\n else:\n config_cls = cls\n\n config = config_cls(**class_kwargs)\n\n for key, value in loaded_attributes.items():\n if hasattr(config, key):\n setattr(config, key, value)\n\n return config\n\n @classmethod\n def from_json_file(cls, path_json_file: str, **kwargs):\n r\"\"\"\n Loads a configuration file from a json file.\n\n Args:\n path_json_file (`str`):\n The path to the json file.\n \"\"\"\n with open(path_json_file, \"r\") as file:\n json_object = json.load(file)\n\n return json_object\n\n @classmethod\n def _split_kwargs(cls, kwargs):\n hf_hub_download_kwargs = {}\n class_kwargs = {}\n other_kwargs = {}\n\n for key, value in kwargs.items():\n if key in inspect.signature(hf_hub_download).parameters:\n hf_hub_download_kwargs[key] = value\n elif key in list(cls.__annotations__):\n class_kwargs[key] = value\n else:\n other_kwargs[key] = value\n\n return hf_hub_download_kwargs, class_kwargs, other_kwargs\n\n @classmethod\n def _get_peft_type(\n cls,\n model_id: str,\n **hf_hub_download_kwargs,\n ):\n subfolder = hf_hub_download_kwargs.get(\"subfolder\", None)\n\n path = os.path.join(model_id, subfolder) if subfolder is not None else model_id\n\n if os.path.isfile(os.path.join(path, CONFIG_NAME)):\n config_file = os.path.join(path, CONFIG_NAME)\n else:\n try:\n config_file = hf_hub_download(\n model_id,\n CONFIG_NAME,\n **hf_hub_download_kwargs,\n )\n except Exception:\n raise ValueError(f\"Can't find '{CONFIG_NAME}' at '{model_id}'\")\n\n loaded_attributes = cls.from_json_file(config_file)\n return loaded_attributes[\"peft_type\"]\n\n @property\n def is_prompt_learning(self) -> bool:\n r\"\"\"\n Utility method to check if the configuration is for prompt learning.\n \"\"\"\n return False\n\n @property\n def is_adaption_prompt(self) -> bool:\n \"\"\"Return True if this is an adaption prompt config.\"\"\"\n return False\n\n\n@dataclass\nclass PeftConfig(PeftConfigMixin):\n \"\"\"\n This is the base configuration class to store the configuration of a [`PeftModel`].\n\n Args:\n peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use.\n task_type (Union[[`~peft.utils.config.TaskType`], `str`]): The type of task to perform.\n inference_mode (`bool`, defaults to `False`): Whether to use the Peft model in inference mode.\n \"\"\"\n\n base_model_name_or_path: str = field(default=None, metadata={\"help\": \"The name of the base model to use.\"})\n revision: str = field(default=None, metadata={\"help\": \"The specific model version to use.\"})\n peft_type: Union[str, PeftType] = field(default=None, metadata={\"help\": \"Peft type\"})\n task_type: Union[str, TaskType] = field(default=None, metadata={\"help\": \"Task type\"})\n inference_mode: bool = field(default=False, metadata={\"help\": \"Whether to use inference mode\"})\n\n\n@dataclass\nclass PromptLearningConfig(PeftConfig):\n \"\"\"\n This is the base configuration class to store the configuration of [`PrefixTuning`], [`PromptEncoder`], or\n [`PromptTuning`].\n\n Args:\n num_virtual_tokens (`int`): The number of virtual tokens to use.\n token_dim (`int`): The hidden embedding dimension of the base transformer model.\n num_transformer_submodules (`int`): The number of transformer submodules in the base transformer model.\n num_attention_heads (`int`): The number of attention heads in the base transformer model.\n num_layers (`int`): The number of layers in the base transformer model.\n \"\"\"\n\n num_virtual_tokens: int = field(default=None, metadata={\"help\": \"Number of virtual tokens\"})\n token_dim: int = field(\n default=None, metadata={\"help\": \"The hidden embedding dimension of the base transformer model\"}\n )\n num_transformer_submodules: Optional[int] = field(\n default=None, metadata={\"help\": \"Number of transformer submodules\"}\n )\n num_attention_heads: Optional[int] = field(default=None, metadata={\"help\": \"Number of attention heads\"})\n num_layers: Optional[int] = field(default=None, metadata={\"help\": \"Number of transformer layers\"})\n\n @property\n def is_prompt_learning(self) -> bool:\n r\"\"\"\n Utility method to check if the configuration is for prompt learning.\n \"\"\"\n return True\n", "path": "src/peft/config.py"}], "after_files": [{"content": "# coding=utf-8\n# Copyright 2023-present the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport inspect\nimport json\nimport os\nfrom dataclasses import asdict, dataclass, field\nfrom typing import Dict, Optional, Union\n\nfrom huggingface_hub import hf_hub_download\nfrom transformers.utils import PushToHubMixin\n\nfrom .utils import CONFIG_NAME, PeftType, TaskType\n\n\n@dataclass\nclass PeftConfigMixin(PushToHubMixin):\n r\"\"\"\n This is the base configuration class for PEFT adapter models. It contains all the methods that are common to all\n PEFT adapter models. This class inherits from [`~transformers.utils.PushToHubMixin`] which contains the methods to\n push your model to the Hub. The method `save_pretrained` will save the configuration of your adapter model in a\n directory. The method `from_pretrained` will load the configuration of your adapter model from a directory.\n\n Args:\n peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use.\n \"\"\"\n peft_type: Optional[PeftType] = field(default=None, metadata={\"help\": \"The type of PEFT model.\"})\n auto_mapping: Optional[dict] = field(\n default=None, metadata={\"help\": \"An auto mapping dict to help retrieve the base model class if needed.\"}\n )\n\n def to_dict(self) -> Dict:\n return asdict(self)\n\n def save_pretrained(self, save_directory: str, **kwargs) -> None:\n r\"\"\"\n This method saves the configuration of your adapter model in a directory.\n\n Args:\n save_directory (`str`):\n The directory where the configuration will be saved.\n kwargs (additional keyword arguments, *optional*):\n Additional keyword arguments passed along to the [`~transformers.utils.PushToHubMixin.push_to_hub`]\n method.\n \"\"\"\n if os.path.isfile(save_directory):\n raise AssertionError(f\"Provided path ({save_directory}) should be a directory, not a file\")\n\n os.makedirs(save_directory, exist_ok=True)\n auto_mapping_dict = kwargs.pop(\"auto_mapping_dict\", None)\n\n output_dict = asdict(self)\n # converting set type to list\n for key, value in output_dict.items():\n if isinstance(value, set):\n output_dict[key] = list(value)\n\n output_path = os.path.join(save_directory, CONFIG_NAME)\n\n # Add auto mapping details for custom models.\n if auto_mapping_dict is not None:\n output_dict[\"auto_mapping\"] = auto_mapping_dict\n\n # save it\n with open(output_path, \"w\") as writer:\n writer.write(json.dumps(output_dict, indent=2, sort_keys=True))\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path: str, subfolder: Optional[str] = None, **kwargs):\n r\"\"\"\n This method loads the configuration of your adapter model from a directory.\n\n Args:\n pretrained_model_name_or_path (`str`):\n The directory or the Hub repository id where the configuration is saved.\n kwargs (additional keyword arguments, *optional*):\n Additional keyword arguments passed along to the child class initialization.\n \"\"\"\n # Avoid circular dependency .. TODO: fix this with a larger refactor\n from peft.mapping import PEFT_TYPE_TO_CONFIG_MAPPING\n\n path = (\n os.path.join(pretrained_model_name_or_path, subfolder)\n if subfolder is not None\n else pretrained_model_name_or_path\n )\n\n hf_hub_download_kwargs, class_kwargs, _ = cls._split_kwargs(kwargs)\n\n if os.path.isfile(os.path.join(path, CONFIG_NAME)):\n config_file = os.path.join(path, CONFIG_NAME)\n else:\n try:\n config_file = hf_hub_download(\n pretrained_model_name_or_path, CONFIG_NAME, subfolder=subfolder, **hf_hub_download_kwargs\n )\n except Exception:\n raise ValueError(f\"Can't find '{CONFIG_NAME}' at '{pretrained_model_name_or_path}'\")\n\n loaded_attributes = cls.from_json_file(config_file)\n\n # TODO: this hack is needed to fix the following issue (on commit 702f937):\n # if someone saves a default config and loads it back with `PeftConfig` class it yields to\n # not loading the correct config class.\n\n # from peft import AdaLoraConfig, PeftConfig\n # peft_config = AdaLoraConfig()\n # print(peft_config)\n # >>> AdaLoraConfig(peft_type=<PeftType.ADALORA: 'ADALORA'>, auto_mapping=None, base_model_name_or_path=None,\n # revision=None, task_type=None, inference_mode=False, r=8, target_modules=None, lora_alpha=8, lora_dropout=0.0, ...\n #\n # peft_config.save_pretrained(\"./test_config\")\n # peft_config = PeftConfig.from_pretrained(\"./test_config\")\n # print(peft_config)\n # >>> PeftConfig(peft_type='ADALORA', auto_mapping=None, base_model_name_or_path=None, revision=None, task_type=None, inference_mode=False)\n if \"peft_type\" in loaded_attributes:\n peft_type = loaded_attributes[\"peft_type\"]\n config_cls = PEFT_TYPE_TO_CONFIG_MAPPING[peft_type]\n else:\n config_cls = cls\n\n kwargs = {**class_kwargs, **loaded_attributes}\n config = config_cls(**kwargs)\n return config\n\n @classmethod\n def from_json_file(cls, path_json_file: str, **kwargs):\n r\"\"\"\n Loads a configuration file from a json file.\n\n Args:\n path_json_file (`str`):\n The path to the json file.\n \"\"\"\n with open(path_json_file, \"r\") as file:\n json_object = json.load(file)\n\n return json_object\n\n @classmethod\n def _split_kwargs(cls, kwargs):\n hf_hub_download_kwargs = {}\n class_kwargs = {}\n other_kwargs = {}\n\n for key, value in kwargs.items():\n if key in inspect.signature(hf_hub_download).parameters:\n hf_hub_download_kwargs[key] = value\n elif key in list(cls.__annotations__):\n class_kwargs[key] = value\n else:\n other_kwargs[key] = value\n\n return hf_hub_download_kwargs, class_kwargs, other_kwargs\n\n @classmethod\n def _get_peft_type(\n cls,\n model_id: str,\n **hf_hub_download_kwargs,\n ):\n subfolder = hf_hub_download_kwargs.get(\"subfolder\", None)\n\n path = os.path.join(model_id, subfolder) if subfolder is not None else model_id\n\n if os.path.isfile(os.path.join(path, CONFIG_NAME)):\n config_file = os.path.join(path, CONFIG_NAME)\n else:\n try:\n config_file = hf_hub_download(\n model_id,\n CONFIG_NAME,\n **hf_hub_download_kwargs,\n )\n except Exception:\n raise ValueError(f\"Can't find '{CONFIG_NAME}' at '{model_id}'\")\n\n loaded_attributes = cls.from_json_file(config_file)\n return loaded_attributes[\"peft_type\"]\n\n @property\n def is_prompt_learning(self) -> bool:\n r\"\"\"\n Utility method to check if the configuration is for prompt learning.\n \"\"\"\n return False\n\n @property\n def is_adaption_prompt(self) -> bool:\n \"\"\"Return True if this is an adaption prompt config.\"\"\"\n return False\n\n\n@dataclass\nclass PeftConfig(PeftConfigMixin):\n \"\"\"\n This is the base configuration class to store the configuration of a [`PeftModel`].\n\n Args:\n peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use.\n task_type (Union[[`~peft.utils.config.TaskType`], `str`]): The type of task to perform.\n inference_mode (`bool`, defaults to `False`): Whether to use the Peft model in inference mode.\n \"\"\"\n\n base_model_name_or_path: str = field(default=None, metadata={\"help\": \"The name of the base model to use.\"})\n revision: str = field(default=None, metadata={\"help\": \"The specific model version to use.\"})\n peft_type: Union[str, PeftType] = field(default=None, metadata={\"help\": \"Peft type\"})\n task_type: Union[str, TaskType] = field(default=None, metadata={\"help\": \"Task type\"})\n inference_mode: bool = field(default=False, metadata={\"help\": \"Whether to use inference mode\"})\n\n\n@dataclass\nclass PromptLearningConfig(PeftConfig):\n \"\"\"\n This is the base configuration class to store the configuration of [`PrefixTuning`], [`PromptEncoder`], or\n [`PromptTuning`].\n\n Args:\n num_virtual_tokens (`int`): The number of virtual tokens to use.\n token_dim (`int`): The hidden embedding dimension of the base transformer model.\n num_transformer_submodules (`int`): The number of transformer submodules in the base transformer model.\n num_attention_heads (`int`): The number of attention heads in the base transformer model.\n num_layers (`int`): The number of layers in the base transformer model.\n \"\"\"\n\n num_virtual_tokens: int = field(default=None, metadata={\"help\": \"Number of virtual tokens\"})\n token_dim: int = field(\n default=None, metadata={\"help\": \"The hidden embedding dimension of the base transformer model\"}\n )\n num_transformer_submodules: Optional[int] = field(\n default=None, metadata={\"help\": \"Number of transformer submodules\"}\n )\n num_attention_heads: Optional[int] = field(default=None, metadata={\"help\": \"Number of attention heads\"})\n num_layers: Optional[int] = field(default=None, metadata={\"help\": \"Number of transformer layers\"})\n\n @property\n def is_prompt_learning(self) -> bool:\n r\"\"\"\n Utility method to check if the configuration is for prompt learning.\n \"\"\"\n return True\n", "path": "src/peft/config.py"}]}
3,915
130
gh_patches_debug_24770
rasdani/github-patches
git_diff
dask__dask-1121
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- compute node names within delayed functions I love using `dask.imperative.delayed` as a decorator to hide the daskyness from the driver code, but the names generated for `dask.dot.dot_graph` leave something to be desired. Its often easy to compute unique names within the delayed function, e.g.: ``` @delayed def run(config): __task_name = "run_" + config["job_name"] res = do_stuff(config) return res ``` which can be done with a wrapper: ``` def run(config): task_name = "run_" + config["job_name"] return delayed(_run, name=task_name)(config) def _run(config) res = do_stuff(config) return res ``` Any way to get rid of the wrapper step? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `dask/dot.py` Content: ``` 1 from __future__ import absolute_import, division, print_function 2 3 import re 4 from functools import partial 5 6 from graphviz import Digraph 7 8 from .core import istask, get_dependencies, ishashable 9 from .utils import funcname 10 11 12 def task_label(task): 13 """Label for a task on a dot graph. 14 15 Examples 16 -------- 17 >>> from operator import add 18 >>> task_label((add, 1, 2)) 19 'add' 20 >>> task_label((add, (add, 1, 2), 3)) 21 'add(...)' 22 """ 23 func = task[0] 24 if hasattr(func, 'funcs'): 25 if len(func.funcs) > 1: 26 return '{0}(...)'.format(funcname(func.funcs[0])) 27 else: 28 head = funcname(func.funcs[0]) 29 else: 30 head = funcname(task[0]) 31 if any(has_sub_tasks(i) for i in task[1:]): 32 return '{0}(...)'.format(head) 33 else: 34 return head 35 36 37 def has_sub_tasks(task): 38 """Returns True if the task has sub tasks""" 39 if istask(task): 40 return True 41 elif isinstance(task, list): 42 return any(has_sub_tasks(i) for i in task) 43 else: 44 return False 45 46 47 def name(x): 48 try: 49 return str(hash(x)) 50 except TypeError: 51 return str(hash(str(x))) 52 53 54 _HASHPAT = re.compile('([0-9a-z]{32})') 55 56 57 def label(x, cache=None): 58 """ 59 60 >>> label('x') 61 'x' 62 63 >>> label(('x', 1)) 64 "('x', 1)" 65 66 >>> from hashlib import md5 67 >>> x = 'x-%s-hello' % md5(b'1234').hexdigest() 68 >>> x 69 'x-81dc9bdb52d04dc20036dbd8313ed055-hello' 70 71 >>> label(x) 72 'x-#-hello' 73 """ 74 s = str(x) 75 m = re.search(_HASHPAT, s) 76 if m is not None: 77 for h in m.groups(): 78 if cache is not None: 79 n = cache.get(h, len(cache)) 80 label = '#{0}'.format(n) 81 # cache will be overwritten destructively 82 cache[h] = n 83 else: 84 label = '#' 85 s = s.replace(h, label) 86 return s 87 88 89 def to_graphviz(dsk, data_attributes=None, function_attributes=None, **kwargs): 90 if data_attributes is None: 91 data_attributes = {} 92 if function_attributes is None: 93 function_attributes = {} 94 95 attributes = {'rankdir': 'BT'} 96 attributes.update(kwargs) 97 g = Digraph(graph_attr=attributes) 98 99 seen = set() 100 cache = {} 101 102 for k, v in dsk.items(): 103 k_name = name(k) 104 if k_name not in seen: 105 seen.add(k_name) 106 g.node(k_name, label=label(k, cache=cache), shape='box', 107 **data_attributes.get(k, {})) 108 109 if istask(v): 110 func_name = name((k, 'function')) 111 if func_name not in seen: 112 seen.add(func_name) 113 g.node(func_name, label=task_label(v), shape='circle', 114 **function_attributes.get(k, {})) 115 g.edge(func_name, k_name) 116 117 for dep in get_dependencies(dsk, k): 118 dep_name = name(dep) 119 if dep_name not in seen: 120 seen.add(dep_name) 121 g.node(dep_name, label=label(dep, cache=cache), shape='box', 122 **data_attributes.get(dep, {})) 123 g.edge(dep_name, func_name) 124 elif ishashable(v) and v in dsk: 125 g.edge(name(v), k_name) 126 return g 127 128 129 IPYTHON_IMAGE_FORMATS = frozenset(['jpeg', 'png']) 130 IPYTHON_NO_DISPLAY_FORMATS = frozenset(['dot', 'pdf']) 131 132 133 def _get_display_cls(format): 134 """ 135 Get the appropriate IPython display class for `format`. 136 137 Returns `IPython.display.SVG` if format=='svg', otherwise 138 `IPython.display.Image`. 139 140 If IPython is not importable, return dummy function that swallows its 141 arguments and returns None. 142 """ 143 dummy = lambda *args, **kwargs: None 144 try: 145 import IPython.display as display 146 except ImportError: 147 # Can't return a display object if no IPython. 148 return dummy 149 150 if format in IPYTHON_NO_DISPLAY_FORMATS: 151 # IPython can't display this format natively, so just return None. 152 return dummy 153 elif format in IPYTHON_IMAGE_FORMATS: 154 # Partially apply `format` so that `Image` and `SVG` supply a uniform 155 # interface to the caller. 156 return partial(display.Image, format=format) 157 elif format == 'svg': 158 return display.SVG 159 else: 160 raise ValueError("Unknown format '%s' passed to `dot_graph`" % format) 161 162 163 def dot_graph(dsk, filename='mydask', format=None, **kwargs): 164 """ 165 Render a task graph using dot. 166 167 If `filename` is not None, write a file to disk with that name in the 168 format specified by `format`. `filename` should not include an extension. 169 170 Parameters 171 ---------- 172 dsk : dict 173 The graph to display. 174 filename : str or None, optional 175 The name (without an extension) of the file to write to disk. If 176 `filename` is None, no file will be written, and we communicate with 177 dot using only pipes. Default is 'mydask'. 178 format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional 179 Format in which to write output file. Default is 'png'. 180 **kwargs 181 Additional keyword arguments to forward to `to_graphviz`. 182 183 Returns 184 ------- 185 result : None or IPython.display.Image or IPython.display.SVG (See below.) 186 187 Notes 188 ----- 189 If IPython is installed, we return an IPython.display object in the 190 requested format. If IPython is not installed, we just return None. 191 192 We always return None if format is 'pdf' or 'dot', because IPython can't 193 display these formats natively. Passing these formats with filename=None 194 will not produce any useful output. 195 196 See Also 197 -------- 198 dask.dot.to_graphviz 199 """ 200 g = to_graphviz(dsk, **kwargs) 201 202 fmts = ['.png', '.pdf', '.dot', '.svg', '.jpeg', '.jpg'] 203 if format is None and any(filename.lower().endswith(fmt) for fmt in fmts): 204 format = filename.lower().split('.')[-1] 205 filename = filename.rsplit('.')[0] 206 207 if format is None: 208 format = 'png' 209 210 data = g.pipe(format=format) 211 if not data: 212 raise RuntimeError("Graphviz failed to properly produce an image. " 213 "This probably means your installation of graphviz " 214 "is missing png support. See: " 215 "https://github.com/ContinuumIO/anaconda-issues/" 216 "issues/485 for more information.") 217 218 display_cls = _get_display_cls(format) 219 220 if not filename: 221 return display_cls(data=data) 222 223 full_filename = '.'.join([filename, format]) 224 with open(full_filename, 'wb') as f: 225 f.write(data) 226 227 return display_cls(filename=full_filename) 228 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/dask/dot.py b/dask/dot.py --- a/dask/dot.py +++ b/dask/dot.py @@ -52,6 +52,7 @@ _HASHPAT = re.compile('([0-9a-z]{32})') +_UUIDPAT = re.compile('([0-9a-z]{8}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{12})') def label(x, cache=None): @@ -68,21 +69,30 @@ >>> x 'x-81dc9bdb52d04dc20036dbd8313ed055-hello' + >>> label(x) + 'x-#-hello' + + >>> from uuid import uuid1 + >>> x = 'x-%s-hello' % uuid1() + >>> x # doctest: +SKIP + 'x-4c1a3d7e-0b45-11e6-8334-54ee75105593-hello' + >>> label(x) 'x-#-hello' """ s = str(x) - m = re.search(_HASHPAT, s) - if m is not None: - for h in m.groups(): - if cache is not None: - n = cache.get(h, len(cache)) - label = '#{0}'.format(n) - # cache will be overwritten destructively - cache[h] = n - else: - label = '#' - s = s.replace(h, label) + for pattern in (_HASHPAT, _UUIDPAT): + m = re.search(pattern, s) + if m is not None: + for h in m.groups(): + if cache is not None: + n = cache.get(h, len(cache)) + label = '#{0}'.format(n) + # cache will be overwritten destructively + cache[h] = n + else: + label = '#' + s = s.replace(h, label) return s
{"golden_diff": "diff --git a/dask/dot.py b/dask/dot.py\n--- a/dask/dot.py\n+++ b/dask/dot.py\n@@ -52,6 +52,7 @@\n \n \n _HASHPAT = re.compile('([0-9a-z]{32})')\n+_UUIDPAT = re.compile('([0-9a-z]{8}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{12})')\n \n \n def label(x, cache=None):\n@@ -68,21 +69,30 @@\n >>> x\n 'x-81dc9bdb52d04dc20036dbd8313ed055-hello'\n \n+ >>> label(x)\n+ 'x-#-hello'\n+\n+ >>> from uuid import uuid1\n+ >>> x = 'x-%s-hello' % uuid1()\n+ >>> x # doctest: +SKIP\n+ 'x-4c1a3d7e-0b45-11e6-8334-54ee75105593-hello'\n+\n >>> label(x)\n 'x-#-hello'\n \"\"\"\n s = str(x)\n- m = re.search(_HASHPAT, s)\n- if m is not None:\n- for h in m.groups():\n- if cache is not None:\n- n = cache.get(h, len(cache))\n- label = '#{0}'.format(n)\n- # cache will be overwritten destructively\n- cache[h] = n\n- else:\n- label = '#'\n- s = s.replace(h, label)\n+ for pattern in (_HASHPAT, _UUIDPAT):\n+ m = re.search(pattern, s)\n+ if m is not None:\n+ for h in m.groups():\n+ if cache is not None:\n+ n = cache.get(h, len(cache))\n+ label = '#{0}'.format(n)\n+ # cache will be overwritten destructively\n+ cache[h] = n\n+ else:\n+ label = '#'\n+ s = s.replace(h, label)\n return s\n", "issue": "compute node names within delayed functions\nI love using `dask.imperative.delayed` as a decorator to hide the daskyness from the driver code, but the names generated for `dask.dot.dot_graph` leave something to be desired. Its often easy to compute unique names within the delayed function, e.g.:\n\n```\n@delayed\ndef run(config):\n __task_name = \"run_\" + config[\"job_name\"]\n res = do_stuff(config)\n return res\n```\n\nwhich can be done with a wrapper:\n\n```\ndef run(config):\n task_name = \"run_\" + config[\"job_name\"]\n return delayed(_run, name=task_name)(config)\n\ndef _run(config)\n res = do_stuff(config)\n return res\n```\n\nAny way to get rid of the wrapper step?\n\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport re\nfrom functools import partial\n\nfrom graphviz import Digraph\n\nfrom .core import istask, get_dependencies, ishashable\nfrom .utils import funcname\n\n\ndef task_label(task):\n \"\"\"Label for a task on a dot graph.\n\n Examples\n --------\n >>> from operator import add\n >>> task_label((add, 1, 2))\n 'add'\n >>> task_label((add, (add, 1, 2), 3))\n 'add(...)'\n \"\"\"\n func = task[0]\n if hasattr(func, 'funcs'):\n if len(func.funcs) > 1:\n return '{0}(...)'.format(funcname(func.funcs[0]))\n else:\n head = funcname(func.funcs[0])\n else:\n head = funcname(task[0])\n if any(has_sub_tasks(i) for i in task[1:]):\n return '{0}(...)'.format(head)\n else:\n return head\n\n\ndef has_sub_tasks(task):\n \"\"\"Returns True if the task has sub tasks\"\"\"\n if istask(task):\n return True\n elif isinstance(task, list):\n return any(has_sub_tasks(i) for i in task)\n else:\n return False\n\n\ndef name(x):\n try:\n return str(hash(x))\n except TypeError:\n return str(hash(str(x)))\n\n\n_HASHPAT = re.compile('([0-9a-z]{32})')\n\n\ndef label(x, cache=None):\n \"\"\"\n\n >>> label('x')\n 'x'\n\n >>> label(('x', 1))\n \"('x', 1)\"\n\n >>> from hashlib import md5\n >>> x = 'x-%s-hello' % md5(b'1234').hexdigest()\n >>> x\n 'x-81dc9bdb52d04dc20036dbd8313ed055-hello'\n\n >>> label(x)\n 'x-#-hello'\n \"\"\"\n s = str(x)\n m = re.search(_HASHPAT, s)\n if m is not None:\n for h in m.groups():\n if cache is not None:\n n = cache.get(h, len(cache))\n label = '#{0}'.format(n)\n # cache will be overwritten destructively\n cache[h] = n\n else:\n label = '#'\n s = s.replace(h, label)\n return s\n\n\ndef to_graphviz(dsk, data_attributes=None, function_attributes=None, **kwargs):\n if data_attributes is None:\n data_attributes = {}\n if function_attributes is None:\n function_attributes = {}\n\n attributes = {'rankdir': 'BT'}\n attributes.update(kwargs)\n g = Digraph(graph_attr=attributes)\n\n seen = set()\n cache = {}\n\n for k, v in dsk.items():\n k_name = name(k)\n if k_name not in seen:\n seen.add(k_name)\n g.node(k_name, label=label(k, cache=cache), shape='box',\n **data_attributes.get(k, {}))\n\n if istask(v):\n func_name = name((k, 'function'))\n if func_name not in seen:\n seen.add(func_name)\n g.node(func_name, label=task_label(v), shape='circle',\n **function_attributes.get(k, {}))\n g.edge(func_name, k_name)\n\n for dep in get_dependencies(dsk, k):\n dep_name = name(dep)\n if dep_name not in seen:\n seen.add(dep_name)\n g.node(dep_name, label=label(dep, cache=cache), shape='box',\n **data_attributes.get(dep, {}))\n g.edge(dep_name, func_name)\n elif ishashable(v) and v in dsk:\n g.edge(name(v), k_name)\n return g\n\n\nIPYTHON_IMAGE_FORMATS = frozenset(['jpeg', 'png'])\nIPYTHON_NO_DISPLAY_FORMATS = frozenset(['dot', 'pdf'])\n\n\ndef _get_display_cls(format):\n \"\"\"\n Get the appropriate IPython display class for `format`.\n\n Returns `IPython.display.SVG` if format=='svg', otherwise\n `IPython.display.Image`.\n\n If IPython is not importable, return dummy function that swallows its\n arguments and returns None.\n \"\"\"\n dummy = lambda *args, **kwargs: None\n try:\n import IPython.display as display\n except ImportError:\n # Can't return a display object if no IPython.\n return dummy\n\n if format in IPYTHON_NO_DISPLAY_FORMATS:\n # IPython can't display this format natively, so just return None.\n return dummy\n elif format in IPYTHON_IMAGE_FORMATS:\n # Partially apply `format` so that `Image` and `SVG` supply a uniform\n # interface to the caller.\n return partial(display.Image, format=format)\n elif format == 'svg':\n return display.SVG\n else:\n raise ValueError(\"Unknown format '%s' passed to `dot_graph`\" % format)\n\n\ndef dot_graph(dsk, filename='mydask', format=None, **kwargs):\n \"\"\"\n Render a task graph using dot.\n\n If `filename` is not None, write a file to disk with that name in the\n format specified by `format`. `filename` should not include an extension.\n\n Parameters\n ----------\n dsk : dict\n The graph to display.\n filename : str or None, optional\n The name (without an extension) of the file to write to disk. If\n `filename` is None, no file will be written, and we communicate with\n dot using only pipes. Default is 'mydask'.\n format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional\n Format in which to write output file. Default is 'png'.\n **kwargs\n Additional keyword arguments to forward to `to_graphviz`.\n\n Returns\n -------\n result : None or IPython.display.Image or IPython.display.SVG (See below.)\n\n Notes\n -----\n If IPython is installed, we return an IPython.display object in the\n requested format. If IPython is not installed, we just return None.\n\n We always return None if format is 'pdf' or 'dot', because IPython can't\n display these formats natively. Passing these formats with filename=None\n will not produce any useful output.\n\n See Also\n --------\n dask.dot.to_graphviz\n \"\"\"\n g = to_graphviz(dsk, **kwargs)\n\n fmts = ['.png', '.pdf', '.dot', '.svg', '.jpeg', '.jpg']\n if format is None and any(filename.lower().endswith(fmt) for fmt in fmts):\n format = filename.lower().split('.')[-1]\n filename = filename.rsplit('.')[0]\n\n if format is None:\n format = 'png'\n\n data = g.pipe(format=format)\n if not data:\n raise RuntimeError(\"Graphviz failed to properly produce an image. \"\n \"This probably means your installation of graphviz \"\n \"is missing png support. See: \"\n \"https://github.com/ContinuumIO/anaconda-issues/\"\n \"issues/485 for more information.\")\n\n display_cls = _get_display_cls(format)\n\n if not filename:\n return display_cls(data=data)\n\n full_filename = '.'.join([filename, format])\n with open(full_filename, 'wb') as f:\n f.write(data)\n\n return display_cls(filename=full_filename)\n", "path": "dask/dot.py"}], "after_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport re\nfrom functools import partial\n\nfrom graphviz import Digraph\n\nfrom .core import istask, get_dependencies, ishashable\nfrom .utils import funcname\n\n\ndef task_label(task):\n \"\"\"Label for a task on a dot graph.\n\n Examples\n --------\n >>> from operator import add\n >>> task_label((add, 1, 2))\n 'add'\n >>> task_label((add, (add, 1, 2), 3))\n 'add(...)'\n \"\"\"\n func = task[0]\n if hasattr(func, 'funcs'):\n if len(func.funcs) > 1:\n return '{0}(...)'.format(funcname(func.funcs[0]))\n else:\n head = funcname(func.funcs[0])\n else:\n head = funcname(task[0])\n if any(has_sub_tasks(i) for i in task[1:]):\n return '{0}(...)'.format(head)\n else:\n return head\n\n\ndef has_sub_tasks(task):\n \"\"\"Returns True if the task has sub tasks\"\"\"\n if istask(task):\n return True\n elif isinstance(task, list):\n return any(has_sub_tasks(i) for i in task)\n else:\n return False\n\n\ndef name(x):\n try:\n return str(hash(x))\n except TypeError:\n return str(hash(str(x)))\n\n\n_HASHPAT = re.compile('([0-9a-z]{32})')\n_UUIDPAT = re.compile('([0-9a-z]{8}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{12})')\n\n\ndef label(x, cache=None):\n \"\"\"\n\n >>> label('x')\n 'x'\n\n >>> label(('x', 1))\n \"('x', 1)\"\n\n >>> from hashlib import md5\n >>> x = 'x-%s-hello' % md5(b'1234').hexdigest()\n >>> x\n 'x-81dc9bdb52d04dc20036dbd8313ed055-hello'\n\n >>> label(x)\n 'x-#-hello'\n\n >>> from uuid import uuid1\n >>> x = 'x-%s-hello' % uuid1()\n >>> x # doctest: +SKIP\n 'x-4c1a3d7e-0b45-11e6-8334-54ee75105593-hello'\n\n >>> label(x)\n 'x-#-hello'\n \"\"\"\n s = str(x)\n for pattern in (_HASHPAT, _UUIDPAT):\n m = re.search(pattern, s)\n if m is not None:\n for h in m.groups():\n if cache is not None:\n n = cache.get(h, len(cache))\n label = '#{0}'.format(n)\n # cache will be overwritten destructively\n cache[h] = n\n else:\n label = '#'\n s = s.replace(h, label)\n return s\n\n\ndef to_graphviz(dsk, data_attributes=None, function_attributes=None, **kwargs):\n if data_attributes is None:\n data_attributes = {}\n if function_attributes is None:\n function_attributes = {}\n\n attributes = {'rankdir': 'BT'}\n attributes.update(kwargs)\n g = Digraph(graph_attr=attributes)\n\n seen = set()\n cache = {}\n\n for k, v in dsk.items():\n k_name = name(k)\n if k_name not in seen:\n seen.add(k_name)\n g.node(k_name, label=label(k, cache=cache), shape='box',\n **data_attributes.get(k, {}))\n\n if istask(v):\n func_name = name((k, 'function'))\n if func_name not in seen:\n seen.add(func_name)\n g.node(func_name, label=task_label(v), shape='circle',\n **function_attributes.get(k, {}))\n g.edge(func_name, k_name)\n\n for dep in get_dependencies(dsk, k):\n dep_name = name(dep)\n if dep_name not in seen:\n seen.add(dep_name)\n g.node(dep_name, label=label(dep, cache=cache), shape='box',\n **data_attributes.get(dep, {}))\n g.edge(dep_name, func_name)\n elif ishashable(v) and v in dsk:\n g.edge(name(v), k_name)\n return g\n\n\nIPYTHON_IMAGE_FORMATS = frozenset(['jpeg', 'png'])\nIPYTHON_NO_DISPLAY_FORMATS = frozenset(['dot', 'pdf'])\n\n\ndef _get_display_cls(format):\n \"\"\"\n Get the appropriate IPython display class for `format`.\n\n Returns `IPython.display.SVG` if format=='svg', otherwise\n `IPython.display.Image`.\n\n If IPython is not importable, return dummy function that swallows its\n arguments and returns None.\n \"\"\"\n dummy = lambda *args, **kwargs: None\n try:\n import IPython.display as display\n except ImportError:\n # Can't return a display object if no IPython.\n return dummy\n\n if format in IPYTHON_NO_DISPLAY_FORMATS:\n # IPython can't display this format natively, so just return None.\n return dummy\n elif format in IPYTHON_IMAGE_FORMATS:\n # Partially apply `format` so that `Image` and `SVG` supply a uniform\n # interface to the caller.\n return partial(display.Image, format=format)\n elif format == 'svg':\n return display.SVG\n else:\n raise ValueError(\"Unknown format '%s' passed to `dot_graph`\" % format)\n\n\ndef dot_graph(dsk, filename='mydask', format=None, **kwargs):\n \"\"\"\n Render a task graph using dot.\n\n If `filename` is not None, write a file to disk with that name in the\n format specified by `format`. `filename` should not include an extension.\n\n Parameters\n ----------\n dsk : dict\n The graph to display.\n filename : str or None, optional\n The name (without an extension) of the file to write to disk. If\n `filename` is None, no file will be written, and we communicate with\n dot using only pipes. Default is 'mydask'.\n format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional\n Format in which to write output file. Default is 'png'.\n **kwargs\n Additional keyword arguments to forward to `to_graphviz`.\n\n Returns\n -------\n result : None or IPython.display.Image or IPython.display.SVG (See below.)\n\n Notes\n -----\n If IPython is installed, we return an IPython.display object in the\n requested format. If IPython is not installed, we just return None.\n\n We always return None if format is 'pdf' or 'dot', because IPython can't\n display these formats natively. Passing these formats with filename=None\n will not produce any useful output.\n\n See Also\n --------\n dask.dot.to_graphviz\n \"\"\"\n g = to_graphviz(dsk, **kwargs)\n\n fmts = ['.png', '.pdf', '.dot', '.svg', '.jpeg', '.jpg']\n if format is None and any(filename.lower().endswith(fmt) for fmt in fmts):\n format = filename.lower().split('.')[-1]\n filename = filename.rsplit('.')[0]\n\n if format is None:\n format = 'png'\n\n data = g.pipe(format=format)\n if not data:\n raise RuntimeError(\"Graphviz failed to properly produce an image. \"\n \"This probably means your installation of graphviz \"\n \"is missing png support. See: \"\n \"https://github.com/ContinuumIO/anaconda-issues/\"\n \"issues/485 for more information.\")\n\n display_cls = _get_display_cls(format)\n\n if not filename:\n return display_cls(data=data)\n\n full_filename = '.'.join([filename, format])\n with open(full_filename, 'wb') as f:\n f.write(data)\n\n return display_cls(filename=full_filename)\n", "path": "dask/dot.py"}]}
2,682
503
gh_patches_debug_14214
rasdani/github-patches
git_diff
sublimelsp__LSP-2376
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Provide a way to save all modified files after applying workspace edits **Is your feature request related to a problem? Please describe.** When applying refactorings (like renames) it's often the case that multiple files are modified. It's a chore to then have to find and save all those modified files. **Describe the solution you'd like** We could provide a dialog after more than one file was modified asking the user whether all modified files should be saved. Note that some people don't like dialogs (#1922). Or maybe even show a "tree view" in a sheet, showing all modified files and allowing the user to navigate to them and save all at once. **Describe alternatives you've considered** - Saving all edited files one by one. - Using "save all" command but that fails when there is some unsaved buffer in the window (for example I like to have some to keep notes in them) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `plugin/save_command.py` Content: ``` 1 from .core.registry import LspTextCommand 2 from .core.settings import userprefs 3 from .core.typing import Callable, List, Type 4 from abc import ABCMeta, abstractmethod 5 import sublime 6 import sublime_plugin 7 8 9 class SaveTask(metaclass=ABCMeta): 10 """ 11 Base class for tasks that run on save. 12 13 Note: The whole task runs on the async thread. 14 """ 15 16 @classmethod 17 @abstractmethod 18 def is_applicable(cls, view: sublime.View) -> bool: 19 pass 20 21 def __init__(self, task_runner: LspTextCommand, on_done: Callable[[], None]): 22 self._task_runner = task_runner 23 self._on_done = on_done 24 self._completed = False 25 self._cancelled = False 26 self._status_key = type(self).__name__ 27 28 def run_async(self) -> None: 29 self._erase_view_status() 30 sublime.set_timeout_async(self._on_timeout, userprefs().on_save_task_timeout_ms) 31 32 def _on_timeout(self) -> None: 33 if not self._completed and not self._cancelled: 34 self._set_view_status('LSP: Timeout processing {}'.format(self.__class__.__name__)) 35 self._cancelled = True 36 self._on_done() 37 38 def cancel(self) -> None: 39 self._cancelled = True 40 41 def _set_view_status(self, text: str) -> None: 42 self._task_runner.view.set_status(self._status_key, text) 43 sublime.set_timeout_async(self._erase_view_status, 5000) 44 45 def _erase_view_status(self) -> None: 46 self._task_runner.view.erase_status(self._status_key) 47 48 def _on_complete(self) -> None: 49 assert not self._completed 50 self._completed = True 51 if not self._cancelled: 52 self._on_done() 53 54 def _purge_changes_async(self) -> None: 55 # Supermassive hack that will go away later. 56 listeners = sublime_plugin.view_event_listeners.get(self._task_runner.view.id(), []) 57 for listener in listeners: 58 if listener.__class__.__name__ == 'DocumentSyncListener': 59 listener.purge_changes_async() # type: ignore 60 break 61 62 63 class LspSaveCommand(LspTextCommand): 64 """ 65 A command used as a substitute for native save command. Runs code actions and document 66 formatting before triggering the native save command. 67 """ 68 _tasks = [] # type: List[Type[SaveTask]] 69 70 @classmethod 71 def register_task(cls, task: Type[SaveTask]) -> None: 72 assert task not in cls._tasks 73 cls._tasks.append(task) 74 75 def __init__(self, view: sublime.View) -> None: 76 super().__init__(view) 77 self._pending_tasks = [] # type: List[SaveTask] 78 79 def run(self, edit: sublime.Edit) -> None: 80 if self._pending_tasks: 81 for task in self._pending_tasks: 82 task.cancel() 83 self._pending_tasks = [] 84 sublime.set_timeout_async(self._trigger_on_pre_save_async) 85 for Task in self._tasks: 86 if Task.is_applicable(self.view): 87 self._pending_tasks.append(Task(self, self._on_task_completed_async)) 88 if self._pending_tasks: 89 sublime.set_timeout_async(self._run_next_task_async) 90 else: 91 self._trigger_native_save() 92 93 def _trigger_on_pre_save_async(self) -> None: 94 # Supermassive hack that will go away later. 95 listeners = sublime_plugin.view_event_listeners.get(self.view.id(), []) 96 for listener in listeners: 97 if listener.__class__.__name__ == 'DocumentSyncListener': 98 listener.trigger_on_pre_save_async() # type: ignore 99 break 100 101 def _run_next_task_async(self) -> None: 102 current_task = self._pending_tasks[0] 103 current_task.run_async() 104 105 def _on_task_completed_async(self) -> None: 106 self._pending_tasks.pop(0) 107 if self._pending_tasks: 108 # Even though we are on the async thread already, we want to give ST a chance to notify us about 109 # potential document changes. 110 sublime.set_timeout_async(self._run_next_task_async) 111 else: 112 self._trigger_native_save() 113 114 def _trigger_native_save(self) -> None: 115 # Triggered from set_timeout to preserve original semantics of on_pre_save handling 116 sublime.set_timeout(lambda: self.view.run_command('save', {"async": True})) 117 118 119 class LspSaveAllCommand(sublime_plugin.WindowCommand): 120 def run(self) -> None: 121 done = set() 122 for view in self.window.views(): 123 buffer_id = view.buffer_id() 124 if buffer_id in done: 125 continue 126 if not view.is_dirty(): 127 continue 128 done.add(buffer_id) 129 view.run_command("lsp_save", None) 130 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/plugin/save_command.py b/plugin/save_command.py --- a/plugin/save_command.py +++ b/plugin/save_command.py @@ -117,7 +117,7 @@ class LspSaveAllCommand(sublime_plugin.WindowCommand): - def run(self) -> None: + def run(self, only_files: bool = False) -> None: done = set() for view in self.window.views(): buffer_id = view.buffer_id() @@ -125,5 +125,7 @@ continue if not view.is_dirty(): continue + if only_files and view.file_name() is None: + continue done.add(buffer_id) view.run_command("lsp_save", None)
{"golden_diff": "diff --git a/plugin/save_command.py b/plugin/save_command.py\n--- a/plugin/save_command.py\n+++ b/plugin/save_command.py\n@@ -117,7 +117,7 @@\n \n \n class LspSaveAllCommand(sublime_plugin.WindowCommand):\n- def run(self) -> None:\n+ def run(self, only_files: bool = False) -> None:\n done = set()\n for view in self.window.views():\n buffer_id = view.buffer_id()\n@@ -125,5 +125,7 @@\n continue\n if not view.is_dirty():\n continue\n+ if only_files and view.file_name() is None:\n+ continue\n done.add(buffer_id)\n view.run_command(\"lsp_save\", None)\n", "issue": "Provide a way to save all modified files after applying workspace edits\n**Is your feature request related to a problem? Please describe.**\r\n\r\nWhen applying refactorings (like renames) it's often the case that multiple files are modified. It's a chore to then have to find and save all those modified files.\r\n\r\n**Describe the solution you'd like**\r\n\r\nWe could provide a dialog after more than one file was modified asking the user whether all modified files should be saved. Note that some people don't like dialogs (#1922).\r\n\r\nOr maybe even show a \"tree view\" in a sheet, showing all modified files and allowing the user to navigate to them and save all at once.\r\n\r\n**Describe alternatives you've considered**\r\n\r\n- Saving all edited files one by one.\r\n- Using \"save all\" command but that fails when there is some unsaved buffer in the window (for example I like to have some to keep notes in them)\r\n\n", "before_files": [{"content": "from .core.registry import LspTextCommand\nfrom .core.settings import userprefs\nfrom .core.typing import Callable, List, Type\nfrom abc import ABCMeta, abstractmethod\nimport sublime\nimport sublime_plugin\n\n\nclass SaveTask(metaclass=ABCMeta):\n \"\"\"\n Base class for tasks that run on save.\n\n Note: The whole task runs on the async thread.\n \"\"\"\n\n @classmethod\n @abstractmethod\n def is_applicable(cls, view: sublime.View) -> bool:\n pass\n\n def __init__(self, task_runner: LspTextCommand, on_done: Callable[[], None]):\n self._task_runner = task_runner\n self._on_done = on_done\n self._completed = False\n self._cancelled = False\n self._status_key = type(self).__name__\n\n def run_async(self) -> None:\n self._erase_view_status()\n sublime.set_timeout_async(self._on_timeout, userprefs().on_save_task_timeout_ms)\n\n def _on_timeout(self) -> None:\n if not self._completed and not self._cancelled:\n self._set_view_status('LSP: Timeout processing {}'.format(self.__class__.__name__))\n self._cancelled = True\n self._on_done()\n\n def cancel(self) -> None:\n self._cancelled = True\n\n def _set_view_status(self, text: str) -> None:\n self._task_runner.view.set_status(self._status_key, text)\n sublime.set_timeout_async(self._erase_view_status, 5000)\n\n def _erase_view_status(self) -> None:\n self._task_runner.view.erase_status(self._status_key)\n\n def _on_complete(self) -> None:\n assert not self._completed\n self._completed = True\n if not self._cancelled:\n self._on_done()\n\n def _purge_changes_async(self) -> None:\n # Supermassive hack that will go away later.\n listeners = sublime_plugin.view_event_listeners.get(self._task_runner.view.id(), [])\n for listener in listeners:\n if listener.__class__.__name__ == 'DocumentSyncListener':\n listener.purge_changes_async() # type: ignore\n break\n\n\nclass LspSaveCommand(LspTextCommand):\n \"\"\"\n A command used as a substitute for native save command. Runs code actions and document\n formatting before triggering the native save command.\n \"\"\"\n _tasks = [] # type: List[Type[SaveTask]]\n\n @classmethod\n def register_task(cls, task: Type[SaveTask]) -> None:\n assert task not in cls._tasks\n cls._tasks.append(task)\n\n def __init__(self, view: sublime.View) -> None:\n super().__init__(view)\n self._pending_tasks = [] # type: List[SaveTask]\n\n def run(self, edit: sublime.Edit) -> None:\n if self._pending_tasks:\n for task in self._pending_tasks:\n task.cancel()\n self._pending_tasks = []\n sublime.set_timeout_async(self._trigger_on_pre_save_async)\n for Task in self._tasks:\n if Task.is_applicable(self.view):\n self._pending_tasks.append(Task(self, self._on_task_completed_async))\n if self._pending_tasks:\n sublime.set_timeout_async(self._run_next_task_async)\n else:\n self._trigger_native_save()\n\n def _trigger_on_pre_save_async(self) -> None:\n # Supermassive hack that will go away later.\n listeners = sublime_plugin.view_event_listeners.get(self.view.id(), [])\n for listener in listeners:\n if listener.__class__.__name__ == 'DocumentSyncListener':\n listener.trigger_on_pre_save_async() # type: ignore\n break\n\n def _run_next_task_async(self) -> None:\n current_task = self._pending_tasks[0]\n current_task.run_async()\n\n def _on_task_completed_async(self) -> None:\n self._pending_tasks.pop(0)\n if self._pending_tasks:\n # Even though we are on the async thread already, we want to give ST a chance to notify us about\n # potential document changes.\n sublime.set_timeout_async(self._run_next_task_async)\n else:\n self._trigger_native_save()\n\n def _trigger_native_save(self) -> None:\n # Triggered from set_timeout to preserve original semantics of on_pre_save handling\n sublime.set_timeout(lambda: self.view.run_command('save', {\"async\": True}))\n\n\nclass LspSaveAllCommand(sublime_plugin.WindowCommand):\n def run(self) -> None:\n done = set()\n for view in self.window.views():\n buffer_id = view.buffer_id()\n if buffer_id in done:\n continue\n if not view.is_dirty():\n continue\n done.add(buffer_id)\n view.run_command(\"lsp_save\", None)\n", "path": "plugin/save_command.py"}], "after_files": [{"content": "from .core.registry import LspTextCommand\nfrom .core.settings import userprefs\nfrom .core.typing import Callable, List, Type\nfrom abc import ABCMeta, abstractmethod\nimport sublime\nimport sublime_plugin\n\n\nclass SaveTask(metaclass=ABCMeta):\n \"\"\"\n Base class for tasks that run on save.\n\n Note: The whole task runs on the async thread.\n \"\"\"\n\n @classmethod\n @abstractmethod\n def is_applicable(cls, view: sublime.View) -> bool:\n pass\n\n def __init__(self, task_runner: LspTextCommand, on_done: Callable[[], None]):\n self._task_runner = task_runner\n self._on_done = on_done\n self._completed = False\n self._cancelled = False\n self._status_key = type(self).__name__\n\n def run_async(self) -> None:\n self._erase_view_status()\n sublime.set_timeout_async(self._on_timeout, userprefs().on_save_task_timeout_ms)\n\n def _on_timeout(self) -> None:\n if not self._completed and not self._cancelled:\n self._set_view_status('LSP: Timeout processing {}'.format(self.__class__.__name__))\n self._cancelled = True\n self._on_done()\n\n def cancel(self) -> None:\n self._cancelled = True\n\n def _set_view_status(self, text: str) -> None:\n self._task_runner.view.set_status(self._status_key, text)\n sublime.set_timeout_async(self._erase_view_status, 5000)\n\n def _erase_view_status(self) -> None:\n self._task_runner.view.erase_status(self._status_key)\n\n def _on_complete(self) -> None:\n assert not self._completed\n self._completed = True\n if not self._cancelled:\n self._on_done()\n\n def _purge_changes_async(self) -> None:\n # Supermassive hack that will go away later.\n listeners = sublime_plugin.view_event_listeners.get(self._task_runner.view.id(), [])\n for listener in listeners:\n if listener.__class__.__name__ == 'DocumentSyncListener':\n listener.purge_changes_async() # type: ignore\n break\n\n\nclass LspSaveCommand(LspTextCommand):\n \"\"\"\n A command used as a substitute for native save command. Runs code actions and document\n formatting before triggering the native save command.\n \"\"\"\n _tasks = [] # type: List[Type[SaveTask]]\n\n @classmethod\n def register_task(cls, task: Type[SaveTask]) -> None:\n assert task not in cls._tasks\n cls._tasks.append(task)\n\n def __init__(self, view: sublime.View) -> None:\n super().__init__(view)\n self._pending_tasks = [] # type: List[SaveTask]\n\n def run(self, edit: sublime.Edit) -> None:\n if self._pending_tasks:\n for task in self._pending_tasks:\n task.cancel()\n self._pending_tasks = []\n sublime.set_timeout_async(self._trigger_on_pre_save_async)\n for Task in self._tasks:\n if Task.is_applicable(self.view):\n self._pending_tasks.append(Task(self, self._on_task_completed_async))\n if self._pending_tasks:\n sublime.set_timeout_async(self._run_next_task_async)\n else:\n self._trigger_native_save()\n\n def _trigger_on_pre_save_async(self) -> None:\n # Supermassive hack that will go away later.\n listeners = sublime_plugin.view_event_listeners.get(self.view.id(), [])\n for listener in listeners:\n if listener.__class__.__name__ == 'DocumentSyncListener':\n listener.trigger_on_pre_save_async() # type: ignore\n break\n\n def _run_next_task_async(self) -> None:\n current_task = self._pending_tasks[0]\n current_task.run_async()\n\n def _on_task_completed_async(self) -> None:\n self._pending_tasks.pop(0)\n if self._pending_tasks:\n # Even though we are on the async thread already, we want to give ST a chance to notify us about\n # potential document changes.\n sublime.set_timeout_async(self._run_next_task_async)\n else:\n self._trigger_native_save()\n\n def _trigger_native_save(self) -> None:\n # Triggered from set_timeout to preserve original semantics of on_pre_save handling\n sublime.set_timeout(lambda: self.view.run_command('save', {\"async\": True}))\n\n\nclass LspSaveAllCommand(sublime_plugin.WindowCommand):\n def run(self, only_files: bool = False) -> None:\n done = set()\n for view in self.window.views():\n buffer_id = view.buffer_id()\n if buffer_id in done:\n continue\n if not view.is_dirty():\n continue\n if only_files and view.file_name() is None:\n continue\n done.add(buffer_id)\n view.run_command(\"lsp_save\", None)\n", "path": "plugin/save_command.py"}]}
1,780
162
gh_patches_debug_23396
rasdani/github-patches
git_diff
mozilla__bugbug-188
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Author and reviewer experience should ignore backouts Author and reviewer experience should not be increased for commits that are backed out. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bugbug/repository.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # This Source Code Form is subject to the terms of the Mozilla Public 3 # License, v. 2.0. If a copy of the MPL was not distributed with this file, 4 # You can obtain one at http://mozilla.org/MPL/2.0/. 5 6 import argparse 7 import concurrent.futures 8 import multiprocessing 9 import os 10 from collections import defaultdict 11 from collections import namedtuple 12 from datetime import datetime 13 14 import hglib 15 import requests 16 from dateutil.relativedelta import relativedelta 17 from parsepatch.patch import Patch 18 from tqdm import tqdm 19 20 from bugbug import db 21 22 COMMITS_DB = 'data/commits.json' 23 db.register(COMMITS_DB, 'https://www.dropbox.com/s/mz3afgncx0siijc/commits.json.xz?dl=1') 24 25 COMPONENTS = {} 26 27 Commit = namedtuple('Commit', ['node', 'author', 'desc', 'date', 'bug', 'ever_backedout', 'author_email']) 28 29 author_experience = {} 30 author_experience_90_days = {} 31 32 33 def get_commits(): 34 return db.read(COMMITS_DB) 35 36 37 def _init(repo_dir): 38 global HG 39 HG = hglib.open(repo_dir) 40 41 42 def _transform(commit): 43 desc = commit.desc.decode('utf-8') 44 45 obj = { 46 'author': commit.author.decode('utf-8'), 47 'desc': desc, 48 'date': str(commit.date), 49 'bug_id': commit.bug.decode('utf-8'), 50 'ever_backedout': commit.ever_backedout, 51 'added': 0, 52 'deleted': 0, 53 'files_modified_num': 0, 54 'types': set(), 55 'components': list(), 56 'author_experience': author_experience[commit], 57 'author_experience_90_days': author_experience_90_days[commit], 58 'author_email': commit.author_email.decode('utf-8'), 59 } 60 61 patch = HG.export(revs=[commit.node], git=True) 62 patch_data = Patch.parse_patch(patch.decode('utf-8', 'ignore'), skip_comments=False, add_lines_for_new=True) 63 for path, stats in patch_data.items(): 64 if 'added' not in stats: 65 # Must be a binary file 66 obj['types'].add('binary') 67 continue 68 69 obj['added'] += len(stats['added']) + len(stats['touched']) 70 obj['deleted'] += len(stats['deleted']) + len(stats['touched']) 71 ext = os.path.splitext(path)[1] 72 if ext in ['.js', '.jsm']: 73 type_ = 'JavaScript' 74 elif ext in ['.c', '.cpp', '.h']: 75 type_ = 'C/C++' 76 elif ext in ['.java']: 77 type_ = 'Java' 78 elif ext in ['.py']: 79 type_ = 'Python' 80 else: 81 type_ = ext 82 obj['types'].add(type_) 83 84 obj['files_modified_num'] = len(patch_data) 85 86 # Covert to a list, as a set is not JSON-serializable. 87 obj['types'] = list(obj['types']) 88 89 obj['components'] = list(set('::'.join(COMPONENTS[fl]) for fl in patch_data.keys() if COMPONENTS.get(fl))) 90 91 return obj 92 93 94 def hg_log(hg, first_rev): 95 template = '{node}\\0{author}\\0{desc}\\0{date}\\0{bug}\\0{backedoutby}\\0{author|email}\\0' 96 97 args = hglib.util.cmdbuilder(b'log', template=template, no_merges=True, rev=f'{first_rev}:tip') 98 x = hg.rawcommand(args) 99 out = x.split(b'\x00')[:-1] 100 101 revs = [] 102 for rev in hglib.util.grouper(template.count('\\0'), out): 103 posixtime = float(rev[3].split(b'.', 1)[0]) 104 dt = datetime.fromtimestamp(posixtime) 105 106 revs.append(Commit( 107 node=rev[0], 108 author=rev[1], 109 desc=rev[2], 110 date=dt, 111 bug=rev[4], 112 ever_backedout=(rev[5] != b''), 113 author_email=rev[6], 114 )) 115 116 return revs 117 118 119 def get_rev(hg, date): 120 return hg.log(date=date.strftime('%Y-%m-%d'), limit=1)[0].node.decode('utf-8') 121 122 123 def download_commits(repo_dir, date_from): 124 hg = hglib.open(repo_dir) 125 126 first_rev = get_rev(hg, date_from) 127 128 commits = hg_log(hg, first_rev) 129 commits_num = len(commits) 130 131 hg.close() 132 133 # Total previous number of commits by the author. 134 total_commits_by_author = defaultdict(int) 135 # Previous commits by the author, in a 90 days window. 136 commits_by_author = defaultdict(list) 137 138 global author_experience 139 global author_experience_90_days 140 for commit in commits: 141 author_experience[commit] = total_commits_by_author[commit.author] 142 total_commits_by_author[commit.author] += 1 143 144 # Keep only the previous commits from a window of 90 days in the commits_by_author map. 145 cut = None 146 147 for i, prev_commit in enumerate(commits_by_author[commit.author]): 148 if (commit.date - prev_commit.date).days <= 90: 149 break 150 151 cut = i 152 153 if cut is not None: 154 commits_by_author[commit.author] = commits_by_author[commit.author][cut + 1:] 155 156 author_experience_90_days[commit] = len(commits_by_author[commit.author]) 157 158 commits_by_author[commit.author].append(commit) 159 160 global COMPONENTS 161 r = requests.get('https://index.taskcluster.net/v1/task/gecko.v2.mozilla-central.latest.source.source-bugzilla-info/artifacts/public/components.json') 162 r.raise_for_status() 163 COMPONENTS = r.json() 164 165 print(f'Mining commits using {multiprocessing.cpu_count()} processes...') 166 167 with concurrent.futures.ProcessPoolExecutor(initializer=_init, initargs=(repo_dir,)) as executor: 168 commits = executor.map(_transform, commits, chunksize=64) 169 commits = tqdm(commits, total=commits_num) 170 db.write(COMMITS_DB, commits) 171 172 173 def get_commit_map(): 174 commit_map = {} 175 176 for commit in get_commits(): 177 bug_id = commit['bug_id'] 178 179 if not bug_id: 180 continue 181 182 if bug_id not in commit_map: 183 commit_map[bug_id] = [] 184 185 commit_map[bug_id].append(commit) 186 187 return commit_map 188 189 190 if __name__ == '__main__': 191 parser = argparse.ArgumentParser() 192 parser.add_argument('repository_dir', help='Path to the repository', action='store') 193 args = parser.parse_args() 194 195 two_years_and_six_months_ago = datetime.utcnow() - relativedelta(years=2, months=6) 196 197 download_commits(args.repository_dir, two_years_and_six_months_ago) 198 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bugbug/repository.py b/bugbug/repository.py --- a/bugbug/repository.py +++ b/bugbug/repository.py @@ -139,7 +139,9 @@ global author_experience_90_days for commit in commits: author_experience[commit] = total_commits_by_author[commit.author] - total_commits_by_author[commit.author] += 1 + # We don't want to consider backed out commits when calculating author/reviewer experience. + if not commit.ever_backedout: + total_commits_by_author[commit.author] += 1 # Keep only the previous commits from a window of 90 days in the commits_by_author map. cut = None @@ -155,7 +157,8 @@ author_experience_90_days[commit] = len(commits_by_author[commit.author]) - commits_by_author[commit.author].append(commit) + if not commit.ever_backedout: + commits_by_author[commit.author].append(commit) global COMPONENTS r = requests.get('https://index.taskcluster.net/v1/task/gecko.v2.mozilla-central.latest.source.source-bugzilla-info/artifacts/public/components.json')
{"golden_diff": "diff --git a/bugbug/repository.py b/bugbug/repository.py\n--- a/bugbug/repository.py\n+++ b/bugbug/repository.py\n@@ -139,7 +139,9 @@\n global author_experience_90_days\n for commit in commits:\n author_experience[commit] = total_commits_by_author[commit.author]\n- total_commits_by_author[commit.author] += 1\n+ # We don't want to consider backed out commits when calculating author/reviewer experience.\n+ if not commit.ever_backedout:\n+ total_commits_by_author[commit.author] += 1\n \n # Keep only the previous commits from a window of 90 days in the commits_by_author map.\n cut = None\n@@ -155,7 +157,8 @@\n \n author_experience_90_days[commit] = len(commits_by_author[commit.author])\n \n- commits_by_author[commit.author].append(commit)\n+ if not commit.ever_backedout:\n+ commits_by_author[commit.author].append(commit)\n \n global COMPONENTS\n r = requests.get('https://index.taskcluster.net/v1/task/gecko.v2.mozilla-central.latest.source.source-bugzilla-info/artifacts/public/components.json')\n", "issue": "Author and reviewer experience should ignore backouts\nAuthor and reviewer experience should not be increased for commits that are backed out.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport argparse\nimport concurrent.futures\nimport multiprocessing\nimport os\nfrom collections import defaultdict\nfrom collections import namedtuple\nfrom datetime import datetime\n\nimport hglib\nimport requests\nfrom dateutil.relativedelta import relativedelta\nfrom parsepatch.patch import Patch\nfrom tqdm import tqdm\n\nfrom bugbug import db\n\nCOMMITS_DB = 'data/commits.json'\ndb.register(COMMITS_DB, 'https://www.dropbox.com/s/mz3afgncx0siijc/commits.json.xz?dl=1')\n\nCOMPONENTS = {}\n\nCommit = namedtuple('Commit', ['node', 'author', 'desc', 'date', 'bug', 'ever_backedout', 'author_email'])\n\nauthor_experience = {}\nauthor_experience_90_days = {}\n\n\ndef get_commits():\n return db.read(COMMITS_DB)\n\n\ndef _init(repo_dir):\n global HG\n HG = hglib.open(repo_dir)\n\n\ndef _transform(commit):\n desc = commit.desc.decode('utf-8')\n\n obj = {\n 'author': commit.author.decode('utf-8'),\n 'desc': desc,\n 'date': str(commit.date),\n 'bug_id': commit.bug.decode('utf-8'),\n 'ever_backedout': commit.ever_backedout,\n 'added': 0,\n 'deleted': 0,\n 'files_modified_num': 0,\n 'types': set(),\n 'components': list(),\n 'author_experience': author_experience[commit],\n 'author_experience_90_days': author_experience_90_days[commit],\n 'author_email': commit.author_email.decode('utf-8'),\n }\n\n patch = HG.export(revs=[commit.node], git=True)\n patch_data = Patch.parse_patch(patch.decode('utf-8', 'ignore'), skip_comments=False, add_lines_for_new=True)\n for path, stats in patch_data.items():\n if 'added' not in stats:\n # Must be a binary file\n obj['types'].add('binary')\n continue\n\n obj['added'] += len(stats['added']) + len(stats['touched'])\n obj['deleted'] += len(stats['deleted']) + len(stats['touched'])\n ext = os.path.splitext(path)[1]\n if ext in ['.js', '.jsm']:\n type_ = 'JavaScript'\n elif ext in ['.c', '.cpp', '.h']:\n type_ = 'C/C++'\n elif ext in ['.java']:\n type_ = 'Java'\n elif ext in ['.py']:\n type_ = 'Python'\n else:\n type_ = ext\n obj['types'].add(type_)\n\n obj['files_modified_num'] = len(patch_data)\n\n # Covert to a list, as a set is not JSON-serializable.\n obj['types'] = list(obj['types'])\n\n obj['components'] = list(set('::'.join(COMPONENTS[fl]) for fl in patch_data.keys() if COMPONENTS.get(fl)))\n\n return obj\n\n\ndef hg_log(hg, first_rev):\n template = '{node}\\\\0{author}\\\\0{desc}\\\\0{date}\\\\0{bug}\\\\0{backedoutby}\\\\0{author|email}\\\\0'\n\n args = hglib.util.cmdbuilder(b'log', template=template, no_merges=True, rev=f'{first_rev}:tip')\n x = hg.rawcommand(args)\n out = x.split(b'\\x00')[:-1]\n\n revs = []\n for rev in hglib.util.grouper(template.count('\\\\0'), out):\n posixtime = float(rev[3].split(b'.', 1)[0])\n dt = datetime.fromtimestamp(posixtime)\n\n revs.append(Commit(\n node=rev[0],\n author=rev[1],\n desc=rev[2],\n date=dt,\n bug=rev[4],\n ever_backedout=(rev[5] != b''),\n author_email=rev[6],\n ))\n\n return revs\n\n\ndef get_rev(hg, date):\n return hg.log(date=date.strftime('%Y-%m-%d'), limit=1)[0].node.decode('utf-8')\n\n\ndef download_commits(repo_dir, date_from):\n hg = hglib.open(repo_dir)\n\n first_rev = get_rev(hg, date_from)\n\n commits = hg_log(hg, first_rev)\n commits_num = len(commits)\n\n hg.close()\n\n # Total previous number of commits by the author.\n total_commits_by_author = defaultdict(int)\n # Previous commits by the author, in a 90 days window.\n commits_by_author = defaultdict(list)\n\n global author_experience\n global author_experience_90_days\n for commit in commits:\n author_experience[commit] = total_commits_by_author[commit.author]\n total_commits_by_author[commit.author] += 1\n\n # Keep only the previous commits from a window of 90 days in the commits_by_author map.\n cut = None\n\n for i, prev_commit in enumerate(commits_by_author[commit.author]):\n if (commit.date - prev_commit.date).days <= 90:\n break\n\n cut = i\n\n if cut is not None:\n commits_by_author[commit.author] = commits_by_author[commit.author][cut + 1:]\n\n author_experience_90_days[commit] = len(commits_by_author[commit.author])\n\n commits_by_author[commit.author].append(commit)\n\n global COMPONENTS\n r = requests.get('https://index.taskcluster.net/v1/task/gecko.v2.mozilla-central.latest.source.source-bugzilla-info/artifacts/public/components.json')\n r.raise_for_status()\n COMPONENTS = r.json()\n\n print(f'Mining commits using {multiprocessing.cpu_count()} processes...')\n\n with concurrent.futures.ProcessPoolExecutor(initializer=_init, initargs=(repo_dir,)) as executor:\n commits = executor.map(_transform, commits, chunksize=64)\n commits = tqdm(commits, total=commits_num)\n db.write(COMMITS_DB, commits)\n\n\ndef get_commit_map():\n commit_map = {}\n\n for commit in get_commits():\n bug_id = commit['bug_id']\n\n if not bug_id:\n continue\n\n if bug_id not in commit_map:\n commit_map[bug_id] = []\n\n commit_map[bug_id].append(commit)\n\n return commit_map\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('repository_dir', help='Path to the repository', action='store')\n args = parser.parse_args()\n\n two_years_and_six_months_ago = datetime.utcnow() - relativedelta(years=2, months=6)\n\n download_commits(args.repository_dir, two_years_and_six_months_ago)\n", "path": "bugbug/repository.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport argparse\nimport concurrent.futures\nimport multiprocessing\nimport os\nfrom collections import defaultdict\nfrom collections import namedtuple\nfrom datetime import datetime\n\nimport hglib\nimport requests\nfrom dateutil.relativedelta import relativedelta\nfrom parsepatch.patch import Patch\nfrom tqdm import tqdm\n\nfrom bugbug import db\n\nCOMMITS_DB = 'data/commits.json'\ndb.register(COMMITS_DB, 'https://www.dropbox.com/s/mz3afgncx0siijc/commits.json.xz?dl=1')\n\nCOMPONENTS = {}\n\nCommit = namedtuple('Commit', ['node', 'author', 'desc', 'date', 'bug', 'ever_backedout', 'author_email'])\n\nauthor_experience = {}\nauthor_experience_90_days = {}\n\n\ndef get_commits():\n return db.read(COMMITS_DB)\n\n\ndef _init(repo_dir):\n global HG\n HG = hglib.open(repo_dir)\n\n\ndef _transform(commit):\n desc = commit.desc.decode('utf-8')\n\n obj = {\n 'author': commit.author.decode('utf-8'),\n 'desc': desc,\n 'date': str(commit.date),\n 'bug_id': commit.bug.decode('utf-8'),\n 'ever_backedout': commit.ever_backedout,\n 'added': 0,\n 'deleted': 0,\n 'files_modified_num': 0,\n 'types': set(),\n 'components': list(),\n 'author_experience': author_experience[commit],\n 'author_experience_90_days': author_experience_90_days[commit],\n 'author_email': commit.author_email.decode('utf-8'),\n }\n\n patch = HG.export(revs=[commit.node], git=True)\n patch_data = Patch.parse_patch(patch.decode('utf-8', 'ignore'), skip_comments=False, add_lines_for_new=True)\n for path, stats in patch_data.items():\n if 'added' not in stats:\n # Must be a binary file\n obj['types'].add('binary')\n continue\n\n obj['added'] += len(stats['added']) + len(stats['touched'])\n obj['deleted'] += len(stats['deleted']) + len(stats['touched'])\n ext = os.path.splitext(path)[1]\n if ext in ['.js', '.jsm']:\n type_ = 'JavaScript'\n elif ext in ['.c', '.cpp', '.h']:\n type_ = 'C/C++'\n elif ext in ['.java']:\n type_ = 'Java'\n elif ext in ['.py']:\n type_ = 'Python'\n else:\n type_ = ext\n obj['types'].add(type_)\n\n obj['files_modified_num'] = len(patch_data)\n\n # Covert to a list, as a set is not JSON-serializable.\n obj['types'] = list(obj['types'])\n\n obj['components'] = list(set('::'.join(COMPONENTS[fl]) for fl in patch_data.keys() if COMPONENTS.get(fl)))\n\n return obj\n\n\ndef hg_log(hg, first_rev):\n template = '{node}\\\\0{author}\\\\0{desc}\\\\0{date}\\\\0{bug}\\\\0{backedoutby}\\\\0{author|email}\\\\0'\n\n args = hglib.util.cmdbuilder(b'log', template=template, no_merges=True, rev=f'{first_rev}:tip')\n x = hg.rawcommand(args)\n out = x.split(b'\\x00')[:-1]\n\n revs = []\n for rev in hglib.util.grouper(template.count('\\\\0'), out):\n posixtime = float(rev[3].split(b'.', 1)[0])\n dt = datetime.fromtimestamp(posixtime)\n\n revs.append(Commit(\n node=rev[0],\n author=rev[1],\n desc=rev[2],\n date=dt,\n bug=rev[4],\n ever_backedout=(rev[5] != b''),\n author_email=rev[6],\n ))\n\n return revs\n\n\ndef get_rev(hg, date):\n return hg.log(date=date.strftime('%Y-%m-%d'), limit=1)[0].node.decode('utf-8')\n\n\ndef download_commits(repo_dir, date_from):\n hg = hglib.open(repo_dir)\n\n first_rev = get_rev(hg, date_from)\n\n commits = hg_log(hg, first_rev)\n commits_num = len(commits)\n\n hg.close()\n\n # Total previous number of commits by the author.\n total_commits_by_author = defaultdict(int)\n # Previous commits by the author, in a 90 days window.\n commits_by_author = defaultdict(list)\n\n global author_experience\n global author_experience_90_days\n for commit in commits:\n author_experience[commit] = total_commits_by_author[commit.author]\n # We don't want to consider backed out commits when calculating author/reviewer experience.\n if not commit.ever_backedout:\n total_commits_by_author[commit.author] += 1\n\n # Keep only the previous commits from a window of 90 days in the commits_by_author map.\n cut = None\n\n for i, prev_commit in enumerate(commits_by_author[commit.author]):\n if (commit.date - prev_commit.date).days <= 90:\n break\n\n cut = i\n\n if cut is not None:\n commits_by_author[commit.author] = commits_by_author[commit.author][cut + 1:]\n\n author_experience_90_days[commit] = len(commits_by_author[commit.author])\n\n if not commit.ever_backedout:\n commits_by_author[commit.author].append(commit)\n\n global COMPONENTS\n r = requests.get('https://index.taskcluster.net/v1/task/gecko.v2.mozilla-central.latest.source.source-bugzilla-info/artifacts/public/components.json')\n r.raise_for_status()\n COMPONENTS = r.json()\n\n print(f'Mining commits using {multiprocessing.cpu_count()} processes...')\n\n with concurrent.futures.ProcessPoolExecutor(initializer=_init, initargs=(repo_dir,)) as executor:\n commits = executor.map(_transform, commits, chunksize=64)\n commits = tqdm(commits, total=commits_num)\n db.write(COMMITS_DB, commits)\n\n\ndef get_commit_map():\n commit_map = {}\n\n for commit in get_commits():\n bug_id = commit['bug_id']\n\n if not bug_id:\n continue\n\n if bug_id not in commit_map:\n commit_map[bug_id] = []\n\n commit_map[bug_id].append(commit)\n\n return commit_map\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('repository_dir', help='Path to the repository', action='store')\n args = parser.parse_args()\n\n two_years_and_six_months_ago = datetime.utcnow() - relativedelta(years=2, months=6)\n\n download_commits(args.repository_dir, two_years_and_six_months_ago)\n", "path": "bugbug/repository.py"}]}
2,330
275
gh_patches_debug_29535
rasdani/github-patches
git_diff
conan-io__conan-center-index-7032
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [package] imgui/1.84.1: Shared library does not automatically import global data symbols ### Package and Environment Details (include every applicable attribute) * Package Name/Version: **imgui/1.84.1** * Operating System+version: **Windows 10 21H1 Build 19043.1165** * Compiler+version: **Visual Studio 16 (2019)** * Docker image: **N/A** * Conan version: **conan 1.39.0** * Python version: **Python 3.9.6** ### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use) ``` [settings] os=Windows os_build=Windows arch=x86_64 arch_build=x86_64 compiler=Visual Studio compiler.version=16 build_type=Release [options] [conf] [build_requires] [env] ``` ### Steps to reproduce (Include if Applicable) Try to reference any code that uses global data symbols since those need to use `__declspec(dllimport)` when using [`WINDOWS_EXPORT_ALL_SYMBOLS`](https://cmake.org/cmake/help/latest/prop_tgt/WINDOWS_EXPORT_ALL_SYMBOLS.html#windows-export-all-symbols). One example could be using [`ImGuiTextBuffer`](https://github.com/ocornut/imgui/blob/v1.84.1/imgui.h#L2078) (which has `IMGUI_API static char EmptyString[1];`). The following diff is for ImGui's [`test_package.cpp`](https://github.com/conan-io/conan-center-index/blob/master/recipes/imgui/all/test_package/test_package.cpp) and can reproduce this issue. ``` --- a/recipes/imgui/all/test_package/test_package.cpp +++ b/recipes/imgui/all/test_package/test_package.cpp @@ -5,6 +5,9 @@ int main(int, char**) { ImGuiContext* context =ImGui::CreateContext(); ImGuiIO& io = ImGui::GetIO(); + + ImGuiTextBuffer textBuffer; + textBuffer.append("Hello, ImGui"); // Build atlas unsigned char* tex_pixels = NULL; @@ -20,6 +23,7 @@ int main(int, char**) static float f = 0.0f; ImGui::Text("Hello, world!"); + ImGui::Text(textBuffer.begin()); ImGui::SliderFloat("float", &f, 0.0f, 1.0f); ImGui::Text("Application average %.3f ms/frame (%.1f FPS)", 1000.0f / io.Framerate, io.Framerate); ImGui::ShowDemoWindow(NULL); ``` Then, try to create the package with `conan create . imgui/1.84.1@ -o imgui:shared=True`. ### Logs (Include/Attach if Applicable) <details><summary>Click to expand log</summary> ``` test_package.obj : error LNK2019: unresolved external symbol "public: static char * ImGuiTextBuffer::EmptyString" (?Emp tyString@ImGuiTextBuffer@@2PADA) referenced in function main ``` </details> I think the simplest solution would be to add something like this ``` if self.options.shared and self.settings.os == "Windows": self.cpp_info.defines.append("IMGUI_API=__declspec(dllimport)") ``` I'd be happy to open a PR with this change. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `recipes/imgui/all/conanfile.py` Content: ``` 1 from conans import ConanFile, CMake, tools 2 import os 3 4 required_conan_version = ">=1.33.0" 5 6 7 class IMGUIConan(ConanFile): 8 name = "imgui" 9 url = "https://github.com/conan-io/conan-center-index" 10 homepage = "https://github.com/ocornut/imgui" 11 description = "Bloat-free Immediate Mode Graphical User interface for C++ with minimal dependencies" 12 topics = ("conan", "imgui", "gui", "graphical") 13 license = "MIT" 14 15 exports_sources = ["CMakeLists.txt"] 16 generators = "cmake" 17 18 settings = "os", "arch", "compiler", "build_type" 19 options = { 20 "shared": [True, False], 21 "fPIC": [True, False] 22 } 23 default_options = { 24 "shared": False, 25 "fPIC": True 26 } 27 28 _cmake = None 29 30 @property 31 def _source_subfolder(self): 32 return "source_subfolder" 33 34 def config_options(self): 35 if self.settings.os == "Windows": 36 del self.options.fPIC 37 38 def configure(self): 39 if self.options.shared: 40 del self.options.fPIC 41 42 def source(self): 43 tools.get(**self.conan_data["sources"][self.version], 44 destination=self._source_subfolder, strip_root=True) 45 46 def _configure_cmake(self): 47 if self._cmake: 48 return self._cmake 49 self._cmake = CMake(self) 50 self._cmake.configure() 51 return self._cmake 52 53 def build(self): 54 cmake = self._configure_cmake() 55 cmake.build() 56 57 def package(self): 58 self.copy(pattern="LICENSE.txt", dst="licenses", src=self._source_subfolder) 59 backends_folder = src=os.path.join( 60 self._source_subfolder, 61 "backends" if tools.Version(self.version) >= "1.80" else "examples" 62 ) 63 self.copy(pattern="imgui_impl_*", 64 dst=os.path.join("res", "bindings"), 65 src=backends_folder) 66 cmake = self._configure_cmake() 67 cmake.install() 68 69 def package_info(self): 70 self.cpp_info.libs = ["imgui"] 71 if self.settings.os == "Linux": 72 self.cpp_info.system_libs.append("m") 73 self.cpp_info.srcdirs = [os.path.join("res", "bindings")] 74 75 bin_path = os.path.join(self.package_folder, "bin") 76 self.output.info("Appending PATH env var with : {}".format(bin_path)) 77 self.env_info.PATH.append(bin_path) 78 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/recipes/imgui/all/conanfile.py b/recipes/imgui/all/conanfile.py --- a/recipes/imgui/all/conanfile.py +++ b/recipes/imgui/all/conanfile.py @@ -12,19 +12,18 @@ topics = ("conan", "imgui", "gui", "graphical") license = "MIT" - exports_sources = ["CMakeLists.txt"] - generators = "cmake" - settings = "os", "arch", "compiler", "build_type" options = { "shared": [True, False], - "fPIC": [True, False] + "fPIC": [True, False], } default_options = { "shared": False, - "fPIC": True + "fPIC": True, } + exports_sources = "CMakeLists.txt" + generators = "cmake" _cmake = None @property @@ -56,7 +55,7 @@ def package(self): self.copy(pattern="LICENSE.txt", dst="licenses", src=self._source_subfolder) - backends_folder = src=os.path.join( + backends_folder = os.path.join( self._source_subfolder, "backends" if tools.Version(self.version) >= "1.80" else "examples" ) @@ -68,6 +67,7 @@ def package_info(self): self.cpp_info.libs = ["imgui"] + self.cpp_info.defines.append("IMGUI_USER_CONFIG=\"imgui_user_config.h\"") if self.settings.os == "Linux": self.cpp_info.system_libs.append("m") self.cpp_info.srcdirs = [os.path.join("res", "bindings")]
{"golden_diff": "diff --git a/recipes/imgui/all/conanfile.py b/recipes/imgui/all/conanfile.py\n--- a/recipes/imgui/all/conanfile.py\n+++ b/recipes/imgui/all/conanfile.py\n@@ -12,19 +12,18 @@\n topics = (\"conan\", \"imgui\", \"gui\", \"graphical\")\n license = \"MIT\"\n \n- exports_sources = [\"CMakeLists.txt\"]\n- generators = \"cmake\"\n-\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n- \"fPIC\": [True, False]\n+ \"fPIC\": [True, False],\n }\n default_options = {\n \"shared\": False,\n- \"fPIC\": True\n+ \"fPIC\": True,\n }\n \n+ exports_sources = \"CMakeLists.txt\"\n+ generators = \"cmake\"\n _cmake = None\n \n @property\n@@ -56,7 +55,7 @@\n \n def package(self):\n self.copy(pattern=\"LICENSE.txt\", dst=\"licenses\", src=self._source_subfolder)\n- backends_folder = src=os.path.join(\n+ backends_folder = os.path.join(\n self._source_subfolder,\n \"backends\" if tools.Version(self.version) >= \"1.80\" else \"examples\"\n )\n@@ -68,6 +67,7 @@\n \n def package_info(self):\n self.cpp_info.libs = [\"imgui\"]\n+ self.cpp_info.defines.append(\"IMGUI_USER_CONFIG=\\\"imgui_user_config.h\\\"\")\n if self.settings.os == \"Linux\":\n self.cpp_info.system_libs.append(\"m\")\n self.cpp_info.srcdirs = [os.path.join(\"res\", \"bindings\")]\n", "issue": "[package] imgui/1.84.1: Shared library does not automatically import global data symbols\n### Package and Environment Details (include every applicable attribute)\r\n * Package Name/Version: **imgui/1.84.1**\r\n * Operating System+version: **Windows 10 21H1 Build 19043.1165**\r\n * Compiler+version: **Visual Studio 16 (2019)**\r\n * Docker image: **N/A**\r\n * Conan version: **conan 1.39.0**\r\n * Python version: **Python 3.9.6**\r\n\r\n\r\n### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use)\r\n```\r\n[settings]\r\nos=Windows\r\nos_build=Windows\r\narch=x86_64\r\narch_build=x86_64\r\ncompiler=Visual Studio\r\ncompiler.version=16\r\nbuild_type=Release\r\n[options]\r\n[conf]\r\n[build_requires]\r\n[env]\r\n```\r\n\r\n\r\n### Steps to reproduce (Include if Applicable)\r\nTry to reference any code that uses global data symbols since those need to use `__declspec(dllimport)` when using [`WINDOWS_EXPORT_ALL_SYMBOLS`](https://cmake.org/cmake/help/latest/prop_tgt/WINDOWS_EXPORT_ALL_SYMBOLS.html#windows-export-all-symbols). One example could be using [`ImGuiTextBuffer`](https://github.com/ocornut/imgui/blob/v1.84.1/imgui.h#L2078) (which has `IMGUI_API static char EmptyString[1];`).\r\nThe following diff is for ImGui's [`test_package.cpp`](https://github.com/conan-io/conan-center-index/blob/master/recipes/imgui/all/test_package/test_package.cpp) and can reproduce this issue.\r\n\r\n```\r\n--- a/recipes/imgui/all/test_package/test_package.cpp\r\n+++ b/recipes/imgui/all/test_package/test_package.cpp\r\n@@ -5,6 +5,9 @@ int main(int, char**)\r\n {\r\n ImGuiContext* context =ImGui::CreateContext();\r\n ImGuiIO& io = ImGui::GetIO();\r\n+\t\r\n+ ImGuiTextBuffer textBuffer;\r\n+ textBuffer.append(\"Hello, ImGui\");\r\n \r\n // Build atlas\r\n unsigned char* tex_pixels = NULL;\r\n@@ -20,6 +23,7 @@ int main(int, char**)\r\n \r\n static float f = 0.0f;\r\n ImGui::Text(\"Hello, world!\");\r\n+ ImGui::Text(textBuffer.begin());\r\n ImGui::SliderFloat(\"float\", &f, 0.0f, 1.0f);\r\n ImGui::Text(\"Application average %.3f ms/frame (%.1f FPS)\", 1000.0f / io.Framerate, io.Framerate);\r\n ImGui::ShowDemoWindow(NULL);\r\n```\r\n\r\nThen, try to create the package with `conan create . imgui/1.84.1@ -o imgui:shared=True`.\r\n\r\n\r\n### Logs (Include/Attach if Applicable)\r\n<details><summary>Click to expand log</summary>\r\n\r\n```\r\ntest_package.obj : error LNK2019: unresolved external symbol \"public: static char * ImGuiTextBuffer::EmptyString\" (?Emp\r\ntyString@ImGuiTextBuffer@@2PADA) referenced in function main\r\n```\r\n\r\n</details>\r\n\r\nI think the simplest solution would be to add something like this\r\n\r\n```\r\nif self.options.shared and self.settings.os == \"Windows\":\r\n self.cpp_info.defines.append(\"IMGUI_API=__declspec(dllimport)\")\r\n```\r\n\r\nI'd be happy to open a PR with this change.\n", "before_files": [{"content": "from conans import ConanFile, CMake, tools\nimport os\n\nrequired_conan_version = \">=1.33.0\"\n\n\nclass IMGUIConan(ConanFile):\n name = \"imgui\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/ocornut/imgui\"\n description = \"Bloat-free Immediate Mode Graphical User interface for C++ with minimal dependencies\"\n topics = (\"conan\", \"imgui\", \"gui\", \"graphical\")\n license = \"MIT\"\n\n exports_sources = [\"CMakeLists.txt\"]\n generators = \"cmake\"\n\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False]\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True\n }\n\n _cmake = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version],\n destination=self._source_subfolder, strip_root=True)\n\n def _configure_cmake(self):\n if self._cmake:\n return self._cmake\n self._cmake = CMake(self)\n self._cmake.configure()\n return self._cmake\n\n def build(self):\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(pattern=\"LICENSE.txt\", dst=\"licenses\", src=self._source_subfolder)\n backends_folder = src=os.path.join(\n self._source_subfolder,\n \"backends\" if tools.Version(self.version) >= \"1.80\" else \"examples\"\n )\n self.copy(pattern=\"imgui_impl_*\",\n dst=os.path.join(\"res\", \"bindings\"),\n src=backends_folder)\n cmake = self._configure_cmake()\n cmake.install()\n\n def package_info(self):\n self.cpp_info.libs = [\"imgui\"]\n if self.settings.os == \"Linux\":\n self.cpp_info.system_libs.append(\"m\")\n self.cpp_info.srcdirs = [os.path.join(\"res\", \"bindings\")]\n\n bin_path = os.path.join(self.package_folder, \"bin\")\n self.output.info(\"Appending PATH env var with : {}\".format(bin_path))\n self.env_info.PATH.append(bin_path)\n", "path": "recipes/imgui/all/conanfile.py"}], "after_files": [{"content": "from conans import ConanFile, CMake, tools\nimport os\n\nrequired_conan_version = \">=1.33.0\"\n\n\nclass IMGUIConan(ConanFile):\n name = \"imgui\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/ocornut/imgui\"\n description = \"Bloat-free Immediate Mode Graphical User interface for C++ with minimal dependencies\"\n topics = (\"conan\", \"imgui\", \"gui\", \"graphical\")\n license = \"MIT\"\n\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n }\n\n exports_sources = \"CMakeLists.txt\"\n generators = \"cmake\"\n _cmake = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version],\n destination=self._source_subfolder, strip_root=True)\n\n def _configure_cmake(self):\n if self._cmake:\n return self._cmake\n self._cmake = CMake(self)\n self._cmake.configure()\n return self._cmake\n\n def build(self):\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(pattern=\"LICENSE.txt\", dst=\"licenses\", src=self._source_subfolder)\n backends_folder = os.path.join(\n self._source_subfolder,\n \"backends\" if tools.Version(self.version) >= \"1.80\" else \"examples\"\n )\n self.copy(pattern=\"imgui_impl_*\",\n dst=os.path.join(\"res\", \"bindings\"),\n src=backends_folder)\n cmake = self._configure_cmake()\n cmake.install()\n\n def package_info(self):\n self.cpp_info.libs = [\"imgui\"]\n self.cpp_info.defines.append(\"IMGUI_USER_CONFIG=\\\"imgui_user_config.h\\\"\")\n if self.settings.os == \"Linux\":\n self.cpp_info.system_libs.append(\"m\")\n self.cpp_info.srcdirs = [os.path.join(\"res\", \"bindings\")]\n\n bin_path = os.path.join(self.package_folder, \"bin\")\n self.output.info(\"Appending PATH env var with : {}\".format(bin_path))\n self.env_info.PATH.append(bin_path)\n", "path": "recipes/imgui/all/conanfile.py"}]}
1,767
396
gh_patches_debug_36505
rasdani/github-patches
git_diff
hpcaitech__ColossalAI-2690
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [tensor] fix some unittests [tensor] fix some unittests [tensor] fix some unittests --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `colossalai/gemini/gemini_context.py` Content: ``` 1 from enum import EnumMeta 2 3 4 class GeminiMemoryManager(object): 5 6 def __init__(self, states_cls: EnumMeta): 7 super().__init__() 8 self.states_cls = states_cls 9 self._cnter = 0 # the counter of instances 10 11 self.total_mem = dict() 12 self.state_mem = dict() 13 self.state_mem['cpu'] = dict() 14 self.state_mem['cuda'] = dict() 15 16 self.reset() 17 18 @property 19 def total_number(self): 20 return self._cnter 21 22 def reset(self): 23 self._cnter = 0 # the counter of instances 24 25 self.total_mem['cpu'] = 0 # memory occupation of instances in cpu 26 self.total_mem['cuda'] = 0 # memory of occupation of instances in cuda 27 28 # memory conditions for all states 29 for state in self.states_cls: 30 self.state_mem['cpu'][state] = 0 31 self.state_mem['cuda'][state] = 0 32 33 def register_new_instance(self): 34 self._cnter += 1 35 36 def delete_instance(self): 37 self._cnter -= 1 38 39 def print_info(self): 40 print(f"Total number: {self.total_number}", 41 f"Total CPU memory occupation: {self.total_mem['cpu']}", 42 f"Total CUDA memory occupation: {self.total_mem['cuda']}\n", 43 sep='\n') 44 45 for state in self.states_cls: 46 print(f"{state}: CPU memory occupation: {self.state_mem['cpu'][state]}", 47 f"{state}: CUDA memory occupation: {self.state_mem['cuda'][state]}\n", 48 sep='\n') 49 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/colossalai/gemini/gemini_context.py b/colossalai/gemini/gemini_context.py --- a/colossalai/gemini/gemini_context.py +++ b/colossalai/gemini/gemini_context.py @@ -1,48 +1,48 @@ -from enum import EnumMeta - - -class GeminiMemoryManager(object): - - def __init__(self, states_cls: EnumMeta): - super().__init__() - self.states_cls = states_cls - self._cnter = 0 # the counter of instances - - self.total_mem = dict() - self.state_mem = dict() - self.state_mem['cpu'] = dict() - self.state_mem['cuda'] = dict() - - self.reset() - - @property - def total_number(self): - return self._cnter - - def reset(self): - self._cnter = 0 # the counter of instances - - self.total_mem['cpu'] = 0 # memory occupation of instances in cpu - self.total_mem['cuda'] = 0 # memory of occupation of instances in cuda - - # memory conditions for all states - for state in self.states_cls: - self.state_mem['cpu'][state] = 0 - self.state_mem['cuda'][state] = 0 - - def register_new_instance(self): - self._cnter += 1 - - def delete_instance(self): - self._cnter -= 1 - - def print_info(self): - print(f"Total number: {self.total_number}", - f"Total CPU memory occupation: {self.total_mem['cpu']}", - f"Total CUDA memory occupation: {self.total_mem['cuda']}\n", - sep='\n') - - for state in self.states_cls: - print(f"{state}: CPU memory occupation: {self.state_mem['cpu'][state]}", - f"{state}: CUDA memory occupation: {self.state_mem['cuda'][state]}\n", - sep='\n') +from enum import EnumMeta + + +class GeminiMemoryManager(object): + + def __init__(self, states_cls: EnumMeta): + super().__init__() + self.states_cls = states_cls + self._cnter = 0 # the counter of instances + + self.total_mem = dict() + self.state_mem = dict() + self.state_mem['cpu'] = dict() + self.state_mem['cuda'] = dict() + + self.reset() + + @property + def total_number(self): + return self._cnter + + def reset(self): + self._cnter = 0 # the counter of instances + + self.total_mem['cpu'] = 0 # memory occupation of instances in cpu + self.total_mem['cuda'] = 0 # memory of occupation of instances in cuda + + # memory conditions for all states + for state in self.states_cls: + self.state_mem['cpu'][state] = 0 + self.state_mem['cuda'][state] = 0 + + def register_new_instance(self): + self._cnter += 1 + + def delete_instance(self): + self._cnter -= 1 + + def print_info(self): + print(f"Total number: {self.total_number}", + f"Total CPU memory occupation: {self.total_mem['cpu']}", + f"Total CUDA memory occupation: {self.total_mem['cuda']}\n", + sep='\n') + + for state in self.states_cls: + print(f"{state}: CPU memory occupation: {self.state_mem['cpu'][state]}", + f"{state}: CUDA memory occupation: {self.state_mem['cuda'][state]}\n", + sep='\n')
{"golden_diff": "diff --git a/colossalai/gemini/gemini_context.py b/colossalai/gemini/gemini_context.py\n--- a/colossalai/gemini/gemini_context.py\n+++ b/colossalai/gemini/gemini_context.py\n@@ -1,48 +1,48 @@\n-from enum import EnumMeta\r\n-\r\n-\r\n-class GeminiMemoryManager(object):\r\n-\r\n- def __init__(self, states_cls: EnumMeta):\r\n- super().__init__()\r\n- self.states_cls = states_cls\r\n- self._cnter = 0 # the counter of instances\r\n-\r\n- self.total_mem = dict()\r\n- self.state_mem = dict()\r\n- self.state_mem['cpu'] = dict()\r\n- self.state_mem['cuda'] = dict()\r\n-\r\n- self.reset()\r\n-\r\n- @property\r\n- def total_number(self):\r\n- return self._cnter\r\n-\r\n- def reset(self):\r\n- self._cnter = 0 # the counter of instances\r\n-\r\n- self.total_mem['cpu'] = 0 # memory occupation of instances in cpu\r\n- self.total_mem['cuda'] = 0 # memory of occupation of instances in cuda\r\n-\r\n- # memory conditions for all states\r\n- for state in self.states_cls:\r\n- self.state_mem['cpu'][state] = 0\r\n- self.state_mem['cuda'][state] = 0\r\n-\r\n- def register_new_instance(self):\r\n- self._cnter += 1\r\n-\r\n- def delete_instance(self):\r\n- self._cnter -= 1\r\n-\r\n- def print_info(self):\r\n- print(f\"Total number: {self.total_number}\",\r\n- f\"Total CPU memory occupation: {self.total_mem['cpu']}\",\r\n- f\"Total CUDA memory occupation: {self.total_mem['cuda']}\\n\",\r\n- sep='\\n')\r\n-\r\n- for state in self.states_cls:\r\n- print(f\"{state}: CPU memory occupation: {self.state_mem['cpu'][state]}\",\r\n- f\"{state}: CUDA memory occupation: {self.state_mem['cuda'][state]}\\n\",\r\n- sep='\\n')\r\n+from enum import EnumMeta\n+\n+\n+class GeminiMemoryManager(object):\n+\n+ def __init__(self, states_cls: EnumMeta):\n+ super().__init__()\n+ self.states_cls = states_cls\n+ self._cnter = 0 # the counter of instances\n+\n+ self.total_mem = dict()\n+ self.state_mem = dict()\n+ self.state_mem['cpu'] = dict()\n+ self.state_mem['cuda'] = dict()\n+\n+ self.reset()\n+\n+ @property\n+ def total_number(self):\n+ return self._cnter\n+\n+ def reset(self):\n+ self._cnter = 0 # the counter of instances\n+\n+ self.total_mem['cpu'] = 0 # memory occupation of instances in cpu\n+ self.total_mem['cuda'] = 0 # memory of occupation of instances in cuda\n+\n+ # memory conditions for all states\n+ for state in self.states_cls:\n+ self.state_mem['cpu'][state] = 0\n+ self.state_mem['cuda'][state] = 0\n+\n+ def register_new_instance(self):\n+ self._cnter += 1\n+\n+ def delete_instance(self):\n+ self._cnter -= 1\n+\n+ def print_info(self):\n+ print(f\"Total number: {self.total_number}\",\n+ f\"Total CPU memory occupation: {self.total_mem['cpu']}\",\n+ f\"Total CUDA memory occupation: {self.total_mem['cuda']}\\n\",\n+ sep='\\n')\n+\n+ for state in self.states_cls:\n+ print(f\"{state}: CPU memory occupation: {self.state_mem['cpu'][state]}\",\n+ f\"{state}: CUDA memory occupation: {self.state_mem['cuda'][state]}\\n\",\n+ sep='\\n')\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "from enum import EnumMeta\r\n\r\n\r\nclass GeminiMemoryManager(object):\r\n\r\n def __init__(self, states_cls: EnumMeta):\r\n super().__init__()\r\n self.states_cls = states_cls\r\n self._cnter = 0 # the counter of instances\r\n\r\n self.total_mem = dict()\r\n self.state_mem = dict()\r\n self.state_mem['cpu'] = dict()\r\n self.state_mem['cuda'] = dict()\r\n\r\n self.reset()\r\n\r\n @property\r\n def total_number(self):\r\n return self._cnter\r\n\r\n def reset(self):\r\n self._cnter = 0 # the counter of instances\r\n\r\n self.total_mem['cpu'] = 0 # memory occupation of instances in cpu\r\n self.total_mem['cuda'] = 0 # memory of occupation of instances in cuda\r\n\r\n # memory conditions for all states\r\n for state in self.states_cls:\r\n self.state_mem['cpu'][state] = 0\r\n self.state_mem['cuda'][state] = 0\r\n\r\n def register_new_instance(self):\r\n self._cnter += 1\r\n\r\n def delete_instance(self):\r\n self._cnter -= 1\r\n\r\n def print_info(self):\r\n print(f\"Total number: {self.total_number}\",\r\n f\"Total CPU memory occupation: {self.total_mem['cpu']}\",\r\n f\"Total CUDA memory occupation: {self.total_mem['cuda']}\\n\",\r\n sep='\\n')\r\n\r\n for state in self.states_cls:\r\n print(f\"{state}: CPU memory occupation: {self.state_mem['cpu'][state]}\",\r\n f\"{state}: CUDA memory occupation: {self.state_mem['cuda'][state]}\\n\",\r\n sep='\\n')\r\n", "path": "colossalai/gemini/gemini_context.py"}], "after_files": [{"content": "from enum import EnumMeta\n\n\nclass GeminiMemoryManager(object):\n\n def __init__(self, states_cls: EnumMeta):\n super().__init__()\n self.states_cls = states_cls\n self._cnter = 0 # the counter of instances\n\n self.total_mem = dict()\n self.state_mem = dict()\n self.state_mem['cpu'] = dict()\n self.state_mem['cuda'] = dict()\n\n self.reset()\n\n @property\n def total_number(self):\n return self._cnter\n\n def reset(self):\n self._cnter = 0 # the counter of instances\n\n self.total_mem['cpu'] = 0 # memory occupation of instances in cpu\n self.total_mem['cuda'] = 0 # memory of occupation of instances in cuda\n\n # memory conditions for all states\n for state in self.states_cls:\n self.state_mem['cpu'][state] = 0\n self.state_mem['cuda'][state] = 0\n\n def register_new_instance(self):\n self._cnter += 1\n\n def delete_instance(self):\n self._cnter -= 1\n\n def print_info(self):\n print(f\"Total number: {self.total_number}\",\n f\"Total CPU memory occupation: {self.total_mem['cpu']}\",\n f\"Total CUDA memory occupation: {self.total_mem['cuda']}\\n\",\n sep='\\n')\n\n for state in self.states_cls:\n print(f\"{state}: CPU memory occupation: {self.state_mem['cpu'][state]}\",\n f\"{state}: CUDA memory occupation: {self.state_mem['cuda'][state]}\\n\",\n sep='\\n')\n", "path": "colossalai/gemini/gemini_context.py"}]}
744
870
gh_patches_debug_9001
rasdani/github-patches
git_diff
docker__docker-py-1853
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- .dockerignore file with space-only lines breaks build on Windows Originally reported in https://github.com/docker/compose/issues/5526 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docker/api/build.py` Content: ``` 1 import json 2 import logging 3 import os 4 import re 5 6 from .. import auth 7 from .. import constants 8 from .. import errors 9 from .. import utils 10 11 12 log = logging.getLogger(__name__) 13 14 15 class BuildApiMixin(object): 16 def build(self, path=None, tag=None, quiet=False, fileobj=None, 17 nocache=False, rm=False, stream=False, timeout=None, 18 custom_context=False, encoding=None, pull=False, 19 forcerm=False, dockerfile=None, container_limits=None, 20 decode=False, buildargs=None, gzip=False, shmsize=None, 21 labels=None, cache_from=None, target=None, network_mode=None, 22 squash=None, extra_hosts=None): 23 """ 24 Similar to the ``docker build`` command. Either ``path`` or ``fileobj`` 25 needs to be set. ``path`` can be a local path (to a directory 26 containing a Dockerfile) or a remote URL. ``fileobj`` must be a 27 readable file-like object to a Dockerfile. 28 29 If you have a tar file for the Docker build context (including a 30 Dockerfile) already, pass a readable file-like object to ``fileobj`` 31 and also pass ``custom_context=True``. If the stream is compressed 32 also, set ``encoding`` to the correct value (e.g ``gzip``). 33 34 Example: 35 >>> from io import BytesIO 36 >>> from docker import APIClient 37 >>> dockerfile = ''' 38 ... # Shared Volume 39 ... FROM busybox:buildroot-2014.02 40 ... VOLUME /data 41 ... CMD ["/bin/sh"] 42 ... ''' 43 >>> f = BytesIO(dockerfile.encode('utf-8')) 44 >>> cli = APIClient(base_url='tcp://127.0.0.1:2375') 45 >>> response = [line for line in cli.build( 46 ... fileobj=f, rm=True, tag='yourname/volume' 47 ... )] 48 >>> response 49 ['{"stream":" ---\\u003e a9eb17255234\\n"}', 50 '{"stream":"Step 1 : VOLUME /data\\n"}', 51 '{"stream":" ---\\u003e Running in abdc1e6896c6\\n"}', 52 '{"stream":" ---\\u003e 713bca62012e\\n"}', 53 '{"stream":"Removing intermediate container abdc1e6896c6\\n"}', 54 '{"stream":"Step 2 : CMD [\\"/bin/sh\\"]\\n"}', 55 '{"stream":" ---\\u003e Running in dba30f2a1a7e\\n"}', 56 '{"stream":" ---\\u003e 032b8b2855fc\\n"}', 57 '{"stream":"Removing intermediate container dba30f2a1a7e\\n"}', 58 '{"stream":"Successfully built 032b8b2855fc\\n"}'] 59 60 Args: 61 path (str): Path to the directory containing the Dockerfile 62 fileobj: A file object to use as the Dockerfile. (Or a file-like 63 object) 64 tag (str): A tag to add to the final image 65 quiet (bool): Whether to return the status 66 nocache (bool): Don't use the cache when set to ``True`` 67 rm (bool): Remove intermediate containers. The ``docker build`` 68 command now defaults to ``--rm=true``, but we have kept the old 69 default of `False` to preserve backward compatibility 70 stream (bool): *Deprecated for API version > 1.8 (always True)*. 71 Return a blocking generator you can iterate over to retrieve 72 build output as it happens 73 timeout (int): HTTP timeout 74 custom_context (bool): Optional if using ``fileobj`` 75 encoding (str): The encoding for a stream. Set to ``gzip`` for 76 compressing 77 pull (bool): Downloads any updates to the FROM image in Dockerfiles 78 forcerm (bool): Always remove intermediate containers, even after 79 unsuccessful builds 80 dockerfile (str): path within the build context to the Dockerfile 81 buildargs (dict): A dictionary of build arguments 82 container_limits (dict): A dictionary of limits applied to each 83 container created by the build process. Valid keys: 84 85 - memory (int): set memory limit for build 86 - memswap (int): Total memory (memory + swap), -1 to disable 87 swap 88 - cpushares (int): CPU shares (relative weight) 89 - cpusetcpus (str): CPUs in which to allow execution, e.g., 90 ``"0-3"``, ``"0,1"`` 91 decode (bool): If set to ``True``, the returned stream will be 92 decoded into dicts on the fly. Default ``False`` 93 shmsize (int): Size of `/dev/shm` in bytes. The size must be 94 greater than 0. If omitted the system uses 64MB 95 labels (dict): A dictionary of labels to set on the image 96 cache_from (:py:class:`list`): A list of images used for build 97 cache resolution 98 target (str): Name of the build-stage to build in a multi-stage 99 Dockerfile 100 network_mode (str): networking mode for the run commands during 101 build 102 squash (bool): Squash the resulting images layers into a 103 single layer. 104 extra_hosts (dict): Extra hosts to add to /etc/hosts in building 105 containers, as a mapping of hostname to IP address. 106 107 Returns: 108 A generator for the build output. 109 110 Raises: 111 :py:class:`docker.errors.APIError` 112 If the server returns an error. 113 ``TypeError`` 114 If neither ``path`` nor ``fileobj`` is specified. 115 """ 116 remote = context = None 117 headers = {} 118 container_limits = container_limits or {} 119 if path is None and fileobj is None: 120 raise TypeError("Either path or fileobj needs to be provided.") 121 if gzip and encoding is not None: 122 raise errors.DockerException( 123 'Can not use custom encoding if gzip is enabled' 124 ) 125 126 for key in container_limits.keys(): 127 if key not in constants.CONTAINER_LIMITS_KEYS: 128 raise errors.DockerException( 129 'Invalid container_limits key {0}'.format(key) 130 ) 131 132 if custom_context: 133 if not fileobj: 134 raise TypeError("You must specify fileobj with custom_context") 135 context = fileobj 136 elif fileobj is not None: 137 context = utils.mkbuildcontext(fileobj) 138 elif path.startswith(('http://', 'https://', 139 'git://', 'github.com/', 'git@')): 140 remote = path 141 elif not os.path.isdir(path): 142 raise TypeError("You must specify a directory to build in path") 143 else: 144 dockerignore = os.path.join(path, '.dockerignore') 145 exclude = None 146 if os.path.exists(dockerignore): 147 with open(dockerignore, 'r') as f: 148 exclude = list(filter(bool, f.read().splitlines())) 149 context = utils.tar( 150 path, exclude=exclude, dockerfile=dockerfile, gzip=gzip 151 ) 152 encoding = 'gzip' if gzip else encoding 153 154 if utils.compare_version('1.8', self._version) >= 0: 155 stream = True 156 157 if dockerfile and utils.compare_version('1.17', self._version) < 0: 158 raise errors.InvalidVersion( 159 'dockerfile was only introduced in API version 1.17' 160 ) 161 162 if utils.compare_version('1.19', self._version) < 0: 163 pull = 1 if pull else 0 164 165 u = self._url('/build') 166 params = { 167 't': tag, 168 'remote': remote, 169 'q': quiet, 170 'nocache': nocache, 171 'rm': rm, 172 'forcerm': forcerm, 173 'pull': pull, 174 'dockerfile': dockerfile, 175 } 176 params.update(container_limits) 177 178 if buildargs: 179 if utils.version_gte(self._version, '1.21'): 180 params.update({'buildargs': json.dumps(buildargs)}) 181 else: 182 raise errors.InvalidVersion( 183 'buildargs was only introduced in API version 1.21' 184 ) 185 186 if shmsize: 187 if utils.version_gte(self._version, '1.22'): 188 params.update({'shmsize': shmsize}) 189 else: 190 raise errors.InvalidVersion( 191 'shmsize was only introduced in API version 1.22' 192 ) 193 194 if labels: 195 if utils.version_gte(self._version, '1.23'): 196 params.update({'labels': json.dumps(labels)}) 197 else: 198 raise errors.InvalidVersion( 199 'labels was only introduced in API version 1.23' 200 ) 201 202 if cache_from: 203 if utils.version_gte(self._version, '1.25'): 204 params.update({'cachefrom': json.dumps(cache_from)}) 205 else: 206 raise errors.InvalidVersion( 207 'cache_from was only introduced in API version 1.25' 208 ) 209 210 if target: 211 if utils.version_gte(self._version, '1.29'): 212 params.update({'target': target}) 213 else: 214 raise errors.InvalidVersion( 215 'target was only introduced in API version 1.29' 216 ) 217 218 if network_mode: 219 if utils.version_gte(self._version, '1.25'): 220 params.update({'networkmode': network_mode}) 221 else: 222 raise errors.InvalidVersion( 223 'network_mode was only introduced in API version 1.25' 224 ) 225 226 if squash: 227 if utils.version_gte(self._version, '1.25'): 228 params.update({'squash': squash}) 229 else: 230 raise errors.InvalidVersion( 231 'squash was only introduced in API version 1.25' 232 ) 233 234 if extra_hosts is not None: 235 if utils.version_lt(self._version, '1.27'): 236 raise errors.InvalidVersion( 237 'extra_hosts was only introduced in API version 1.27' 238 ) 239 240 if isinstance(extra_hosts, dict): 241 extra_hosts = utils.format_extra_hosts(extra_hosts) 242 params.update({'extrahosts': extra_hosts}) 243 244 if context is not None: 245 headers = {'Content-Type': 'application/tar'} 246 if encoding: 247 headers['Content-Encoding'] = encoding 248 249 if utils.compare_version('1.9', self._version) >= 0: 250 self._set_auth_headers(headers) 251 252 response = self._post( 253 u, 254 data=context, 255 params=params, 256 headers=headers, 257 stream=stream, 258 timeout=timeout, 259 ) 260 261 if context is not None and not custom_context: 262 context.close() 263 264 if stream: 265 return self._stream_helper(response, decode=decode) 266 else: 267 output = self._result(response) 268 srch = r'Successfully built ([0-9a-f]+)' 269 match = re.search(srch, output) 270 if not match: 271 return None, output 272 return match.group(1), output 273 274 def _set_auth_headers(self, headers): 275 log.debug('Looking for auth config') 276 277 # If we don't have any auth data so far, try reloading the config 278 # file one more time in case anything showed up in there. 279 if not self._auth_configs: 280 log.debug("No auth config in memory - loading from filesystem") 281 self._auth_configs = auth.load_config() 282 283 # Send the full auth configuration (if any exists), since the build 284 # could use any (or all) of the registries. 285 if self._auth_configs: 286 auth_data = {} 287 if self._auth_configs.get('credsStore'): 288 # Using a credentials store, we need to retrieve the 289 # credentials for each registry listed in the config.json file 290 # Matches CLI behavior: https://github.com/docker/docker/blob/ 291 # 67b85f9d26f1b0b2b240f2d794748fac0f45243c/cliconfig/ 292 # credentials/native_store.go#L68-L83 293 for registry in self._auth_configs.keys(): 294 if registry == 'credsStore' or registry == 'HttpHeaders': 295 continue 296 auth_data[registry] = auth.resolve_authconfig( 297 self._auth_configs, registry 298 ) 299 else: 300 auth_data = self._auth_configs.copy() 301 # See https://github.com/docker/docker-py/issues/1683 302 if auth.INDEX_NAME in auth_data: 303 auth_data[auth.INDEX_URL] = auth_data[auth.INDEX_NAME] 304 305 log.debug( 306 'Sending auth config ({0})'.format( 307 ', '.join(repr(k) for k in auth_data.keys()) 308 ) 309 ) 310 311 if utils.compare_version('1.19', self._version) >= 0: 312 headers['X-Registry-Config'] = auth.encode_header( 313 auth_data 314 ) 315 else: 316 headers['X-Registry-Config'] = auth.encode_header({ 317 'configs': auth_data 318 }) 319 else: 320 log.debug('No auth config found') 321 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docker/api/build.py b/docker/api/build.py --- a/docker/api/build.py +++ b/docker/api/build.py @@ -145,7 +145,9 @@ exclude = None if os.path.exists(dockerignore): with open(dockerignore, 'r') as f: - exclude = list(filter(bool, f.read().splitlines())) + exclude = list(filter( + bool, [l.strip() for l in f.read().splitlines()] + )) context = utils.tar( path, exclude=exclude, dockerfile=dockerfile, gzip=gzip )
{"golden_diff": "diff --git a/docker/api/build.py b/docker/api/build.py\n--- a/docker/api/build.py\n+++ b/docker/api/build.py\n@@ -145,7 +145,9 @@\n exclude = None\n if os.path.exists(dockerignore):\n with open(dockerignore, 'r') as f:\n- exclude = list(filter(bool, f.read().splitlines()))\n+ exclude = list(filter(\n+ bool, [l.strip() for l in f.read().splitlines()]\n+ ))\n context = utils.tar(\n path, exclude=exclude, dockerfile=dockerfile, gzip=gzip\n )\n", "issue": ".dockerignore file with space-only lines breaks build on Windows\nOriginally reported in https://github.com/docker/compose/issues/5526\n", "before_files": [{"content": "import json\nimport logging\nimport os\nimport re\n\nfrom .. import auth\nfrom .. import constants\nfrom .. import errors\nfrom .. import utils\n\n\nlog = logging.getLogger(__name__)\n\n\nclass BuildApiMixin(object):\n def build(self, path=None, tag=None, quiet=False, fileobj=None,\n nocache=False, rm=False, stream=False, timeout=None,\n custom_context=False, encoding=None, pull=False,\n forcerm=False, dockerfile=None, container_limits=None,\n decode=False, buildargs=None, gzip=False, shmsize=None,\n labels=None, cache_from=None, target=None, network_mode=None,\n squash=None, extra_hosts=None):\n \"\"\"\n Similar to the ``docker build`` command. Either ``path`` or ``fileobj``\n needs to be set. ``path`` can be a local path (to a directory\n containing a Dockerfile) or a remote URL. ``fileobj`` must be a\n readable file-like object to a Dockerfile.\n\n If you have a tar file for the Docker build context (including a\n Dockerfile) already, pass a readable file-like object to ``fileobj``\n and also pass ``custom_context=True``. If the stream is compressed\n also, set ``encoding`` to the correct value (e.g ``gzip``).\n\n Example:\n >>> from io import BytesIO\n >>> from docker import APIClient\n >>> dockerfile = '''\n ... # Shared Volume\n ... FROM busybox:buildroot-2014.02\n ... VOLUME /data\n ... CMD [\"/bin/sh\"]\n ... '''\n >>> f = BytesIO(dockerfile.encode('utf-8'))\n >>> cli = APIClient(base_url='tcp://127.0.0.1:2375')\n >>> response = [line for line in cli.build(\n ... fileobj=f, rm=True, tag='yourname/volume'\n ... )]\n >>> response\n ['{\"stream\":\" ---\\\\u003e a9eb17255234\\\\n\"}',\n '{\"stream\":\"Step 1 : VOLUME /data\\\\n\"}',\n '{\"stream\":\" ---\\\\u003e Running in abdc1e6896c6\\\\n\"}',\n '{\"stream\":\" ---\\\\u003e 713bca62012e\\\\n\"}',\n '{\"stream\":\"Removing intermediate container abdc1e6896c6\\\\n\"}',\n '{\"stream\":\"Step 2 : CMD [\\\\\"/bin/sh\\\\\"]\\\\n\"}',\n '{\"stream\":\" ---\\\\u003e Running in dba30f2a1a7e\\\\n\"}',\n '{\"stream\":\" ---\\\\u003e 032b8b2855fc\\\\n\"}',\n '{\"stream\":\"Removing intermediate container dba30f2a1a7e\\\\n\"}',\n '{\"stream\":\"Successfully built 032b8b2855fc\\\\n\"}']\n\n Args:\n path (str): Path to the directory containing the Dockerfile\n fileobj: A file object to use as the Dockerfile. (Or a file-like\n object)\n tag (str): A tag to add to the final image\n quiet (bool): Whether to return the status\n nocache (bool): Don't use the cache when set to ``True``\n rm (bool): Remove intermediate containers. The ``docker build``\n command now defaults to ``--rm=true``, but we have kept the old\n default of `False` to preserve backward compatibility\n stream (bool): *Deprecated for API version > 1.8 (always True)*.\n Return a blocking generator you can iterate over to retrieve\n build output as it happens\n timeout (int): HTTP timeout\n custom_context (bool): Optional if using ``fileobj``\n encoding (str): The encoding for a stream. Set to ``gzip`` for\n compressing\n pull (bool): Downloads any updates to the FROM image in Dockerfiles\n forcerm (bool): Always remove intermediate containers, even after\n unsuccessful builds\n dockerfile (str): path within the build context to the Dockerfile\n buildargs (dict): A dictionary of build arguments\n container_limits (dict): A dictionary of limits applied to each\n container created by the build process. Valid keys:\n\n - memory (int): set memory limit for build\n - memswap (int): Total memory (memory + swap), -1 to disable\n swap\n - cpushares (int): CPU shares (relative weight)\n - cpusetcpus (str): CPUs in which to allow execution, e.g.,\n ``\"0-3\"``, ``\"0,1\"``\n decode (bool): If set to ``True``, the returned stream will be\n decoded into dicts on the fly. Default ``False``\n shmsize (int): Size of `/dev/shm` in bytes. The size must be\n greater than 0. If omitted the system uses 64MB\n labels (dict): A dictionary of labels to set on the image\n cache_from (:py:class:`list`): A list of images used for build\n cache resolution\n target (str): Name of the build-stage to build in a multi-stage\n Dockerfile\n network_mode (str): networking mode for the run commands during\n build\n squash (bool): Squash the resulting images layers into a\n single layer.\n extra_hosts (dict): Extra hosts to add to /etc/hosts in building\n containers, as a mapping of hostname to IP address.\n\n Returns:\n A generator for the build output.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n ``TypeError``\n If neither ``path`` nor ``fileobj`` is specified.\n \"\"\"\n remote = context = None\n headers = {}\n container_limits = container_limits or {}\n if path is None and fileobj is None:\n raise TypeError(\"Either path or fileobj needs to be provided.\")\n if gzip and encoding is not None:\n raise errors.DockerException(\n 'Can not use custom encoding if gzip is enabled'\n )\n\n for key in container_limits.keys():\n if key not in constants.CONTAINER_LIMITS_KEYS:\n raise errors.DockerException(\n 'Invalid container_limits key {0}'.format(key)\n )\n\n if custom_context:\n if not fileobj:\n raise TypeError(\"You must specify fileobj with custom_context\")\n context = fileobj\n elif fileobj is not None:\n context = utils.mkbuildcontext(fileobj)\n elif path.startswith(('http://', 'https://',\n 'git://', 'github.com/', 'git@')):\n remote = path\n elif not os.path.isdir(path):\n raise TypeError(\"You must specify a directory to build in path\")\n else:\n dockerignore = os.path.join(path, '.dockerignore')\n exclude = None\n if os.path.exists(dockerignore):\n with open(dockerignore, 'r') as f:\n exclude = list(filter(bool, f.read().splitlines()))\n context = utils.tar(\n path, exclude=exclude, dockerfile=dockerfile, gzip=gzip\n )\n encoding = 'gzip' if gzip else encoding\n\n if utils.compare_version('1.8', self._version) >= 0:\n stream = True\n\n if dockerfile and utils.compare_version('1.17', self._version) < 0:\n raise errors.InvalidVersion(\n 'dockerfile was only introduced in API version 1.17'\n )\n\n if utils.compare_version('1.19', self._version) < 0:\n pull = 1 if pull else 0\n\n u = self._url('/build')\n params = {\n 't': tag,\n 'remote': remote,\n 'q': quiet,\n 'nocache': nocache,\n 'rm': rm,\n 'forcerm': forcerm,\n 'pull': pull,\n 'dockerfile': dockerfile,\n }\n params.update(container_limits)\n\n if buildargs:\n if utils.version_gte(self._version, '1.21'):\n params.update({'buildargs': json.dumps(buildargs)})\n else:\n raise errors.InvalidVersion(\n 'buildargs was only introduced in API version 1.21'\n )\n\n if shmsize:\n if utils.version_gte(self._version, '1.22'):\n params.update({'shmsize': shmsize})\n else:\n raise errors.InvalidVersion(\n 'shmsize was only introduced in API version 1.22'\n )\n\n if labels:\n if utils.version_gte(self._version, '1.23'):\n params.update({'labels': json.dumps(labels)})\n else:\n raise errors.InvalidVersion(\n 'labels was only introduced in API version 1.23'\n )\n\n if cache_from:\n if utils.version_gte(self._version, '1.25'):\n params.update({'cachefrom': json.dumps(cache_from)})\n else:\n raise errors.InvalidVersion(\n 'cache_from was only introduced in API version 1.25'\n )\n\n if target:\n if utils.version_gte(self._version, '1.29'):\n params.update({'target': target})\n else:\n raise errors.InvalidVersion(\n 'target was only introduced in API version 1.29'\n )\n\n if network_mode:\n if utils.version_gte(self._version, '1.25'):\n params.update({'networkmode': network_mode})\n else:\n raise errors.InvalidVersion(\n 'network_mode was only introduced in API version 1.25'\n )\n\n if squash:\n if utils.version_gte(self._version, '1.25'):\n params.update({'squash': squash})\n else:\n raise errors.InvalidVersion(\n 'squash was only introduced in API version 1.25'\n )\n\n if extra_hosts is not None:\n if utils.version_lt(self._version, '1.27'):\n raise errors.InvalidVersion(\n 'extra_hosts was only introduced in API version 1.27'\n )\n\n if isinstance(extra_hosts, dict):\n extra_hosts = utils.format_extra_hosts(extra_hosts)\n params.update({'extrahosts': extra_hosts})\n\n if context is not None:\n headers = {'Content-Type': 'application/tar'}\n if encoding:\n headers['Content-Encoding'] = encoding\n\n if utils.compare_version('1.9', self._version) >= 0:\n self._set_auth_headers(headers)\n\n response = self._post(\n u,\n data=context,\n params=params,\n headers=headers,\n stream=stream,\n timeout=timeout,\n )\n\n if context is not None and not custom_context:\n context.close()\n\n if stream:\n return self._stream_helper(response, decode=decode)\n else:\n output = self._result(response)\n srch = r'Successfully built ([0-9a-f]+)'\n match = re.search(srch, output)\n if not match:\n return None, output\n return match.group(1), output\n\n def _set_auth_headers(self, headers):\n log.debug('Looking for auth config')\n\n # If we don't have any auth data so far, try reloading the config\n # file one more time in case anything showed up in there.\n if not self._auth_configs:\n log.debug(\"No auth config in memory - loading from filesystem\")\n self._auth_configs = auth.load_config()\n\n # Send the full auth configuration (if any exists), since the build\n # could use any (or all) of the registries.\n if self._auth_configs:\n auth_data = {}\n if self._auth_configs.get('credsStore'):\n # Using a credentials store, we need to retrieve the\n # credentials for each registry listed in the config.json file\n # Matches CLI behavior: https://github.com/docker/docker/blob/\n # 67b85f9d26f1b0b2b240f2d794748fac0f45243c/cliconfig/\n # credentials/native_store.go#L68-L83\n for registry in self._auth_configs.keys():\n if registry == 'credsStore' or registry == 'HttpHeaders':\n continue\n auth_data[registry] = auth.resolve_authconfig(\n self._auth_configs, registry\n )\n else:\n auth_data = self._auth_configs.copy()\n # See https://github.com/docker/docker-py/issues/1683\n if auth.INDEX_NAME in auth_data:\n auth_data[auth.INDEX_URL] = auth_data[auth.INDEX_NAME]\n\n log.debug(\n 'Sending auth config ({0})'.format(\n ', '.join(repr(k) for k in auth_data.keys())\n )\n )\n\n if utils.compare_version('1.19', self._version) >= 0:\n headers['X-Registry-Config'] = auth.encode_header(\n auth_data\n )\n else:\n headers['X-Registry-Config'] = auth.encode_header({\n 'configs': auth_data\n })\n else:\n log.debug('No auth config found')\n", "path": "docker/api/build.py"}], "after_files": [{"content": "import json\nimport logging\nimport os\nimport re\n\nfrom .. import auth\nfrom .. import constants\nfrom .. import errors\nfrom .. import utils\n\n\nlog = logging.getLogger(__name__)\n\n\nclass BuildApiMixin(object):\n def build(self, path=None, tag=None, quiet=False, fileobj=None,\n nocache=False, rm=False, stream=False, timeout=None,\n custom_context=False, encoding=None, pull=False,\n forcerm=False, dockerfile=None, container_limits=None,\n decode=False, buildargs=None, gzip=False, shmsize=None,\n labels=None, cache_from=None, target=None, network_mode=None,\n squash=None, extra_hosts=None):\n \"\"\"\n Similar to the ``docker build`` command. Either ``path`` or ``fileobj``\n needs to be set. ``path`` can be a local path (to a directory\n containing a Dockerfile) or a remote URL. ``fileobj`` must be a\n readable file-like object to a Dockerfile.\n\n If you have a tar file for the Docker build context (including a\n Dockerfile) already, pass a readable file-like object to ``fileobj``\n and also pass ``custom_context=True``. If the stream is compressed\n also, set ``encoding`` to the correct value (e.g ``gzip``).\n\n Example:\n >>> from io import BytesIO\n >>> from docker import APIClient\n >>> dockerfile = '''\n ... # Shared Volume\n ... FROM busybox:buildroot-2014.02\n ... VOLUME /data\n ... CMD [\"/bin/sh\"]\n ... '''\n >>> f = BytesIO(dockerfile.encode('utf-8'))\n >>> cli = APIClient(base_url='tcp://127.0.0.1:2375')\n >>> response = [line for line in cli.build(\n ... fileobj=f, rm=True, tag='yourname/volume'\n ... )]\n >>> response\n ['{\"stream\":\" ---\\\\u003e a9eb17255234\\\\n\"}',\n '{\"stream\":\"Step 1 : VOLUME /data\\\\n\"}',\n '{\"stream\":\" ---\\\\u003e Running in abdc1e6896c6\\\\n\"}',\n '{\"stream\":\" ---\\\\u003e 713bca62012e\\\\n\"}',\n '{\"stream\":\"Removing intermediate container abdc1e6896c6\\\\n\"}',\n '{\"stream\":\"Step 2 : CMD [\\\\\"/bin/sh\\\\\"]\\\\n\"}',\n '{\"stream\":\" ---\\\\u003e Running in dba30f2a1a7e\\\\n\"}',\n '{\"stream\":\" ---\\\\u003e 032b8b2855fc\\\\n\"}',\n '{\"stream\":\"Removing intermediate container dba30f2a1a7e\\\\n\"}',\n '{\"stream\":\"Successfully built 032b8b2855fc\\\\n\"}']\n\n Args:\n path (str): Path to the directory containing the Dockerfile\n fileobj: A file object to use as the Dockerfile. (Or a file-like\n object)\n tag (str): A tag to add to the final image\n quiet (bool): Whether to return the status\n nocache (bool): Don't use the cache when set to ``True``\n rm (bool): Remove intermediate containers. The ``docker build``\n command now defaults to ``--rm=true``, but we have kept the old\n default of `False` to preserve backward compatibility\n stream (bool): *Deprecated for API version > 1.8 (always True)*.\n Return a blocking generator you can iterate over to retrieve\n build output as it happens\n timeout (int): HTTP timeout\n custom_context (bool): Optional if using ``fileobj``\n encoding (str): The encoding for a stream. Set to ``gzip`` for\n compressing\n pull (bool): Downloads any updates to the FROM image in Dockerfiles\n forcerm (bool): Always remove intermediate containers, even after\n unsuccessful builds\n dockerfile (str): path within the build context to the Dockerfile\n buildargs (dict): A dictionary of build arguments\n container_limits (dict): A dictionary of limits applied to each\n container created by the build process. Valid keys:\n\n - memory (int): set memory limit for build\n - memswap (int): Total memory (memory + swap), -1 to disable\n swap\n - cpushares (int): CPU shares (relative weight)\n - cpusetcpus (str): CPUs in which to allow execution, e.g.,\n ``\"0-3\"``, ``\"0,1\"``\n decode (bool): If set to ``True``, the returned stream will be\n decoded into dicts on the fly. Default ``False``\n shmsize (int): Size of `/dev/shm` in bytes. The size must be\n greater than 0. If omitted the system uses 64MB\n labels (dict): A dictionary of labels to set on the image\n cache_from (:py:class:`list`): A list of images used for build\n cache resolution\n target (str): Name of the build-stage to build in a multi-stage\n Dockerfile\n network_mode (str): networking mode for the run commands during\n build\n squash (bool): Squash the resulting images layers into a\n single layer.\n extra_hosts (dict): Extra hosts to add to /etc/hosts in building\n containers, as a mapping of hostname to IP address.\n\n Returns:\n A generator for the build output.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n ``TypeError``\n If neither ``path`` nor ``fileobj`` is specified.\n \"\"\"\n remote = context = None\n headers = {}\n container_limits = container_limits or {}\n if path is None and fileobj is None:\n raise TypeError(\"Either path or fileobj needs to be provided.\")\n if gzip and encoding is not None:\n raise errors.DockerException(\n 'Can not use custom encoding if gzip is enabled'\n )\n\n for key in container_limits.keys():\n if key not in constants.CONTAINER_LIMITS_KEYS:\n raise errors.DockerException(\n 'Invalid container_limits key {0}'.format(key)\n )\n\n if custom_context:\n if not fileobj:\n raise TypeError(\"You must specify fileobj with custom_context\")\n context = fileobj\n elif fileobj is not None:\n context = utils.mkbuildcontext(fileobj)\n elif path.startswith(('http://', 'https://',\n 'git://', 'github.com/', 'git@')):\n remote = path\n elif not os.path.isdir(path):\n raise TypeError(\"You must specify a directory to build in path\")\n else:\n dockerignore = os.path.join(path, '.dockerignore')\n exclude = None\n if os.path.exists(dockerignore):\n with open(dockerignore, 'r') as f:\n exclude = list(filter(\n bool, [l.strip() for l in f.read().splitlines()]\n ))\n context = utils.tar(\n path, exclude=exclude, dockerfile=dockerfile, gzip=gzip\n )\n encoding = 'gzip' if gzip else encoding\n\n if utils.compare_version('1.8', self._version) >= 0:\n stream = True\n\n if dockerfile and utils.compare_version('1.17', self._version) < 0:\n raise errors.InvalidVersion(\n 'dockerfile was only introduced in API version 1.17'\n )\n\n if utils.compare_version('1.19', self._version) < 0:\n pull = 1 if pull else 0\n\n u = self._url('/build')\n params = {\n 't': tag,\n 'remote': remote,\n 'q': quiet,\n 'nocache': nocache,\n 'rm': rm,\n 'forcerm': forcerm,\n 'pull': pull,\n 'dockerfile': dockerfile,\n }\n params.update(container_limits)\n\n if buildargs:\n if utils.version_gte(self._version, '1.21'):\n params.update({'buildargs': json.dumps(buildargs)})\n else:\n raise errors.InvalidVersion(\n 'buildargs was only introduced in API version 1.21'\n )\n\n if shmsize:\n if utils.version_gte(self._version, '1.22'):\n params.update({'shmsize': shmsize})\n else:\n raise errors.InvalidVersion(\n 'shmsize was only introduced in API version 1.22'\n )\n\n if labels:\n if utils.version_gte(self._version, '1.23'):\n params.update({'labels': json.dumps(labels)})\n else:\n raise errors.InvalidVersion(\n 'labels was only introduced in API version 1.23'\n )\n\n if cache_from:\n if utils.version_gte(self._version, '1.25'):\n params.update({'cachefrom': json.dumps(cache_from)})\n else:\n raise errors.InvalidVersion(\n 'cache_from was only introduced in API version 1.25'\n )\n\n if target:\n if utils.version_gte(self._version, '1.29'):\n params.update({'target': target})\n else:\n raise errors.InvalidVersion(\n 'target was only introduced in API version 1.29'\n )\n\n if network_mode:\n if utils.version_gte(self._version, '1.25'):\n params.update({'networkmode': network_mode})\n else:\n raise errors.InvalidVersion(\n 'network_mode was only introduced in API version 1.25'\n )\n\n if squash:\n if utils.version_gte(self._version, '1.25'):\n params.update({'squash': squash})\n else:\n raise errors.InvalidVersion(\n 'squash was only introduced in API version 1.25'\n )\n\n if extra_hosts is not None:\n if utils.version_lt(self._version, '1.27'):\n raise errors.InvalidVersion(\n 'extra_hosts was only introduced in API version 1.27'\n )\n\n if isinstance(extra_hosts, dict):\n extra_hosts = utils.format_extra_hosts(extra_hosts)\n params.update({'extrahosts': extra_hosts})\n\n if context is not None:\n headers = {'Content-Type': 'application/tar'}\n if encoding:\n headers['Content-Encoding'] = encoding\n\n if utils.compare_version('1.9', self._version) >= 0:\n self._set_auth_headers(headers)\n\n response = self._post(\n u,\n data=context,\n params=params,\n headers=headers,\n stream=stream,\n timeout=timeout,\n )\n\n if context is not None and not custom_context:\n context.close()\n\n if stream:\n return self._stream_helper(response, decode=decode)\n else:\n output = self._result(response)\n srch = r'Successfully built ([0-9a-f]+)'\n match = re.search(srch, output)\n if not match:\n return None, output\n return match.group(1), output\n\n def _set_auth_headers(self, headers):\n log.debug('Looking for auth config')\n\n # If we don't have any auth data so far, try reloading the config\n # file one more time in case anything showed up in there.\n if not self._auth_configs:\n log.debug(\"No auth config in memory - loading from filesystem\")\n self._auth_configs = auth.load_config()\n\n # Send the full auth configuration (if any exists), since the build\n # could use any (or all) of the registries.\n if self._auth_configs:\n auth_data = {}\n if self._auth_configs.get('credsStore'):\n # Using a credentials store, we need to retrieve the\n # credentials for each registry listed in the config.json file\n # Matches CLI behavior: https://github.com/docker/docker/blob/\n # 67b85f9d26f1b0b2b240f2d794748fac0f45243c/cliconfig/\n # credentials/native_store.go#L68-L83\n for registry in self._auth_configs.keys():\n if registry == 'credsStore' or registry == 'HttpHeaders':\n continue\n auth_data[registry] = auth.resolve_authconfig(\n self._auth_configs, registry\n )\n else:\n auth_data = self._auth_configs.copy()\n # See https://github.com/docker/docker-py/issues/1683\n if auth.INDEX_NAME in auth_data:\n auth_data[auth.INDEX_URL] = auth_data[auth.INDEX_NAME]\n\n log.debug(\n 'Sending auth config ({0})'.format(\n ', '.join(repr(k) for k in auth_data.keys())\n )\n )\n\n if utils.compare_version('1.19', self._version) >= 0:\n headers['X-Registry-Config'] = auth.encode_header(\n auth_data\n )\n else:\n headers['X-Registry-Config'] = auth.encode_header({\n 'configs': auth_data\n })\n else:\n log.debug('No auth config found')\n", "path": "docker/api/build.py"}]}
4,058
135
gh_patches_debug_27273
rasdani/github-patches
git_diff
ManimCommunity__manim-652
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Making test for SVGMobject Here, I have some test and folder structure for SVGMobject and ImageMobject. The images and svgs for the test can be found in a new folder called img_svg_resources Depends on #650 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `manim/mobject/types/image_mobject.py` Content: ``` 1 """Mobjects representing raster images.""" 2 3 __all__ = ["AbstractImageMobject", "ImageMobject", "ImageMobjectFromCamera"] 4 5 import pathlib 6 7 import numpy as np 8 9 from PIL import Image 10 11 from ... import config 12 from ...constants import * 13 from ...mobject.mobject import Mobject 14 from ...mobject.shape_matchers import SurroundingRectangle 15 from ...utils.bezier import interpolate 16 from ...utils.color import color_to_int_rgb, WHITE 17 from ...utils.config_ops import digest_config 18 from ...utils.images import get_full_raster_image_path 19 from manim.constants import QUALITIES, DEFAULT_QUALITY 20 21 22 class AbstractImageMobject(Mobject): 23 """ 24 Automatically filters out black pixels 25 26 Parameters 27 ---------- 28 scale_to_resolution : :class:`int` 29 At this resolution the image is placed pixel by pixel onto the screen, so it will look the sharpest and best. 30 This is a custom parameter of ImageMobject so that rendering a scene with the `--quality low` flag for faster rendering and testing won't effect the position of the image on the screen. 31 Calculated by `height = image_height / scale_to_resolution * config["frame_height"]` 32 """ 33 34 CONFIG = { 35 "pixel_array_dtype": "uint8", 36 } 37 38 def __init__(self, scale_to_resolution, **kwargs): 39 digest_config(self, kwargs) 40 self.scale_to_resolution = scale_to_resolution 41 42 Mobject.__init__(self, **kwargs) 43 44 def get_pixel_array(self): 45 raise NotImplementedError() 46 47 def set_color(self): 48 # Likely to be implemented in subclasses, but no obgligation 49 pass 50 51 def reset_points(self): 52 # Corresponding corners of image are fixed to these 3 points 53 self.points = np.array( 54 [ 55 UP + LEFT, 56 UP + RIGHT, 57 DOWN + LEFT, 58 ] 59 ) 60 self.center() 61 h, w = self.get_pixel_array().shape[:2] 62 if self.scale_to_resolution: 63 self.height = h / self.scale_to_resolution * config["frame_height"] 64 else: 65 self.height = 3 ## this is the case for ImageMobjectFromCamera 66 self.stretch_to_fit_height(self.height) 67 self.stretch_to_fit_width(self.height * w / h) 68 69 70 class ImageMobject(AbstractImageMobject): 71 """Displays an Image from a numpy array or a file. 72 73 Parameters 74 ---------- 75 scale_to_resolution : :class:`int` 76 At this resolution the image is placed pixel by pixel onto the screen, so it will look the sharpest and best. 77 This is a custom parameter of ImageMobject so that rendering a scene with the `--quality low` flag for faster rendering and testing won't effect the position of the image on the screen. 78 Calculated by `height = image_height / scale_to_resolution * config["frame_height"]` 79 80 81 82 Example 83 ------- 84 .. manim:: ImageFromArray 85 :save_last_frame: 86 87 class ImageFromArray(Scene): 88 def construct(self): 89 image = ImageMobject(np.uint8([[0, 100, 30, 200], 90 [255, 0, 5, 33]])) 91 image.set_height(7) 92 self.add(image) 93 94 """ 95 96 CONFIG = { 97 "invert": False, 98 "image_mode": "RGBA", 99 } 100 101 def __init__( 102 self, 103 filename_or_array, 104 scale_to_resolution=QUALITIES[DEFAULT_QUALITY]["pixel_height"], 105 **kwargs, 106 ): 107 digest_config(self, kwargs) 108 if isinstance(filename_or_array, (str, pathlib.PurePath)): 109 path = get_full_raster_image_path(filename_or_array) 110 image = Image.open(path).convert(self.image_mode) 111 self.pixel_array = np.array(image) 112 else: 113 self.pixel_array = np.array(filename_or_array) 114 self.change_to_rgba_array() 115 if self.invert: 116 self.pixel_array[:, :, :3] = 255 - self.pixel_array[:, :, :3] 117 AbstractImageMobject.__init__(self, scale_to_resolution, **kwargs) 118 119 def change_to_rgba_array(self): 120 pa = self.pixel_array 121 if len(pa.shape) == 2: 122 pa = pa.reshape(list(pa.shape) + [1]) 123 if pa.shape[2] == 1: 124 pa = pa.repeat(3, axis=2) 125 if pa.shape[2] == 3: 126 alphas = 255 * np.ones( 127 list(pa.shape[:2]) + [1], dtype=self.pixel_array_dtype 128 ) 129 pa = np.append(pa, alphas, axis=2) 130 self.pixel_array = pa 131 132 def get_pixel_array(self): 133 return self.pixel_array 134 135 def set_color(self, color, alpha=None, family=True): 136 rgb = color_to_int_rgb(color) 137 self.pixel_array[:, :, :3] = rgb 138 if alpha is not None: 139 self.pixel_array[:, :, 3] = int(255 * alpha) 140 for submob in self.submobjects: 141 submob.set_color(color, alpha, family) 142 self.color = color 143 return self 144 145 def set_opacity(self, alpha): 146 self.pixel_array[:, :, 3] = int(255 * alpha) 147 return self 148 149 def fade(self, darkness=0.5, family=True): 150 self.set_opacity(1 - darkness) 151 super().fade(darkness, family) 152 return self 153 154 def interpolate_color(self, mobject1, mobject2, alpha): 155 assert mobject1.pixel_array.shape == mobject2.pixel_array.shape, ( 156 f"Mobject pixel array shapes incompatible for interpolation.\n" 157 f"Mobject 1 ({mobject1}) : {mobject1.pixel_array.shape}\n" 158 f"Mobject 2 ({mobject2}) : {mobject1.pixel_array.shape}" 159 ) 160 self.pixel_array = interpolate( 161 mobject1.pixel_array, mobject2.pixel_array, alpha 162 ).astype(self.pixel_array_dtype) 163 164 165 # TODO, add the ability to have the dimensions/orientation of this 166 # mobject more strongly tied to the frame of the camera it contains, 167 # in the case where that's a MovingCamera 168 169 170 class ImageMobjectFromCamera(AbstractImageMobject): 171 CONFIG = { 172 "default_display_frame_config": { 173 "stroke_width": 3, 174 "stroke_color": WHITE, 175 "buff": 0, 176 } 177 } 178 179 def __init__(self, camera, **kwargs): 180 self.camera = camera 181 self.pixel_array = self.camera.pixel_array 182 AbstractImageMobject.__init__(self, scale_to_resolution=False, **kwargs) 183 184 # TODO: Get rid of this. 185 def get_pixel_array(self): 186 self.pixel_array = self.camera.pixel_array 187 return self.pixel_array 188 189 def add_display_frame(self, **kwargs): 190 config = dict(self.default_display_frame_config) 191 config.update(kwargs) 192 self.display_frame = SurroundingRectangle(self, **config) 193 self.add(self.display_frame) 194 return self 195 196 def interpolate_color(self, mobject1, mobject2, alpha): 197 assert mobject1.pixel_array.shape == mobject2.pixel_array.shape, ( 198 f"Mobject pixel array shapes incompatible for interpolation.\n" 199 f"Mobject 1 ({mobject1}) : {mobject1.pixel_array.shape}\n" 200 f"Mobject 2 ({mobject2}) : {mobject1.pixel_array.shape}" 201 ) 202 self.pixel_array = interpolate( 203 mobject1.pixel_array, mobject2.pixel_array, alpha 204 ).astype(self.pixel_array_dtype) 205 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/manim/mobject/types/image_mobject.py b/manim/mobject/types/image_mobject.py --- a/manim/mobject/types/image_mobject.py +++ b/manim/mobject/types/image_mobject.py @@ -27,8 +27,7 @@ ---------- scale_to_resolution : :class:`int` At this resolution the image is placed pixel by pixel onto the screen, so it will look the sharpest and best. - This is a custom parameter of ImageMobject so that rendering a scene with the `--quality low` flag for faster rendering and testing won't effect the position of the image on the screen. - Calculated by `height = image_height / scale_to_resolution * config["frame_height"]` + This is a custom parameter of ImageMobject so that rendering a scene with e.g. the ``--quality low`` or ``--quality medium`` flag for faster rendering won't effect the position of the image on the screen. """ CONFIG = { @@ -74,8 +73,7 @@ ---------- scale_to_resolution : :class:`int` At this resolution the image is placed pixel by pixel onto the screen, so it will look the sharpest and best. - This is a custom parameter of ImageMobject so that rendering a scene with the `--quality low` flag for faster rendering and testing won't effect the position of the image on the screen. - Calculated by `height = image_height / scale_to_resolution * config["frame_height"]` + This is a custom parameter of ImageMobject so that rendering a scene with e.g. the ``--quality low`` or ``--quality medium`` flag for faster rendering won't effect the position of the image on the screen.
{"golden_diff": "diff --git a/manim/mobject/types/image_mobject.py b/manim/mobject/types/image_mobject.py\n--- a/manim/mobject/types/image_mobject.py\n+++ b/manim/mobject/types/image_mobject.py\n@@ -27,8 +27,7 @@\n ----------\n scale_to_resolution : :class:`int`\n At this resolution the image is placed pixel by pixel onto the screen, so it will look the sharpest and best.\n- This is a custom parameter of ImageMobject so that rendering a scene with the `--quality low` flag for faster rendering and testing won't effect the position of the image on the screen.\n- Calculated by `height = image_height / scale_to_resolution * config[\"frame_height\"]`\n+ This is a custom parameter of ImageMobject so that rendering a scene with e.g. the ``--quality low`` or ``--quality medium`` flag for faster rendering won't effect the position of the image on the screen.\n \"\"\"\n \n CONFIG = {\n@@ -74,8 +73,7 @@\n ----------\n scale_to_resolution : :class:`int`\n At this resolution the image is placed pixel by pixel onto the screen, so it will look the sharpest and best.\n- This is a custom parameter of ImageMobject so that rendering a scene with the `--quality low` flag for faster rendering and testing won't effect the position of the image on the screen.\n- Calculated by `height = image_height / scale_to_resolution * config[\"frame_height\"]`\n+ This is a custom parameter of ImageMobject so that rendering a scene with e.g. the ``--quality low`` or ``--quality medium`` flag for faster rendering won't effect the position of the image on the screen.\n", "issue": "Making test for SVGMobject\nHere, I have some test and folder structure for SVGMobject and ImageMobject.\r\nThe images and svgs for the test can be found in a new folder called img_svg_resources\r\n\r\nDepends on #650 \n", "before_files": [{"content": "\"\"\"Mobjects representing raster images.\"\"\"\n\n__all__ = [\"AbstractImageMobject\", \"ImageMobject\", \"ImageMobjectFromCamera\"]\n\nimport pathlib\n\nimport numpy as np\n\nfrom PIL import Image\n\nfrom ... import config\nfrom ...constants import *\nfrom ...mobject.mobject import Mobject\nfrom ...mobject.shape_matchers import SurroundingRectangle\nfrom ...utils.bezier import interpolate\nfrom ...utils.color import color_to_int_rgb, WHITE\nfrom ...utils.config_ops import digest_config\nfrom ...utils.images import get_full_raster_image_path\nfrom manim.constants import QUALITIES, DEFAULT_QUALITY\n\n\nclass AbstractImageMobject(Mobject):\n \"\"\"\n Automatically filters out black pixels\n\n Parameters\n ----------\n scale_to_resolution : :class:`int`\n At this resolution the image is placed pixel by pixel onto the screen, so it will look the sharpest and best.\n This is a custom parameter of ImageMobject so that rendering a scene with the `--quality low` flag for faster rendering and testing won't effect the position of the image on the screen.\n Calculated by `height = image_height / scale_to_resolution * config[\"frame_height\"]`\n \"\"\"\n\n CONFIG = {\n \"pixel_array_dtype\": \"uint8\",\n }\n\n def __init__(self, scale_to_resolution, **kwargs):\n digest_config(self, kwargs)\n self.scale_to_resolution = scale_to_resolution\n\n Mobject.__init__(self, **kwargs)\n\n def get_pixel_array(self):\n raise NotImplementedError()\n\n def set_color(self):\n # Likely to be implemented in subclasses, but no obgligation\n pass\n\n def reset_points(self):\n # Corresponding corners of image are fixed to these 3 points\n self.points = np.array(\n [\n UP + LEFT,\n UP + RIGHT,\n DOWN + LEFT,\n ]\n )\n self.center()\n h, w = self.get_pixel_array().shape[:2]\n if self.scale_to_resolution:\n self.height = h / self.scale_to_resolution * config[\"frame_height\"]\n else:\n self.height = 3 ## this is the case for ImageMobjectFromCamera\n self.stretch_to_fit_height(self.height)\n self.stretch_to_fit_width(self.height * w / h)\n\n\nclass ImageMobject(AbstractImageMobject):\n \"\"\"Displays an Image from a numpy array or a file.\n\n Parameters\n ----------\n scale_to_resolution : :class:`int`\n At this resolution the image is placed pixel by pixel onto the screen, so it will look the sharpest and best.\n This is a custom parameter of ImageMobject so that rendering a scene with the `--quality low` flag for faster rendering and testing won't effect the position of the image on the screen.\n Calculated by `height = image_height / scale_to_resolution * config[\"frame_height\"]`\n\n\n\n Example\n -------\n .. manim:: ImageFromArray\n :save_last_frame:\n\n class ImageFromArray(Scene):\n def construct(self):\n image = ImageMobject(np.uint8([[0, 100, 30, 200],\n [255, 0, 5, 33]]))\n image.set_height(7)\n self.add(image)\n\n \"\"\"\n\n CONFIG = {\n \"invert\": False,\n \"image_mode\": \"RGBA\",\n }\n\n def __init__(\n self,\n filename_or_array,\n scale_to_resolution=QUALITIES[DEFAULT_QUALITY][\"pixel_height\"],\n **kwargs,\n ):\n digest_config(self, kwargs)\n if isinstance(filename_or_array, (str, pathlib.PurePath)):\n path = get_full_raster_image_path(filename_or_array)\n image = Image.open(path).convert(self.image_mode)\n self.pixel_array = np.array(image)\n else:\n self.pixel_array = np.array(filename_or_array)\n self.change_to_rgba_array()\n if self.invert:\n self.pixel_array[:, :, :3] = 255 - self.pixel_array[:, :, :3]\n AbstractImageMobject.__init__(self, scale_to_resolution, **kwargs)\n\n def change_to_rgba_array(self):\n pa = self.pixel_array\n if len(pa.shape) == 2:\n pa = pa.reshape(list(pa.shape) + [1])\n if pa.shape[2] == 1:\n pa = pa.repeat(3, axis=2)\n if pa.shape[2] == 3:\n alphas = 255 * np.ones(\n list(pa.shape[:2]) + [1], dtype=self.pixel_array_dtype\n )\n pa = np.append(pa, alphas, axis=2)\n self.pixel_array = pa\n\n def get_pixel_array(self):\n return self.pixel_array\n\n def set_color(self, color, alpha=None, family=True):\n rgb = color_to_int_rgb(color)\n self.pixel_array[:, :, :3] = rgb\n if alpha is not None:\n self.pixel_array[:, :, 3] = int(255 * alpha)\n for submob in self.submobjects:\n submob.set_color(color, alpha, family)\n self.color = color\n return self\n\n def set_opacity(self, alpha):\n self.pixel_array[:, :, 3] = int(255 * alpha)\n return self\n\n def fade(self, darkness=0.5, family=True):\n self.set_opacity(1 - darkness)\n super().fade(darkness, family)\n return self\n\n def interpolate_color(self, mobject1, mobject2, alpha):\n assert mobject1.pixel_array.shape == mobject2.pixel_array.shape, (\n f\"Mobject pixel array shapes incompatible for interpolation.\\n\"\n f\"Mobject 1 ({mobject1}) : {mobject1.pixel_array.shape}\\n\"\n f\"Mobject 2 ({mobject2}) : {mobject1.pixel_array.shape}\"\n )\n self.pixel_array = interpolate(\n mobject1.pixel_array, mobject2.pixel_array, alpha\n ).astype(self.pixel_array_dtype)\n\n\n# TODO, add the ability to have the dimensions/orientation of this\n# mobject more strongly tied to the frame of the camera it contains,\n# in the case where that's a MovingCamera\n\n\nclass ImageMobjectFromCamera(AbstractImageMobject):\n CONFIG = {\n \"default_display_frame_config\": {\n \"stroke_width\": 3,\n \"stroke_color\": WHITE,\n \"buff\": 0,\n }\n }\n\n def __init__(self, camera, **kwargs):\n self.camera = camera\n self.pixel_array = self.camera.pixel_array\n AbstractImageMobject.__init__(self, scale_to_resolution=False, **kwargs)\n\n # TODO: Get rid of this.\n def get_pixel_array(self):\n self.pixel_array = self.camera.pixel_array\n return self.pixel_array\n\n def add_display_frame(self, **kwargs):\n config = dict(self.default_display_frame_config)\n config.update(kwargs)\n self.display_frame = SurroundingRectangle(self, **config)\n self.add(self.display_frame)\n return self\n\n def interpolate_color(self, mobject1, mobject2, alpha):\n assert mobject1.pixel_array.shape == mobject2.pixel_array.shape, (\n f\"Mobject pixel array shapes incompatible for interpolation.\\n\"\n f\"Mobject 1 ({mobject1}) : {mobject1.pixel_array.shape}\\n\"\n f\"Mobject 2 ({mobject2}) : {mobject1.pixel_array.shape}\"\n )\n self.pixel_array = interpolate(\n mobject1.pixel_array, mobject2.pixel_array, alpha\n ).astype(self.pixel_array_dtype)\n", "path": "manim/mobject/types/image_mobject.py"}], "after_files": [{"content": "\"\"\"Mobjects representing raster images.\"\"\"\n\n__all__ = [\"AbstractImageMobject\", \"ImageMobject\", \"ImageMobjectFromCamera\"]\n\nimport pathlib\n\nimport numpy as np\n\nfrom PIL import Image\n\nfrom ... import config\nfrom ...constants import *\nfrom ...mobject.mobject import Mobject\nfrom ...mobject.shape_matchers import SurroundingRectangle\nfrom ...utils.bezier import interpolate\nfrom ...utils.color import color_to_int_rgb, WHITE\nfrom ...utils.config_ops import digest_config\nfrom ...utils.images import get_full_raster_image_path\nfrom manim.constants import QUALITIES, DEFAULT_QUALITY\n\n\nclass AbstractImageMobject(Mobject):\n \"\"\"\n Automatically filters out black pixels\n\n Parameters\n ----------\n scale_to_resolution : :class:`int`\n At this resolution the image is placed pixel by pixel onto the screen, so it will look the sharpest and best.\n This is a custom parameter of ImageMobject so that rendering a scene with e.g. the ``--quality low`` or ``--quality medium`` flag for faster rendering won't effect the position of the image on the screen.\n \"\"\"\n\n CONFIG = {\n \"pixel_array_dtype\": \"uint8\",\n }\n\n def __init__(self, scale_to_resolution, **kwargs):\n digest_config(self, kwargs)\n self.scale_to_resolution = scale_to_resolution\n\n Mobject.__init__(self, **kwargs)\n\n def get_pixel_array(self):\n raise NotImplementedError()\n\n def set_color(self):\n # Likely to be implemented in subclasses, but no obgligation\n pass\n\n def reset_points(self):\n # Corresponding corners of image are fixed to these 3 points\n self.points = np.array(\n [\n UP + LEFT,\n UP + RIGHT,\n DOWN + LEFT,\n ]\n )\n self.center()\n h, w = self.get_pixel_array().shape[:2]\n if self.scale_to_resolution:\n self.height = h / self.scale_to_resolution * config[\"frame_height\"]\n else:\n self.height = 3 ## this is the case for ImageMobjectFromCamera\n self.stretch_to_fit_height(self.height)\n self.stretch_to_fit_width(self.height * w / h)\n\n\nclass ImageMobject(AbstractImageMobject):\n \"\"\"Displays an Image from a numpy array or a file.\n\n Parameters\n ----------\n scale_to_resolution : :class:`int`\n At this resolution the image is placed pixel by pixel onto the screen, so it will look the sharpest and best.\n This is a custom parameter of ImageMobject so that rendering a scene with e.g. the ``--quality low`` or ``--quality medium`` flag for faster rendering won't effect the position of the image on the screen.\n\n\n\n Example\n -------\n .. manim:: ImageFromArray\n :save_last_frame:\n\n class ImageFromArray(Scene):\n def construct(self):\n image = ImageMobject(np.uint8([[0, 100, 30, 200],\n [255, 0, 5, 33]]))\n image.set_height(7)\n self.add(image)\n\n \"\"\"\n\n CONFIG = {\n \"invert\": False,\n \"image_mode\": \"RGBA\",\n }\n\n def __init__(\n self,\n filename_or_array,\n scale_to_resolution=QUALITIES[DEFAULT_QUALITY][\"pixel_height\"],\n **kwargs,\n ):\n digest_config(self, kwargs)\n if isinstance(filename_or_array, (str, pathlib.PurePath)):\n path = get_full_raster_image_path(filename_or_array)\n image = Image.open(path).convert(self.image_mode)\n self.pixel_array = np.array(image)\n else:\n self.pixel_array = np.array(filename_or_array)\n self.change_to_rgba_array()\n if self.invert:\n self.pixel_array[:, :, :3] = 255 - self.pixel_array[:, :, :3]\n AbstractImageMobject.__init__(self, scale_to_resolution, **kwargs)\n\n def change_to_rgba_array(self):\n pa = self.pixel_array\n if len(pa.shape) == 2:\n pa = pa.reshape(list(pa.shape) + [1])\n if pa.shape[2] == 1:\n pa = pa.repeat(3, axis=2)\n if pa.shape[2] == 3:\n alphas = 255 * np.ones(\n list(pa.shape[:2]) + [1], dtype=self.pixel_array_dtype\n )\n pa = np.append(pa, alphas, axis=2)\n self.pixel_array = pa\n\n def get_pixel_array(self):\n return self.pixel_array\n\n def set_color(self, color, alpha=None, family=True):\n rgb = color_to_int_rgb(color)\n self.pixel_array[:, :, :3] = rgb\n if alpha is not None:\n self.pixel_array[:, :, 3] = int(255 * alpha)\n for submob in self.submobjects:\n submob.set_color(color, alpha, family)\n self.color = color\n return self\n\n def set_opacity(self, alpha):\n self.pixel_array[:, :, 3] = int(255 * alpha)\n return self\n\n def fade(self, darkness=0.5, family=True):\n self.set_opacity(1 - darkness)\n super().fade(darkness, family)\n return self\n\n def interpolate_color(self, mobject1, mobject2, alpha):\n assert mobject1.pixel_array.shape == mobject2.pixel_array.shape, (\n f\"Mobject pixel array shapes incompatible for interpolation.\\n\"\n f\"Mobject 1 ({mobject1}) : {mobject1.pixel_array.shape}\\n\"\n f\"Mobject 2 ({mobject2}) : {mobject1.pixel_array.shape}\"\n )\n self.pixel_array = interpolate(\n mobject1.pixel_array, mobject2.pixel_array, alpha\n ).astype(self.pixel_array_dtype)\n\n\n# TODO, add the ability to have the dimensions/orientation of this\n# mobject more strongly tied to the frame of the camera it contains,\n# in the case where that's a MovingCamera\n\n\nclass ImageMobjectFromCamera(AbstractImageMobject):\n CONFIG = {\n \"default_display_frame_config\": {\n \"stroke_width\": 3,\n \"stroke_color\": WHITE,\n \"buff\": 0,\n }\n }\n\n def __init__(self, camera, **kwargs):\n self.camera = camera\n self.pixel_array = self.camera.pixel_array\n AbstractImageMobject.__init__(self, scale_to_resolution=False, **kwargs)\n\n # TODO: Get rid of this.\n def get_pixel_array(self):\n self.pixel_array = self.camera.pixel_array\n return self.pixel_array\n\n def add_display_frame(self, **kwargs):\n config = dict(self.default_display_frame_config)\n config.update(kwargs)\n self.display_frame = SurroundingRectangle(self, **config)\n self.add(self.display_frame)\n return self\n\n def interpolate_color(self, mobject1, mobject2, alpha):\n assert mobject1.pixel_array.shape == mobject2.pixel_array.shape, (\n f\"Mobject pixel array shapes incompatible for interpolation.\\n\"\n f\"Mobject 1 ({mobject1}) : {mobject1.pixel_array.shape}\\n\"\n f\"Mobject 2 ({mobject2}) : {mobject1.pixel_array.shape}\"\n )\n self.pixel_array = interpolate(\n mobject1.pixel_array, mobject2.pixel_array, alpha\n ).astype(self.pixel_array_dtype)\n", "path": "manim/mobject/types/image_mobject.py"}]}
2,475
373
gh_patches_debug_363
rasdani/github-patches
git_diff
mozilla__bugbug-3921
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [model:regressor] AttributeError: 'IsotonicRegressionCalibrator' object has no attribute 'n_features_in_' https://community-tc.services.mozilla.com/tasks/HncpjvKKRcSnxL_GJ8PV9A/runs/0/logs/public/logs/live.log ``` Traceback (most recent call last): File "/usr/local/bin/bugbug-train", line 8, in <module> sys.exit(main()) File "/usr/local/lib/python3.10/site-packages/scripts/trainer.py", line 141, in main retriever.go(args) File "/usr/local/lib/python3.10/site-packages/scripts/trainer.py", line 41, in go metrics = model_obj.train(limit=args.limit) File "/usr/local/lib/python3.10/site-packages/bugbug/model.py", line 418, in train logger.info("Number of features: %d", self.clf.steps[-1][1].n_features_in_) AttributeError: 'IsotonicRegressionCalibrator' object has no attribute 'n_features_in_' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bugbug/model_calibration.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # This Source Code Form is subject to the terms of the Mozilla Public 3 # License, v. 2.0. If a copy of the MPL was not distributed with this file, 4 # You can obtain one at http://mozilla.org/MPL/2.0/. 5 6 from sklearn.base import BaseEstimator, ClassifierMixin 7 from sklearn.calibration import CalibratedClassifierCV 8 from sklearn.model_selection import train_test_split 9 10 11 class IsotonicRegressionCalibrator(BaseEstimator, ClassifierMixin): 12 def __init__(self, base_clf): 13 self.base_clf = base_clf 14 self.calibrated_clf = CalibratedClassifierCV( 15 base_clf, cv="prefit", method="isotonic" 16 ) 17 18 def fit(self, X_train, y_train): 19 X_train, X_val, y_train, y_val = train_test_split( 20 X_train, y_train, test_size=0.2, random_state=42 21 ) 22 self.base_clf.fit(X_train, y_train) 23 self.calibrated_clf.fit(X_val, y_val) 24 25 def predict(self, X): 26 return self.calibrated_clf.predict(X) 27 28 def predict_proba(self, X): 29 return self.calibrated_clf.predict_proba(X) 30 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bugbug/model_calibration.py b/bugbug/model_calibration.py --- a/bugbug/model_calibration.py +++ b/bugbug/model_calibration.py @@ -27,3 +27,7 @@ def predict_proba(self, X): return self.calibrated_clf.predict_proba(X) + + @property + def n_features_in_(self): + return self.base_clf.n_features_in_
{"golden_diff": "diff --git a/bugbug/model_calibration.py b/bugbug/model_calibration.py\n--- a/bugbug/model_calibration.py\n+++ b/bugbug/model_calibration.py\n@@ -27,3 +27,7 @@\n \n def predict_proba(self, X):\n return self.calibrated_clf.predict_proba(X)\n+\n+ @property\n+ def n_features_in_(self):\n+ return self.base_clf.n_features_in_\n", "issue": "[model:regressor] AttributeError: 'IsotonicRegressionCalibrator' object has no attribute 'n_features_in_'\nhttps://community-tc.services.mozilla.com/tasks/HncpjvKKRcSnxL_GJ8PV9A/runs/0/logs/public/logs/live.log\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/local/bin/bugbug-train\", line 8, in <module>\r\n sys.exit(main())\r\n File \"/usr/local/lib/python3.10/site-packages/scripts/trainer.py\", line 141, in main\r\n retriever.go(args)\r\n File \"/usr/local/lib/python3.10/site-packages/scripts/trainer.py\", line 41, in go\r\n metrics = model_obj.train(limit=args.limit)\r\n File \"/usr/local/lib/python3.10/site-packages/bugbug/model.py\", line 418, in train\r\n logger.info(\"Number of features: %d\", self.clf.steps[-1][1].n_features_in_)\r\nAttributeError: 'IsotonicRegressionCalibrator' object has no attribute 'n_features_in_'\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nfrom sklearn.base import BaseEstimator, ClassifierMixin\nfrom sklearn.calibration import CalibratedClassifierCV\nfrom sklearn.model_selection import train_test_split\n\n\nclass IsotonicRegressionCalibrator(BaseEstimator, ClassifierMixin):\n def __init__(self, base_clf):\n self.base_clf = base_clf\n self.calibrated_clf = CalibratedClassifierCV(\n base_clf, cv=\"prefit\", method=\"isotonic\"\n )\n\n def fit(self, X_train, y_train):\n X_train, X_val, y_train, y_val = train_test_split(\n X_train, y_train, test_size=0.2, random_state=42\n )\n self.base_clf.fit(X_train, y_train)\n self.calibrated_clf.fit(X_val, y_val)\n\n def predict(self, X):\n return self.calibrated_clf.predict(X)\n\n def predict_proba(self, X):\n return self.calibrated_clf.predict_proba(X)\n", "path": "bugbug/model_calibration.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nfrom sklearn.base import BaseEstimator, ClassifierMixin\nfrom sklearn.calibration import CalibratedClassifierCV\nfrom sklearn.model_selection import train_test_split\n\n\nclass IsotonicRegressionCalibrator(BaseEstimator, ClassifierMixin):\n def __init__(self, base_clf):\n self.base_clf = base_clf\n self.calibrated_clf = CalibratedClassifierCV(\n base_clf, cv=\"prefit\", method=\"isotonic\"\n )\n\n def fit(self, X_train, y_train):\n X_train, X_val, y_train, y_val = train_test_split(\n X_train, y_train, test_size=0.2, random_state=42\n )\n self.base_clf.fit(X_train, y_train)\n self.calibrated_clf.fit(X_val, y_val)\n\n def predict(self, X):\n return self.calibrated_clf.predict(X)\n\n def predict_proba(self, X):\n return self.calibrated_clf.predict_proba(X)\n\n @property\n def n_features_in_(self):\n return self.base_clf.n_features_in_\n", "path": "bugbug/model_calibration.py"}]}
823
94
gh_patches_debug_20408
rasdani/github-patches
git_diff
dmlc__dgl-2796
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- HGT implementation bug? https://github.com/dmlc/dgl/blob/master/examples/pytorch/hgt/model.py#L81 The implemented hgt set value of nodes multiple times when calculating by different edge attention, which will cause the `nodes["v"]` of earlier etype be overwritten by later one. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `examples/pytorch/hgt/model.py` Content: ``` 1 import dgl 2 import math 3 import torch 4 import torch.nn as nn 5 import torch.nn.functional as F 6 import dgl.function as fn 7 from dgl.nn.functional import edge_softmax 8 9 class HGTLayer(nn.Module): 10 def __init__(self, 11 in_dim, 12 out_dim, 13 node_dict, 14 edge_dict, 15 n_heads, 16 dropout = 0.2, 17 use_norm = False): 18 super(HGTLayer, self).__init__() 19 20 self.in_dim = in_dim 21 self.out_dim = out_dim 22 self.node_dict = node_dict 23 self.edge_dict = edge_dict 24 self.num_types = len(node_dict) 25 self.num_relations = len(edge_dict) 26 self.total_rel = self.num_types * self.num_relations * self.num_types 27 self.n_heads = n_heads 28 self.d_k = out_dim // n_heads 29 self.sqrt_dk = math.sqrt(self.d_k) 30 self.att = None 31 32 self.k_linears = nn.ModuleList() 33 self.q_linears = nn.ModuleList() 34 self.v_linears = nn.ModuleList() 35 self.a_linears = nn.ModuleList() 36 self.norms = nn.ModuleList() 37 self.use_norm = use_norm 38 39 for t in range(self.num_types): 40 self.k_linears.append(nn.Linear(in_dim, out_dim)) 41 self.q_linears.append(nn.Linear(in_dim, out_dim)) 42 self.v_linears.append(nn.Linear(in_dim, out_dim)) 43 self.a_linears.append(nn.Linear(out_dim, out_dim)) 44 if use_norm: 45 self.norms.append(nn.LayerNorm(out_dim)) 46 47 self.relation_pri = nn.Parameter(torch.ones(self.num_relations, self.n_heads)) 48 self.relation_att = nn.Parameter(torch.Tensor(self.num_relations, n_heads, self.d_k, self.d_k)) 49 self.relation_msg = nn.Parameter(torch.Tensor(self.num_relations, n_heads, self.d_k, self.d_k)) 50 self.skip = nn.Parameter(torch.ones(self.num_types)) 51 self.drop = nn.Dropout(dropout) 52 53 nn.init.xavier_uniform_(self.relation_att) 54 nn.init.xavier_uniform_(self.relation_msg) 55 56 def forward(self, G, h): 57 with G.local_scope(): 58 node_dict, edge_dict = self.node_dict, self.edge_dict 59 for srctype, etype, dsttype in G.canonical_etypes: 60 sub_graph = G[srctype, etype, dsttype] 61 62 k_linear = self.k_linears[node_dict[srctype]] 63 v_linear = self.v_linears[node_dict[srctype]] 64 q_linear = self.q_linears[node_dict[dsttype]] 65 66 k = k_linear(h[srctype]).view(-1, self.n_heads, self.d_k) 67 v = v_linear(h[srctype]).view(-1, self.n_heads, self.d_k) 68 q = q_linear(h[dsttype]).view(-1, self.n_heads, self.d_k) 69 70 e_id = self.edge_dict[etype] 71 72 relation_att = self.relation_att[e_id] 73 relation_pri = self.relation_pri[e_id] 74 relation_msg = self.relation_msg[e_id] 75 76 k = torch.einsum("bij,ijk->bik", k, relation_att) 77 v = torch.einsum("bij,ijk->bik", v, relation_msg) 78 79 sub_graph.srcdata['k'] = k 80 sub_graph.dstdata['q'] = q 81 sub_graph.srcdata['v'] = v 82 83 sub_graph.apply_edges(fn.v_dot_u('q', 'k', 't')) 84 attn_score = sub_graph.edata.pop('t').sum(-1) * relation_pri / self.sqrt_dk 85 attn_score = edge_softmax(sub_graph, attn_score, norm_by='dst') 86 87 sub_graph.edata['t'] = attn_score.unsqueeze(-1) 88 89 G.multi_update_all({etype : (fn.u_mul_e('v', 't', 'm'), fn.sum('m', 't')) \ 90 for etype in edge_dict}, cross_reducer = 'mean') 91 92 new_h = {} 93 for ntype in G.ntypes: 94 ''' 95 Step 3: Target-specific Aggregation 96 x = norm( W[node_type] * gelu( Agg(x) ) + x ) 97 ''' 98 n_id = node_dict[ntype] 99 alpha = torch.sigmoid(self.skip[n_id]) 100 t = G.nodes[ntype].data['t'].view(-1, self.out_dim) 101 trans_out = self.drop(self.a_linears[n_id](t)) 102 trans_out = trans_out * alpha + h[ntype] * (1-alpha) 103 if self.use_norm: 104 new_h[ntype] = self.norms[n_id](trans_out) 105 else: 106 new_h[ntype] = trans_out 107 return new_h 108 109 class HGT(nn.Module): 110 def __init__(self, G, node_dict, edge_dict, n_inp, n_hid, n_out, n_layers, n_heads, use_norm = True): 111 super(HGT, self).__init__() 112 self.node_dict = node_dict 113 self.edge_dict = edge_dict 114 self.gcs = nn.ModuleList() 115 self.n_inp = n_inp 116 self.n_hid = n_hid 117 self.n_out = n_out 118 self.n_layers = n_layers 119 self.adapt_ws = nn.ModuleList() 120 for t in range(len(node_dict)): 121 self.adapt_ws.append(nn.Linear(n_inp, n_hid)) 122 for _ in range(n_layers): 123 self.gcs.append(HGTLayer(n_hid, n_hid, node_dict, edge_dict, n_heads, use_norm = use_norm)) 124 self.out = nn.Linear(n_hid, n_out) 125 126 def forward(self, G, out_key): 127 h = {} 128 for ntype in G.ntypes: 129 n_id = self.node_dict[ntype] 130 h[ntype] = F.gelu(self.adapt_ws[n_id](G.nodes[ntype].data['inp'])) 131 for i in range(self.n_layers): 132 h = self.gcs[i](G, h) 133 return self.out(h[out_key]) 134 135 class HeteroRGCNLayer(nn.Module): 136 def __init__(self, in_size, out_size, etypes): 137 super(HeteroRGCNLayer, self).__init__() 138 # W_r for each relation 139 self.weight = nn.ModuleDict({ 140 name : nn.Linear(in_size, out_size) for name in etypes 141 }) 142 143 def forward(self, G, feat_dict): 144 # The input is a dictionary of node features for each type 145 funcs = {} 146 for srctype, etype, dsttype in G.canonical_etypes: 147 # Compute W_r * h 148 Wh = self.weight[etype](feat_dict[srctype]) 149 # Save it in graph for message passing 150 G.nodes[srctype].data['Wh_%s' % etype] = Wh 151 # Specify per-relation message passing functions: (message_func, reduce_func). 152 # Note that the results are saved to the same destination feature 'h', which 153 # hints the type wise reducer for aggregation. 154 funcs[etype] = (fn.copy_u('Wh_%s' % etype, 'm'), fn.mean('m', 'h')) 155 # Trigger message passing of multiple types. 156 # The first argument is the message passing functions for each relation. 157 # The second one is the type wise reducer, could be "sum", "max", 158 # "min", "mean", "stack" 159 G.multi_update_all(funcs, 'sum') 160 # return the updated node feature dictionary 161 return {ntype : G.nodes[ntype].data['h'] for ntype in G.ntypes} 162 163 164 class HeteroRGCN(nn.Module): 165 def __init__(self, G, in_size, hidden_size, out_size): 166 super(HeteroRGCN, self).__init__() 167 # create layers 168 self.layer1 = HeteroRGCNLayer(in_size, hidden_size, G.etypes) 169 self.layer2 = HeteroRGCNLayer(hidden_size, out_size, G.etypes) 170 171 def forward(self, G, out_key): 172 input_dict = {ntype : G.nodes[ntype].data['inp'] for ntype in G.ntypes} 173 h_dict = self.layer1(G, input_dict) 174 h_dict = {k : F.leaky_relu(h) for k, h in h_dict.items()} 175 h_dict = self.layer2(G, h_dict) 176 # get paper logits 177 return h_dict[out_key] 178 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/examples/pytorch/hgt/model.py b/examples/pytorch/hgt/model.py --- a/examples/pytorch/hgt/model.py +++ b/examples/pytorch/hgt/model.py @@ -78,7 +78,7 @@ sub_graph.srcdata['k'] = k sub_graph.dstdata['q'] = q - sub_graph.srcdata['v'] = v + sub_graph.srcdata['v_%d' % e_id] = v sub_graph.apply_edges(fn.v_dot_u('q', 'k', 't')) attn_score = sub_graph.edata.pop('t').sum(-1) * relation_pri / self.sqrt_dk @@ -86,8 +86,8 @@ sub_graph.edata['t'] = attn_score.unsqueeze(-1) - G.multi_update_all({etype : (fn.u_mul_e('v', 't', 'm'), fn.sum('m', 't')) \ - for etype in edge_dict}, cross_reducer = 'mean') + G.multi_update_all({etype : (fn.u_mul_e('v_%d' % e_id, 't', 'm'), fn.sum('m', 't')) \ + for etype, e_id in edge_dict.items()}, cross_reducer = 'mean') new_h = {} for ntype in G.ntypes:
{"golden_diff": "diff --git a/examples/pytorch/hgt/model.py b/examples/pytorch/hgt/model.py\n--- a/examples/pytorch/hgt/model.py\n+++ b/examples/pytorch/hgt/model.py\n@@ -78,7 +78,7 @@\n \n sub_graph.srcdata['k'] = k\n sub_graph.dstdata['q'] = q\n- sub_graph.srcdata['v'] = v\n+ sub_graph.srcdata['v_%d' % e_id] = v\n \n sub_graph.apply_edges(fn.v_dot_u('q', 'k', 't'))\n attn_score = sub_graph.edata.pop('t').sum(-1) * relation_pri / self.sqrt_dk\n@@ -86,8 +86,8 @@\n \n sub_graph.edata['t'] = attn_score.unsqueeze(-1)\n \n- G.multi_update_all({etype : (fn.u_mul_e('v', 't', 'm'), fn.sum('m', 't')) \\\n- for etype in edge_dict}, cross_reducer = 'mean')\n+ G.multi_update_all({etype : (fn.u_mul_e('v_%d' % e_id, 't', 'm'), fn.sum('m', 't')) \\\n+ for etype, e_id in edge_dict.items()}, cross_reducer = 'mean')\n \n new_h = {}\n for ntype in G.ntypes:\n", "issue": "HGT implementation bug?\nhttps://github.com/dmlc/dgl/blob/master/examples/pytorch/hgt/model.py#L81\r\n\r\nThe implemented hgt set value of nodes multiple times when calculating by different edge attention, which will cause the `nodes[\"v\"]` of earlier etype be overwritten by later one. \n", "before_files": [{"content": "import dgl\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport dgl.function as fn\nfrom dgl.nn.functional import edge_softmax\n\nclass HGTLayer(nn.Module):\n def __init__(self,\n in_dim,\n out_dim,\n node_dict,\n edge_dict,\n n_heads,\n dropout = 0.2,\n use_norm = False):\n super(HGTLayer, self).__init__()\n\n self.in_dim = in_dim\n self.out_dim = out_dim\n self.node_dict = node_dict\n self.edge_dict = edge_dict\n self.num_types = len(node_dict)\n self.num_relations = len(edge_dict)\n self.total_rel = self.num_types * self.num_relations * self.num_types\n self.n_heads = n_heads\n self.d_k = out_dim // n_heads\n self.sqrt_dk = math.sqrt(self.d_k)\n self.att = None\n\n self.k_linears = nn.ModuleList()\n self.q_linears = nn.ModuleList()\n self.v_linears = nn.ModuleList()\n self.a_linears = nn.ModuleList()\n self.norms = nn.ModuleList()\n self.use_norm = use_norm\n\n for t in range(self.num_types):\n self.k_linears.append(nn.Linear(in_dim, out_dim))\n self.q_linears.append(nn.Linear(in_dim, out_dim))\n self.v_linears.append(nn.Linear(in_dim, out_dim))\n self.a_linears.append(nn.Linear(out_dim, out_dim))\n if use_norm:\n self.norms.append(nn.LayerNorm(out_dim))\n\n self.relation_pri = nn.Parameter(torch.ones(self.num_relations, self.n_heads))\n self.relation_att = nn.Parameter(torch.Tensor(self.num_relations, n_heads, self.d_k, self.d_k))\n self.relation_msg = nn.Parameter(torch.Tensor(self.num_relations, n_heads, self.d_k, self.d_k))\n self.skip = nn.Parameter(torch.ones(self.num_types))\n self.drop = nn.Dropout(dropout)\n\n nn.init.xavier_uniform_(self.relation_att)\n nn.init.xavier_uniform_(self.relation_msg)\n\n def forward(self, G, h):\n with G.local_scope():\n node_dict, edge_dict = self.node_dict, self.edge_dict\n for srctype, etype, dsttype in G.canonical_etypes:\n sub_graph = G[srctype, etype, dsttype]\n\n k_linear = self.k_linears[node_dict[srctype]]\n v_linear = self.v_linears[node_dict[srctype]]\n q_linear = self.q_linears[node_dict[dsttype]]\n\n k = k_linear(h[srctype]).view(-1, self.n_heads, self.d_k)\n v = v_linear(h[srctype]).view(-1, self.n_heads, self.d_k)\n q = q_linear(h[dsttype]).view(-1, self.n_heads, self.d_k)\n\n e_id = self.edge_dict[etype]\n\n relation_att = self.relation_att[e_id]\n relation_pri = self.relation_pri[e_id]\n relation_msg = self.relation_msg[e_id]\n\n k = torch.einsum(\"bij,ijk->bik\", k, relation_att)\n v = torch.einsum(\"bij,ijk->bik\", v, relation_msg)\n\n sub_graph.srcdata['k'] = k\n sub_graph.dstdata['q'] = q\n sub_graph.srcdata['v'] = v\n\n sub_graph.apply_edges(fn.v_dot_u('q', 'k', 't'))\n attn_score = sub_graph.edata.pop('t').sum(-1) * relation_pri / self.sqrt_dk\n attn_score = edge_softmax(sub_graph, attn_score, norm_by='dst')\n\n sub_graph.edata['t'] = attn_score.unsqueeze(-1)\n\n G.multi_update_all({etype : (fn.u_mul_e('v', 't', 'm'), fn.sum('m', 't')) \\\n for etype in edge_dict}, cross_reducer = 'mean')\n\n new_h = {}\n for ntype in G.ntypes:\n '''\n Step 3: Target-specific Aggregation\n x = norm( W[node_type] * gelu( Agg(x) ) + x )\n '''\n n_id = node_dict[ntype]\n alpha = torch.sigmoid(self.skip[n_id])\n t = G.nodes[ntype].data['t'].view(-1, self.out_dim)\n trans_out = self.drop(self.a_linears[n_id](t))\n trans_out = trans_out * alpha + h[ntype] * (1-alpha)\n if self.use_norm:\n new_h[ntype] = self.norms[n_id](trans_out)\n else:\n new_h[ntype] = trans_out\n return new_h\n\nclass HGT(nn.Module):\n def __init__(self, G, node_dict, edge_dict, n_inp, n_hid, n_out, n_layers, n_heads, use_norm = True):\n super(HGT, self).__init__()\n self.node_dict = node_dict\n self.edge_dict = edge_dict\n self.gcs = nn.ModuleList()\n self.n_inp = n_inp\n self.n_hid = n_hid\n self.n_out = n_out\n self.n_layers = n_layers\n self.adapt_ws = nn.ModuleList()\n for t in range(len(node_dict)):\n self.adapt_ws.append(nn.Linear(n_inp, n_hid))\n for _ in range(n_layers):\n self.gcs.append(HGTLayer(n_hid, n_hid, node_dict, edge_dict, n_heads, use_norm = use_norm))\n self.out = nn.Linear(n_hid, n_out)\n\n def forward(self, G, out_key):\n h = {}\n for ntype in G.ntypes:\n n_id = self.node_dict[ntype]\n h[ntype] = F.gelu(self.adapt_ws[n_id](G.nodes[ntype].data['inp']))\n for i in range(self.n_layers):\n h = self.gcs[i](G, h)\n return self.out(h[out_key])\n\nclass HeteroRGCNLayer(nn.Module):\n def __init__(self, in_size, out_size, etypes):\n super(HeteroRGCNLayer, self).__init__()\n # W_r for each relation\n self.weight = nn.ModuleDict({\n name : nn.Linear(in_size, out_size) for name in etypes\n })\n\n def forward(self, G, feat_dict):\n # The input is a dictionary of node features for each type\n funcs = {}\n for srctype, etype, dsttype in G.canonical_etypes:\n # Compute W_r * h\n Wh = self.weight[etype](feat_dict[srctype])\n # Save it in graph for message passing\n G.nodes[srctype].data['Wh_%s' % etype] = Wh\n # Specify per-relation message passing functions: (message_func, reduce_func).\n # Note that the results are saved to the same destination feature 'h', which\n # hints the type wise reducer for aggregation.\n funcs[etype] = (fn.copy_u('Wh_%s' % etype, 'm'), fn.mean('m', 'h'))\n # Trigger message passing of multiple types.\n # The first argument is the message passing functions for each relation.\n # The second one is the type wise reducer, could be \"sum\", \"max\",\n # \"min\", \"mean\", \"stack\"\n G.multi_update_all(funcs, 'sum')\n # return the updated node feature dictionary\n return {ntype : G.nodes[ntype].data['h'] for ntype in G.ntypes}\n\n\nclass HeteroRGCN(nn.Module):\n def __init__(self, G, in_size, hidden_size, out_size):\n super(HeteroRGCN, self).__init__()\n # create layers\n self.layer1 = HeteroRGCNLayer(in_size, hidden_size, G.etypes)\n self.layer2 = HeteroRGCNLayer(hidden_size, out_size, G.etypes)\n\n def forward(self, G, out_key):\n input_dict = {ntype : G.nodes[ntype].data['inp'] for ntype in G.ntypes}\n h_dict = self.layer1(G, input_dict)\n h_dict = {k : F.leaky_relu(h) for k, h in h_dict.items()}\n h_dict = self.layer2(G, h_dict)\n # get paper logits\n return h_dict[out_key]\n", "path": "examples/pytorch/hgt/model.py"}], "after_files": [{"content": "import dgl\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport dgl.function as fn\nfrom dgl.nn.functional import edge_softmax\n\nclass HGTLayer(nn.Module):\n def __init__(self,\n in_dim,\n out_dim,\n node_dict,\n edge_dict,\n n_heads,\n dropout = 0.2,\n use_norm = False):\n super(HGTLayer, self).__init__()\n\n self.in_dim = in_dim\n self.out_dim = out_dim\n self.node_dict = node_dict\n self.edge_dict = edge_dict\n self.num_types = len(node_dict)\n self.num_relations = len(edge_dict)\n self.total_rel = self.num_types * self.num_relations * self.num_types\n self.n_heads = n_heads\n self.d_k = out_dim // n_heads\n self.sqrt_dk = math.sqrt(self.d_k)\n self.att = None\n\n self.k_linears = nn.ModuleList()\n self.q_linears = nn.ModuleList()\n self.v_linears = nn.ModuleList()\n self.a_linears = nn.ModuleList()\n self.norms = nn.ModuleList()\n self.use_norm = use_norm\n\n for t in range(self.num_types):\n self.k_linears.append(nn.Linear(in_dim, out_dim))\n self.q_linears.append(nn.Linear(in_dim, out_dim))\n self.v_linears.append(nn.Linear(in_dim, out_dim))\n self.a_linears.append(nn.Linear(out_dim, out_dim))\n if use_norm:\n self.norms.append(nn.LayerNorm(out_dim))\n\n self.relation_pri = nn.Parameter(torch.ones(self.num_relations, self.n_heads))\n self.relation_att = nn.Parameter(torch.Tensor(self.num_relations, n_heads, self.d_k, self.d_k))\n self.relation_msg = nn.Parameter(torch.Tensor(self.num_relations, n_heads, self.d_k, self.d_k))\n self.skip = nn.Parameter(torch.ones(self.num_types))\n self.drop = nn.Dropout(dropout)\n\n nn.init.xavier_uniform_(self.relation_att)\n nn.init.xavier_uniform_(self.relation_msg)\n\n def forward(self, G, h):\n with G.local_scope():\n node_dict, edge_dict = self.node_dict, self.edge_dict\n for srctype, etype, dsttype in G.canonical_etypes:\n sub_graph = G[srctype, etype, dsttype]\n\n k_linear = self.k_linears[node_dict[srctype]]\n v_linear = self.v_linears[node_dict[srctype]]\n q_linear = self.q_linears[node_dict[dsttype]]\n\n k = k_linear(h[srctype]).view(-1, self.n_heads, self.d_k)\n v = v_linear(h[srctype]).view(-1, self.n_heads, self.d_k)\n q = q_linear(h[dsttype]).view(-1, self.n_heads, self.d_k)\n\n e_id = self.edge_dict[etype]\n\n relation_att = self.relation_att[e_id]\n relation_pri = self.relation_pri[e_id]\n relation_msg = self.relation_msg[e_id]\n\n k = torch.einsum(\"bij,ijk->bik\", k, relation_att)\n v = torch.einsum(\"bij,ijk->bik\", v, relation_msg)\n\n sub_graph.srcdata['k'] = k\n sub_graph.dstdata['q'] = q\n sub_graph.srcdata['v_%d' % e_id] = v\n\n sub_graph.apply_edges(fn.v_dot_u('q', 'k', 't'))\n attn_score = sub_graph.edata.pop('t').sum(-1) * relation_pri / self.sqrt_dk\n attn_score = edge_softmax(sub_graph, attn_score, norm_by='dst')\n\n sub_graph.edata['t'] = attn_score.unsqueeze(-1)\n\n G.multi_update_all({etype : (fn.u_mul_e('v_%d' % e_id, 't', 'm'), fn.sum('m', 't')) \\\n for etype, e_id in edge_dict.items()}, cross_reducer = 'mean')\n\n new_h = {}\n for ntype in G.ntypes:\n '''\n Step 3: Target-specific Aggregation\n x = norm( W[node_type] * gelu( Agg(x) ) + x )\n '''\n n_id = node_dict[ntype]\n alpha = torch.sigmoid(self.skip[n_id])\n t = G.nodes[ntype].data['t'].view(-1, self.out_dim)\n trans_out = self.drop(self.a_linears[n_id](t))\n trans_out = trans_out * alpha + h[ntype] * (1-alpha)\n if self.use_norm:\n new_h[ntype] = self.norms[n_id](trans_out)\n else:\n new_h[ntype] = trans_out\n return new_h\n\nclass HGT(nn.Module):\n def __init__(self, G, node_dict, edge_dict, n_inp, n_hid, n_out, n_layers, n_heads, use_norm = True):\n super(HGT, self).__init__()\n self.node_dict = node_dict\n self.edge_dict = edge_dict\n self.gcs = nn.ModuleList()\n self.n_inp = n_inp\n self.n_hid = n_hid\n self.n_out = n_out\n self.n_layers = n_layers\n self.adapt_ws = nn.ModuleList()\n for t in range(len(node_dict)):\n self.adapt_ws.append(nn.Linear(n_inp, n_hid))\n for _ in range(n_layers):\n self.gcs.append(HGTLayer(n_hid, n_hid, node_dict, edge_dict, n_heads, use_norm = use_norm))\n self.out = nn.Linear(n_hid, n_out)\n\n def forward(self, G, out_key):\n h = {}\n for ntype in G.ntypes:\n n_id = self.node_dict[ntype]\n h[ntype] = F.gelu(self.adapt_ws[n_id](G.nodes[ntype].data['inp']))\n for i in range(self.n_layers):\n h = self.gcs[i](G, h)\n return self.out(h[out_key])\n\nclass HeteroRGCNLayer(nn.Module):\n def __init__(self, in_size, out_size, etypes):\n super(HeteroRGCNLayer, self).__init__()\n # W_r for each relation\n self.weight = nn.ModuleDict({\n name : nn.Linear(in_size, out_size) for name in etypes\n })\n\n def forward(self, G, feat_dict):\n # The input is a dictionary of node features for each type\n funcs = {}\n for srctype, etype, dsttype in G.canonical_etypes:\n # Compute W_r * h\n Wh = self.weight[etype](feat_dict[srctype])\n # Save it in graph for message passing\n G.nodes[srctype].data['Wh_%s' % etype] = Wh\n # Specify per-relation message passing functions: (message_func, reduce_func).\n # Note that the results are saved to the same destination feature 'h', which\n # hints the type wise reducer for aggregation.\n funcs[etype] = (fn.copy_u('Wh_%s' % etype, 'm'), fn.mean('m', 'h'))\n # Trigger message passing of multiple types.\n # The first argument is the message passing functions for each relation.\n # The second one is the type wise reducer, could be \"sum\", \"max\",\n # \"min\", \"mean\", \"stack\"\n G.multi_update_all(funcs, 'sum')\n # return the updated node feature dictionary\n return {ntype : G.nodes[ntype].data['h'] for ntype in G.ntypes}\n\n\nclass HeteroRGCN(nn.Module):\n def __init__(self, G, in_size, hidden_size, out_size):\n super(HeteroRGCN, self).__init__()\n # create layers\n self.layer1 = HeteroRGCNLayer(in_size, hidden_size, G.etypes)\n self.layer2 = HeteroRGCNLayer(hidden_size, out_size, G.etypes)\n\n def forward(self, G, out_key):\n input_dict = {ntype : G.nodes[ntype].data['inp'] for ntype in G.ntypes}\n h_dict = self.layer1(G, input_dict)\n h_dict = {k : F.leaky_relu(h) for k, h in h_dict.items()}\n h_dict = self.layer2(G, h_dict)\n # get paper logits\n return h_dict[out_key]\n", "path": "examples/pytorch/hgt/model.py"}]}
2,638
298
gh_patches_debug_30439
rasdani/github-patches
git_diff
sql-machine-learning__elasticdl-38
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- support blocking pull in PS so client don't need to retry in a loop --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `tensorflow/ps/ps.py` Content: ``` 1 import tensorflow as tf 2 tf.enable_eager_execution() 3 import tensorflow.contrib.eager as tfe 4 import numpy as np 5 import queue 6 import threading 7 8 9 class ParameterServer(object): 10 def __init__(self, optimizer, vars): 11 self._opt = optimizer 12 self._vars = {} 13 for k, v in vars.items(): 14 if (not isinstance(v, np.ndarray) 15 or v.dtype not in (np.float32, np.float64)): 16 raise ValueError( 17 'Initial value for variable %s is not of float type ndarray' % 18 k) 19 self._vars[k] = tfe.Variable(v, name=k) 20 self._step = 0 21 self._grad_q = queue.Queue() 22 self._lock = threading.Lock() 23 self._runner = threading.Thread(target=self._run, name='ps-runner') 24 self._exiting = False 25 26 def pull(self, min_step=0, names=None): 27 with self._lock: 28 if min_step > self._step: 29 raise LookupError('Required step is not ready yet: %s' % min_step) 30 if names: 31 res = {k: self._vars[k].numpy() for k in names} 32 else: 33 res = {k: v.numpy() for k, v in self._vars.items()} 34 return self._step, res 35 36 def push(self, base_step, sub_step, grads): 37 with self._lock: 38 if base_step > self._step: 39 raise ValueError( 40 'Illegal base step %s, parameter server step is %s' % 41 (base_step, self._step)) 42 43 if sub_step < 0: 44 raise ValueError('Illegal sub step %s' % sub_step) 45 46 for k, g in grads.items(): 47 v = self._vars[k] 48 if g.dtype != v.dtype.as_numpy_dtype or g.shape != v.shape: 49 raise ValueError('Incompatible gradient for variable %s' % k) 50 # TODO(l.zou): use @dataclass when python 3.7 is available. 51 self._grad_q.put((base_step, sub_step, grads)) 52 53 def _compute(self, grads): 54 grads_vars = [(g, self._vars[k]) for k, g in grads.items()] 55 with self._lock: 56 self._opt.apply_gradients(grads_vars) 57 self._step += 1 58 59 def _run(self): 60 while not self._exiting: 61 # TODO(l.zou): How to properly accumulate and decay grads? 62 try: 63 base_step, sub_step, grads = self._grad_q.get(timeout=1.0) 64 self._compute(grads) 65 except queue.Empty: 66 pass 67 68 def start(self): 69 self._runner.start() 70 71 def join(self): 72 self._exiting = True 73 self._runner.join() 74 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/tensorflow/ps/ps.py b/tensorflow/ps/ps.py --- a/tensorflow/ps/ps.py +++ b/tensorflow/ps/ps.py @@ -1,9 +1,9 @@ +import threading +import queue +import numpy as np +import tensorflow.contrib.eager as tfe import tensorflow as tf tf.enable_eager_execution() -import tensorflow.contrib.eager as tfe -import numpy as np -import queue -import threading class ParameterServer(object): @@ -22,11 +22,18 @@ self._lock = threading.Lock() self._runner = threading.Thread(target=self._run, name='ps-runner') self._exiting = False + self._min_step_cv = threading.Condition() - def pull(self, min_step=0, names=None): + def pull(self, names=None, min_step=0, blocking=True, timeout=None): + with self._min_step_cv: + self._min_step_cv.wait_for( + lambda: not blocking or min_step <= self._step, + timeout=timeout) with self._lock: if min_step > self._step: - raise LookupError('Required step is not ready yet: %s' % min_step) + raise LookupError( + 'Required step is not ready yet: %s' % + min_step) if names: res = {k: self._vars[k].numpy() for k in names} else: @@ -54,7 +61,9 @@ grads_vars = [(g, self._vars[k]) for k, g in grads.items()] with self._lock: self._opt.apply_gradients(grads_vars) + with self._min_step_cv: self._step += 1 + self._min_step_cv.notify_all() def _run(self): while not self._exiting:
{"golden_diff": "diff --git a/tensorflow/ps/ps.py b/tensorflow/ps/ps.py\n--- a/tensorflow/ps/ps.py\n+++ b/tensorflow/ps/ps.py\n@@ -1,9 +1,9 @@\n+import threading\n+import queue\n+import numpy as np\n+import tensorflow.contrib.eager as tfe\n import tensorflow as tf\n tf.enable_eager_execution()\n-import tensorflow.contrib.eager as tfe\n-import numpy as np\n-import queue\n-import threading\n \n \n class ParameterServer(object):\n@@ -22,11 +22,18 @@\n self._lock = threading.Lock()\n self._runner = threading.Thread(target=self._run, name='ps-runner')\n self._exiting = False\n+ self._min_step_cv = threading.Condition()\n \n- def pull(self, min_step=0, names=None):\n+ def pull(self, names=None, min_step=0, blocking=True, timeout=None):\n+ with self._min_step_cv:\n+ self._min_step_cv.wait_for(\n+ lambda: not blocking or min_step <= self._step,\n+ timeout=timeout)\n with self._lock:\n if min_step > self._step:\n- raise LookupError('Required step is not ready yet: %s' % min_step)\n+ raise LookupError(\n+ 'Required step is not ready yet: %s' %\n+ min_step)\n if names:\n res = {k: self._vars[k].numpy() for k in names}\n else:\n@@ -54,7 +61,9 @@\n grads_vars = [(g, self._vars[k]) for k, g in grads.items()]\n with self._lock:\n self._opt.apply_gradients(grads_vars)\n+ with self._min_step_cv:\n self._step += 1\n+ self._min_step_cv.notify_all()\n \n def _run(self):\n while not self._exiting:\n", "issue": "support blocking pull in PS so client don't need to retry in a loop\n\n", "before_files": [{"content": "import tensorflow as tf\ntf.enable_eager_execution()\nimport tensorflow.contrib.eager as tfe\nimport numpy as np\nimport queue\nimport threading\n\n\nclass ParameterServer(object):\n def __init__(self, optimizer, vars):\n self._opt = optimizer\n self._vars = {}\n for k, v in vars.items():\n if (not isinstance(v, np.ndarray)\n or v.dtype not in (np.float32, np.float64)):\n raise ValueError(\n 'Initial value for variable %s is not of float type ndarray' %\n k)\n self._vars[k] = tfe.Variable(v, name=k)\n self._step = 0\n self._grad_q = queue.Queue()\n self._lock = threading.Lock()\n self._runner = threading.Thread(target=self._run, name='ps-runner')\n self._exiting = False\n\n def pull(self, min_step=0, names=None):\n with self._lock:\n if min_step > self._step:\n raise LookupError('Required step is not ready yet: %s' % min_step)\n if names:\n res = {k: self._vars[k].numpy() for k in names}\n else:\n res = {k: v.numpy() for k, v in self._vars.items()}\n return self._step, res\n\n def push(self, base_step, sub_step, grads):\n with self._lock:\n if base_step > self._step:\n raise ValueError(\n 'Illegal base step %s, parameter server step is %s' %\n (base_step, self._step))\n\n if sub_step < 0:\n raise ValueError('Illegal sub step %s' % sub_step)\n\n for k, g in grads.items():\n v = self._vars[k]\n if g.dtype != v.dtype.as_numpy_dtype or g.shape != v.shape:\n raise ValueError('Incompatible gradient for variable %s' % k)\n # TODO(l.zou): use @dataclass when python 3.7 is available.\n self._grad_q.put((base_step, sub_step, grads))\n\n def _compute(self, grads):\n grads_vars = [(g, self._vars[k]) for k, g in grads.items()]\n with self._lock:\n self._opt.apply_gradients(grads_vars)\n self._step += 1\n\n def _run(self):\n while not self._exiting:\n # TODO(l.zou): How to properly accumulate and decay grads?\n try:\n base_step, sub_step, grads = self._grad_q.get(timeout=1.0)\n self._compute(grads)\n except queue.Empty:\n pass\n\n def start(self):\n self._runner.start()\n\n def join(self):\n self._exiting = True\n self._runner.join()\n", "path": "tensorflow/ps/ps.py"}], "after_files": [{"content": "import threading\nimport queue\nimport numpy as np\nimport tensorflow.contrib.eager as tfe\nimport tensorflow as tf\ntf.enable_eager_execution()\n\n\nclass ParameterServer(object):\n def __init__(self, optimizer, vars):\n self._opt = optimizer\n self._vars = {}\n for k, v in vars.items():\n if (not isinstance(v, np.ndarray)\n or v.dtype not in (np.float32, np.float64)):\n raise ValueError(\n 'Initial value for variable %s is not of float type ndarray' %\n k)\n self._vars[k] = tfe.Variable(v, name=k)\n self._step = 0\n self._grad_q = queue.Queue()\n self._lock = threading.Lock()\n self._runner = threading.Thread(target=self._run, name='ps-runner')\n self._exiting = False\n self._min_step_cv = threading.Condition()\n\n def pull(self, names=None, min_step=0, blocking=True, timeout=None):\n with self._min_step_cv:\n self._min_step_cv.wait_for(\n lambda: not blocking or min_step <= self._step,\n timeout=timeout)\n with self._lock:\n if min_step > self._step:\n raise LookupError(\n 'Required step is not ready yet: %s' %\n min_step)\n if names:\n res = {k: self._vars[k].numpy() for k in names}\n else:\n res = {k: v.numpy() for k, v in self._vars.items()}\n return self._step, res\n\n def push(self, base_step, sub_step, grads):\n with self._lock:\n if base_step > self._step:\n raise ValueError(\n 'Illegal base step %s, parameter server step is %s' %\n (base_step, self._step))\n\n if sub_step < 0:\n raise ValueError('Illegal sub step %s' % sub_step)\n\n for k, g in grads.items():\n v = self._vars[k]\n if g.dtype != v.dtype.as_numpy_dtype or g.shape != v.shape:\n raise ValueError('Incompatible gradient for variable %s' % k)\n # TODO(l.zou): use @dataclass when python 3.7 is available.\n self._grad_q.put((base_step, sub_step, grads))\n\n def _compute(self, grads):\n grads_vars = [(g, self._vars[k]) for k, g in grads.items()]\n with self._lock:\n self._opt.apply_gradients(grads_vars)\n with self._min_step_cv:\n self._step += 1\n self._min_step_cv.notify_all()\n\n def _run(self):\n while not self._exiting:\n # TODO(l.zou): How to properly accumulate and decay grads?\n try:\n base_step, sub_step, grads = self._grad_q.get(timeout=1.0)\n self._compute(grads)\n except queue.Empty:\n pass\n\n def start(self):\n self._runner.start()\n\n def join(self):\n self._exiting = True\n self._runner.join()\n", "path": "tensorflow/ps/ps.py"}]}
1,020
422
gh_patches_debug_9871
rasdani/github-patches
git_diff
OCA__social-623
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [13.0] [BUG]"base_search_mail_content" module > Getting bug with "hr" (Employees) module module: base_search_mail_content version: 13.0 **Context :** OCB 13.0 Odoo Server up to date [(08/30/2020),] Virgin database , to reproduce issue faced on my test environnement. Also !! >> Get same bug on runbot : http://3437172-13-0-56e0a2.runbot2-2.odoo-community.org **Steps to reproduce** - Install together "base_search_mail_content" & "hr" (Employees) native odoo module, and try to access to : hr" (Employees) **Current behavior** (model=hr.employee&view_type=kanban, or tree) When i try to access to menu "Employees"There is this following message : > Something went wrong ! Only types ['many2one'] are supported for category (found type text) **Current resolution** i uninstall "base_search_mail_content" to retreive access to hr" (Employees) ----------------------------------------------------------------------------------------------------------------------- ![Capture-Access _employees-1](https://user-images.githubusercontent.com/59052920/91721693-74c7db80-eb99-11ea-8871-c5a5b0b21eac.JPG) ![Capture-Access _employees](https://user-images.githubusercontent.com/59052920/91721766-932dd700-eb99-11ea-8c2b-12a280df4217.JPG) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `base_search_mail_content/models/mail_thread.py` Content: ``` 1 # Copyright 2016-17 Eficent Business and IT Consulting Services S.L. 2 # (http://www.eficent.com) 3 # Copyright 2016 Serpent Consulting Services Pvt. Ltd. 4 # (<http://www.serpentcs.com>) 5 # License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html). 6 7 from lxml import etree 8 9 from odoo import _, api, fields, models 10 from odoo.osv import expression 11 12 13 class MailThread(models.AbstractModel): 14 _inherit = "mail.thread" 15 16 def _search_message_content(self, operator, value): 17 model_domain = [("model", "=", self._name)] 18 if operator not in expression.NEGATIVE_TERM_OPERATORS: 19 model_domain += ["|"] * 4 20 model_domain += [ 21 ("record_name", operator, value), 22 ("subject", operator, value), 23 ("body", operator, value), 24 ("email_from", operator, value), 25 ("reply_to", operator, value), 26 ] 27 recs = self.env["mail.message"].search(model_domain) 28 return [("id", "in", recs.mapped("res_id"))] 29 30 message_content = fields.Text( 31 string="Message Content", 32 help="Message content, to be used only in searches", 33 compute=lambda self: False, 34 search="_search_message_content", 35 ) 36 37 @api.model 38 def fields_view_get( 39 self, view_id=None, view_type="form", toolbar=False, submenu=False 40 ): 41 """ 42 Override to add message_content field in all the objects 43 that inherits mail.thread 44 """ 45 res = super(MailThread, self).fields_view_get( 46 view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu 47 ) 48 if view_type == "search" and self._fields.get("message_content"): 49 doc = etree.XML(res["arch"]) 50 res["fields"].update( 51 {"message_content": {"type": "char", "string": _("Message Content")}} 52 ) 53 54 for node in doc.xpath("//field[last()]"): 55 # Add message_content in search view 56 elem = etree.Element("field", {"name": "message_content"}) 57 node.addnext(elem) 58 res["arch"] = etree.tostring(doc) 59 return res 60 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/base_search_mail_content/models/mail_thread.py b/base_search_mail_content/models/mail_thread.py --- a/base_search_mail_content/models/mail_thread.py +++ b/base_search_mail_content/models/mail_thread.py @@ -50,8 +50,7 @@ res["fields"].update( {"message_content": {"type": "char", "string": _("Message Content")}} ) - - for node in doc.xpath("//field[last()]"): + for node in doc.xpath("/search/field[last()]"): # Add message_content in search view elem = etree.Element("field", {"name": "message_content"}) node.addnext(elem)
{"golden_diff": "diff --git a/base_search_mail_content/models/mail_thread.py b/base_search_mail_content/models/mail_thread.py\n--- a/base_search_mail_content/models/mail_thread.py\n+++ b/base_search_mail_content/models/mail_thread.py\n@@ -50,8 +50,7 @@\n res[\"fields\"].update(\n {\"message_content\": {\"type\": \"char\", \"string\": _(\"Message Content\")}}\n )\n-\n- for node in doc.xpath(\"//field[last()]\"):\n+ for node in doc.xpath(\"/search/field[last()]\"):\n # Add message_content in search view\n elem = etree.Element(\"field\", {\"name\": \"message_content\"})\n node.addnext(elem)\n", "issue": "[13.0] [BUG]\"base_search_mail_content\" module > Getting bug with \"hr\" (Employees) module\nmodule: base_search_mail_content\r\nversion: 13.0\r\n\r\n**Context :**\r\nOCB 13.0 Odoo Server up to date [(08/30/2020),]\r\nVirgin database , to reproduce issue faced on my test environnement.\r\nAlso !! >> Get same bug on runbot : http://3437172-13-0-56e0a2.runbot2-2.odoo-community.org\r\n\r\n**Steps to reproduce**\r\n- Install together \"base_search_mail_content\" & \"hr\" (Employees) native odoo module, and try to access to : hr\" (Employees)\r\n\r\n**Current behavior** (model=hr.employee&view_type=kanban, or tree)\r\nWhen i try to access to menu \"Employees\"There is this following message : \r\n> Something went wrong !\r\nOnly types ['many2one'] are supported for category (found type text)\r\n\r\n**Current resolution**\r\ni uninstall \"base_search_mail_content\" to retreive access to hr\" (Employees)\r\n\r\n-----------------------------------------------------------------------------------------------------------------------\r\n![Capture-Access _employees-1](https://user-images.githubusercontent.com/59052920/91721693-74c7db80-eb99-11ea-8871-c5a5b0b21eac.JPG)\r\n![Capture-Access _employees](https://user-images.githubusercontent.com/59052920/91721766-932dd700-eb99-11ea-8c2b-12a280df4217.JPG)\r\n\r\n\n", "before_files": [{"content": "# Copyright 2016-17 Eficent Business and IT Consulting Services S.L.\n# (http://www.eficent.com)\n# Copyright 2016 Serpent Consulting Services Pvt. Ltd.\n# (<http://www.serpentcs.com>)\n# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).\n\nfrom lxml import etree\n\nfrom odoo import _, api, fields, models\nfrom odoo.osv import expression\n\n\nclass MailThread(models.AbstractModel):\n _inherit = \"mail.thread\"\n\n def _search_message_content(self, operator, value):\n model_domain = [(\"model\", \"=\", self._name)]\n if operator not in expression.NEGATIVE_TERM_OPERATORS:\n model_domain += [\"|\"] * 4\n model_domain += [\n (\"record_name\", operator, value),\n (\"subject\", operator, value),\n (\"body\", operator, value),\n (\"email_from\", operator, value),\n (\"reply_to\", operator, value),\n ]\n recs = self.env[\"mail.message\"].search(model_domain)\n return [(\"id\", \"in\", recs.mapped(\"res_id\"))]\n\n message_content = fields.Text(\n string=\"Message Content\",\n help=\"Message content, to be used only in searches\",\n compute=lambda self: False,\n search=\"_search_message_content\",\n )\n\n @api.model\n def fields_view_get(\n self, view_id=None, view_type=\"form\", toolbar=False, submenu=False\n ):\n \"\"\"\n Override to add message_content field in all the objects\n that inherits mail.thread\n \"\"\"\n res = super(MailThread, self).fields_view_get(\n view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu\n )\n if view_type == \"search\" and self._fields.get(\"message_content\"):\n doc = etree.XML(res[\"arch\"])\n res[\"fields\"].update(\n {\"message_content\": {\"type\": \"char\", \"string\": _(\"Message Content\")}}\n )\n\n for node in doc.xpath(\"//field[last()]\"):\n # Add message_content in search view\n elem = etree.Element(\"field\", {\"name\": \"message_content\"})\n node.addnext(elem)\n res[\"arch\"] = etree.tostring(doc)\n return res\n", "path": "base_search_mail_content/models/mail_thread.py"}], "after_files": [{"content": "# Copyright 2016-17 Eficent Business and IT Consulting Services S.L.\n# (http://www.eficent.com)\n# Copyright 2016 Serpent Consulting Services Pvt. Ltd.\n# (<http://www.serpentcs.com>)\n# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).\n\nfrom lxml import etree\n\nfrom odoo import _, api, fields, models\nfrom odoo.osv import expression\n\n\nclass MailThread(models.AbstractModel):\n _inherit = \"mail.thread\"\n\n def _search_message_content(self, operator, value):\n model_domain = [(\"model\", \"=\", self._name)]\n if operator not in expression.NEGATIVE_TERM_OPERATORS:\n model_domain += [\"|\"] * 4\n model_domain += [\n (\"record_name\", operator, value),\n (\"subject\", operator, value),\n (\"body\", operator, value),\n (\"email_from\", operator, value),\n (\"reply_to\", operator, value),\n ]\n recs = self.env[\"mail.message\"].search(model_domain)\n return [(\"id\", \"in\", recs.mapped(\"res_id\"))]\n\n message_content = fields.Text(\n string=\"Message Content\",\n help=\"Message content, to be used only in searches\",\n compute=lambda self: False,\n search=\"_search_message_content\",\n )\n\n @api.model\n def fields_view_get(\n self, view_id=None, view_type=\"form\", toolbar=False, submenu=False\n ):\n \"\"\"\n Override to add message_content field in all the objects\n that inherits mail.thread\n \"\"\"\n res = super(MailThread, self).fields_view_get(\n view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu\n )\n if view_type == \"search\" and self._fields.get(\"message_content\"):\n doc = etree.XML(res[\"arch\"])\n res[\"fields\"].update(\n {\"message_content\": {\"type\": \"char\", \"string\": _(\"Message Content\")}}\n )\n for node in doc.xpath(\"/search/field[last()]\"):\n # Add message_content in search view\n elem = etree.Element(\"field\", {\"name\": \"message_content\"})\n node.addnext(elem)\n res[\"arch\"] = etree.tostring(doc)\n return res\n", "path": "base_search_mail_content/models/mail_thread.py"}]}
1,252
141
gh_patches_debug_13356
rasdani/github-patches
git_diff
mathesar-foundation__mathesar-2791
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Modify page routing to allow for any database name ## Current behavior - Many of our pages have URLs that begin with the database name. - We also have routes that begin with things like `administration` and `auth`. - Those routing rules produce an ambiguous routing grammar making it impossible to use Mathesar with a database named "administration" (for example). ## Desired behavior We should change `/<db_name>/` to `/db/<db_name>` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mathesar/urls.py` Content: ``` 1 from django.contrib.auth.views import LoginView 2 from django.urls import include, path, re_path 3 from rest_framework_nested import routers 4 5 from mathesar import views 6 from mathesar.api.db import viewsets as db_viewsets 7 from mathesar.api.ui import viewsets as ui_viewsets 8 from mathesar.users.password_reset import MathesarPasswordResetConfirmView 9 10 db_router = routers.DefaultRouter() 11 db_router.register(r'tables', db_viewsets.TableViewSet, basename='table') 12 db_router.register(r'queries', db_viewsets.QueryViewSet, basename='query') 13 db_router.register(r'links', db_viewsets.LinkViewSet, basename='links') 14 db_router.register(r'schemas', db_viewsets.SchemaViewSet, basename='schema') 15 db_router.register(r'databases', db_viewsets.DatabaseViewSet, basename='database') 16 db_router.register(r'data_files', db_viewsets.DataFileViewSet, basename='data-file') 17 18 db_table_router = routers.NestedSimpleRouter(db_router, r'tables', lookup='table') 19 db_table_router.register(r'records', db_viewsets.RecordViewSet, basename='table-record') 20 db_table_router.register(r'settings', db_viewsets.TableSettingsViewSet, basename='table-setting') 21 db_table_router.register(r'columns', db_viewsets.ColumnViewSet, basename='table-column') 22 db_table_router.register(r'constraints', db_viewsets.ConstraintViewSet, basename='table-constraint') 23 24 ui_router = routers.DefaultRouter() 25 ui_router.register(r'version', ui_viewsets.VersionViewSet, basename='version') 26 ui_router.register(r'databases', ui_viewsets.DatabaseViewSet, basename='database') 27 ui_router.register(r'users', ui_viewsets.UserViewSet, basename='user') 28 ui_router.register(r'database_roles', ui_viewsets.DatabaseRoleViewSet, basename='database_role') 29 ui_router.register(r'schema_roles', ui_viewsets.SchemaRoleViewSet, basename='schema_role') 30 31 ui_table_router = routers.NestedSimpleRouter(db_router, r'tables', lookup='table') 32 ui_table_router.register(r'records', ui_viewsets.RecordViewSet, basename='table-record') 33 34 urlpatterns = [ 35 path('api/db/v0/', include(db_router.urls)), 36 path('api/db/v0/', include(db_table_router.urls)), 37 path('api/ui/v0/', include(ui_router.urls)), 38 path('api/ui/v0/', include(ui_table_router.urls)), 39 path('api/ui/v0/reflect/', views.reflect_all, name='reflect_all'), 40 path('auth/password_reset_confirm', MathesarPasswordResetConfirmView.as_view(), name='password_reset_confirm'), 41 path('auth/login/', LoginView.as_view(redirect_authenticated_user=True), name='login'), 42 path('auth/', include('django.contrib.auth.urls')), 43 path('', views.home, name='home'), 44 path('profile/', views.profile, name='profile'), 45 path('administration/', views.admin_home, name='admin_home'), 46 path('administration/users/', views.admin_home, name='admin_users_home'), 47 path('administration/users/<user_id>/', views.admin_home, name='admin_users_edit'), 48 path('administration/update/', views.admin_home, name='admin_update'), 49 path('<db_name>/', views.schemas, name='schemas'), 50 re_path( 51 r'^(?P<db_name>\w+)/(?P<schema_id>\w+)/', 52 views.schema_home, 53 name='schema_home' 54 ), 55 ] 56 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mathesar/urls.py b/mathesar/urls.py --- a/mathesar/urls.py +++ b/mathesar/urls.py @@ -46,9 +46,10 @@ path('administration/users/', views.admin_home, name='admin_users_home'), path('administration/users/<user_id>/', views.admin_home, name='admin_users_edit'), path('administration/update/', views.admin_home, name='admin_update'), - path('<db_name>/', views.schemas, name='schemas'), + path('db/', views.home, name='db_home'), + path('db/<db_name>/', views.schemas, name='schemas'), re_path( - r'^(?P<db_name>\w+)/(?P<schema_id>\w+)/', + r'^db/(?P<db_name>\w+)/(?P<schema_id>\w+)/', views.schema_home, name='schema_home' ),
{"golden_diff": "diff --git a/mathesar/urls.py b/mathesar/urls.py\n--- a/mathesar/urls.py\n+++ b/mathesar/urls.py\n@@ -46,9 +46,10 @@\n path('administration/users/', views.admin_home, name='admin_users_home'),\n path('administration/users/<user_id>/', views.admin_home, name='admin_users_edit'),\n path('administration/update/', views.admin_home, name='admin_update'),\n- path('<db_name>/', views.schemas, name='schemas'),\n+ path('db/', views.home, name='db_home'),\n+ path('db/<db_name>/', views.schemas, name='schemas'),\n re_path(\n- r'^(?P<db_name>\\w+)/(?P<schema_id>\\w+)/',\n+ r'^db/(?P<db_name>\\w+)/(?P<schema_id>\\w+)/',\n views.schema_home,\n name='schema_home'\n ),\n", "issue": "Modify page routing to allow for any database name\n## Current behavior\r\n\r\n- Many of our pages have URLs that begin with the database name.\r\n- We also have routes that begin with things like `administration` and `auth`.\r\n- Those routing rules produce an ambiguous routing grammar making it impossible to use Mathesar with a database named \"administration\" (for example).\r\n\r\n## Desired behavior\r\n\r\nWe should change `/<db_name>/` to `/db/<db_name>`\r\n\r\n\n", "before_files": [{"content": "from django.contrib.auth.views import LoginView\nfrom django.urls import include, path, re_path\nfrom rest_framework_nested import routers\n\nfrom mathesar import views\nfrom mathesar.api.db import viewsets as db_viewsets\nfrom mathesar.api.ui import viewsets as ui_viewsets\nfrom mathesar.users.password_reset import MathesarPasswordResetConfirmView\n\ndb_router = routers.DefaultRouter()\ndb_router.register(r'tables', db_viewsets.TableViewSet, basename='table')\ndb_router.register(r'queries', db_viewsets.QueryViewSet, basename='query')\ndb_router.register(r'links', db_viewsets.LinkViewSet, basename='links')\ndb_router.register(r'schemas', db_viewsets.SchemaViewSet, basename='schema')\ndb_router.register(r'databases', db_viewsets.DatabaseViewSet, basename='database')\ndb_router.register(r'data_files', db_viewsets.DataFileViewSet, basename='data-file')\n\ndb_table_router = routers.NestedSimpleRouter(db_router, r'tables', lookup='table')\ndb_table_router.register(r'records', db_viewsets.RecordViewSet, basename='table-record')\ndb_table_router.register(r'settings', db_viewsets.TableSettingsViewSet, basename='table-setting')\ndb_table_router.register(r'columns', db_viewsets.ColumnViewSet, basename='table-column')\ndb_table_router.register(r'constraints', db_viewsets.ConstraintViewSet, basename='table-constraint')\n\nui_router = routers.DefaultRouter()\nui_router.register(r'version', ui_viewsets.VersionViewSet, basename='version')\nui_router.register(r'databases', ui_viewsets.DatabaseViewSet, basename='database')\nui_router.register(r'users', ui_viewsets.UserViewSet, basename='user')\nui_router.register(r'database_roles', ui_viewsets.DatabaseRoleViewSet, basename='database_role')\nui_router.register(r'schema_roles', ui_viewsets.SchemaRoleViewSet, basename='schema_role')\n\nui_table_router = routers.NestedSimpleRouter(db_router, r'tables', lookup='table')\nui_table_router.register(r'records', ui_viewsets.RecordViewSet, basename='table-record')\n\nurlpatterns = [\n path('api/db/v0/', include(db_router.urls)),\n path('api/db/v0/', include(db_table_router.urls)),\n path('api/ui/v0/', include(ui_router.urls)),\n path('api/ui/v0/', include(ui_table_router.urls)),\n path('api/ui/v0/reflect/', views.reflect_all, name='reflect_all'),\n path('auth/password_reset_confirm', MathesarPasswordResetConfirmView.as_view(), name='password_reset_confirm'),\n path('auth/login/', LoginView.as_view(redirect_authenticated_user=True), name='login'),\n path('auth/', include('django.contrib.auth.urls')),\n path('', views.home, name='home'),\n path('profile/', views.profile, name='profile'),\n path('administration/', views.admin_home, name='admin_home'),\n path('administration/users/', views.admin_home, name='admin_users_home'),\n path('administration/users/<user_id>/', views.admin_home, name='admin_users_edit'),\n path('administration/update/', views.admin_home, name='admin_update'),\n path('<db_name>/', views.schemas, name='schemas'),\n re_path(\n r'^(?P<db_name>\\w+)/(?P<schema_id>\\w+)/',\n views.schema_home,\n name='schema_home'\n ),\n]\n", "path": "mathesar/urls.py"}], "after_files": [{"content": "from django.contrib.auth.views import LoginView\nfrom django.urls import include, path, re_path\nfrom rest_framework_nested import routers\n\nfrom mathesar import views\nfrom mathesar.api.db import viewsets as db_viewsets\nfrom mathesar.api.ui import viewsets as ui_viewsets\nfrom mathesar.users.password_reset import MathesarPasswordResetConfirmView\n\ndb_router = routers.DefaultRouter()\ndb_router.register(r'tables', db_viewsets.TableViewSet, basename='table')\ndb_router.register(r'queries', db_viewsets.QueryViewSet, basename='query')\ndb_router.register(r'links', db_viewsets.LinkViewSet, basename='links')\ndb_router.register(r'schemas', db_viewsets.SchemaViewSet, basename='schema')\ndb_router.register(r'databases', db_viewsets.DatabaseViewSet, basename='database')\ndb_router.register(r'data_files', db_viewsets.DataFileViewSet, basename='data-file')\n\ndb_table_router = routers.NestedSimpleRouter(db_router, r'tables', lookup='table')\ndb_table_router.register(r'records', db_viewsets.RecordViewSet, basename='table-record')\ndb_table_router.register(r'settings', db_viewsets.TableSettingsViewSet, basename='table-setting')\ndb_table_router.register(r'columns', db_viewsets.ColumnViewSet, basename='table-column')\ndb_table_router.register(r'constraints', db_viewsets.ConstraintViewSet, basename='table-constraint')\n\nui_router = routers.DefaultRouter()\nui_router.register(r'version', ui_viewsets.VersionViewSet, basename='version')\nui_router.register(r'databases', ui_viewsets.DatabaseViewSet, basename='database')\nui_router.register(r'users', ui_viewsets.UserViewSet, basename='user')\nui_router.register(r'database_roles', ui_viewsets.DatabaseRoleViewSet, basename='database_role')\nui_router.register(r'schema_roles', ui_viewsets.SchemaRoleViewSet, basename='schema_role')\n\nui_table_router = routers.NestedSimpleRouter(db_router, r'tables', lookup='table')\nui_table_router.register(r'records', ui_viewsets.RecordViewSet, basename='table-record')\n\nurlpatterns = [\n path('api/db/v0/', include(db_router.urls)),\n path('api/db/v0/', include(db_table_router.urls)),\n path('api/ui/v0/', include(ui_router.urls)),\n path('api/ui/v0/', include(ui_table_router.urls)),\n path('api/ui/v0/reflect/', views.reflect_all, name='reflect_all'),\n path('auth/password_reset_confirm', MathesarPasswordResetConfirmView.as_view(), name='password_reset_confirm'),\n path('auth/login/', LoginView.as_view(redirect_authenticated_user=True), name='login'),\n path('auth/', include('django.contrib.auth.urls')),\n path('', views.home, name='home'),\n path('profile/', views.profile, name='profile'),\n path('administration/', views.admin_home, name='admin_home'),\n path('administration/users/', views.admin_home, name='admin_users_home'),\n path('administration/users/<user_id>/', views.admin_home, name='admin_users_edit'),\n path('administration/update/', views.admin_home, name='admin_update'),\n path('db/', views.home, name='db_home'),\n path('db/<db_name>/', views.schemas, name='schemas'),\n re_path(\n r'^db/(?P<db_name>\\w+)/(?P<schema_id>\\w+)/',\n views.schema_home,\n name='schema_home'\n ),\n]\n", "path": "mathesar/urls.py"}]}
1,160
204
gh_patches_debug_7778
rasdani/github-patches
git_diff
nipy__nipype-2096
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- interfaces.camino.convert.FSL2Scheme does not show up in documentation Diagram on front page of docs contains a typo "Idiosynchratic" should be "Idiosyncratic" --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `tools/build_interface_docs.py` Content: ``` 1 #!/usr/bin/env python 2 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- 3 # vi: set ft=python sts=4 ts=4 sw=4 et: 4 """Script to auto-generate interface docs. 5 """ 6 from __future__ import print_function, unicode_literals 7 # stdlib imports 8 import os 9 import sys 10 11 # ***************************************************************************** 12 if __name__ == '__main__': 13 nipypepath = os.path.abspath('..') 14 sys.path.insert(1, nipypepath) 15 # local imports 16 from interfacedocgen import InterfaceHelpWriter 17 package = 'nipype' 18 outdir = os.path.join('interfaces', 'generated') 19 docwriter = InterfaceHelpWriter(package) 20 # Packages that should not be included in generated API docs. 21 docwriter.package_skip_patterns += ['\.external$', 22 '\.fixes$', 23 '\.utils$', 24 '\.pipeline', 25 '\.testing', 26 '\.caching', 27 '\.scripts', 28 ] 29 # Modules that should not be included in generated API docs. 30 docwriter.module_skip_patterns += ['\.version$', 31 '\.interfaces\.base$', 32 '\.interfaces\.matlab$', 33 '\.interfaces\.rest$', 34 '\.interfaces\.pymvpa$', 35 '\.interfaces\.slicer\.generate_classes$', 36 '\.interfaces\.spm\.base$', 37 '\.interfaces\.traits', 38 '\.pipeline\.alloy$', 39 '\.pipeline\.s3_node_wrapper$', 40 '\.testing', 41 '\.scripts', 42 ] 43 docwriter.class_skip_patterns += ['AFNICommand', 44 'ANTS', 45 'FSL', 46 'FS', 47 'Info', 48 '^SPM', 49 'Tester', 50 'Spec$', 51 'Numpy' 52 # NipypeTester raises an 53 # exception when instantiated in 54 # InterfaceHelpWriter.generate_api_doc 55 'NipypeTester', 56 ] 57 docwriter.write_api_docs(outdir) 58 docwriter.write_index(outdir, 'gen', relative_to='interfaces') 59 print('%d files written' % len(docwriter.written_modules)) 60 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/tools/build_interface_docs.py b/tools/build_interface_docs.py --- a/tools/build_interface_docs.py +++ b/tools/build_interface_docs.py @@ -42,7 +42,7 @@ ] docwriter.class_skip_patterns += ['AFNICommand', 'ANTS', - 'FSL', + 'FSLCommand', 'FS', 'Info', '^SPM',
{"golden_diff": "diff --git a/tools/build_interface_docs.py b/tools/build_interface_docs.py\n--- a/tools/build_interface_docs.py\n+++ b/tools/build_interface_docs.py\n@@ -42,7 +42,7 @@\n ]\n docwriter.class_skip_patterns += ['AFNICommand',\n 'ANTS',\n- 'FSL',\n+ 'FSLCommand',\n 'FS',\n 'Info',\n '^SPM',\n", "issue": "interfaces.camino.convert.FSL2Scheme does not show up in documentation\n\nDiagram on front page of docs contains a typo\n\"Idiosynchratic\" should be \"Idiosyncratic\"\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"Script to auto-generate interface docs.\n\"\"\"\nfrom __future__ import print_function, unicode_literals\n# stdlib imports\nimport os\nimport sys\n\n# *****************************************************************************\nif __name__ == '__main__':\n nipypepath = os.path.abspath('..')\n sys.path.insert(1, nipypepath)\n # local imports\n from interfacedocgen import InterfaceHelpWriter\n package = 'nipype'\n outdir = os.path.join('interfaces', 'generated')\n docwriter = InterfaceHelpWriter(package)\n # Packages that should not be included in generated API docs.\n docwriter.package_skip_patterns += ['\\.external$',\n '\\.fixes$',\n '\\.utils$',\n '\\.pipeline',\n '\\.testing',\n '\\.caching',\n '\\.scripts',\n ]\n # Modules that should not be included in generated API docs.\n docwriter.module_skip_patterns += ['\\.version$',\n '\\.interfaces\\.base$',\n '\\.interfaces\\.matlab$',\n '\\.interfaces\\.rest$',\n '\\.interfaces\\.pymvpa$',\n '\\.interfaces\\.slicer\\.generate_classes$',\n '\\.interfaces\\.spm\\.base$',\n '\\.interfaces\\.traits',\n '\\.pipeline\\.alloy$',\n '\\.pipeline\\.s3_node_wrapper$',\n '\\.testing',\n '\\.scripts',\n ]\n docwriter.class_skip_patterns += ['AFNICommand',\n 'ANTS',\n 'FSL',\n 'FS',\n 'Info',\n '^SPM',\n 'Tester',\n 'Spec$',\n 'Numpy'\n # NipypeTester raises an\n # exception when instantiated in\n # InterfaceHelpWriter.generate_api_doc\n 'NipypeTester',\n ]\n docwriter.write_api_docs(outdir)\n docwriter.write_index(outdir, 'gen', relative_to='interfaces')\n print('%d files written' % len(docwriter.written_modules))\n", "path": "tools/build_interface_docs.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"Script to auto-generate interface docs.\n\"\"\"\nfrom __future__ import print_function, unicode_literals\n# stdlib imports\nimport os\nimport sys\n\n# *****************************************************************************\nif __name__ == '__main__':\n nipypepath = os.path.abspath('..')\n sys.path.insert(1, nipypepath)\n # local imports\n from interfacedocgen import InterfaceHelpWriter\n package = 'nipype'\n outdir = os.path.join('interfaces', 'generated')\n docwriter = InterfaceHelpWriter(package)\n # Packages that should not be included in generated API docs.\n docwriter.package_skip_patterns += ['\\.external$',\n '\\.fixes$',\n '\\.utils$',\n '\\.pipeline',\n '\\.testing',\n '\\.caching',\n '\\.scripts',\n ]\n # Modules that should not be included in generated API docs.\n docwriter.module_skip_patterns += ['\\.version$',\n '\\.interfaces\\.base$',\n '\\.interfaces\\.matlab$',\n '\\.interfaces\\.rest$',\n '\\.interfaces\\.pymvpa$',\n '\\.interfaces\\.slicer\\.generate_classes$',\n '\\.interfaces\\.spm\\.base$',\n '\\.interfaces\\.traits',\n '\\.pipeline\\.alloy$',\n '\\.pipeline\\.s3_node_wrapper$',\n '\\.testing',\n '\\.scripts',\n ]\n docwriter.class_skip_patterns += ['AFNICommand',\n 'ANTS',\n 'FSLCommand',\n 'FS',\n 'Info',\n '^SPM',\n 'Tester',\n 'Spec$',\n 'Numpy'\n # NipypeTester raises an\n # exception when instantiated in\n # InterfaceHelpWriter.generate_api_doc\n 'NipypeTester',\n ]\n docwriter.write_api_docs(outdir)\n docwriter.write_index(outdir, 'gen', relative_to='interfaces')\n print('%d files written' % len(docwriter.written_modules))\n", "path": "tools/build_interface_docs.py"}]}
873
88
gh_patches_debug_9682
rasdani/github-patches
git_diff
pypa__setuptools-781
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ImportError: cannot import name monkey ``` user@hostname:~/project$ venv/bin/pip install -U setuptools==27.1.0 Collecting setuptools==27.1.0 Downloading setuptools-27.1.0-py2.py3-none-any.whl (464kB) 100% |████████████████████████████████| 471kB 1.6MB/s Installing collected packages: setuptools Found existing installation: setuptools 27.1.1 Uninstalling setuptools-27.1.1: Successfully uninstalled setuptools-27.1.1 Successfully installed setuptools-27.1.0 user@hostname:~/project$ venv/bin/python -c "import setuptools" user@hostname:~/project$ venv/bin/pip install -U setuptools==27.1.1 Collecting setuptools==27.1.1 Using cached setuptools-27.1.1-py2.py3-none-any.whl Installing collected packages: setuptools Found existing installation: setuptools 27.1.0 Uninstalling setuptools-27.1.0: Successfully uninstalled setuptools-27.1.0 Successfully installed setuptools-27.1.1 user@hostname:~/project$ venv/bin/python -c "import setuptools" Traceback (most recent call last): File "<string>", line 1, in <module> File "/home/user/project/venv/local/lib/python2.7/site-packages/setuptools/__init__.py", line 13, in <module> from setuptools.extension import Extension File "/home/user/project/venv/local/lib/python2.7/site-packages/setuptools/extension.py", line 9, in <module> from .monkey import get_unpatched File "/home/user/project/venv/local/lib/python2.7/site-packages/setuptools/monkey.py", line 10, in <module> from . import msvc File "/home/user/project/venv/local/lib/python2.7/site-packages/setuptools/msvc.py", line 27, in <module> from . import monkey ImportError: cannot import name monkey user@hostname:~/project$ ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setuptools/monkey.py` Content: ``` 1 """ 2 Monkey patching of distutils. 3 """ 4 5 import sys 6 import distutils.filelist 7 import platform 8 9 import setuptools 10 from . import msvc 11 12 13 __all__ = [] 14 """ 15 Everything is private. Contact the project team 16 if you think you need this functionality. 17 """ 18 19 20 def get_unpatched(cls): 21 """Protect against re-patching the distutils if reloaded 22 23 Also ensures that no other distutils extension monkeypatched the distutils 24 first. 25 """ 26 while cls.__module__.startswith('setuptools'): 27 cls, = cls.__bases__ 28 if not cls.__module__.startswith('distutils'): 29 raise AssertionError( 30 "distutils has already been patched by %r" % cls 31 ) 32 return cls 33 34 35 def patch_all(): 36 # we can't patch distutils.cmd, alas 37 distutils.core.Command = setuptools.Command 38 39 has_issue_12885 = ( 40 sys.version_info < (3, 4, 6) 41 or 42 (3, 5) < sys.version_info <= (3, 5, 3) 43 or 44 (3, 6) < sys.version_info 45 ) 46 47 if has_issue_12885: 48 # fix findall bug in distutils (http://bugs.python.org/issue12885) 49 distutils.filelist.findall = setuptools.findall 50 51 needs_warehouse = ( 52 sys.version_info < (2, 7, 13) 53 or 54 (3, 0) < sys.version_info < (3, 3, 7) 55 or 56 (3, 4) < sys.version_info < (3, 4, 6) 57 or 58 (3, 5) < sys.version_info <= (3, 5, 3) 59 or 60 (3, 6) < sys.version_info 61 ) 62 63 if needs_warehouse: 64 warehouse = 'https://upload.pypi.org/legacy/' 65 distutils.config.PyPIRCCommand.DEFAULT_REPOSITORY = warehouse 66 67 _patch_distribution_metadata_write_pkg_file() 68 _patch_distribution_metadata_write_pkg_info() 69 70 # Install Distribution throughout the distutils 71 for module in distutils.dist, distutils.core, distutils.cmd: 72 module.Distribution = setuptools.dist.Distribution 73 74 # Install the patched Extension 75 distutils.core.Extension = setuptools.extension.Extension 76 distutils.extension.Extension = setuptools.extension.Extension 77 if 'distutils.command.build_ext' in sys.modules: 78 sys.modules['distutils.command.build_ext'].Extension = ( 79 setuptools.extension.Extension 80 ) 81 82 patch_for_msvc_specialized_compiler() 83 84 85 def _patch_distribution_metadata_write_pkg_file(): 86 """Patch write_pkg_file to also write Requires-Python/Requires-External""" 87 distutils.dist.DistributionMetadata.write_pkg_file = ( 88 setuptools.dist.write_pkg_file 89 ) 90 91 92 def _patch_distribution_metadata_write_pkg_info(): 93 """ 94 Workaround issue #197 - Python 3 prior to 3.2.2 uses an environment-local 95 encoding to save the pkg_info. Monkey-patch its write_pkg_info method to 96 correct this undesirable behavior. 97 """ 98 environment_local = (3,) <= sys.version_info[:3] < (3, 2, 2) 99 if not environment_local: 100 return 101 102 distutils.dist.DistributionMetadata.write_pkg_info = ( 103 setuptools.dist.write_pkg_info 104 ) 105 106 107 unpatched = dict() 108 109 110 def patch_for_msvc_specialized_compiler(): 111 """ 112 Patch functions in distutils to use standalone Microsoft Visual C++ 113 compilers. 114 """ 115 try: 116 # Distutil file for MSVC++ 9.0 and upper (Python 2.7 to 3.4) 117 import distutils.msvc9compiler as msvc9compiler 118 except ImportError: 119 pass 120 121 try: 122 # Distutil file for MSVC++ 14.0 and upper (Python 3.5+) 123 import distutils._msvccompiler as msvc14compiler 124 except ImportError: 125 pass 126 127 if platform.system() != 'Windows': 128 # Compilers only availables on Microsoft Windows 129 return 130 131 if unpatched: 132 # Already patched 133 return 134 135 try: 136 # Patch distutils.msvc9compiler 137 unpatched['msvc9_find_vcvarsall'] = msvc9compiler.find_vcvarsall 138 msvc9compiler.find_vcvarsall = msvc.msvc9_find_vcvarsall 139 unpatched['msvc9_query_vcvarsall'] = msvc9compiler.query_vcvarsall 140 msvc9compiler.query_vcvarsall = msvc.msvc9_query_vcvarsall 141 except NameError: 142 pass 143 144 try: 145 # Patch distutils._msvccompiler._get_vc_env 146 unpatched['msvc14_get_vc_env'] = msvc14compiler._get_vc_env 147 msvc14compiler._get_vc_env = msvc.msvc14_get_vc_env 148 except NameError: 149 pass 150 151 try: 152 # Patch distutils._msvccompiler.gen_lib_options for Numpy 153 unpatched['msvc14_gen_lib_options'] = msvc14compiler.gen_lib_options 154 msvc14compiler.gen_lib_options = msvc.msvc14_gen_lib_options 155 except NameError: 156 pass 157 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setuptools/monkey.py b/setuptools/monkey.py --- a/setuptools/monkey.py +++ b/setuptools/monkey.py @@ -7,7 +7,6 @@ import platform import setuptools -from . import msvc __all__ = [] @@ -112,6 +111,8 @@ Patch functions in distutils to use standalone Microsoft Visual C++ compilers. """ + from . import msvc + try: # Distutil file for MSVC++ 9.0 and upper (Python 2.7 to 3.4) import distutils.msvc9compiler as msvc9compiler
{"golden_diff": "diff --git a/setuptools/monkey.py b/setuptools/monkey.py\n--- a/setuptools/monkey.py\n+++ b/setuptools/monkey.py\n@@ -7,7 +7,6 @@\n import platform\n \n import setuptools\n-from . import msvc\n \n \n __all__ = []\n@@ -112,6 +111,8 @@\n Patch functions in distutils to use standalone Microsoft Visual C++\n compilers.\n \"\"\"\n+ from . import msvc\n+\n try:\n # Distutil file for MSVC++ 9.0 and upper (Python 2.7 to 3.4)\n import distutils.msvc9compiler as msvc9compiler\n", "issue": "ImportError: cannot import name monkey\n```\nuser@hostname:~/project$ venv/bin/pip install -U setuptools==27.1.0\nCollecting setuptools==27.1.0\n Downloading setuptools-27.1.0-py2.py3-none-any.whl (464kB)\n 100% |\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 471kB 1.6MB/s \nInstalling collected packages: setuptools\n Found existing installation: setuptools 27.1.1\n Uninstalling setuptools-27.1.1:\n Successfully uninstalled setuptools-27.1.1\nSuccessfully installed setuptools-27.1.0\nuser@hostname:~/project$ venv/bin/python -c \"import setuptools\"\nuser@hostname:~/project$ venv/bin/pip install -U setuptools==27.1.1\nCollecting setuptools==27.1.1\n Using cached setuptools-27.1.1-py2.py3-none-any.whl\nInstalling collected packages: setuptools\n Found existing installation: setuptools 27.1.0\n Uninstalling setuptools-27.1.0:\n Successfully uninstalled setuptools-27.1.0\nSuccessfully installed setuptools-27.1.1\nuser@hostname:~/project$ venv/bin/python -c \"import setuptools\"\nTraceback (most recent call last):\n File \"<string>\", line 1, in <module>\n File \"/home/user/project/venv/local/lib/python2.7/site-packages/setuptools/__init__.py\", line 13, in <module>\n from setuptools.extension import Extension\n File \"/home/user/project/venv/local/lib/python2.7/site-packages/setuptools/extension.py\", line 9, in <module>\n from .monkey import get_unpatched\n File \"/home/user/project/venv/local/lib/python2.7/site-packages/setuptools/monkey.py\", line 10, in <module>\n from . import msvc\n File \"/home/user/project/venv/local/lib/python2.7/site-packages/setuptools/msvc.py\", line 27, in <module>\n from . import monkey\nImportError: cannot import name monkey\nuser@hostname:~/project$ \n```\n\n", "before_files": [{"content": "\"\"\"\nMonkey patching of distutils.\n\"\"\"\n\nimport sys\nimport distutils.filelist\nimport platform\n\nimport setuptools\nfrom . import msvc\n\n\n__all__ = []\n\"\"\"\nEverything is private. Contact the project team\nif you think you need this functionality.\n\"\"\"\n\n\ndef get_unpatched(cls):\n \"\"\"Protect against re-patching the distutils if reloaded\n\n Also ensures that no other distutils extension monkeypatched the distutils\n first.\n \"\"\"\n while cls.__module__.startswith('setuptools'):\n cls, = cls.__bases__\n if not cls.__module__.startswith('distutils'):\n raise AssertionError(\n \"distutils has already been patched by %r\" % cls\n )\n return cls\n\n\ndef patch_all():\n # we can't patch distutils.cmd, alas\n distutils.core.Command = setuptools.Command\n\n has_issue_12885 = (\n sys.version_info < (3, 4, 6)\n or\n (3, 5) < sys.version_info <= (3, 5, 3)\n or\n (3, 6) < sys.version_info\n )\n\n if has_issue_12885:\n # fix findall bug in distutils (http://bugs.python.org/issue12885)\n distutils.filelist.findall = setuptools.findall\n\n needs_warehouse = (\n sys.version_info < (2, 7, 13)\n or\n (3, 0) < sys.version_info < (3, 3, 7)\n or\n (3, 4) < sys.version_info < (3, 4, 6)\n or\n (3, 5) < sys.version_info <= (3, 5, 3)\n or\n (3, 6) < sys.version_info\n )\n\n if needs_warehouse:\n warehouse = 'https://upload.pypi.org/legacy/'\n distutils.config.PyPIRCCommand.DEFAULT_REPOSITORY = warehouse\n\n _patch_distribution_metadata_write_pkg_file()\n _patch_distribution_metadata_write_pkg_info()\n\n # Install Distribution throughout the distutils\n for module in distutils.dist, distutils.core, distutils.cmd:\n module.Distribution = setuptools.dist.Distribution\n\n # Install the patched Extension\n distutils.core.Extension = setuptools.extension.Extension\n distutils.extension.Extension = setuptools.extension.Extension\n if 'distutils.command.build_ext' in sys.modules:\n sys.modules['distutils.command.build_ext'].Extension = (\n setuptools.extension.Extension\n )\n\n patch_for_msvc_specialized_compiler()\n\n\ndef _patch_distribution_metadata_write_pkg_file():\n \"\"\"Patch write_pkg_file to also write Requires-Python/Requires-External\"\"\"\n distutils.dist.DistributionMetadata.write_pkg_file = (\n setuptools.dist.write_pkg_file\n )\n\n\ndef _patch_distribution_metadata_write_pkg_info():\n \"\"\"\n Workaround issue #197 - Python 3 prior to 3.2.2 uses an environment-local\n encoding to save the pkg_info. Monkey-patch its write_pkg_info method to\n correct this undesirable behavior.\n \"\"\"\n environment_local = (3,) <= sys.version_info[:3] < (3, 2, 2)\n if not environment_local:\n return\n\n distutils.dist.DistributionMetadata.write_pkg_info = (\n setuptools.dist.write_pkg_info\n )\n\n\nunpatched = dict()\n\n\ndef patch_for_msvc_specialized_compiler():\n \"\"\"\n Patch functions in distutils to use standalone Microsoft Visual C++\n compilers.\n \"\"\"\n try:\n # Distutil file for MSVC++ 9.0 and upper (Python 2.7 to 3.4)\n import distutils.msvc9compiler as msvc9compiler\n except ImportError:\n pass\n\n try:\n # Distutil file for MSVC++ 14.0 and upper (Python 3.5+)\n import distutils._msvccompiler as msvc14compiler\n except ImportError:\n pass\n\n if platform.system() != 'Windows':\n # Compilers only availables on Microsoft Windows\n return\n\n if unpatched:\n # Already patched\n return\n\n try:\n # Patch distutils.msvc9compiler\n unpatched['msvc9_find_vcvarsall'] = msvc9compiler.find_vcvarsall\n msvc9compiler.find_vcvarsall = msvc.msvc9_find_vcvarsall\n unpatched['msvc9_query_vcvarsall'] = msvc9compiler.query_vcvarsall\n msvc9compiler.query_vcvarsall = msvc.msvc9_query_vcvarsall\n except NameError:\n pass\n\n try:\n # Patch distutils._msvccompiler._get_vc_env\n unpatched['msvc14_get_vc_env'] = msvc14compiler._get_vc_env\n msvc14compiler._get_vc_env = msvc.msvc14_get_vc_env\n except NameError:\n pass\n\n try:\n # Patch distutils._msvccompiler.gen_lib_options for Numpy\n unpatched['msvc14_gen_lib_options'] = msvc14compiler.gen_lib_options\n msvc14compiler.gen_lib_options = msvc.msvc14_gen_lib_options\n except NameError:\n pass\n", "path": "setuptools/monkey.py"}], "after_files": [{"content": "\"\"\"\nMonkey patching of distutils.\n\"\"\"\n\nimport sys\nimport distutils.filelist\nimport platform\n\nimport setuptools\n\n\n__all__ = []\n\"\"\"\nEverything is private. Contact the project team\nif you think you need this functionality.\n\"\"\"\n\n\ndef get_unpatched(cls):\n \"\"\"Protect against re-patching the distutils if reloaded\n\n Also ensures that no other distutils extension monkeypatched the distutils\n first.\n \"\"\"\n while cls.__module__.startswith('setuptools'):\n cls, = cls.__bases__\n if not cls.__module__.startswith('distutils'):\n raise AssertionError(\n \"distutils has already been patched by %r\" % cls\n )\n return cls\n\n\ndef patch_all():\n # we can't patch distutils.cmd, alas\n distutils.core.Command = setuptools.Command\n\n has_issue_12885 = (\n sys.version_info < (3, 4, 6)\n or\n (3, 5) < sys.version_info <= (3, 5, 3)\n or\n (3, 6) < sys.version_info\n )\n\n if has_issue_12885:\n # fix findall bug in distutils (http://bugs.python.org/issue12885)\n distutils.filelist.findall = setuptools.findall\n\n needs_warehouse = (\n sys.version_info < (2, 7, 13)\n or\n (3, 0) < sys.version_info < (3, 3, 7)\n or\n (3, 4) < sys.version_info < (3, 4, 6)\n or\n (3, 5) < sys.version_info <= (3, 5, 3)\n or\n (3, 6) < sys.version_info\n )\n\n if needs_warehouse:\n warehouse = 'https://upload.pypi.org/legacy/'\n distutils.config.PyPIRCCommand.DEFAULT_REPOSITORY = warehouse\n\n _patch_distribution_metadata_write_pkg_file()\n _patch_distribution_metadata_write_pkg_info()\n\n # Install Distribution throughout the distutils\n for module in distutils.dist, distutils.core, distutils.cmd:\n module.Distribution = setuptools.dist.Distribution\n\n # Install the patched Extension\n distutils.core.Extension = setuptools.extension.Extension\n distutils.extension.Extension = setuptools.extension.Extension\n if 'distutils.command.build_ext' in sys.modules:\n sys.modules['distutils.command.build_ext'].Extension = (\n setuptools.extension.Extension\n )\n\n patch_for_msvc_specialized_compiler()\n\n\ndef _patch_distribution_metadata_write_pkg_file():\n \"\"\"Patch write_pkg_file to also write Requires-Python/Requires-External\"\"\"\n distutils.dist.DistributionMetadata.write_pkg_file = (\n setuptools.dist.write_pkg_file\n )\n\n\ndef _patch_distribution_metadata_write_pkg_info():\n \"\"\"\n Workaround issue #197 - Python 3 prior to 3.2.2 uses an environment-local\n encoding to save the pkg_info. Monkey-patch its write_pkg_info method to\n correct this undesirable behavior.\n \"\"\"\n environment_local = (3,) <= sys.version_info[:3] < (3, 2, 2)\n if not environment_local:\n return\n\n distutils.dist.DistributionMetadata.write_pkg_info = (\n setuptools.dist.write_pkg_info\n )\n\n\nunpatched = dict()\n\n\ndef patch_for_msvc_specialized_compiler():\n \"\"\"\n Patch functions in distutils to use standalone Microsoft Visual C++\n compilers.\n \"\"\"\n from . import msvc\n\n try:\n # Distutil file for MSVC++ 9.0 and upper (Python 2.7 to 3.4)\n import distutils.msvc9compiler as msvc9compiler\n except ImportError:\n pass\n\n try:\n # Distutil file for MSVC++ 14.0 and upper (Python 3.5+)\n import distutils._msvccompiler as msvc14compiler\n except ImportError:\n pass\n\n if platform.system() != 'Windows':\n # Compilers only availables on Microsoft Windows\n return\n\n if unpatched:\n # Already patched\n return\n\n try:\n # Patch distutils.msvc9compiler\n unpatched['msvc9_find_vcvarsall'] = msvc9compiler.find_vcvarsall\n msvc9compiler.find_vcvarsall = msvc.msvc9_find_vcvarsall\n unpatched['msvc9_query_vcvarsall'] = msvc9compiler.query_vcvarsall\n msvc9compiler.query_vcvarsall = msvc.msvc9_query_vcvarsall\n except NameError:\n pass\n\n try:\n # Patch distutils._msvccompiler._get_vc_env\n unpatched['msvc14_get_vc_env'] = msvc14compiler._get_vc_env\n msvc14compiler._get_vc_env = msvc.msvc14_get_vc_env\n except NameError:\n pass\n\n try:\n # Patch distutils._msvccompiler.gen_lib_options for Numpy\n unpatched['msvc14_gen_lib_options'] = msvc14compiler.gen_lib_options\n msvc14compiler.gen_lib_options = msvc.msvc14_gen_lib_options\n except NameError:\n pass\n", "path": "setuptools/monkey.py"}]}
2,292
145
gh_patches_debug_2400
rasdani/github-patches
git_diff
dask__distributed-2975
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- dask.distributed.progress no longer callable in 2.3.0? We've used the progress() function from dask.distributed a bunch in the past to display a progress bar in JupyterLab, but it seems to have stopped working after upgrading to Dask 2.3.0: ``` from dask.distributed import Client, progress import dask.dataframe as dd df = dd.demo.make_timeseries('2010', '2016', {'value': float, 'name': str, 'id': int}, freq='10s', partition_freq='7d', seed=1) df = df.persist() progress(df) ``` Executing this in a single cell in JupyterLab (with an existing Dask cluster already running) results in: ``` --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-1-16af814d7204> in <module> 7 8 df = df.persist() ----> 9 progress(df) TypeError: 'module' object is not callable ``` Let me know if I can provide any more info. Thanks! --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `distributed/__init__.py` Content: ``` 1 from . import config 2 from dask.config import config 3 from .actor import Actor, ActorFuture 4 from .core import connect, rpc 5 from .deploy import LocalCluster, Adaptive, SpecCluster 6 from .diagnostics import progress 7 from .client import ( 8 Client, 9 Executor, 10 CompatibleExecutor, 11 wait, 12 as_completed, 13 default_client, 14 fire_and_forget, 15 Future, 16 futures_of, 17 get_task_stream, 18 ) 19 from .lock import Lock 20 from .nanny import Nanny 21 from .pubsub import Pub, Sub 22 from .queues import Queue 23 from .scheduler import Scheduler 24 from .threadpoolexecutor import rejoin 25 from .utils import sync 26 from .variable import Variable 27 from .worker import Worker, get_worker, get_client, secede, Reschedule 28 from .worker_client import local_client, worker_client 29 30 from tornado.gen import TimeoutError 31 32 from ._version import get_versions 33 34 versions = get_versions() 35 __version__ = versions["version"] 36 __git_revision__ = versions["full-revisionid"] 37 del get_versions, versions 38 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/distributed/__init__.py b/distributed/__init__.py --- a/distributed/__init__.py +++ b/distributed/__init__.py @@ -3,7 +3,7 @@ from .actor import Actor, ActorFuture from .core import connect, rpc from .deploy import LocalCluster, Adaptive, SpecCluster -from .diagnostics import progress +from .diagnostics.progressbar import progress from .client import ( Client, Executor,
{"golden_diff": "diff --git a/distributed/__init__.py b/distributed/__init__.py\n--- a/distributed/__init__.py\n+++ b/distributed/__init__.py\n@@ -3,7 +3,7 @@\n from .actor import Actor, ActorFuture\n from .core import connect, rpc\n from .deploy import LocalCluster, Adaptive, SpecCluster\n-from .diagnostics import progress\n+from .diagnostics.progressbar import progress\n from .client import (\n Client,\n Executor,\n", "issue": "dask.distributed.progress no longer callable in 2.3.0?\nWe've used the progress() function from dask.distributed a bunch in the past to display a progress bar in JupyterLab, but it seems to have stopped working after upgrading to Dask 2.3.0:\r\n\r\n```\r\nfrom dask.distributed import Client, progress\r\nimport dask.dataframe as dd\r\n\r\ndf = dd.demo.make_timeseries('2010', '2016',\r\n {'value': float, 'name': str, 'id': int},\r\n freq='10s', partition_freq='7d', seed=1)\r\n\r\ndf = df.persist()\r\nprogress(df)\r\n```\r\n\r\nExecuting this in a single cell in JupyterLab (with an existing Dask cluster already running) results in:\r\n\r\n```\r\n---------------------------------------------------------------------------\r\nTypeError Traceback (most recent call last)\r\n<ipython-input-1-16af814d7204> in <module>\r\n 7 \r\n 8 df = df.persist()\r\n----> 9 progress(df)\r\n\r\nTypeError: 'module' object is not callable\r\n```\r\n\r\nLet me know if I can provide any more info. Thanks!\n", "before_files": [{"content": "from . import config\nfrom dask.config import config\nfrom .actor import Actor, ActorFuture\nfrom .core import connect, rpc\nfrom .deploy import LocalCluster, Adaptive, SpecCluster\nfrom .diagnostics import progress\nfrom .client import (\n Client,\n Executor,\n CompatibleExecutor,\n wait,\n as_completed,\n default_client,\n fire_and_forget,\n Future,\n futures_of,\n get_task_stream,\n)\nfrom .lock import Lock\nfrom .nanny import Nanny\nfrom .pubsub import Pub, Sub\nfrom .queues import Queue\nfrom .scheduler import Scheduler\nfrom .threadpoolexecutor import rejoin\nfrom .utils import sync\nfrom .variable import Variable\nfrom .worker import Worker, get_worker, get_client, secede, Reschedule\nfrom .worker_client import local_client, worker_client\n\nfrom tornado.gen import TimeoutError\n\nfrom ._version import get_versions\n\nversions = get_versions()\n__version__ = versions[\"version\"]\n__git_revision__ = versions[\"full-revisionid\"]\ndel get_versions, versions\n", "path": "distributed/__init__.py"}], "after_files": [{"content": "from . import config\nfrom dask.config import config\nfrom .actor import Actor, ActorFuture\nfrom .core import connect, rpc\nfrom .deploy import LocalCluster, Adaptive, SpecCluster\nfrom .diagnostics.progressbar import progress\nfrom .client import (\n Client,\n Executor,\n CompatibleExecutor,\n wait,\n as_completed,\n default_client,\n fire_and_forget,\n Future,\n futures_of,\n get_task_stream,\n)\nfrom .lock import Lock\nfrom .nanny import Nanny\nfrom .pubsub import Pub, Sub\nfrom .queues import Queue\nfrom .scheduler import Scheduler\nfrom .threadpoolexecutor import rejoin\nfrom .utils import sync\nfrom .variable import Variable\nfrom .worker import Worker, get_worker, get_client, secede, Reschedule\nfrom .worker_client import local_client, worker_client\n\nfrom tornado.gen import TimeoutError\n\nfrom ._version import get_versions\n\nversions = get_versions()\n__version__ = versions[\"version\"]\n__git_revision__ = versions[\"full-revisionid\"]\ndel get_versions, versions\n", "path": "distributed/__init__.py"}]}
802
103
gh_patches_debug_31673
rasdani/github-patches
git_diff
getnikola__nikola-2541
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Plugin dependencies Plugins (from the plugins repo) should be able to depend on each other. Plugin dependencies Plugins (from the plugins repo) should be able to depend on each other. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `nikola/plugins/command/plugin.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 # Copyright © 2012-2016 Roberto Alsina and others. 4 5 # Permission is hereby granted, free of charge, to any 6 # person obtaining a copy of this software and associated 7 # documentation files (the "Software"), to deal in the 8 # Software without restriction, including without limitation 9 # the rights to use, copy, modify, merge, publish, 10 # distribute, sublicense, and/or sell copies of the 11 # Software, and to permit persons to whom the Software is 12 # furnished to do so, subject to the following conditions: 13 # 14 # The above copyright notice and this permission notice 15 # shall be included in all copies or substantial portions of 16 # the Software. 17 # 18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY 19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE 20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR 21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS 22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 27 """Manage plugins.""" 28 29 from __future__ import print_function 30 import io 31 import os 32 import sys 33 import shutil 34 import subprocess 35 import time 36 import requests 37 38 import pygments 39 from pygments.lexers import PythonLexer 40 from pygments.formatters import TerminalFormatter 41 42 from nikola.plugin_categories import Command 43 from nikola import utils 44 45 LOGGER = utils.get_logger('plugin', utils.STDERR_HANDLER) 46 47 48 class CommandPlugin(Command): 49 """Manage plugins.""" 50 51 json = None 52 name = "plugin" 53 doc_usage = "[-u url] [--user] [-i name] [-r name] [--upgrade] [-l] [--list-installed]" 54 doc_purpose = "manage plugins" 55 output_dir = None 56 needs_config = False 57 cmd_options = [ 58 { 59 'name': 'install', 60 'short': 'i', 61 'long': 'install', 62 'type': str, 63 'default': '', 64 'help': 'Install a plugin.', 65 }, 66 { 67 'name': 'uninstall', 68 'long': 'uninstall', 69 'short': 'r', 70 'type': str, 71 'default': '', 72 'help': 'Uninstall a plugin.' 73 }, 74 { 75 'name': 'list', 76 'short': 'l', 77 'long': 'list', 78 'type': bool, 79 'default': False, 80 'help': 'Show list of available plugins.' 81 }, 82 { 83 'name': 'url', 84 'short': 'u', 85 'long': 'url', 86 'type': str, 87 'help': "URL for the plugin repository (default: " 88 "https://plugins.getnikola.com/v7/plugins.json)", 89 'default': 'https://plugins.getnikola.com/v7/plugins.json' 90 }, 91 { 92 'name': 'user', 93 'long': 'user', 94 'type': bool, 95 'help': "Install user-wide, available for all sites.", 96 'default': False 97 }, 98 { 99 'name': 'upgrade', 100 'long': 'upgrade', 101 'type': bool, 102 'help': "Upgrade all installed plugins.", 103 'default': False 104 }, 105 { 106 'name': 'list_installed', 107 'long': 'list-installed', 108 'type': bool, 109 'help': "List the installed plugins with their location.", 110 'default': False 111 }, 112 ] 113 114 def _execute(self, options, args): 115 """Install plugin into current site.""" 116 url = options['url'] 117 user_mode = options['user'] 118 119 # See the "mode" we need to operate in 120 install = options.get('install') 121 uninstall = options.get('uninstall') 122 upgrade = options.get('upgrade') 123 list_available = options.get('list') 124 list_installed = options.get('list_installed') 125 show_install_notes = options.get('show_install_notes', True) 126 command_count = [bool(x) for x in ( 127 install, 128 uninstall, 129 upgrade, 130 list_available, 131 list_installed)].count(True) 132 if command_count > 1 or command_count == 0: 133 print(self.help()) 134 return 2 135 136 if options.get('output_dir') is not None: 137 self.output_dir = options.get('output_dir') 138 else: 139 if not self.site.configured and not user_mode and install: 140 LOGGER.notice('No site found, assuming --user') 141 user_mode = True 142 143 if user_mode: 144 self.output_dir = os.path.expanduser('~/.nikola/plugins') 145 else: 146 self.output_dir = 'plugins' 147 148 if list_available: 149 return self.list_available(url) 150 elif list_installed: 151 return self.list_installed() 152 elif upgrade: 153 return self.do_upgrade(url) 154 elif uninstall: 155 return self.do_uninstall(uninstall) 156 elif install: 157 return self.do_install(url, install, show_install_notes) 158 159 def list_available(self, url): 160 """List all available plugins.""" 161 data = self.get_json(url) 162 print("Available Plugins:") 163 print("------------------") 164 for plugin in sorted(data.keys()): 165 print(plugin) 166 return 0 167 168 def list_installed(self): 169 """List installed plugins.""" 170 plugins = [] 171 for plugin in self.site.plugin_manager.getAllPlugins(): 172 p = plugin.path 173 if os.path.isdir(p): 174 p = p + os.sep 175 else: 176 p = p + '.py' 177 plugins.append([plugin.name, p]) 178 179 plugins.sort() 180 print('Installed Plugins:') 181 print('------------------') 182 for name, path in plugins: 183 print('{0} at {1}'.format(name, path)) 184 print('\n\nAlso, you have disabled these plugins: {}'.format(self.site.config['DISABLED_PLUGINS'])) 185 return 0 186 187 def do_upgrade(self, url): 188 """Upgrade all installed plugins.""" 189 LOGGER.warning('This is not very smart, it just reinstalls some plugins and hopes for the best') 190 data = self.get_json(url) 191 plugins = [] 192 for plugin in self.site.plugin_manager.getAllPlugins(): 193 p = plugin.path 194 if os.path.isdir(p): 195 p = p + os.sep 196 else: 197 p = p + '.py' 198 if plugin.name in data: 199 plugins.append([plugin.name, p]) 200 print('Will upgrade {0} plugins: {1}'.format(len(plugins), ', '.join(n for n, _ in plugins))) 201 for name, path in plugins: 202 print('Upgrading {0}'.format(name)) 203 p = path 204 while True: 205 tail, head = os.path.split(path) 206 if head == 'plugins': 207 self.output_dir = path 208 break 209 elif tail == '': 210 LOGGER.error("Can't find the plugins folder for path: {0}".format(p)) 211 return 1 212 else: 213 path = tail 214 self.do_install(url, name) 215 return 0 216 217 def do_install(self, url, name, show_install_notes=True): 218 """Download and install a plugin.""" 219 data = self.get_json(url) 220 if name in data: 221 utils.makedirs(self.output_dir) 222 url = data[name] 223 LOGGER.info("Downloading '{0}'".format(url)) 224 try: 225 zip_data = requests.get(url).content 226 except requests.exceptions.SSLError: 227 LOGGER.warning("SSL error, using http instead of https (press ^C to abort)") 228 time.sleep(1) 229 url = url.replace('https', 'http', 1) 230 zip_data = requests.get(url).content 231 232 zip_file = io.BytesIO() 233 zip_file.write(zip_data) 234 LOGGER.info('Extracting: {0} into {1}/'.format(name, self.output_dir)) 235 utils.extract_all(zip_file, self.output_dir) 236 dest_path = os.path.join(self.output_dir, name) 237 else: 238 try: 239 plugin_path = utils.get_plugin_path(name) 240 except: 241 LOGGER.error("Can't find plugin " + name) 242 return 1 243 244 utils.makedirs(self.output_dir) 245 dest_path = os.path.join(self.output_dir, name) 246 if os.path.exists(dest_path): 247 LOGGER.error("{0} is already installed".format(name)) 248 return 1 249 250 LOGGER.info('Copying {0} into plugins'.format(plugin_path)) 251 shutil.copytree(plugin_path, dest_path) 252 253 reqpath = os.path.join(dest_path, 'requirements.txt') 254 if os.path.exists(reqpath): 255 LOGGER.notice('This plugin has Python dependencies.') 256 LOGGER.info('Installing dependencies with pip...') 257 try: 258 subprocess.check_call((sys.executable, '-m', 'pip', 'install', '-r', reqpath)) 259 except subprocess.CalledProcessError: 260 LOGGER.error('Could not install the dependencies.') 261 print('Contents of the requirements.txt file:\n') 262 with io.open(reqpath, 'r', encoding='utf-8') as fh: 263 print(utils.indent(fh.read(), 4 * ' ')) 264 print('You have to install those yourself or through a ' 265 'package manager.') 266 else: 267 LOGGER.info('Dependency installation succeeded.') 268 reqnpypath = os.path.join(dest_path, 'requirements-nonpy.txt') 269 if os.path.exists(reqnpypath): 270 LOGGER.notice('This plugin has third-party ' 271 'dependencies you need to install ' 272 'manually.') 273 print('Contents of the requirements-nonpy.txt file:\n') 274 with io.open(reqnpypath, 'r', encoding='utf-8') as fh: 275 for l in fh.readlines(): 276 i, j = l.split('::') 277 print(utils.indent(i.strip(), 4 * ' ')) 278 print(utils.indent(j.strip(), 8 * ' ')) 279 print() 280 281 print('You have to install those yourself or through a package ' 282 'manager.') 283 confpypath = os.path.join(dest_path, 'conf.py.sample') 284 if os.path.exists(confpypath) and show_install_notes: 285 LOGGER.notice('This plugin has a sample config file. Integrate it with yours in order to make this plugin work!') 286 print('Contents of the conf.py.sample file:\n') 287 with io.open(confpypath, 'r', encoding='utf-8') as fh: 288 if self.site.colorful: 289 print(utils.indent(pygments.highlight( 290 fh.read(), PythonLexer(), TerminalFormatter()), 291 4 * ' ')) 292 else: 293 print(utils.indent(fh.read(), 4 * ' ')) 294 return 0 295 296 def do_uninstall(self, name): 297 """Uninstall a plugin.""" 298 for plugin in self.site.plugin_manager.getAllPlugins(): # FIXME: this is repeated thrice 299 if name == plugin.name: # Uninstall this one 300 p = plugin.path 301 if os.path.isdir(p): 302 # Plugins that have a package in them need to delete parent 303 # Issue #2356 304 p = p + os.sep 305 p = os.path.abspath(os.path.join(p, os.pardir)) 306 else: 307 p = os.path.dirname(p) 308 LOGGER.warning('About to uninstall plugin: {0}'.format(name)) 309 LOGGER.warning('This will delete {0}'.format(p)) 310 sure = utils.ask_yesno('Are you sure?') 311 if sure: 312 LOGGER.warning('Removing {0}'.format(p)) 313 shutil.rmtree(p) 314 return 0 315 return 1 316 LOGGER.error('Unknown plugin: {0}'.format(name)) 317 return 1 318 319 def get_json(self, url): 320 """Download the JSON file with all plugins.""" 321 if self.json is None: 322 try: 323 self.json = requests.get(url).json() 324 except requests.exceptions.SSLError: 325 LOGGER.warning("SSL error, using http instead of https (press ^C to abort)") 326 time.sleep(1) 327 url = url.replace('https', 'http', 1) 328 self.json = requests.get(url).json() 329 return self.json 330 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/nikola/plugins/command/plugin.py b/nikola/plugins/command/plugin.py --- a/nikola/plugins/command/plugin.py +++ b/nikola/plugins/command/plugin.py @@ -265,6 +265,7 @@ 'package manager.') else: LOGGER.info('Dependency installation succeeded.') + reqnpypath = os.path.join(dest_path, 'requirements-nonpy.txt') if os.path.exists(reqnpypath): LOGGER.notice('This plugin has third-party ' @@ -280,6 +281,25 @@ print('You have to install those yourself or through a package ' 'manager.') + + req_plug_path = os.path.join(dest_path, 'requirements-plugins.txt') + if os.path.exists(req_plug_path): + LOGGER.notice('This plugin requires other Nikola plugins.') + LOGGER.info('Installing plugins using Nikola...') + print('Contents of the requirements-nonpy.txt file:\n') + try: + with io.open(req_plug_path, 'r', encoding='utf-8') as inf: + for plugname in inf.readlines(): + self.do_install(url, plugname, show_install_notes) + except subprocess.CalledProcessError: + LOGGER.error('Could not install a plugin.') + print('Contents of the requirements-plugins.txt file:\n') + with io.open(req_plug_path, 'r', encoding='utf-8') as fh: + print(utils.indent(fh.read(), 4 * ' ')) + print('You have to install those yourself manually.') + else: + LOGGER.info('Dependency installation succeeded.') + confpypath = os.path.join(dest_path, 'conf.py.sample') if os.path.exists(confpypath) and show_install_notes: LOGGER.notice('This plugin has a sample config file. Integrate it with yours in order to make this plugin work!')
{"golden_diff": "diff --git a/nikola/plugins/command/plugin.py b/nikola/plugins/command/plugin.py\n--- a/nikola/plugins/command/plugin.py\n+++ b/nikola/plugins/command/plugin.py\n@@ -265,6 +265,7 @@\n 'package manager.')\n else:\n LOGGER.info('Dependency installation succeeded.')\n+\n reqnpypath = os.path.join(dest_path, 'requirements-nonpy.txt')\n if os.path.exists(reqnpypath):\n LOGGER.notice('This plugin has third-party '\n@@ -280,6 +281,25 @@\n \n print('You have to install those yourself or through a package '\n 'manager.')\n+\n+ req_plug_path = os.path.join(dest_path, 'requirements-plugins.txt')\n+ if os.path.exists(req_plug_path):\n+ LOGGER.notice('This plugin requires other Nikola plugins.')\n+ LOGGER.info('Installing plugins using Nikola...')\n+ print('Contents of the requirements-nonpy.txt file:\\n')\n+ try:\n+ with io.open(req_plug_path, 'r', encoding='utf-8') as inf:\n+ for plugname in inf.readlines():\n+ self.do_install(url, plugname, show_install_notes)\n+ except subprocess.CalledProcessError:\n+ LOGGER.error('Could not install a plugin.')\n+ print('Contents of the requirements-plugins.txt file:\\n')\n+ with io.open(req_plug_path, 'r', encoding='utf-8') as fh:\n+ print(utils.indent(fh.read(), 4 * ' '))\n+ print('You have to install those yourself manually.')\n+ else:\n+ LOGGER.info('Dependency installation succeeded.')\n+\n confpypath = os.path.join(dest_path, 'conf.py.sample')\n if os.path.exists(confpypath) and show_install_notes:\n LOGGER.notice('This plugin has a sample config file. Integrate it with yours in order to make this plugin work!')\n", "issue": "Plugin dependencies\nPlugins (from the plugins repo) should be able to depend on each other.\n\nPlugin dependencies\nPlugins (from the plugins repo) should be able to depend on each other.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2016 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Manage plugins.\"\"\"\n\nfrom __future__ import print_function\nimport io\nimport os\nimport sys\nimport shutil\nimport subprocess\nimport time\nimport requests\n\nimport pygments\nfrom pygments.lexers import PythonLexer\nfrom pygments.formatters import TerminalFormatter\n\nfrom nikola.plugin_categories import Command\nfrom nikola import utils\n\nLOGGER = utils.get_logger('plugin', utils.STDERR_HANDLER)\n\n\nclass CommandPlugin(Command):\n \"\"\"Manage plugins.\"\"\"\n\n json = None\n name = \"plugin\"\n doc_usage = \"[-u url] [--user] [-i name] [-r name] [--upgrade] [-l] [--list-installed]\"\n doc_purpose = \"manage plugins\"\n output_dir = None\n needs_config = False\n cmd_options = [\n {\n 'name': 'install',\n 'short': 'i',\n 'long': 'install',\n 'type': str,\n 'default': '',\n 'help': 'Install a plugin.',\n },\n {\n 'name': 'uninstall',\n 'long': 'uninstall',\n 'short': 'r',\n 'type': str,\n 'default': '',\n 'help': 'Uninstall a plugin.'\n },\n {\n 'name': 'list',\n 'short': 'l',\n 'long': 'list',\n 'type': bool,\n 'default': False,\n 'help': 'Show list of available plugins.'\n },\n {\n 'name': 'url',\n 'short': 'u',\n 'long': 'url',\n 'type': str,\n 'help': \"URL for the plugin repository (default: \"\n \"https://plugins.getnikola.com/v7/plugins.json)\",\n 'default': 'https://plugins.getnikola.com/v7/plugins.json'\n },\n {\n 'name': 'user',\n 'long': 'user',\n 'type': bool,\n 'help': \"Install user-wide, available for all sites.\",\n 'default': False\n },\n {\n 'name': 'upgrade',\n 'long': 'upgrade',\n 'type': bool,\n 'help': \"Upgrade all installed plugins.\",\n 'default': False\n },\n {\n 'name': 'list_installed',\n 'long': 'list-installed',\n 'type': bool,\n 'help': \"List the installed plugins with their location.\",\n 'default': False\n },\n ]\n\n def _execute(self, options, args):\n \"\"\"Install plugin into current site.\"\"\"\n url = options['url']\n user_mode = options['user']\n\n # See the \"mode\" we need to operate in\n install = options.get('install')\n uninstall = options.get('uninstall')\n upgrade = options.get('upgrade')\n list_available = options.get('list')\n list_installed = options.get('list_installed')\n show_install_notes = options.get('show_install_notes', True)\n command_count = [bool(x) for x in (\n install,\n uninstall,\n upgrade,\n list_available,\n list_installed)].count(True)\n if command_count > 1 or command_count == 0:\n print(self.help())\n return 2\n\n if options.get('output_dir') is not None:\n self.output_dir = options.get('output_dir')\n else:\n if not self.site.configured and not user_mode and install:\n LOGGER.notice('No site found, assuming --user')\n user_mode = True\n\n if user_mode:\n self.output_dir = os.path.expanduser('~/.nikola/plugins')\n else:\n self.output_dir = 'plugins'\n\n if list_available:\n return self.list_available(url)\n elif list_installed:\n return self.list_installed()\n elif upgrade:\n return self.do_upgrade(url)\n elif uninstall:\n return self.do_uninstall(uninstall)\n elif install:\n return self.do_install(url, install, show_install_notes)\n\n def list_available(self, url):\n \"\"\"List all available plugins.\"\"\"\n data = self.get_json(url)\n print(\"Available Plugins:\")\n print(\"------------------\")\n for plugin in sorted(data.keys()):\n print(plugin)\n return 0\n\n def list_installed(self):\n \"\"\"List installed plugins.\"\"\"\n plugins = []\n for plugin in self.site.plugin_manager.getAllPlugins():\n p = plugin.path\n if os.path.isdir(p):\n p = p + os.sep\n else:\n p = p + '.py'\n plugins.append([plugin.name, p])\n\n plugins.sort()\n print('Installed Plugins:')\n print('------------------')\n for name, path in plugins:\n print('{0} at {1}'.format(name, path))\n print('\\n\\nAlso, you have disabled these plugins: {}'.format(self.site.config['DISABLED_PLUGINS']))\n return 0\n\n def do_upgrade(self, url):\n \"\"\"Upgrade all installed plugins.\"\"\"\n LOGGER.warning('This is not very smart, it just reinstalls some plugins and hopes for the best')\n data = self.get_json(url)\n plugins = []\n for plugin in self.site.plugin_manager.getAllPlugins():\n p = plugin.path\n if os.path.isdir(p):\n p = p + os.sep\n else:\n p = p + '.py'\n if plugin.name in data:\n plugins.append([plugin.name, p])\n print('Will upgrade {0} plugins: {1}'.format(len(plugins), ', '.join(n for n, _ in plugins)))\n for name, path in plugins:\n print('Upgrading {0}'.format(name))\n p = path\n while True:\n tail, head = os.path.split(path)\n if head == 'plugins':\n self.output_dir = path\n break\n elif tail == '':\n LOGGER.error(\"Can't find the plugins folder for path: {0}\".format(p))\n return 1\n else:\n path = tail\n self.do_install(url, name)\n return 0\n\n def do_install(self, url, name, show_install_notes=True):\n \"\"\"Download and install a plugin.\"\"\"\n data = self.get_json(url)\n if name in data:\n utils.makedirs(self.output_dir)\n url = data[name]\n LOGGER.info(\"Downloading '{0}'\".format(url))\n try:\n zip_data = requests.get(url).content\n except requests.exceptions.SSLError:\n LOGGER.warning(\"SSL error, using http instead of https (press ^C to abort)\")\n time.sleep(1)\n url = url.replace('https', 'http', 1)\n zip_data = requests.get(url).content\n\n zip_file = io.BytesIO()\n zip_file.write(zip_data)\n LOGGER.info('Extracting: {0} into {1}/'.format(name, self.output_dir))\n utils.extract_all(zip_file, self.output_dir)\n dest_path = os.path.join(self.output_dir, name)\n else:\n try:\n plugin_path = utils.get_plugin_path(name)\n except:\n LOGGER.error(\"Can't find plugin \" + name)\n return 1\n\n utils.makedirs(self.output_dir)\n dest_path = os.path.join(self.output_dir, name)\n if os.path.exists(dest_path):\n LOGGER.error(\"{0} is already installed\".format(name))\n return 1\n\n LOGGER.info('Copying {0} into plugins'.format(plugin_path))\n shutil.copytree(plugin_path, dest_path)\n\n reqpath = os.path.join(dest_path, 'requirements.txt')\n if os.path.exists(reqpath):\n LOGGER.notice('This plugin has Python dependencies.')\n LOGGER.info('Installing dependencies with pip...')\n try:\n subprocess.check_call((sys.executable, '-m', 'pip', 'install', '-r', reqpath))\n except subprocess.CalledProcessError:\n LOGGER.error('Could not install the dependencies.')\n print('Contents of the requirements.txt file:\\n')\n with io.open(reqpath, 'r', encoding='utf-8') as fh:\n print(utils.indent(fh.read(), 4 * ' '))\n print('You have to install those yourself or through a '\n 'package manager.')\n else:\n LOGGER.info('Dependency installation succeeded.')\n reqnpypath = os.path.join(dest_path, 'requirements-nonpy.txt')\n if os.path.exists(reqnpypath):\n LOGGER.notice('This plugin has third-party '\n 'dependencies you need to install '\n 'manually.')\n print('Contents of the requirements-nonpy.txt file:\\n')\n with io.open(reqnpypath, 'r', encoding='utf-8') as fh:\n for l in fh.readlines():\n i, j = l.split('::')\n print(utils.indent(i.strip(), 4 * ' '))\n print(utils.indent(j.strip(), 8 * ' '))\n print()\n\n print('You have to install those yourself or through a package '\n 'manager.')\n confpypath = os.path.join(dest_path, 'conf.py.sample')\n if os.path.exists(confpypath) and show_install_notes:\n LOGGER.notice('This plugin has a sample config file. Integrate it with yours in order to make this plugin work!')\n print('Contents of the conf.py.sample file:\\n')\n with io.open(confpypath, 'r', encoding='utf-8') as fh:\n if self.site.colorful:\n print(utils.indent(pygments.highlight(\n fh.read(), PythonLexer(), TerminalFormatter()),\n 4 * ' '))\n else:\n print(utils.indent(fh.read(), 4 * ' '))\n return 0\n\n def do_uninstall(self, name):\n \"\"\"Uninstall a plugin.\"\"\"\n for plugin in self.site.plugin_manager.getAllPlugins(): # FIXME: this is repeated thrice\n if name == plugin.name: # Uninstall this one\n p = plugin.path\n if os.path.isdir(p):\n # Plugins that have a package in them need to delete parent\n # Issue #2356\n p = p + os.sep\n p = os.path.abspath(os.path.join(p, os.pardir))\n else:\n p = os.path.dirname(p)\n LOGGER.warning('About to uninstall plugin: {0}'.format(name))\n LOGGER.warning('This will delete {0}'.format(p))\n sure = utils.ask_yesno('Are you sure?')\n if sure:\n LOGGER.warning('Removing {0}'.format(p))\n shutil.rmtree(p)\n return 0\n return 1\n LOGGER.error('Unknown plugin: {0}'.format(name))\n return 1\n\n def get_json(self, url):\n \"\"\"Download the JSON file with all plugins.\"\"\"\n if self.json is None:\n try:\n self.json = requests.get(url).json()\n except requests.exceptions.SSLError:\n LOGGER.warning(\"SSL error, using http instead of https (press ^C to abort)\")\n time.sleep(1)\n url = url.replace('https', 'http', 1)\n self.json = requests.get(url).json()\n return self.json\n", "path": "nikola/plugins/command/plugin.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2016 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Manage plugins.\"\"\"\n\nfrom __future__ import print_function\nimport io\nimport os\nimport sys\nimport shutil\nimport subprocess\nimport time\nimport requests\n\nimport pygments\nfrom pygments.lexers import PythonLexer\nfrom pygments.formatters import TerminalFormatter\n\nfrom nikola.plugin_categories import Command\nfrom nikola import utils\n\nLOGGER = utils.get_logger('plugin', utils.STDERR_HANDLER)\n\n\nclass CommandPlugin(Command):\n \"\"\"Manage plugins.\"\"\"\n\n json = None\n name = \"plugin\"\n doc_usage = \"[-u url] [--user] [-i name] [-r name] [--upgrade] [-l] [--list-installed]\"\n doc_purpose = \"manage plugins\"\n output_dir = None\n needs_config = False\n cmd_options = [\n {\n 'name': 'install',\n 'short': 'i',\n 'long': 'install',\n 'type': str,\n 'default': '',\n 'help': 'Install a plugin.',\n },\n {\n 'name': 'uninstall',\n 'long': 'uninstall',\n 'short': 'r',\n 'type': str,\n 'default': '',\n 'help': 'Uninstall a plugin.'\n },\n {\n 'name': 'list',\n 'short': 'l',\n 'long': 'list',\n 'type': bool,\n 'default': False,\n 'help': 'Show list of available plugins.'\n },\n {\n 'name': 'url',\n 'short': 'u',\n 'long': 'url',\n 'type': str,\n 'help': \"URL for the plugin repository (default: \"\n \"https://plugins.getnikola.com/v7/plugins.json)\",\n 'default': 'https://plugins.getnikola.com/v7/plugins.json'\n },\n {\n 'name': 'user',\n 'long': 'user',\n 'type': bool,\n 'help': \"Install user-wide, available for all sites.\",\n 'default': False\n },\n {\n 'name': 'upgrade',\n 'long': 'upgrade',\n 'type': bool,\n 'help': \"Upgrade all installed plugins.\",\n 'default': False\n },\n {\n 'name': 'list_installed',\n 'long': 'list-installed',\n 'type': bool,\n 'help': \"List the installed plugins with their location.\",\n 'default': False\n },\n ]\n\n def _execute(self, options, args):\n \"\"\"Install plugin into current site.\"\"\"\n url = options['url']\n user_mode = options['user']\n\n # See the \"mode\" we need to operate in\n install = options.get('install')\n uninstall = options.get('uninstall')\n upgrade = options.get('upgrade')\n list_available = options.get('list')\n list_installed = options.get('list_installed')\n show_install_notes = options.get('show_install_notes', True)\n command_count = [bool(x) for x in (\n install,\n uninstall,\n upgrade,\n list_available,\n list_installed)].count(True)\n if command_count > 1 or command_count == 0:\n print(self.help())\n return 2\n\n if options.get('output_dir') is not None:\n self.output_dir = options.get('output_dir')\n else:\n if not self.site.configured and not user_mode and install:\n LOGGER.notice('No site found, assuming --user')\n user_mode = True\n\n if user_mode:\n self.output_dir = os.path.expanduser('~/.nikola/plugins')\n else:\n self.output_dir = 'plugins'\n\n if list_available:\n return self.list_available(url)\n elif list_installed:\n return self.list_installed()\n elif upgrade:\n return self.do_upgrade(url)\n elif uninstall:\n return self.do_uninstall(uninstall)\n elif install:\n return self.do_install(url, install, show_install_notes)\n\n def list_available(self, url):\n \"\"\"List all available plugins.\"\"\"\n data = self.get_json(url)\n print(\"Available Plugins:\")\n print(\"------------------\")\n for plugin in sorted(data.keys()):\n print(plugin)\n return 0\n\n def list_installed(self):\n \"\"\"List installed plugins.\"\"\"\n plugins = []\n for plugin in self.site.plugin_manager.getAllPlugins():\n p = plugin.path\n if os.path.isdir(p):\n p = p + os.sep\n else:\n p = p + '.py'\n plugins.append([plugin.name, p])\n\n plugins.sort()\n print('Installed Plugins:')\n print('------------------')\n for name, path in plugins:\n print('{0} at {1}'.format(name, path))\n print('\\n\\nAlso, you have disabled these plugins: {}'.format(self.site.config['DISABLED_PLUGINS']))\n return 0\n\n def do_upgrade(self, url):\n \"\"\"Upgrade all installed plugins.\"\"\"\n LOGGER.warning('This is not very smart, it just reinstalls some plugins and hopes for the best')\n data = self.get_json(url)\n plugins = []\n for plugin in self.site.plugin_manager.getAllPlugins():\n p = plugin.path\n if os.path.isdir(p):\n p = p + os.sep\n else:\n p = p + '.py'\n if plugin.name in data:\n plugins.append([plugin.name, p])\n print('Will upgrade {0} plugins: {1}'.format(len(plugins), ', '.join(n for n, _ in plugins)))\n for name, path in plugins:\n print('Upgrading {0}'.format(name))\n p = path\n while True:\n tail, head = os.path.split(path)\n if head == 'plugins':\n self.output_dir = path\n break\n elif tail == '':\n LOGGER.error(\"Can't find the plugins folder for path: {0}\".format(p))\n return 1\n else:\n path = tail\n self.do_install(url, name)\n return 0\n\n def do_install(self, url, name, show_install_notes=True):\n \"\"\"Download and install a plugin.\"\"\"\n data = self.get_json(url)\n if name in data:\n utils.makedirs(self.output_dir)\n url = data[name]\n LOGGER.info(\"Downloading '{0}'\".format(url))\n try:\n zip_data = requests.get(url).content\n except requests.exceptions.SSLError:\n LOGGER.warning(\"SSL error, using http instead of https (press ^C to abort)\")\n time.sleep(1)\n url = url.replace('https', 'http', 1)\n zip_data = requests.get(url).content\n\n zip_file = io.BytesIO()\n zip_file.write(zip_data)\n LOGGER.info('Extracting: {0} into {1}/'.format(name, self.output_dir))\n utils.extract_all(zip_file, self.output_dir)\n dest_path = os.path.join(self.output_dir, name)\n else:\n try:\n plugin_path = utils.get_plugin_path(name)\n except:\n LOGGER.error(\"Can't find plugin \" + name)\n return 1\n\n utils.makedirs(self.output_dir)\n dest_path = os.path.join(self.output_dir, name)\n if os.path.exists(dest_path):\n LOGGER.error(\"{0} is already installed\".format(name))\n return 1\n\n LOGGER.info('Copying {0} into plugins'.format(plugin_path))\n shutil.copytree(plugin_path, dest_path)\n\n reqpath = os.path.join(dest_path, 'requirements.txt')\n if os.path.exists(reqpath):\n LOGGER.notice('This plugin has Python dependencies.')\n LOGGER.info('Installing dependencies with pip...')\n try:\n subprocess.check_call((sys.executable, '-m', 'pip', 'install', '-r', reqpath))\n except subprocess.CalledProcessError:\n LOGGER.error('Could not install the dependencies.')\n print('Contents of the requirements.txt file:\\n')\n with io.open(reqpath, 'r', encoding='utf-8') as fh:\n print(utils.indent(fh.read(), 4 * ' '))\n print('You have to install those yourself or through a '\n 'package manager.')\n else:\n LOGGER.info('Dependency installation succeeded.')\n\n reqnpypath = os.path.join(dest_path, 'requirements-nonpy.txt')\n if os.path.exists(reqnpypath):\n LOGGER.notice('This plugin has third-party '\n 'dependencies you need to install '\n 'manually.')\n print('Contents of the requirements-nonpy.txt file:\\n')\n with io.open(reqnpypath, 'r', encoding='utf-8') as fh:\n for l in fh.readlines():\n i, j = l.split('::')\n print(utils.indent(i.strip(), 4 * ' '))\n print(utils.indent(j.strip(), 8 * ' '))\n print()\n\n print('You have to install those yourself or through a package '\n 'manager.')\n\n req_plug_path = os.path.join(dest_path, 'requirements-plugins.txt')\n if os.path.exists(req_plug_path):\n LOGGER.notice('This plugin requires other Nikola plugins.')\n LOGGER.info('Installing plugins using Nikola...')\n print('Contents of the requirements-nonpy.txt file:\\n')\n try:\n with io.open(req_plug_path, 'r', encoding='utf-8') as inf:\n for plugname in inf.readlines():\n self.do_install(url, plugname, show_install_notes)\n except subprocess.CalledProcessError:\n LOGGER.error('Could not install a plugin.')\n print('Contents of the requirements-plugins.txt file:\\n')\n with io.open(req_plug_path, 'r', encoding='utf-8') as fh:\n print(utils.indent(fh.read(), 4 * ' '))\n print('You have to install those yourself manually.')\n else:\n LOGGER.info('Dependency installation succeeded.')\n\n confpypath = os.path.join(dest_path, 'conf.py.sample')\n if os.path.exists(confpypath) and show_install_notes:\n LOGGER.notice('This plugin has a sample config file. Integrate it with yours in order to make this plugin work!')\n print('Contents of the conf.py.sample file:\\n')\n with io.open(confpypath, 'r', encoding='utf-8') as fh:\n if self.site.colorful:\n print(utils.indent(pygments.highlight(\n fh.read(), PythonLexer(), TerminalFormatter()),\n 4 * ' '))\n else:\n print(utils.indent(fh.read(), 4 * ' '))\n return 0\n\n def do_uninstall(self, name):\n \"\"\"Uninstall a plugin.\"\"\"\n for plugin in self.site.plugin_manager.getAllPlugins(): # FIXME: this is repeated thrice\n if name == plugin.name: # Uninstall this one\n p = plugin.path\n if os.path.isdir(p):\n # Plugins that have a package in them need to delete parent\n # Issue #2356\n p = p + os.sep\n p = os.path.abspath(os.path.join(p, os.pardir))\n else:\n p = os.path.dirname(p)\n LOGGER.warning('About to uninstall plugin: {0}'.format(name))\n LOGGER.warning('This will delete {0}'.format(p))\n sure = utils.ask_yesno('Are you sure?')\n if sure:\n LOGGER.warning('Removing {0}'.format(p))\n shutil.rmtree(p)\n return 0\n return 1\n LOGGER.error('Unknown plugin: {0}'.format(name))\n return 1\n\n def get_json(self, url):\n \"\"\"Download the JSON file with all plugins.\"\"\"\n if self.json is None:\n try:\n self.json = requests.get(url).json()\n except requests.exceptions.SSLError:\n LOGGER.warning(\"SSL error, using http instead of https (press ^C to abort)\")\n time.sleep(1)\n url = url.replace('https', 'http', 1)\n self.json = requests.get(url).json()\n return self.json\n", "path": "nikola/plugins/command/plugin.py"}]}
3,773
412
gh_patches_debug_18781
rasdani/github-patches
git_diff
ivy-llc__ivy-15979
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- selu --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ivy/functional/frontends/paddle/nn/functional/activation.py` Content: ``` 1 # local 2 from ivy.functional.frontends.paddle.tensor.math import tanh as paddle_tanh 3 from ivy.functional.frontends.paddle.tensor.math import ( 4 log_softmax as paddle_log_softmax, 5 ) 6 7 8 tanh = paddle_tanh 9 log_softmax = paddle_log_softmax 10 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ivy/functional/frontends/paddle/nn/functional/activation.py b/ivy/functional/frontends/paddle/nn/functional/activation.py --- a/ivy/functional/frontends/paddle/nn/functional/activation.py +++ b/ivy/functional/frontends/paddle/nn/functional/activation.py @@ -1,9 +1,33 @@ # local +import ivy +from ivy.func_wrapper import with_supported_dtypes +from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back from ivy.functional.frontends.paddle.tensor.math import tanh as paddle_tanh from ivy.functional.frontends.paddle.tensor.math import ( log_softmax as paddle_log_softmax, ) +@with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle") +@to_ivy_arrays_and_back +def selu( + x, + /, + *, + alpha=1.6732632423543772848170429916717, + scale=1.0507009873554804934193349852946, + name=None, +): + if scale <= 1.0: + raise ValueError(f"The scale must be greater than 1.0. Received: {scale}.") + + if alpha < 0: + raise ValueError(f"The alpha must be no less than zero. Received: {alpha}.") + + ret = ivy.where(x > 0, x, alpha * ivy.expm1(x)) + arr = scale * ret + return ivy.astype(arr, x.dtype) + + tanh = paddle_tanh log_softmax = paddle_log_softmax
{"golden_diff": "diff --git a/ivy/functional/frontends/paddle/nn/functional/activation.py b/ivy/functional/frontends/paddle/nn/functional/activation.py\n--- a/ivy/functional/frontends/paddle/nn/functional/activation.py\n+++ b/ivy/functional/frontends/paddle/nn/functional/activation.py\n@@ -1,9 +1,33 @@\n # local\n+import ivy\n+from ivy.func_wrapper import with_supported_dtypes\n+from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back\n from ivy.functional.frontends.paddle.tensor.math import tanh as paddle_tanh\n from ivy.functional.frontends.paddle.tensor.math import (\n log_softmax as paddle_log_softmax,\n )\n \n \n+@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n+@to_ivy_arrays_and_back\n+def selu(\n+ x,\n+ /,\n+ *,\n+ alpha=1.6732632423543772848170429916717,\n+ scale=1.0507009873554804934193349852946,\n+ name=None,\n+):\n+ if scale <= 1.0:\n+ raise ValueError(f\"The scale must be greater than 1.0. Received: {scale}.\")\n+\n+ if alpha < 0:\n+ raise ValueError(f\"The alpha must be no less than zero. Received: {alpha}.\")\n+\n+ ret = ivy.where(x > 0, x, alpha * ivy.expm1(x))\n+ arr = scale * ret\n+ return ivy.astype(arr, x.dtype)\n+\n+\n tanh = paddle_tanh\n log_softmax = paddle_log_softmax\n", "issue": "selu\n\n", "before_files": [{"content": "# local\nfrom ivy.functional.frontends.paddle.tensor.math import tanh as paddle_tanh\nfrom ivy.functional.frontends.paddle.tensor.math import (\n log_softmax as paddle_log_softmax,\n)\n\n\ntanh = paddle_tanh\nlog_softmax = paddle_log_softmax\n", "path": "ivy/functional/frontends/paddle/nn/functional/activation.py"}], "after_files": [{"content": "# local\nimport ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back\nfrom ivy.functional.frontends.paddle.tensor.math import tanh as paddle_tanh\nfrom ivy.functional.frontends.paddle.tensor.math import (\n log_softmax as paddle_log_softmax,\n)\n\n\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef selu(\n x,\n /,\n *,\n alpha=1.6732632423543772848170429916717,\n scale=1.0507009873554804934193349852946,\n name=None,\n):\n if scale <= 1.0:\n raise ValueError(f\"The scale must be greater than 1.0. Received: {scale}.\")\n\n if alpha < 0:\n raise ValueError(f\"The alpha must be no less than zero. Received: {alpha}.\")\n\n ret = ivy.where(x > 0, x, alpha * ivy.expm1(x))\n arr = scale * ret\n return ivy.astype(arr, x.dtype)\n\n\ntanh = paddle_tanh\nlog_softmax = paddle_log_softmax\n", "path": "ivy/functional/frontends/paddle/nn/functional/activation.py"}]}
340
420
gh_patches_debug_22439
rasdani/github-patches
git_diff
scrapy__scrapy-5722
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add logging functionality to `memusage` extension ## Summary To add logging functionality to memusage extension. ## Motivation Scrapy jobs with `MEMUSAGE_ENABLED : True` and defined `MEMUSAGE_LIMIT_MB` (all jobs on scrapy cloud) can be stopped early due to overuse of RAM memory and receive `memusage_exceeded` outcome. First thing required to debug RAM memory leaks - is to identify.. pattern of RAM memory usage. Is RAM usage continuously increased at higher rates during runtime? or Is RAM usage rapidly increased over limit in last several minutes after hours or even days of stable runtime performance? Each reason require different approaches to debug RAM memory leaks. It will be much easier to debug this if value of `self.get_virtual_size()` will be added to log in `_check_limit` method of `memusage` extension https://github.com/scrapy/scrapy/blob/6ded3cf4cd134b615239babe28bb28c3ff524b05/scrapy/extensions/memusage.py#L77-L89 ## Describe alternatives you've considered Applying `MEMUSAGE_WARNING_MB` setting to ~80-90% of `MEMUSAGE_LIMIT_MB` - current implementation of `memusage` extension warns only 1 time so it is not enough data for this. Manually subclass `memusage` extension with similar changes - as well as any other option it will require to reschedule job. It may be not siutable for jobs with several days(and more) total runtime. So from this side it is preferable that it will be applied in scrapy itself and with enabled this loggin by default. ## Additional context Similar functionality previously requested here https://github.com/scrapy/scrapy/issues/2173 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scrapy/extensions/memusage.py` Content: ``` 1 """ 2 MemoryUsage extension 3 4 See documentation in docs/topics/extensions.rst 5 """ 6 import sys 7 import socket 8 import logging 9 from pprint import pformat 10 from importlib import import_module 11 12 from twisted.internet import task 13 14 from scrapy import signals 15 from scrapy.exceptions import NotConfigured 16 from scrapy.mail import MailSender 17 from scrapy.utils.engine import get_engine_status 18 19 logger = logging.getLogger(__name__) 20 21 22 class MemoryUsage: 23 24 def __init__(self, crawler): 25 if not crawler.settings.getbool('MEMUSAGE_ENABLED'): 26 raise NotConfigured 27 try: 28 # stdlib's resource module is only available on unix platforms. 29 self.resource = import_module('resource') 30 except ImportError: 31 raise NotConfigured 32 33 self.crawler = crawler 34 self.warned = False 35 self.notify_mails = crawler.settings.getlist('MEMUSAGE_NOTIFY_MAIL') 36 self.limit = crawler.settings.getint('MEMUSAGE_LIMIT_MB') * 1024 * 1024 37 self.warning = crawler.settings.getint('MEMUSAGE_WARNING_MB') * 1024 * 1024 38 self.check_interval = crawler.settings.getfloat('MEMUSAGE_CHECK_INTERVAL_SECONDS') 39 self.mail = MailSender.from_settings(crawler.settings) 40 crawler.signals.connect(self.engine_started, signal=signals.engine_started) 41 crawler.signals.connect(self.engine_stopped, signal=signals.engine_stopped) 42 43 @classmethod 44 def from_crawler(cls, crawler): 45 return cls(crawler) 46 47 def get_virtual_size(self): 48 size = self.resource.getrusage(self.resource.RUSAGE_SELF).ru_maxrss 49 if sys.platform != 'darwin': 50 # on macOS ru_maxrss is in bytes, on Linux it is in KB 51 size *= 1024 52 return size 53 54 def engine_started(self): 55 self.crawler.stats.set_value('memusage/startup', self.get_virtual_size()) 56 self.tasks = [] 57 tsk = task.LoopingCall(self.update) 58 self.tasks.append(tsk) 59 tsk.start(self.check_interval, now=True) 60 if self.limit: 61 tsk = task.LoopingCall(self._check_limit) 62 self.tasks.append(tsk) 63 tsk.start(self.check_interval, now=True) 64 if self.warning: 65 tsk = task.LoopingCall(self._check_warning) 66 self.tasks.append(tsk) 67 tsk.start(self.check_interval, now=True) 68 69 def engine_stopped(self): 70 for tsk in self.tasks: 71 if tsk.running: 72 tsk.stop() 73 74 def update(self): 75 self.crawler.stats.max_value('memusage/max', self.get_virtual_size()) 76 77 def _check_limit(self): 78 if self.get_virtual_size() > self.limit: 79 self.crawler.stats.set_value('memusage/limit_reached', 1) 80 mem = self.limit / 1024 / 1024 81 logger.error("Memory usage exceeded %(memusage)dM. Shutting down Scrapy...", 82 {'memusage': mem}, extra={'crawler': self.crawler}) 83 if self.notify_mails: 84 subj = ( 85 f"{self.crawler.settings['BOT_NAME']} terminated: " 86 f"memory usage exceeded {mem}M at {socket.gethostname()}" 87 ) 88 self._send_report(self.notify_mails, subj) 89 self.crawler.stats.set_value('memusage/limit_notified', 1) 90 91 if self.crawler.engine.spider is not None: 92 self.crawler.engine.close_spider(self.crawler.engine.spider, 'memusage_exceeded') 93 else: 94 self.crawler.stop() 95 96 def _check_warning(self): 97 if self.warned: # warn only once 98 return 99 if self.get_virtual_size() > self.warning: 100 self.crawler.stats.set_value('memusage/warning_reached', 1) 101 mem = self.warning / 1024 / 1024 102 logger.warning("Memory usage reached %(memusage)dM", 103 {'memusage': mem}, extra={'crawler': self.crawler}) 104 if self.notify_mails: 105 subj = ( 106 f"{self.crawler.settings['BOT_NAME']} warning: " 107 f"memory usage reached {mem}M at {socket.gethostname()}" 108 ) 109 self._send_report(self.notify_mails, subj) 110 self.crawler.stats.set_value('memusage/warning_notified', 1) 111 self.warned = True 112 113 def _send_report(self, rcpts, subject): 114 """send notification mail with some additional useful info""" 115 stats = self.crawler.stats 116 s = f"Memory usage at engine startup : {stats.get_value('memusage/startup')/1024/1024}M\r\n" 117 s += f"Maximum memory usage : {stats.get_value('memusage/max')/1024/1024}M\r\n" 118 s += f"Current memory usage : {self.get_virtual_size()/1024/1024}M\r\n" 119 120 s += "ENGINE STATUS ------------------------------------------------------- \r\n" 121 s += "\r\n" 122 s += pformat(get_engine_status(self.crawler.engine)) 123 s += "\r\n" 124 self.mail.send(rcpts, subject, s) 125 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scrapy/extensions/memusage.py b/scrapy/extensions/memusage.py --- a/scrapy/extensions/memusage.py +++ b/scrapy/extensions/memusage.py @@ -75,7 +75,8 @@ self.crawler.stats.max_value('memusage/max', self.get_virtual_size()) def _check_limit(self): - if self.get_virtual_size() > self.limit: + current_mem_usage = self.get_virtual_size() + if current_mem_usage > self.limit: self.crawler.stats.set_value('memusage/limit_reached', 1) mem = self.limit / 1024 / 1024 logger.error("Memory usage exceeded %(memusage)dM. Shutting down Scrapy...", @@ -92,6 +93,8 @@ self.crawler.engine.close_spider(self.crawler.engine.spider, 'memusage_exceeded') else: self.crawler.stop() + else: + logger.info("Current memory usage is %(virtualsize)dM", {'virtualsize': current_mem_usage / 1024 / 1024}) def _check_warning(self): if self.warned: # warn only once
{"golden_diff": "diff --git a/scrapy/extensions/memusage.py b/scrapy/extensions/memusage.py\n--- a/scrapy/extensions/memusage.py\n+++ b/scrapy/extensions/memusage.py\n@@ -75,7 +75,8 @@\n self.crawler.stats.max_value('memusage/max', self.get_virtual_size())\n \n def _check_limit(self):\n- if self.get_virtual_size() > self.limit:\n+ current_mem_usage = self.get_virtual_size()\n+ if current_mem_usage > self.limit:\n self.crawler.stats.set_value('memusage/limit_reached', 1)\n mem = self.limit / 1024 / 1024\n logger.error(\"Memory usage exceeded %(memusage)dM. Shutting down Scrapy...\",\n@@ -92,6 +93,8 @@\n self.crawler.engine.close_spider(self.crawler.engine.spider, 'memusage_exceeded')\n else:\n self.crawler.stop()\n+ else:\n+ logger.info(\"Current memory usage is %(virtualsize)dM\", {'virtualsize': current_mem_usage / 1024 / 1024})\n \n def _check_warning(self):\n if self.warned: # warn only once\n", "issue": "Add logging functionality to `memusage` extension\n\r\n\r\n## Summary\r\n\r\nTo add logging functionality to memusage extension.\r\n\r\n## Motivation\r\n\r\nScrapy jobs with `MEMUSAGE_ENABLED : True` and defined `MEMUSAGE_LIMIT_MB` (all jobs on scrapy cloud) can be stopped early due to overuse of RAM memory and receive `memusage_exceeded` outcome.\r\n\r\nFirst thing required to debug RAM memory leaks - is to identify.. pattern of RAM memory usage.\r\nIs RAM usage continuously increased at higher rates during runtime?\r\nor Is RAM usage rapidly increased over limit in last several minutes after hours or even days of stable runtime performance?\r\nEach reason require different approaches to debug RAM memory leaks.\r\n\r\nIt will be much easier to debug this if value of `self.get_virtual_size()` will be added to log in `_check_limit` method of `memusage` extension\r\nhttps://github.com/scrapy/scrapy/blob/6ded3cf4cd134b615239babe28bb28c3ff524b05/scrapy/extensions/memusage.py#L77-L89\r\n\r\n## Describe alternatives you've considered\r\n\r\nApplying `MEMUSAGE_WARNING_MB` setting to ~80-90% of `MEMUSAGE_LIMIT_MB` - current implementation of `memusage` extension warns only 1 time so it is not enough data for this.\r\n\r\nManually subclass `memusage` extension with similar changes - as well as any other option it will require to reschedule job. It may be not siutable for jobs with several days(and more) total runtime. So from this side it is preferable that it will be applied in scrapy itself and with enabled this loggin by default.\r\n \r\n## Additional context\r\n\r\nSimilar functionality previously requested here https://github.com/scrapy/scrapy/issues/2173\r\n\n", "before_files": [{"content": "\"\"\"\nMemoryUsage extension\n\nSee documentation in docs/topics/extensions.rst\n\"\"\"\nimport sys\nimport socket\nimport logging\nfrom pprint import pformat\nfrom importlib import import_module\n\nfrom twisted.internet import task\n\nfrom scrapy import signals\nfrom scrapy.exceptions import NotConfigured\nfrom scrapy.mail import MailSender\nfrom scrapy.utils.engine import get_engine_status\n\nlogger = logging.getLogger(__name__)\n\n\nclass MemoryUsage:\n\n def __init__(self, crawler):\n if not crawler.settings.getbool('MEMUSAGE_ENABLED'):\n raise NotConfigured\n try:\n # stdlib's resource module is only available on unix platforms.\n self.resource = import_module('resource')\n except ImportError:\n raise NotConfigured\n\n self.crawler = crawler\n self.warned = False\n self.notify_mails = crawler.settings.getlist('MEMUSAGE_NOTIFY_MAIL')\n self.limit = crawler.settings.getint('MEMUSAGE_LIMIT_MB') * 1024 * 1024\n self.warning = crawler.settings.getint('MEMUSAGE_WARNING_MB') * 1024 * 1024\n self.check_interval = crawler.settings.getfloat('MEMUSAGE_CHECK_INTERVAL_SECONDS')\n self.mail = MailSender.from_settings(crawler.settings)\n crawler.signals.connect(self.engine_started, signal=signals.engine_started)\n crawler.signals.connect(self.engine_stopped, signal=signals.engine_stopped)\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls(crawler)\n\n def get_virtual_size(self):\n size = self.resource.getrusage(self.resource.RUSAGE_SELF).ru_maxrss\n if sys.platform != 'darwin':\n # on macOS ru_maxrss is in bytes, on Linux it is in KB\n size *= 1024\n return size\n\n def engine_started(self):\n self.crawler.stats.set_value('memusage/startup', self.get_virtual_size())\n self.tasks = []\n tsk = task.LoopingCall(self.update)\n self.tasks.append(tsk)\n tsk.start(self.check_interval, now=True)\n if self.limit:\n tsk = task.LoopingCall(self._check_limit)\n self.tasks.append(tsk)\n tsk.start(self.check_interval, now=True)\n if self.warning:\n tsk = task.LoopingCall(self._check_warning)\n self.tasks.append(tsk)\n tsk.start(self.check_interval, now=True)\n\n def engine_stopped(self):\n for tsk in self.tasks:\n if tsk.running:\n tsk.stop()\n\n def update(self):\n self.crawler.stats.max_value('memusage/max', self.get_virtual_size())\n\n def _check_limit(self):\n if self.get_virtual_size() > self.limit:\n self.crawler.stats.set_value('memusage/limit_reached', 1)\n mem = self.limit / 1024 / 1024\n logger.error(\"Memory usage exceeded %(memusage)dM. Shutting down Scrapy...\",\n {'memusage': mem}, extra={'crawler': self.crawler})\n if self.notify_mails:\n subj = (\n f\"{self.crawler.settings['BOT_NAME']} terminated: \"\n f\"memory usage exceeded {mem}M at {socket.gethostname()}\"\n )\n self._send_report(self.notify_mails, subj)\n self.crawler.stats.set_value('memusage/limit_notified', 1)\n\n if self.crawler.engine.spider is not None:\n self.crawler.engine.close_spider(self.crawler.engine.spider, 'memusage_exceeded')\n else:\n self.crawler.stop()\n\n def _check_warning(self):\n if self.warned: # warn only once\n return\n if self.get_virtual_size() > self.warning:\n self.crawler.stats.set_value('memusage/warning_reached', 1)\n mem = self.warning / 1024 / 1024\n logger.warning(\"Memory usage reached %(memusage)dM\",\n {'memusage': mem}, extra={'crawler': self.crawler})\n if self.notify_mails:\n subj = (\n f\"{self.crawler.settings['BOT_NAME']} warning: \"\n f\"memory usage reached {mem}M at {socket.gethostname()}\"\n )\n self._send_report(self.notify_mails, subj)\n self.crawler.stats.set_value('memusage/warning_notified', 1)\n self.warned = True\n\n def _send_report(self, rcpts, subject):\n \"\"\"send notification mail with some additional useful info\"\"\"\n stats = self.crawler.stats\n s = f\"Memory usage at engine startup : {stats.get_value('memusage/startup')/1024/1024}M\\r\\n\"\n s += f\"Maximum memory usage : {stats.get_value('memusage/max')/1024/1024}M\\r\\n\"\n s += f\"Current memory usage : {self.get_virtual_size()/1024/1024}M\\r\\n\"\n\n s += \"ENGINE STATUS ------------------------------------------------------- \\r\\n\"\n s += \"\\r\\n\"\n s += pformat(get_engine_status(self.crawler.engine))\n s += \"\\r\\n\"\n self.mail.send(rcpts, subject, s)\n", "path": "scrapy/extensions/memusage.py"}], "after_files": [{"content": "\"\"\"\nMemoryUsage extension\n\nSee documentation in docs/topics/extensions.rst\n\"\"\"\nimport sys\nimport socket\nimport logging\nfrom pprint import pformat\nfrom importlib import import_module\n\nfrom twisted.internet import task\n\nfrom scrapy import signals\nfrom scrapy.exceptions import NotConfigured\nfrom scrapy.mail import MailSender\nfrom scrapy.utils.engine import get_engine_status\n\nlogger = logging.getLogger(__name__)\n\n\nclass MemoryUsage:\n\n def __init__(self, crawler):\n if not crawler.settings.getbool('MEMUSAGE_ENABLED'):\n raise NotConfigured\n try:\n # stdlib's resource module is only available on unix platforms.\n self.resource = import_module('resource')\n except ImportError:\n raise NotConfigured\n\n self.crawler = crawler\n self.warned = False\n self.notify_mails = crawler.settings.getlist('MEMUSAGE_NOTIFY_MAIL')\n self.limit = crawler.settings.getint('MEMUSAGE_LIMIT_MB') * 1024 * 1024\n self.warning = crawler.settings.getint('MEMUSAGE_WARNING_MB') * 1024 * 1024\n self.check_interval = crawler.settings.getfloat('MEMUSAGE_CHECK_INTERVAL_SECONDS')\n self.mail = MailSender.from_settings(crawler.settings)\n crawler.signals.connect(self.engine_started, signal=signals.engine_started)\n crawler.signals.connect(self.engine_stopped, signal=signals.engine_stopped)\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls(crawler)\n\n def get_virtual_size(self):\n size = self.resource.getrusage(self.resource.RUSAGE_SELF).ru_maxrss\n if sys.platform != 'darwin':\n # on macOS ru_maxrss is in bytes, on Linux it is in KB\n size *= 1024\n return size\n\n def engine_started(self):\n self.crawler.stats.set_value('memusage/startup', self.get_virtual_size())\n self.tasks = []\n tsk = task.LoopingCall(self.update)\n self.tasks.append(tsk)\n tsk.start(self.check_interval, now=True)\n if self.limit:\n tsk = task.LoopingCall(self._check_limit)\n self.tasks.append(tsk)\n tsk.start(self.check_interval, now=True)\n if self.warning:\n tsk = task.LoopingCall(self._check_warning)\n self.tasks.append(tsk)\n tsk.start(self.check_interval, now=True)\n\n def engine_stopped(self):\n for tsk in self.tasks:\n if tsk.running:\n tsk.stop()\n\n def update(self):\n self.crawler.stats.max_value('memusage/max', self.get_virtual_size())\n\n def _check_limit(self):\n current_mem_usage = self.get_virtual_size()\n if current_mem_usage > self.limit:\n self.crawler.stats.set_value('memusage/limit_reached', 1)\n mem = self.limit / 1024 / 1024\n logger.error(\"Memory usage exceeded %(memusage)dM. Shutting down Scrapy...\",\n {'memusage': mem}, extra={'crawler': self.crawler})\n if self.notify_mails:\n subj = (\n f\"{self.crawler.settings['BOT_NAME']} terminated: \"\n f\"memory usage exceeded {mem}M at {socket.gethostname()}\"\n )\n self._send_report(self.notify_mails, subj)\n self.crawler.stats.set_value('memusage/limit_notified', 1)\n\n if self.crawler.engine.spider is not None:\n self.crawler.engine.close_spider(self.crawler.engine.spider, 'memusage_exceeded')\n else:\n self.crawler.stop()\n else:\n logger.info(\"Current memory usage is %(virtualsize)dM\", {'virtualsize': current_mem_usage / 1024 / 1024})\n\n def _check_warning(self):\n if self.warned: # warn only once\n return\n if self.get_virtual_size() > self.warning:\n self.crawler.stats.set_value('memusage/warning_reached', 1)\n mem = self.warning / 1024 / 1024\n logger.warning(\"Memory usage reached %(memusage)dM\",\n {'memusage': mem}, extra={'crawler': self.crawler})\n if self.notify_mails:\n subj = (\n f\"{self.crawler.settings['BOT_NAME']} warning: \"\n f\"memory usage reached {mem}M at {socket.gethostname()}\"\n )\n self._send_report(self.notify_mails, subj)\n self.crawler.stats.set_value('memusage/warning_notified', 1)\n self.warned = True\n\n def _send_report(self, rcpts, subject):\n \"\"\"send notification mail with some additional useful info\"\"\"\n stats = self.crawler.stats\n s = f\"Memory usage at engine startup : {stats.get_value('memusage/startup')/1024/1024}M\\r\\n\"\n s += f\"Maximum memory usage : {stats.get_value('memusage/max')/1024/1024}M\\r\\n\"\n s += f\"Current memory usage : {self.get_virtual_size()/1024/1024}M\\r\\n\"\n\n s += \"ENGINE STATUS ------------------------------------------------------- \\r\\n\"\n s += \"\\r\\n\"\n s += pformat(get_engine_status(self.crawler.engine))\n s += \"\\r\\n\"\n self.mail.send(rcpts, subject, s)\n", "path": "scrapy/extensions/memusage.py"}]}
2,038
266
gh_patches_debug_41505
rasdani/github-patches
git_diff
great-expectations__great_expectations-3279
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Use cleaner solution for non-truncating division in python 2 Prefer `from __future__ import division` to `1.*x/y` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_equal.py` Content: ``` 1 from dateutil.parser import parse 2 3 from great_expectations.execution_engine import ( 4 PandasExecutionEngine, 5 SqlAlchemyExecutionEngine, 6 ) 7 from great_expectations.expectations.metrics.import_manager import sa 8 from great_expectations.expectations.metrics.map_metric_provider import ( 9 ColumnPairMapMetricProvider, 10 column_pair_condition_partial, 11 ) 12 13 14 class ColumnPairValuesEqual(ColumnPairMapMetricProvider): 15 condition_metric_name = "column_pair_values.equal" 16 condition_domain_keys = ( 17 "batch_id", 18 "table", 19 "column_A", 20 "column_B", 21 "row_condition", 22 "condition_parser", 23 "ignore_row_if", 24 ) 25 condition_value_keys = () 26 27 # TODO: <Alex>ALEX -- temporarily only Pandas and SQL Alchemy implementations are provided (Spark to follow).</Alex> 28 @column_pair_condition_partial(engine=PandasExecutionEngine) 29 def _pandas(cls, column_A, column_B, **kwargs): 30 return column_A == column_B 31 32 @column_pair_condition_partial(engine=SqlAlchemyExecutionEngine) 33 def _sqlalchemy(cls, column_A, column_B, **kwargs): 34 return sa.case((column_A == column_B, True), else_=False) 35 ``` Path: `great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_greater.py` Content: ``` 1 from dateutil.parser import parse 2 3 from great_expectations.execution_engine import ( 4 PandasExecutionEngine, 5 SqlAlchemyExecutionEngine, 6 ) 7 from great_expectations.expectations.metrics.import_manager import sa 8 from great_expectations.expectations.metrics.map_metric_provider import ( 9 ColumnPairMapMetricProvider, 10 column_pair_condition_partial, 11 ) 12 13 14 class ColumnPairValuesAGreaterThanB(ColumnPairMapMetricProvider): 15 condition_metric_name = "column_pair_values.a_greater_than_b" 16 condition_domain_keys = ( 17 "batch_id", 18 "table", 19 "column_A", 20 "column_B", 21 "row_condition", 22 "condition_parser", 23 "ignore_row_if", 24 ) 25 condition_value_keys = ( 26 "or_equal", 27 "parse_strings_as_datetimes", 28 "allow_cross_type_comparisons", 29 ) 30 31 # TODO: <Alex>ALEX -- temporarily only Pandas and SQL Alchemy implementations are provided (Spark to follow).</Alex> 32 # noinspection PyPep8Naming 33 @column_pair_condition_partial(engine=PandasExecutionEngine) 34 def _pandas(cls, column_A, column_B, **kwargs): 35 allow_cross_type_comparisons = kwargs.get("allow_cross_type_comparisons") 36 if allow_cross_type_comparisons: 37 raise NotImplementedError 38 39 parse_strings_as_datetimes = kwargs.get("parse_strings_as_datetimes") 40 if parse_strings_as_datetimes: 41 # noinspection PyPep8Naming 42 temp_column_A = column_A.map(parse) 43 # noinspection PyPep8Naming 44 temp_column_B = column_B.map(parse) 45 else: 46 temp_column_A = column_A 47 temp_column_B = column_B 48 49 or_equal = kwargs.get("or_equal") 50 if or_equal: 51 return temp_column_A >= temp_column_B 52 else: 53 return temp_column_A > temp_column_B 54 55 # noinspection PyPep8Naming 56 @column_pair_condition_partial(engine=SqlAlchemyExecutionEngine) 57 def _sqlalchemy(cls, column_A, column_B, **kwargs): 58 allow_cross_type_comparisons = kwargs.get("allow_cross_type_comparisons") 59 if allow_cross_type_comparisons: 60 raise NotImplementedError 61 62 parse_strings_as_datetimes = kwargs.get("parse_strings_as_datetimes") 63 if parse_strings_as_datetimes: 64 raise NotImplementedError 65 66 or_equal = kwargs.get("or_equal") 67 if or_equal: 68 return sa.case((column_A >= column_B, True), else_=False) 69 else: 70 return sa.case((column_A > column_B, True), else_=False) 71 ``` Path: `great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py` Content: ``` 1 from great_expectations.execution_engine import PandasExecutionEngine 2 from great_expectations.expectations.metrics.map_metric_provider import ( 3 MulticolumnMapMetricProvider, 4 multicolumn_condition_partial, 5 ) 6 7 8 class SelectColumnValuesUniqueWithinRecord(MulticolumnMapMetricProvider): 9 condition_metric_name = "select_column_values.unique.within_record" 10 condition_domain_keys = ( 11 "batch_id", 12 "table", 13 "column_list", 14 "row_condition", 15 "condition_parser", 16 "ignore_row_if", 17 ) 18 19 # TODO: <Alex>ALEX -- temporarily only a Pandas implementation is provided (others to follow).</Alex> 20 @multicolumn_condition_partial(engine=PandasExecutionEngine) 21 def _pandas(cls, column_list, **kwargs): 22 num_columns = len(column_list.columns) 23 row_wise_cond = column_list.nunique(dropna=False, axis=1) >= num_columns 24 return row_wise_cond 25 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_equal.py b/great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_equal.py --- a/great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_equal.py +++ b/great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_equal.py @@ -1,5 +1,3 @@ -from dateutil.parser import parse - from great_expectations.execution_engine import ( PandasExecutionEngine, SqlAlchemyExecutionEngine, diff --git a/great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_greater.py b/great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_greater.py --- a/great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_greater.py +++ b/great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_greater.py @@ -38,9 +38,7 @@ parse_strings_as_datetimes = kwargs.get("parse_strings_as_datetimes") if parse_strings_as_datetimes: - # noinspection PyPep8Naming temp_column_A = column_A.map(parse) - # noinspection PyPep8Naming temp_column_B = column_B.map(parse) else: temp_column_A = column_A diff --git a/great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py b/great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py --- a/great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py +++ b/great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py @@ -1,9 +1,17 @@ -from great_expectations.execution_engine import PandasExecutionEngine +import logging + +from great_expectations.execution_engine import ( + PandasExecutionEngine, + SqlAlchemyExecutionEngine, +) +from great_expectations.expectations.metrics.import_manager import sa from great_expectations.expectations.metrics.map_metric_provider import ( MulticolumnMapMetricProvider, multicolumn_condition_partial, ) +logger = logging.getLogger(__name__) + class SelectColumnValuesUniqueWithinRecord(MulticolumnMapMetricProvider): condition_metric_name = "select_column_values.unique.within_record" @@ -16,9 +24,37 @@ "ignore_row_if", ) - # TODO: <Alex>ALEX -- temporarily only a Pandas implementation is provided (others to follow).</Alex> + # TODO: <Alex>ALEX -- temporarily only Pandas and SQL Alchemy implementations are provided (Spark to follow).</Alex> @multicolumn_condition_partial(engine=PandasExecutionEngine) def _pandas(cls, column_list, **kwargs): num_columns = len(column_list.columns) row_wise_cond = column_list.nunique(dropna=False, axis=1) >= num_columns return row_wise_cond + + @multicolumn_condition_partial(engine=SqlAlchemyExecutionEngine) + def _sqlalchemy(cls, column_list, **kwargs): + """ + The present approach relies on an inefficient query condition construction implementation, whose computational + cost is O(num_columns^2). However, until a more efficient implementation compatible with SQLAlchemy is + available, this is the only feasible mechanism under the current architecture, where map metric providers must + return a condition. Nevertheless, SQL query length limit is 1GB (sufficient for most practical scenarios). + """ + num_columns = len(column_list) + + # An arbitrary "num_columns" value used for issuing an explanatory message as a warning. + if num_columns > 100: + logger.warning( + f"""Batch data with {num_columns} columns is detected. Computing the "{cls.condition_metric_name}" \ +metric for wide tables using SQLAlchemy leads to long WHERE clauses for the underlying database engine to process. +""" + ) + + condition = sa.or_() + for idx_src in range(num_columns - 1): + for idx_dest in range(idx_src + 1, num_columns): + condition = sa.or_( + condition, (column_list[idx_src] == column_list[idx_dest]) + ) + + condition = sa.not_(condition) + return sa.case((condition, True), else_=False)
{"golden_diff": "diff --git a/great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_equal.py b/great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_equal.py\n--- a/great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_equal.py\n+++ b/great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_equal.py\n@@ -1,5 +1,3 @@\n-from dateutil.parser import parse\n-\n from great_expectations.execution_engine import (\n PandasExecutionEngine,\n SqlAlchemyExecutionEngine,\ndiff --git a/great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_greater.py b/great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_greater.py\n--- a/great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_greater.py\n+++ b/great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_greater.py\n@@ -38,9 +38,7 @@\n \n parse_strings_as_datetimes = kwargs.get(\"parse_strings_as_datetimes\")\n if parse_strings_as_datetimes:\n- # noinspection PyPep8Naming\n temp_column_A = column_A.map(parse)\n- # noinspection PyPep8Naming\n temp_column_B = column_B.map(parse)\n else:\n temp_column_A = column_A\ndiff --git a/great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py b/great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py\n--- a/great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py\n+++ b/great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py\n@@ -1,9 +1,17 @@\n-from great_expectations.execution_engine import PandasExecutionEngine\n+import logging\n+\n+from great_expectations.execution_engine import (\n+ PandasExecutionEngine,\n+ SqlAlchemyExecutionEngine,\n+)\n+from great_expectations.expectations.metrics.import_manager import sa\n from great_expectations.expectations.metrics.map_metric_provider import (\n MulticolumnMapMetricProvider,\n multicolumn_condition_partial,\n )\n \n+logger = logging.getLogger(__name__)\n+\n \n class SelectColumnValuesUniqueWithinRecord(MulticolumnMapMetricProvider):\n condition_metric_name = \"select_column_values.unique.within_record\"\n@@ -16,9 +24,37 @@\n \"ignore_row_if\",\n )\n \n- # TODO: <Alex>ALEX -- temporarily only a Pandas implementation is provided (others to follow).</Alex>\n+ # TODO: <Alex>ALEX -- temporarily only Pandas and SQL Alchemy implementations are provided (Spark to follow).</Alex>\n @multicolumn_condition_partial(engine=PandasExecutionEngine)\n def _pandas(cls, column_list, **kwargs):\n num_columns = len(column_list.columns)\n row_wise_cond = column_list.nunique(dropna=False, axis=1) >= num_columns\n return row_wise_cond\n+\n+ @multicolumn_condition_partial(engine=SqlAlchemyExecutionEngine)\n+ def _sqlalchemy(cls, column_list, **kwargs):\n+ \"\"\"\n+ The present approach relies on an inefficient query condition construction implementation, whose computational\n+ cost is O(num_columns^2). However, until a more efficient implementation compatible with SQLAlchemy is\n+ available, this is the only feasible mechanism under the current architecture, where map metric providers must\n+ return a condition. Nevertheless, SQL query length limit is 1GB (sufficient for most practical scenarios).\n+ \"\"\"\n+ num_columns = len(column_list)\n+\n+ # An arbitrary \"num_columns\" value used for issuing an explanatory message as a warning.\n+ if num_columns > 100:\n+ logger.warning(\n+ f\"\"\"Batch data with {num_columns} columns is detected. Computing the \"{cls.condition_metric_name}\" \\\n+metric for wide tables using SQLAlchemy leads to long WHERE clauses for the underlying database engine to process.\n+\"\"\"\n+ )\n+\n+ condition = sa.or_()\n+ for idx_src in range(num_columns - 1):\n+ for idx_dest in range(idx_src + 1, num_columns):\n+ condition = sa.or_(\n+ condition, (column_list[idx_src] == column_list[idx_dest])\n+ )\n+\n+ condition = sa.not_(condition)\n+ return sa.case((condition, True), else_=False)\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "from dateutil.parser import parse\n\nfrom great_expectations.execution_engine import (\n PandasExecutionEngine,\n SqlAlchemyExecutionEngine,\n)\nfrom great_expectations.expectations.metrics.import_manager import sa\nfrom great_expectations.expectations.metrics.map_metric_provider import (\n ColumnPairMapMetricProvider,\n column_pair_condition_partial,\n)\n\n\nclass ColumnPairValuesEqual(ColumnPairMapMetricProvider):\n condition_metric_name = \"column_pair_values.equal\"\n condition_domain_keys = (\n \"batch_id\",\n \"table\",\n \"column_A\",\n \"column_B\",\n \"row_condition\",\n \"condition_parser\",\n \"ignore_row_if\",\n )\n condition_value_keys = ()\n\n # TODO: <Alex>ALEX -- temporarily only Pandas and SQL Alchemy implementations are provided (Spark to follow).</Alex>\n @column_pair_condition_partial(engine=PandasExecutionEngine)\n def _pandas(cls, column_A, column_B, **kwargs):\n return column_A == column_B\n\n @column_pair_condition_partial(engine=SqlAlchemyExecutionEngine)\n def _sqlalchemy(cls, column_A, column_B, **kwargs):\n return sa.case((column_A == column_B, True), else_=False)\n", "path": "great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_equal.py"}, {"content": "from dateutil.parser import parse\n\nfrom great_expectations.execution_engine import (\n PandasExecutionEngine,\n SqlAlchemyExecutionEngine,\n)\nfrom great_expectations.expectations.metrics.import_manager import sa\nfrom great_expectations.expectations.metrics.map_metric_provider import (\n ColumnPairMapMetricProvider,\n column_pair_condition_partial,\n)\n\n\nclass ColumnPairValuesAGreaterThanB(ColumnPairMapMetricProvider):\n condition_metric_name = \"column_pair_values.a_greater_than_b\"\n condition_domain_keys = (\n \"batch_id\",\n \"table\",\n \"column_A\",\n \"column_B\",\n \"row_condition\",\n \"condition_parser\",\n \"ignore_row_if\",\n )\n condition_value_keys = (\n \"or_equal\",\n \"parse_strings_as_datetimes\",\n \"allow_cross_type_comparisons\",\n )\n\n # TODO: <Alex>ALEX -- temporarily only Pandas and SQL Alchemy implementations are provided (Spark to follow).</Alex>\n # noinspection PyPep8Naming\n @column_pair_condition_partial(engine=PandasExecutionEngine)\n def _pandas(cls, column_A, column_B, **kwargs):\n allow_cross_type_comparisons = kwargs.get(\"allow_cross_type_comparisons\")\n if allow_cross_type_comparisons:\n raise NotImplementedError\n\n parse_strings_as_datetimes = kwargs.get(\"parse_strings_as_datetimes\")\n if parse_strings_as_datetimes:\n # noinspection PyPep8Naming\n temp_column_A = column_A.map(parse)\n # noinspection PyPep8Naming\n temp_column_B = column_B.map(parse)\n else:\n temp_column_A = column_A\n temp_column_B = column_B\n\n or_equal = kwargs.get(\"or_equal\")\n if or_equal:\n return temp_column_A >= temp_column_B\n else:\n return temp_column_A > temp_column_B\n\n # noinspection PyPep8Naming\n @column_pair_condition_partial(engine=SqlAlchemyExecutionEngine)\n def _sqlalchemy(cls, column_A, column_B, **kwargs):\n allow_cross_type_comparisons = kwargs.get(\"allow_cross_type_comparisons\")\n if allow_cross_type_comparisons:\n raise NotImplementedError\n\n parse_strings_as_datetimes = kwargs.get(\"parse_strings_as_datetimes\")\n if parse_strings_as_datetimes:\n raise NotImplementedError\n\n or_equal = kwargs.get(\"or_equal\")\n if or_equal:\n return sa.case((column_A >= column_B, True), else_=False)\n else:\n return sa.case((column_A > column_B, True), else_=False)\n", "path": "great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_greater.py"}, {"content": "from great_expectations.execution_engine import PandasExecutionEngine\nfrom great_expectations.expectations.metrics.map_metric_provider import (\n MulticolumnMapMetricProvider,\n multicolumn_condition_partial,\n)\n\n\nclass SelectColumnValuesUniqueWithinRecord(MulticolumnMapMetricProvider):\n condition_metric_name = \"select_column_values.unique.within_record\"\n condition_domain_keys = (\n \"batch_id\",\n \"table\",\n \"column_list\",\n \"row_condition\",\n \"condition_parser\",\n \"ignore_row_if\",\n )\n\n # TODO: <Alex>ALEX -- temporarily only a Pandas implementation is provided (others to follow).</Alex>\n @multicolumn_condition_partial(engine=PandasExecutionEngine)\n def _pandas(cls, column_list, **kwargs):\n num_columns = len(column_list.columns)\n row_wise_cond = column_list.nunique(dropna=False, axis=1) >= num_columns\n return row_wise_cond\n", "path": "great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py"}], "after_files": [{"content": "from great_expectations.execution_engine import (\n PandasExecutionEngine,\n SqlAlchemyExecutionEngine,\n)\nfrom great_expectations.expectations.metrics.import_manager import sa\nfrom great_expectations.expectations.metrics.map_metric_provider import (\n ColumnPairMapMetricProvider,\n column_pair_condition_partial,\n)\n\n\nclass ColumnPairValuesEqual(ColumnPairMapMetricProvider):\n condition_metric_name = \"column_pair_values.equal\"\n condition_domain_keys = (\n \"batch_id\",\n \"table\",\n \"column_A\",\n \"column_B\",\n \"row_condition\",\n \"condition_parser\",\n \"ignore_row_if\",\n )\n condition_value_keys = ()\n\n # TODO: <Alex>ALEX -- temporarily only Pandas and SQL Alchemy implementations are provided (Spark to follow).</Alex>\n @column_pair_condition_partial(engine=PandasExecutionEngine)\n def _pandas(cls, column_A, column_B, **kwargs):\n return column_A == column_B\n\n @column_pair_condition_partial(engine=SqlAlchemyExecutionEngine)\n def _sqlalchemy(cls, column_A, column_B, **kwargs):\n return sa.case((column_A == column_B, True), else_=False)\n", "path": "great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_equal.py"}, {"content": "from dateutil.parser import parse\n\nfrom great_expectations.execution_engine import (\n PandasExecutionEngine,\n SqlAlchemyExecutionEngine,\n)\nfrom great_expectations.expectations.metrics.import_manager import sa\nfrom great_expectations.expectations.metrics.map_metric_provider import (\n ColumnPairMapMetricProvider,\n column_pair_condition_partial,\n)\n\n\nclass ColumnPairValuesAGreaterThanB(ColumnPairMapMetricProvider):\n condition_metric_name = \"column_pair_values.a_greater_than_b\"\n condition_domain_keys = (\n \"batch_id\",\n \"table\",\n \"column_A\",\n \"column_B\",\n \"row_condition\",\n \"condition_parser\",\n \"ignore_row_if\",\n )\n condition_value_keys = (\n \"or_equal\",\n \"parse_strings_as_datetimes\",\n \"allow_cross_type_comparisons\",\n )\n\n # TODO: <Alex>ALEX -- temporarily only Pandas and SQL Alchemy implementations are provided (Spark to follow).</Alex>\n # noinspection PyPep8Naming\n @column_pair_condition_partial(engine=PandasExecutionEngine)\n def _pandas(cls, column_A, column_B, **kwargs):\n allow_cross_type_comparisons = kwargs.get(\"allow_cross_type_comparisons\")\n if allow_cross_type_comparisons:\n raise NotImplementedError\n\n parse_strings_as_datetimes = kwargs.get(\"parse_strings_as_datetimes\")\n if parse_strings_as_datetimes:\n temp_column_A = column_A.map(parse)\n temp_column_B = column_B.map(parse)\n else:\n temp_column_A = column_A\n temp_column_B = column_B\n\n or_equal = kwargs.get(\"or_equal\")\n if or_equal:\n return temp_column_A >= temp_column_B\n else:\n return temp_column_A > temp_column_B\n\n # noinspection PyPep8Naming\n @column_pair_condition_partial(engine=SqlAlchemyExecutionEngine)\n def _sqlalchemy(cls, column_A, column_B, **kwargs):\n allow_cross_type_comparisons = kwargs.get(\"allow_cross_type_comparisons\")\n if allow_cross_type_comparisons:\n raise NotImplementedError\n\n parse_strings_as_datetimes = kwargs.get(\"parse_strings_as_datetimes\")\n if parse_strings_as_datetimes:\n raise NotImplementedError\n\n or_equal = kwargs.get(\"or_equal\")\n if or_equal:\n return sa.case((column_A >= column_B, True), else_=False)\n else:\n return sa.case((column_A > column_B, True), else_=False)\n", "path": "great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_greater.py"}, {"content": "import logging\n\nfrom great_expectations.execution_engine import (\n PandasExecutionEngine,\n SqlAlchemyExecutionEngine,\n)\nfrom great_expectations.expectations.metrics.import_manager import sa\nfrom great_expectations.expectations.metrics.map_metric_provider import (\n MulticolumnMapMetricProvider,\n multicolumn_condition_partial,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass SelectColumnValuesUniqueWithinRecord(MulticolumnMapMetricProvider):\n condition_metric_name = \"select_column_values.unique.within_record\"\n condition_domain_keys = (\n \"batch_id\",\n \"table\",\n \"column_list\",\n \"row_condition\",\n \"condition_parser\",\n \"ignore_row_if\",\n )\n\n # TODO: <Alex>ALEX -- temporarily only Pandas and SQL Alchemy implementations are provided (Spark to follow).</Alex>\n @multicolumn_condition_partial(engine=PandasExecutionEngine)\n def _pandas(cls, column_list, **kwargs):\n num_columns = len(column_list.columns)\n row_wise_cond = column_list.nunique(dropna=False, axis=1) >= num_columns\n return row_wise_cond\n\n @multicolumn_condition_partial(engine=SqlAlchemyExecutionEngine)\n def _sqlalchemy(cls, column_list, **kwargs):\n \"\"\"\n The present approach relies on an inefficient query condition construction implementation, whose computational\n cost is O(num_columns^2). However, until a more efficient implementation compatible with SQLAlchemy is\n available, this is the only feasible mechanism under the current architecture, where map metric providers must\n return a condition. Nevertheless, SQL query length limit is 1GB (sufficient for most practical scenarios).\n \"\"\"\n num_columns = len(column_list)\n\n # An arbitrary \"num_columns\" value used for issuing an explanatory message as a warning.\n if num_columns > 100:\n logger.warning(\n f\"\"\"Batch data with {num_columns} columns is detected. Computing the \"{cls.condition_metric_name}\" \\\nmetric for wide tables using SQLAlchemy leads to long WHERE clauses for the underlying database engine to process.\n\"\"\"\n )\n\n condition = sa.or_()\n for idx_src in range(num_columns - 1):\n for idx_dest in range(idx_src + 1, num_columns):\n condition = sa.or_(\n condition, (column_list[idx_src] == column_list[idx_dest])\n )\n\n condition = sa.not_(condition)\n return sa.case((condition, True), else_=False)\n", "path": "great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py"}]}
1,588
974
gh_patches_debug_8219
rasdani/github-patches
git_diff
liqd__a4-meinberlin-1411
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Type Error in plans list if a project connected to the plans list adds a module, but no phase dates, the date might become none and the whole list will not be shown --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `meinberlin/apps/plans/views.py` Content: ``` 1 import json 2 3 from django.conf import settings 4 from django.contrib import messages 5 from django.core.exceptions import ObjectDoesNotExist 6 from django.urls import reverse 7 from django.utils import timezone 8 from django.utils.translation import ugettext 9 from django.utils.translation import ugettext_lazy as _ 10 from django.views import generic 11 12 from adhocracy4.dashboard import mixins as a4dashboard_mixins 13 from adhocracy4.exports import mixins as export_mixins 14 from adhocracy4.exports import unescape_and_strip_html 15 from adhocracy4.exports import views as export_views 16 from adhocracy4.rules import mixins as rules_mixins 17 from meinberlin.apps.contrib.views import CanonicalURLDetailView 18 from meinberlin.apps.maps.models import MapPreset 19 from meinberlin.apps.plans.forms import PlanForm 20 from meinberlin.apps.plans.models import Plan 21 22 from . import models 23 24 25 class PlanDetailView(rules_mixins.PermissionRequiredMixin, 26 CanonicalURLDetailView): 27 model = models.Plan 28 template_name = 'meinberlin_plans/plan_detail.html' 29 permission_required = 'meinberlin_plans.view_plan' 30 31 def get_context_data(self, **kwargs): 32 context = super().get_context_data(**kwargs) 33 context['berlin_polygon'] = settings.BERLIN_POLYGON 34 return context 35 36 37 class PlanListView(rules_mixins.PermissionRequiredMixin, 38 generic.ListView): 39 model = models.Plan 40 template_name = 'meinberlin_plans/plan_list.html' 41 permission_required = 'meinberlin_plans.list_plan' 42 43 def get_districts(self): 44 try: 45 return MapPreset.objects.filter( 46 category__name='Bezirke - Berlin') 47 except ObjectDoesNotExist: 48 return [] 49 50 def _get_status_string(self, projects): 51 52 future_phase = None 53 for project in projects: 54 phases = project.phases 55 if phases.active_phases(): 56 return ugettext('running') 57 if phases.future_phases(): 58 date = phases.future_phases().first().start_date 59 if not future_phase: 60 future_phase = date 61 else: 62 if date < future_phase: 63 future_phase = date 64 65 if future_phase: 66 return ugettext('starts at {}').format(future_phase.date()) 67 68 def _get_participation_status(self, item): 69 projects = item.projects.all()\ 70 .filter(is_draft=False, 71 is_archived=False, 72 is_public=True) 73 if not projects: 74 return item.get_participation_display(), False 75 else: 76 status_string = self._get_status_string(projects) 77 if status_string: 78 return status_string, True 79 else: 80 return item.get_participation_display(), False 81 82 def get_context_data(self, **kwargs): 83 context = super().get_context_data(**kwargs) 84 85 districts = self.get_districts() 86 87 district_list = json.dumps([district.polygon 88 for district in districts]) 89 district_names = json.dumps([district.name 90 for district in districts]) 91 context['districts'] = district_list 92 context['district_names'] = district_names 93 94 items = sorted(context['object_list'], 95 key=lambda x: x.modified or x.created, 96 reverse=True) 97 98 result = [] 99 100 for item in items: 101 participation_string, active = self._get_participation_status(item) 102 result.append({ 103 'title': item.title, 104 'url': item.get_absolute_url(), 105 'organisation': item.organisation.name, 106 'point': item.point, 107 'point_label': item.point_label, 108 'cost': item.cost, 109 'district': item.district.name, 110 'category': item.category, 111 'status': item.status, 112 'status_display': item.get_status_display(), 113 'participation_string': participation_string, 114 'participation_active': active, 115 'participation': item.participation, 116 'participation_display': item.get_participation_display(), 117 }) 118 119 context['items'] = json.dumps(result) 120 context['baseurl'] = settings.A4_MAP_BASEURL 121 context['attribution'] = settings.A4_MAP_ATTRIBUTION 122 context['bounds'] = json.dumps(settings.A4_MAP_BOUNDING_BOX) 123 124 return context 125 126 127 class PlanExportView(rules_mixins.PermissionRequiredMixin, 128 export_mixins.ItemExportWithLinkMixin, 129 export_mixins.ExportModelFieldsMixin, 130 export_mixins.ItemExportWithLocationMixin, 131 export_views.BaseExport, 132 export_views.AbstractXlsxExportView): 133 134 permission_required = 'meinberlin_plans.list_plan' 135 model = models.Plan 136 fields = ['title', 'organisation', 'contact', 'district', 'cost', 137 'description', 'category', 'status', 'participation'] 138 html_fields = ['description'] 139 140 def get_object_list(self): 141 return models.Plan.objects.all() 142 143 def get_base_filename(self): 144 return 'plans_%s' % timezone.now().strftime('%Y%m%dT%H%M%S') 145 146 def get_virtual_fields(self, virtual): 147 virtual = super().get_virtual_fields(virtual) 148 virtual['projects'] = ugettext('Projects') 149 virtual['projects_links'] = ugettext('Project Links') 150 return virtual 151 152 def get_organisation_data(self, item): 153 return item.organisation.name 154 155 def get_district_data(self, item): 156 return item.district.name 157 158 def get_contact_data(self, item): 159 return unescape_and_strip_html(item.contact) 160 161 def get_status_data(self, item): 162 return item.get_status_display() 163 164 def get_participation_data(self, item): 165 return item.get_participation_display() 166 167 def get_description_data(self, item): 168 return unescape_and_strip_html(item.description) 169 170 def get_projects_data(self, item): 171 if item.projects.all(): 172 return ', \n'.join( 173 [project.name 174 for project in item.projects.all()] 175 ) 176 return '' 177 178 def get_projects_links_data(self, item): 179 if item.projects.all(): 180 return str([self.request.build_absolute_uri( 181 project.get_absolute_url()) 182 for project in item.projects.all() 183 ]) 184 return '' 185 186 187 class DashboardPlanListView(a4dashboard_mixins.DashboardBaseMixin, 188 generic.ListView): 189 model = Plan 190 template_name = 'meinberlin_plans/plan_dashboard_list.html' 191 permission_required = 'meinberlin_plans.add_plan' 192 menu_item = 'project' 193 194 def get_permission_object(self): 195 return self.organisation 196 197 def get_queryset(self): 198 return super().get_queryset().filter(organisation=self.organisation) 199 200 201 class DashboardPlanCreateView(a4dashboard_mixins.DashboardBaseMixin, 202 generic.CreateView): 203 model = Plan 204 form_class = PlanForm 205 permission_required = 'meinberlin_plans.add_plan' 206 template_name = 'meinberlin_plans/plan_create_form.html' 207 menu_item = 'project' 208 209 def form_valid(self, form): 210 form.instance.creator = self.request.user 211 form.instance.organisation = self.organisation 212 return super().form_valid(form) 213 214 def get_permission_object(self): 215 return self.organisation 216 217 def get_success_url(self): 218 return reverse( 219 'a4dashboard:plan-list', 220 kwargs={'organisation_slug': self.organisation.slug}) 221 222 223 class DashboardPlanUpdateView(a4dashboard_mixins.DashboardBaseMixin, 224 generic.UpdateView): 225 model = Plan 226 form_class = PlanForm 227 permission_required = 'meinberlin_plans.change_plan' 228 template_name = 'meinberlin_plans/plan_update_form.html' 229 menu_item = 'project' 230 231 def get_permission_object(self): 232 return self.organisation 233 234 def get_success_url(self): 235 return reverse( 236 'a4dashboard:plan-list', 237 kwargs={'organisation_slug': self.organisation.slug}) 238 239 240 class DashboardPlanDeleteView(a4dashboard_mixins.DashboardBaseMixin, 241 generic.DeleteView): 242 model = Plan 243 success_message = _('The plan has been deleted') 244 permission_required = 'meinberlin_plans.change_plan' 245 template_name = 'meinberlin_plans/plan_confirm_delete.html' 246 menu_item = 'project' 247 248 def delete(self, request, *args, **kwargs): 249 messages.success(self.request, self.success_message) 250 return super().delete(request, *args, **kwargs) 251 252 def get_permission_object(self): 253 return self.organisation 254 255 def get_success_url(self): 256 return reverse( 257 'a4dashboard:plan-list', 258 kwargs={'organisation_slug': self.organisation.slug}) 259 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/meinberlin/apps/plans/views.py b/meinberlin/apps/plans/views.py --- a/meinberlin/apps/plans/views.py +++ b/meinberlin/apps/plans/views.py @@ -54,7 +54,8 @@ phases = project.phases if phases.active_phases(): return ugettext('running') - if phases.future_phases(): + if phases.future_phases() and \ + phases.future_phases().first().start_date: date = phases.future_phases().first().start_date if not future_phase: future_phase = date
{"golden_diff": "diff --git a/meinberlin/apps/plans/views.py b/meinberlin/apps/plans/views.py\n--- a/meinberlin/apps/plans/views.py\n+++ b/meinberlin/apps/plans/views.py\n@@ -54,7 +54,8 @@\n phases = project.phases\n if phases.active_phases():\n return ugettext('running')\n- if phases.future_phases():\n+ if phases.future_phases() and \\\n+ phases.future_phases().first().start_date:\n date = phases.future_phases().first().start_date\n if not future_phase:\n future_phase = date\n", "issue": "Type Error in plans list\nif a project connected to the plans list adds a module, but no phase dates, the date might become none and the whole list will not be shown\n", "before_files": [{"content": "import json\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views import generic\n\nfrom adhocracy4.dashboard import mixins as a4dashboard_mixins\nfrom adhocracy4.exports import mixins as export_mixins\nfrom adhocracy4.exports import unescape_and_strip_html\nfrom adhocracy4.exports import views as export_views\nfrom adhocracy4.rules import mixins as rules_mixins\nfrom meinberlin.apps.contrib.views import CanonicalURLDetailView\nfrom meinberlin.apps.maps.models import MapPreset\nfrom meinberlin.apps.plans.forms import PlanForm\nfrom meinberlin.apps.plans.models import Plan\n\nfrom . import models\n\n\nclass PlanDetailView(rules_mixins.PermissionRequiredMixin,\n CanonicalURLDetailView):\n model = models.Plan\n template_name = 'meinberlin_plans/plan_detail.html'\n permission_required = 'meinberlin_plans.view_plan'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['berlin_polygon'] = settings.BERLIN_POLYGON\n return context\n\n\nclass PlanListView(rules_mixins.PermissionRequiredMixin,\n generic.ListView):\n model = models.Plan\n template_name = 'meinberlin_plans/plan_list.html'\n permission_required = 'meinberlin_plans.list_plan'\n\n def get_districts(self):\n try:\n return MapPreset.objects.filter(\n category__name='Bezirke - Berlin')\n except ObjectDoesNotExist:\n return []\n\n def _get_status_string(self, projects):\n\n future_phase = None\n for project in projects:\n phases = project.phases\n if phases.active_phases():\n return ugettext('running')\n if phases.future_phases():\n date = phases.future_phases().first().start_date\n if not future_phase:\n future_phase = date\n else:\n if date < future_phase:\n future_phase = date\n\n if future_phase:\n return ugettext('starts at {}').format(future_phase.date())\n\n def _get_participation_status(self, item):\n projects = item.projects.all()\\\n .filter(is_draft=False,\n is_archived=False,\n is_public=True)\n if not projects:\n return item.get_participation_display(), False\n else:\n status_string = self._get_status_string(projects)\n if status_string:\n return status_string, True\n else:\n return item.get_participation_display(), False\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n districts = self.get_districts()\n\n district_list = json.dumps([district.polygon\n for district in districts])\n district_names = json.dumps([district.name\n for district in districts])\n context['districts'] = district_list\n context['district_names'] = district_names\n\n items = sorted(context['object_list'],\n key=lambda x: x.modified or x.created,\n reverse=True)\n\n result = []\n\n for item in items:\n participation_string, active = self._get_participation_status(item)\n result.append({\n 'title': item.title,\n 'url': item.get_absolute_url(),\n 'organisation': item.organisation.name,\n 'point': item.point,\n 'point_label': item.point_label,\n 'cost': item.cost,\n 'district': item.district.name,\n 'category': item.category,\n 'status': item.status,\n 'status_display': item.get_status_display(),\n 'participation_string': participation_string,\n 'participation_active': active,\n 'participation': item.participation,\n 'participation_display': item.get_participation_display(),\n })\n\n context['items'] = json.dumps(result)\n context['baseurl'] = settings.A4_MAP_BASEURL\n context['attribution'] = settings.A4_MAP_ATTRIBUTION\n context['bounds'] = json.dumps(settings.A4_MAP_BOUNDING_BOX)\n\n return context\n\n\nclass PlanExportView(rules_mixins.PermissionRequiredMixin,\n export_mixins.ItemExportWithLinkMixin,\n export_mixins.ExportModelFieldsMixin,\n export_mixins.ItemExportWithLocationMixin,\n export_views.BaseExport,\n export_views.AbstractXlsxExportView):\n\n permission_required = 'meinberlin_plans.list_plan'\n model = models.Plan\n fields = ['title', 'organisation', 'contact', 'district', 'cost',\n 'description', 'category', 'status', 'participation']\n html_fields = ['description']\n\n def get_object_list(self):\n return models.Plan.objects.all()\n\n def get_base_filename(self):\n return 'plans_%s' % timezone.now().strftime('%Y%m%dT%H%M%S')\n\n def get_virtual_fields(self, virtual):\n virtual = super().get_virtual_fields(virtual)\n virtual['projects'] = ugettext('Projects')\n virtual['projects_links'] = ugettext('Project Links')\n return virtual\n\n def get_organisation_data(self, item):\n return item.organisation.name\n\n def get_district_data(self, item):\n return item.district.name\n\n def get_contact_data(self, item):\n return unescape_and_strip_html(item.contact)\n\n def get_status_data(self, item):\n return item.get_status_display()\n\n def get_participation_data(self, item):\n return item.get_participation_display()\n\n def get_description_data(self, item):\n return unescape_and_strip_html(item.description)\n\n def get_projects_data(self, item):\n if item.projects.all():\n return ', \\n'.join(\n [project.name\n for project in item.projects.all()]\n )\n return ''\n\n def get_projects_links_data(self, item):\n if item.projects.all():\n return str([self.request.build_absolute_uri(\n project.get_absolute_url())\n for project in item.projects.all()\n ])\n return ''\n\n\nclass DashboardPlanListView(a4dashboard_mixins.DashboardBaseMixin,\n generic.ListView):\n model = Plan\n template_name = 'meinberlin_plans/plan_dashboard_list.html'\n permission_required = 'meinberlin_plans.add_plan'\n menu_item = 'project'\n\n def get_permission_object(self):\n return self.organisation\n\n def get_queryset(self):\n return super().get_queryset().filter(organisation=self.organisation)\n\n\nclass DashboardPlanCreateView(a4dashboard_mixins.DashboardBaseMixin,\n generic.CreateView):\n model = Plan\n form_class = PlanForm\n permission_required = 'meinberlin_plans.add_plan'\n template_name = 'meinberlin_plans/plan_create_form.html'\n menu_item = 'project'\n\n def form_valid(self, form):\n form.instance.creator = self.request.user\n form.instance.organisation = self.organisation\n return super().form_valid(form)\n\n def get_permission_object(self):\n return self.organisation\n\n def get_success_url(self):\n return reverse(\n 'a4dashboard:plan-list',\n kwargs={'organisation_slug': self.organisation.slug})\n\n\nclass DashboardPlanUpdateView(a4dashboard_mixins.DashboardBaseMixin,\n generic.UpdateView):\n model = Plan\n form_class = PlanForm\n permission_required = 'meinberlin_plans.change_plan'\n template_name = 'meinberlin_plans/plan_update_form.html'\n menu_item = 'project'\n\n def get_permission_object(self):\n return self.organisation\n\n def get_success_url(self):\n return reverse(\n 'a4dashboard:plan-list',\n kwargs={'organisation_slug': self.organisation.slug})\n\n\nclass DashboardPlanDeleteView(a4dashboard_mixins.DashboardBaseMixin,\n generic.DeleteView):\n model = Plan\n success_message = _('The plan has been deleted')\n permission_required = 'meinberlin_plans.change_plan'\n template_name = 'meinberlin_plans/plan_confirm_delete.html'\n menu_item = 'project'\n\n def delete(self, request, *args, **kwargs):\n messages.success(self.request, self.success_message)\n return super().delete(request, *args, **kwargs)\n\n def get_permission_object(self):\n return self.organisation\n\n def get_success_url(self):\n return reverse(\n 'a4dashboard:plan-list',\n kwargs={'organisation_slug': self.organisation.slug})\n", "path": "meinberlin/apps/plans/views.py"}], "after_files": [{"content": "import json\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views import generic\n\nfrom adhocracy4.dashboard import mixins as a4dashboard_mixins\nfrom adhocracy4.exports import mixins as export_mixins\nfrom adhocracy4.exports import unescape_and_strip_html\nfrom adhocracy4.exports import views as export_views\nfrom adhocracy4.rules import mixins as rules_mixins\nfrom meinberlin.apps.contrib.views import CanonicalURLDetailView\nfrom meinberlin.apps.maps.models import MapPreset\nfrom meinberlin.apps.plans.forms import PlanForm\nfrom meinberlin.apps.plans.models import Plan\n\nfrom . import models\n\n\nclass PlanDetailView(rules_mixins.PermissionRequiredMixin,\n CanonicalURLDetailView):\n model = models.Plan\n template_name = 'meinberlin_plans/plan_detail.html'\n permission_required = 'meinberlin_plans.view_plan'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['berlin_polygon'] = settings.BERLIN_POLYGON\n return context\n\n\nclass PlanListView(rules_mixins.PermissionRequiredMixin,\n generic.ListView):\n model = models.Plan\n template_name = 'meinberlin_plans/plan_list.html'\n permission_required = 'meinberlin_plans.list_plan'\n\n def get_districts(self):\n try:\n return MapPreset.objects.filter(\n category__name='Bezirke - Berlin')\n except ObjectDoesNotExist:\n return []\n\n def _get_status_string(self, projects):\n\n future_phase = None\n for project in projects:\n phases = project.phases\n if phases.active_phases():\n return ugettext('running')\n if phases.future_phases() and \\\n phases.future_phases().first().start_date:\n date = phases.future_phases().first().start_date\n if not future_phase:\n future_phase = date\n else:\n if date < future_phase:\n future_phase = date\n\n if future_phase:\n return ugettext('starts at {}').format(future_phase.date())\n\n def _get_participation_status(self, item):\n projects = item.projects.all()\\\n .filter(is_draft=False,\n is_archived=False,\n is_public=True)\n if not projects:\n return item.get_participation_display(), False\n else:\n status_string = self._get_status_string(projects)\n if status_string:\n return status_string, True\n else:\n return item.get_participation_display(), False\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n districts = self.get_districts()\n\n district_list = json.dumps([district.polygon\n for district in districts])\n district_names = json.dumps([district.name\n for district in districts])\n context['districts'] = district_list\n context['district_names'] = district_names\n\n items = sorted(context['object_list'],\n key=lambda x: x.modified or x.created,\n reverse=True)\n\n result = []\n\n for item in items:\n participation_string, active = self._get_participation_status(item)\n result.append({\n 'title': item.title,\n 'url': item.get_absolute_url(),\n 'organisation': item.organisation.name,\n 'point': item.point,\n 'point_label': item.point_label,\n 'cost': item.cost,\n 'district': item.district.name,\n 'category': item.category,\n 'status': item.status,\n 'status_display': item.get_status_display(),\n 'participation_string': participation_string,\n 'participation_active': active,\n 'participation': item.participation,\n 'participation_display': item.get_participation_display(),\n })\n\n context['items'] = json.dumps(result)\n context['baseurl'] = settings.A4_MAP_BASEURL\n context['attribution'] = settings.A4_MAP_ATTRIBUTION\n context['bounds'] = json.dumps(settings.A4_MAP_BOUNDING_BOX)\n\n return context\n\n\nclass PlanExportView(rules_mixins.PermissionRequiredMixin,\n export_mixins.ItemExportWithLinkMixin,\n export_mixins.ExportModelFieldsMixin,\n export_mixins.ItemExportWithLocationMixin,\n export_views.BaseExport,\n export_views.AbstractXlsxExportView):\n\n permission_required = 'meinberlin_plans.list_plan'\n model = models.Plan\n fields = ['title', 'organisation', 'contact', 'district', 'cost',\n 'description', 'category', 'status', 'participation']\n html_fields = ['description']\n\n def get_object_list(self):\n return models.Plan.objects.all()\n\n def get_base_filename(self):\n return 'plans_%s' % timezone.now().strftime('%Y%m%dT%H%M%S')\n\n def get_virtual_fields(self, virtual):\n virtual = super().get_virtual_fields(virtual)\n virtual['projects'] = ugettext('Projects')\n virtual['projects_links'] = ugettext('Project Links')\n return virtual\n\n def get_organisation_data(self, item):\n return item.organisation.name\n\n def get_district_data(self, item):\n return item.district.name\n\n def get_contact_data(self, item):\n return unescape_and_strip_html(item.contact)\n\n def get_status_data(self, item):\n return item.get_status_display()\n\n def get_participation_data(self, item):\n return item.get_participation_display()\n\n def get_description_data(self, item):\n return unescape_and_strip_html(item.description)\n\n def get_projects_data(self, item):\n if item.projects.all():\n return ', \\n'.join(\n [project.name\n for project in item.projects.all()]\n )\n return ''\n\n def get_projects_links_data(self, item):\n if item.projects.all():\n return str([self.request.build_absolute_uri(\n project.get_absolute_url())\n for project in item.projects.all()\n ])\n return ''\n\n\nclass DashboardPlanListView(a4dashboard_mixins.DashboardBaseMixin,\n generic.ListView):\n model = Plan\n template_name = 'meinberlin_plans/plan_dashboard_list.html'\n permission_required = 'meinberlin_plans.add_plan'\n menu_item = 'project'\n\n def get_permission_object(self):\n return self.organisation\n\n def get_queryset(self):\n return super().get_queryset().filter(organisation=self.organisation)\n\n\nclass DashboardPlanCreateView(a4dashboard_mixins.DashboardBaseMixin,\n generic.CreateView):\n model = Plan\n form_class = PlanForm\n permission_required = 'meinberlin_plans.add_plan'\n template_name = 'meinberlin_plans/plan_create_form.html'\n menu_item = 'project'\n\n def form_valid(self, form):\n form.instance.creator = self.request.user\n form.instance.organisation = self.organisation\n return super().form_valid(form)\n\n def get_permission_object(self):\n return self.organisation\n\n def get_success_url(self):\n return reverse(\n 'a4dashboard:plan-list',\n kwargs={'organisation_slug': self.organisation.slug})\n\n\nclass DashboardPlanUpdateView(a4dashboard_mixins.DashboardBaseMixin,\n generic.UpdateView):\n model = Plan\n form_class = PlanForm\n permission_required = 'meinberlin_plans.change_plan'\n template_name = 'meinberlin_plans/plan_update_form.html'\n menu_item = 'project'\n\n def get_permission_object(self):\n return self.organisation\n\n def get_success_url(self):\n return reverse(\n 'a4dashboard:plan-list',\n kwargs={'organisation_slug': self.organisation.slug})\n\n\nclass DashboardPlanDeleteView(a4dashboard_mixins.DashboardBaseMixin,\n generic.DeleteView):\n model = Plan\n success_message = _('The plan has been deleted')\n permission_required = 'meinberlin_plans.change_plan'\n template_name = 'meinberlin_plans/plan_confirm_delete.html'\n menu_item = 'project'\n\n def delete(self, request, *args, **kwargs):\n messages.success(self.request, self.success_message)\n return super().delete(request, *args, **kwargs)\n\n def get_permission_object(self):\n return self.organisation\n\n def get_success_url(self):\n return reverse(\n 'a4dashboard:plan-list',\n kwargs={'organisation_slug': self.organisation.slug})\n", "path": "meinberlin/apps/plans/views.py"}]}
2,804
136
gh_patches_debug_6723
rasdani/github-patches
git_diff
pre-commit__pre-commit-427
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- setup.cfg prevent pre-commit to install For some reason I have a setup.cfg file in the root directory of my repo for my app where the parameter **install_scripts** is set to **/usr/sbin**. This prevent pre-commit to set up and crash. Here is a repro in a fresh git repository containing only **setup.cfg** file and **.pre-commit-config.yaml** (for the [install guide](http://pre-commit.com/#install) <pre> $ mkdir repro; cd repro $ git init Dépôt Git vide initialisé dans /home/wilfried/repro/.git/ $ pre-commit clean Cleaned /home/wilfried/.pre-commit. $ pre-commit install pre-commit installed at /home/wilfried/repro/.git/hooks/pre-commit $ cat setup.cfg [install] install_scripts=/usr/sbin $ cat .pre-commit-config.yaml - repo: git://github.com/pre-commit/pre-commit-hooks sha: v0.4.2 hooks: - id: trailing-whitespace </pre> Now, with those two files setup, I try to run a simple pre-commit run which try to initiate the virtualenv. <pre> $ pre-commit run --all-files [INFO] Initializing environment for git://github.com/pre-commit/pre-commit-hooks. [INFO] Installing environment for git://github.com/pre-commit/pre-commit-hooks. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... An unexpected error has occurred: CalledProcessError: Command: ('/usr/bin/python', '-m', 'virtualenv', '/home/wilfried/.pre-commit/repoaXLSIv/py_env-default', '-p', '/usr/bin/python2.7') Return code: 1 Expected return code: 0 Output: New python executable in /home/wilfried/.pre-commit/repoaXLSIv/py_env-default/bin/python2.7 Also creating executable in /home/wilfried/.pre-commit/repoaXLSIv/py_env-default/bin/python Installing setuptools, pip, wheel... Complete output from command /home/wilfried/.pre-...efault/bin/python2.7 - setuptools pip wheel: ...Installing setuptools, pip, wheel...done. Running virtualenv with interpreter /usr/bin/python2.7 Errors: Traceback (most recent call last): File "/usr/local/lib/python2.7/dist-packages/virtualenv.py", line 2327, in <module> main() File "/usr/local/lib/python2.7/dist-packages/virtualenv.py", line 711, in main symlink=options.symlink) File "/usr/local/lib/python2.7/dist-packages/virtualenv.py", line 944, in create_environment download=download, File "/usr/local/lib/python2.7/dist-packages/virtualenv.py", line 900, in install_wheel call_subprocess(cmd, show_stdout=False, extra_env=env, stdin=SCRIPT) File "/usr/local/lib/python2.7/dist-packages/virtualenv.py", line 792, in call_subprocess logger.notify('\n'.join(all_output) + '\n----------------------------------------') File "/usr/local/lib/python2.7/dist-packages/virtualenv.py", line 199, in notify self.log(self.NOTIFY, msg, *args, **kw) File "/usr/local/lib/python2.7/dist-packages/virtualenv.py", line 231, in log consumer.write(rendered+'\n') UnicodeEncodeError: 'ascii' codec can't encode character u'\xe9' in position 2254: ordinal not in range(128) Check the log at ~/.pre-commit/pre-commit.log </pre> You'll find the content on pre-commit.log on [pastebin](http://pastebin.com/Ls61EQDj). Now the if I comment out the install_scripts parameter, everything works fine <pre> $ cat setup.cfg [install] #install_scripts=/usr/sbin $ pre-commit clean Cleaned /home/wilfried/.pre-commit. $ pre-commit run --all-files [INFO] Initializing environment for git://github.com/pre-commit/pre-commit-hooks. [INFO] Installing environment for git://github.com/pre-commit/pre-commit-hooks. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... Trim Trailing Whitespace.............................(no files to check)Skipped </pre> I'm running on a linux mint 18, with python 2.7.12 and pre-commit 0.9.2 <pre> $ python --version Python 2.7.12 $ pre-commit --version pre-commit 0.9.2 </pre> Let my know if you need anything else. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pre_commit/languages/python.py` Content: ``` 1 from __future__ import unicode_literals 2 3 import contextlib 4 import distutils.spawn 5 import os 6 import sys 7 8 from pre_commit.envcontext import envcontext 9 from pre_commit.envcontext import UNSET 10 from pre_commit.envcontext import Var 11 from pre_commit.languages import helpers 12 from pre_commit.util import clean_path_on_failure 13 from pre_commit.xargs import xargs 14 15 16 ENVIRONMENT_DIR = 'py_env' 17 18 19 def bin_dir(venv): 20 """On windows there's a different directory for the virtualenv""" 21 bin_part = 'Scripts' if os.name == 'nt' else 'bin' 22 return os.path.join(venv, bin_part) 23 24 25 def get_env_patch(venv): 26 return ( 27 ('PYTHONHOME', UNSET), 28 ('VIRTUAL_ENV', venv), 29 ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))), 30 ) 31 32 33 @contextlib.contextmanager 34 def in_env(repo_cmd_runner, language_version): 35 envdir = os.path.join( 36 repo_cmd_runner.prefix_dir, 37 helpers.environment_dir(ENVIRONMENT_DIR, language_version), 38 ) 39 with envcontext(get_env_patch(envdir)): 40 yield 41 42 43 def norm_version(version): 44 if os.name == 'nt': # pragma: no cover (windows) 45 # Try looking up by name 46 if distutils.spawn.find_executable(version): 47 return version 48 49 # If it is in the form pythonx.x search in the default 50 # place on windows 51 if version.startswith('python'): 52 return r'C:\{}\python.exe'.format(version.replace('.', '')) 53 54 # Otherwise assume it is a path 55 return os.path.expanduser(version) 56 57 58 def install_environment( 59 repo_cmd_runner, 60 version='default', 61 additional_dependencies=(), 62 ): 63 additional_dependencies = tuple(additional_dependencies) 64 directory = helpers.environment_dir(ENVIRONMENT_DIR, version) 65 66 # Install a virtualenv 67 with clean_path_on_failure(repo_cmd_runner.path(directory)): 68 venv_cmd = [ 69 sys.executable, '-m', 'virtualenv', 70 '{{prefix}}{}'.format(directory) 71 ] 72 if version != 'default': 73 venv_cmd.extend(['-p', norm_version(version)]) 74 else: 75 venv_cmd.extend(['-p', os.path.realpath(sys.executable)]) 76 repo_cmd_runner.run(venv_cmd) 77 with in_env(repo_cmd_runner, version): 78 helpers.run_setup_cmd( 79 repo_cmd_runner, 80 ('pip', 'install', '.') + additional_dependencies, 81 ) 82 83 84 def run_hook(repo_cmd_runner, hook, file_args): 85 with in_env(repo_cmd_runner, hook['language_version']): 86 return xargs((hook['entry'],) + tuple(hook['args']), file_args) 87 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pre_commit/languages/python.py b/pre_commit/languages/python.py --- a/pre_commit/languages/python.py +++ b/pre_commit/languages/python.py @@ -73,7 +73,7 @@ venv_cmd.extend(['-p', norm_version(version)]) else: venv_cmd.extend(['-p', os.path.realpath(sys.executable)]) - repo_cmd_runner.run(venv_cmd) + repo_cmd_runner.run(venv_cmd, cwd='/') with in_env(repo_cmd_runner, version): helpers.run_setup_cmd( repo_cmd_runner,
{"golden_diff": "diff --git a/pre_commit/languages/python.py b/pre_commit/languages/python.py\n--- a/pre_commit/languages/python.py\n+++ b/pre_commit/languages/python.py\n@@ -73,7 +73,7 @@\n venv_cmd.extend(['-p', norm_version(version)])\n else:\n venv_cmd.extend(['-p', os.path.realpath(sys.executable)])\n- repo_cmd_runner.run(venv_cmd)\n+ repo_cmd_runner.run(venv_cmd, cwd='/')\n with in_env(repo_cmd_runner, version):\n helpers.run_setup_cmd(\n repo_cmd_runner,\n", "issue": "setup.cfg prevent pre-commit to install\nFor some reason I have a setup.cfg file in the root directory of my repo for my app where the parameter **install_scripts** is set to **/usr/sbin**. This prevent pre-commit to set up and crash.\r\nHere is a repro in a fresh git repository containing only **setup.cfg** file and **.pre-commit-config.yaml** (for the [install guide](http://pre-commit.com/#install)\r\n\r\n<pre>\r\n$ mkdir repro; cd repro\r\n$ git init\r\nD\u00e9p\u00f4t Git vide initialis\u00e9 dans /home/wilfried/repro/.git/\r\n$ pre-commit clean\r\nCleaned /home/wilfried/.pre-commit.\r\n$ pre-commit install\r\npre-commit installed at /home/wilfried/repro/.git/hooks/pre-commit\r\n$ cat setup.cfg \r\n[install]\r\ninstall_scripts=/usr/sbin\r\n$ cat .pre-commit-config.yaml \r\n- repo: git://github.com/pre-commit/pre-commit-hooks\r\n sha: v0.4.2\r\n hooks:\r\n - id: trailing-whitespace\r\n</pre>\r\n\r\nNow, with those two files setup, I try to run a simple pre-commit run which try to initiate the virtualenv.\r\n<pre>\r\n$ pre-commit run --all-files\r\n[INFO] Initializing environment for git://github.com/pre-commit/pre-commit-hooks.\r\n[INFO] Installing environment for git://github.com/pre-commit/pre-commit-hooks.\r\n[INFO] Once installed this environment will be reused.\r\n[INFO] This may take a few minutes...\r\nAn unexpected error has occurred: CalledProcessError: Command: ('/usr/bin/python', '-m', 'virtualenv', '/home/wilfried/.pre-commit/repoaXLSIv/py_env-default', '-p', '/usr/bin/python2.7')\r\nReturn code: 1\r\nExpected return code: 0\r\nOutput: \r\n New python executable in /home/wilfried/.pre-commit/repoaXLSIv/py_env-default/bin/python2.7\r\n Also creating executable in /home/wilfried/.pre-commit/repoaXLSIv/py_env-default/bin/python\r\n Installing setuptools, pip, wheel...\r\n Complete output from command /home/wilfried/.pre-...efault/bin/python2.7 - setuptools pip wheel:\r\n ...Installing setuptools, pip, wheel...done.\r\n Running virtualenv with interpreter /usr/bin/python2.7\r\n \r\nErrors: \r\n Traceback (most recent call last):\r\n File \"/usr/local/lib/python2.7/dist-packages/virtualenv.py\", line 2327, in <module>\r\n main()\r\n File \"/usr/local/lib/python2.7/dist-packages/virtualenv.py\", line 711, in main\r\n symlink=options.symlink)\r\n File \"/usr/local/lib/python2.7/dist-packages/virtualenv.py\", line 944, in create_environment\r\n download=download,\r\n File \"/usr/local/lib/python2.7/dist-packages/virtualenv.py\", line 900, in install_wheel\r\n call_subprocess(cmd, show_stdout=False, extra_env=env, stdin=SCRIPT)\r\n File \"/usr/local/lib/python2.7/dist-packages/virtualenv.py\", line 792, in call_subprocess\r\n logger.notify('\\n'.join(all_output) + '\\n----------------------------------------')\r\n File \"/usr/local/lib/python2.7/dist-packages/virtualenv.py\", line 199, in notify\r\n self.log(self.NOTIFY, msg, *args, **kw)\r\n File \"/usr/local/lib/python2.7/dist-packages/virtualenv.py\", line 231, in log\r\n consumer.write(rendered+'\\n')\r\n UnicodeEncodeError: 'ascii' codec can't encode character u'\\xe9' in position 2254: ordinal not in range(128)\r\n \r\n\r\nCheck the log at ~/.pre-commit/pre-commit.log\r\n</pre>\r\n\r\nYou'll find the content on pre-commit.log on [pastebin](http://pastebin.com/Ls61EQDj).\r\n\r\nNow the if I comment out the install_scripts parameter, everything works fine\r\n\r\n<pre>\r\n$ cat setup.cfg \r\n[install]\r\n#install_scripts=/usr/sbin\r\n$ pre-commit clean\r\nCleaned /home/wilfried/.pre-commit.\r\n$ pre-commit run --all-files\r\n[INFO] Initializing environment for git://github.com/pre-commit/pre-commit-hooks.\r\n[INFO] Installing environment for git://github.com/pre-commit/pre-commit-hooks.\r\n[INFO] Once installed this environment will be reused.\r\n[INFO] This may take a few minutes...\r\nTrim Trailing Whitespace.............................(no files to check)Skipped\r\n</pre>\r\n\r\nI'm running on a linux mint 18, with python 2.7.12 and pre-commit 0.9.2\r\n<pre>\r\n$ python --version\r\nPython 2.7.12\r\n$ pre-commit --version\r\npre-commit 0.9.2\r\n</pre>\r\n\r\nLet my know if you need anything else.\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport distutils.spawn\nimport os\nimport sys\n\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import UNSET\nfrom pre_commit.envcontext import Var\nfrom pre_commit.languages import helpers\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.xargs import xargs\n\n\nENVIRONMENT_DIR = 'py_env'\n\n\ndef bin_dir(venv):\n \"\"\"On windows there's a different directory for the virtualenv\"\"\"\n bin_part = 'Scripts' if os.name == 'nt' else 'bin'\n return os.path.join(venv, bin_part)\n\n\ndef get_env_patch(venv):\n return (\n ('PYTHONHOME', UNSET),\n ('VIRTUAL_ENV', venv),\n ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),\n )\n\n\[email protected]\ndef in_env(repo_cmd_runner, language_version):\n envdir = os.path.join(\n repo_cmd_runner.prefix_dir,\n helpers.environment_dir(ENVIRONMENT_DIR, language_version),\n )\n with envcontext(get_env_patch(envdir)):\n yield\n\n\ndef norm_version(version):\n if os.name == 'nt': # pragma: no cover (windows)\n # Try looking up by name\n if distutils.spawn.find_executable(version):\n return version\n\n # If it is in the form pythonx.x search in the default\n # place on windows\n if version.startswith('python'):\n return r'C:\\{}\\python.exe'.format(version.replace('.', ''))\n\n # Otherwise assume it is a path\n return os.path.expanduser(version)\n\n\ndef install_environment(\n repo_cmd_runner,\n version='default',\n additional_dependencies=(),\n):\n additional_dependencies = tuple(additional_dependencies)\n directory = helpers.environment_dir(ENVIRONMENT_DIR, version)\n\n # Install a virtualenv\n with clean_path_on_failure(repo_cmd_runner.path(directory)):\n venv_cmd = [\n sys.executable, '-m', 'virtualenv',\n '{{prefix}}{}'.format(directory)\n ]\n if version != 'default':\n venv_cmd.extend(['-p', norm_version(version)])\n else:\n venv_cmd.extend(['-p', os.path.realpath(sys.executable)])\n repo_cmd_runner.run(venv_cmd)\n with in_env(repo_cmd_runner, version):\n helpers.run_setup_cmd(\n repo_cmd_runner,\n ('pip', 'install', '.') + additional_dependencies,\n )\n\n\ndef run_hook(repo_cmd_runner, hook, file_args):\n with in_env(repo_cmd_runner, hook['language_version']):\n return xargs((hook['entry'],) + tuple(hook['args']), file_args)\n", "path": "pre_commit/languages/python.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport distutils.spawn\nimport os\nimport sys\n\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import UNSET\nfrom pre_commit.envcontext import Var\nfrom pre_commit.languages import helpers\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.xargs import xargs\n\n\nENVIRONMENT_DIR = 'py_env'\n\n\ndef bin_dir(venv):\n \"\"\"On windows there's a different directory for the virtualenv\"\"\"\n bin_part = 'Scripts' if os.name == 'nt' else 'bin'\n return os.path.join(venv, bin_part)\n\n\ndef get_env_patch(venv):\n return (\n ('PYTHONHOME', UNSET),\n ('VIRTUAL_ENV', venv),\n ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),\n )\n\n\[email protected]\ndef in_env(repo_cmd_runner, language_version):\n envdir = os.path.join(\n repo_cmd_runner.prefix_dir,\n helpers.environment_dir(ENVIRONMENT_DIR, language_version),\n )\n with envcontext(get_env_patch(envdir)):\n yield\n\n\ndef norm_version(version):\n if os.name == 'nt': # pragma: no cover (windows)\n # Try looking up by name\n if distutils.spawn.find_executable(version):\n return version\n\n # If it is in the form pythonx.x search in the default\n # place on windows\n if version.startswith('python'):\n return r'C:\\{}\\python.exe'.format(version.replace('.', ''))\n\n # Otherwise assume it is a path\n return os.path.expanduser(version)\n\n\ndef install_environment(\n repo_cmd_runner,\n version='default',\n additional_dependencies=(),\n):\n additional_dependencies = tuple(additional_dependencies)\n directory = helpers.environment_dir(ENVIRONMENT_DIR, version)\n\n # Install a virtualenv\n with clean_path_on_failure(repo_cmd_runner.path(directory)):\n venv_cmd = [\n sys.executable, '-m', 'virtualenv',\n '{{prefix}}{}'.format(directory)\n ]\n if version != 'default':\n venv_cmd.extend(['-p', norm_version(version)])\n else:\n venv_cmd.extend(['-p', os.path.realpath(sys.executable)])\n repo_cmd_runner.run(venv_cmd, cwd='/')\n with in_env(repo_cmd_runner, version):\n helpers.run_setup_cmd(\n repo_cmd_runner,\n ('pip', 'install', '.') + additional_dependencies,\n )\n\n\ndef run_hook(repo_cmd_runner, hook, file_args):\n with in_env(repo_cmd_runner, hook['language_version']):\n return xargs((hook['entry'],) + tuple(hook['args']), file_args)\n", "path": "pre_commit/languages/python.py"}]}
2,053
126
gh_patches_debug_16285
rasdani/github-patches
git_diff
statsmodels__statsmodels-4197
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Issue in removing nan's from the trend variable Hi, I am trying to use your code given in the link: https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/seasonal.py to remove the nan's which occur due to the moving averages. When i put the variable extrapolate_trend >0 then i get a value error saying: ValueError: could not broadcast input array from shape (26,1) into shape (26) I want to extrapolate the trend values for a time series at a week level --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `statsmodels/tsa/seasonal.py` Content: ``` 1 """ 2 Seasonal Decomposition by Moving Averages 3 """ 4 from statsmodels.compat.python import lmap, range, iteritems 5 import numpy as np 6 from pandas.core.nanops import nanmean as pd_nanmean 7 from .filters._utils import (_maybe_get_pandas_wrapper_freq, 8 _maybe_get_pandas_wrapper) 9 from .filters.filtertools import convolution_filter 10 from statsmodels.tsa.tsatools import freq_to_period 11 12 13 def seasonal_mean(x, freq): 14 """ 15 Return means for each period in x. freq is an int that gives the 16 number of periods per cycle. E.g., 12 for monthly. NaNs are ignored 17 in the mean. 18 """ 19 return np.array([pd_nanmean(x[i::freq], axis=0) for i in range(freq)]) 20 21 22 def _extrapolate_trend(trend, npoints): 23 """ 24 Replace nan values on trend's end-points with least-squares extrapolated 25 values with regression considering npoints closest defined points. 26 """ 27 front = next(i for i, vals in enumerate(trend) 28 if not np.any(np.isnan(vals))) 29 back = trend.shape[0] - 1 - next(i for i, vals in enumerate(trend[::-1]) 30 if not np.any(np.isnan(vals))) 31 front_last = min(front + npoints, back) 32 back_first = max(front, back - npoints) 33 34 k, n = np.linalg.lstsq( 35 np.c_[np.arange(front, front_last), np.ones(front_last - front)], 36 trend[front:front_last])[0] 37 trend[:front] = (np.arange(0, front) * np.c_[k] + np.c_[n]).T 38 39 k, n = np.linalg.lstsq( 40 np.c_[np.arange(back_first, back), np.ones(back - back_first)], 41 trend[back_first:back])[0] 42 trend[back + 1:] = (np.arange(back + 1, trend.shape[0]) * np.c_[k] + np.c_[n]).T 43 44 return trend 45 46 47 def seasonal_decompose(x, model="additive", filt=None, freq=None, two_sided=True, 48 extrapolate_trend=0): 49 """ 50 Seasonal decomposition using moving averages 51 52 Parameters 53 ---------- 54 x : array-like 55 Time series. If 2d, individual series are in columns. 56 model : str {"additive", "multiplicative"} 57 Type of seasonal component. Abbreviations are accepted. 58 filt : array-like 59 The filter coefficients for filtering out the seasonal component. 60 The concrete moving average method used in filtering is determined by two_sided. 61 freq : int, optional 62 Frequency of the series. Must be used if x is not a pandas object. 63 Overrides default periodicity of x if x is a pandas 64 object with a timeseries index. 65 two_sided : bool 66 The moving average method used in filtering. 67 If True (default), a centered moving average is computed using the filt. 68 If False, the filter coefficients are for past values only. 69 extrapolate_trend : int or 'freq', optional 70 If set to > 0, the trend resulting from the convolution is 71 linear least-squares extrapolated on both ends (or the single one 72 if two_sided is False) considering this many (+1) closest points. 73 If set to 'freq', use `freq` closest points. Setting this parameter 74 results in no NaN values in trend or resid components. 75 76 Returns 77 ------- 78 results : obj 79 A object with seasonal, trend, and resid attributes. 80 81 Notes 82 ----- 83 This is a naive decomposition. More sophisticated methods should 84 be preferred. 85 86 The additive model is Y[t] = T[t] + S[t] + e[t] 87 88 The multiplicative model is Y[t] = T[t] * S[t] * e[t] 89 90 The seasonal component is first removed by applying a convolution 91 filter to the data. The average of this smoothed series for each 92 period is the returned seasonal component. 93 94 See Also 95 -------- 96 statsmodels.tsa.filters.bk_filter.bkfilter 97 statsmodels.tsa.filters.cf_filter.xffilter 98 statsmodels.tsa.filters.hp_filter.hpfilter 99 statsmodels.tsa.filters.convolution_filter 100 """ 101 if freq is None: 102 _pandas_wrapper, pfreq = _maybe_get_pandas_wrapper_freq(x) 103 else: 104 _pandas_wrapper = _maybe_get_pandas_wrapper(x) 105 pfreq = None 106 x = np.asanyarray(x).squeeze() 107 nobs = len(x) 108 109 if not np.all(np.isfinite(x)): 110 raise ValueError("This function does not handle missing values") 111 if model.startswith('m'): 112 if np.any(x <= 0): 113 raise ValueError("Multiplicative seasonality is not appropriate " 114 "for zero and negative values") 115 116 if freq is None: 117 if pfreq is not None: 118 pfreq = freq_to_period(pfreq) 119 freq = pfreq 120 else: 121 raise ValueError("You must specify a freq or x must be a " 122 "pandas object with a timeseries index with " 123 "a freq not set to None") 124 125 if filt is None: 126 if freq % 2 == 0: # split weights at ends 127 filt = np.array([.5] + [1] * (freq - 1) + [.5]) / freq 128 else: 129 filt = np.repeat(1./freq, freq) 130 131 nsides = int(two_sided) + 1 132 trend = convolution_filter(x, filt, nsides) 133 134 if extrapolate_trend == 'freq': 135 extrapolate_trend = freq - 1 136 137 if extrapolate_trend > 0: 138 trend = _extrapolate_trend(trend, extrapolate_trend + 1) 139 140 if model.startswith('m'): 141 detrended = x / trend 142 else: 143 detrended = x - trend 144 145 period_averages = seasonal_mean(detrended, freq) 146 147 if model.startswith('m'): 148 period_averages /= np.mean(period_averages, axis=0) 149 else: 150 period_averages -= np.mean(period_averages, axis=0) 151 152 seasonal = np.tile(period_averages.T, nobs // freq + 1).T[:nobs] 153 154 if model.startswith('m'): 155 resid = x / seasonal / trend 156 else: 157 resid = detrended - seasonal 158 159 results = lmap(_pandas_wrapper, [seasonal, trend, resid, x]) 160 return DecomposeResult(seasonal=results[0], trend=results[1], 161 resid=results[2], observed=results[3]) 162 163 164 class DecomposeResult(object): 165 def __init__(self, **kwargs): 166 for key, value in iteritems(kwargs): 167 setattr(self, key, value) 168 self.nobs = len(self.observed) 169 170 def plot(self): 171 from statsmodels.graphics.utils import _import_mpl 172 plt = _import_mpl() 173 fig, axes = plt.subplots(4, 1, sharex=True) 174 if hasattr(self.observed, 'plot'): # got pandas use it 175 self.observed.plot(ax=axes[0], legend=False) 176 axes[0].set_ylabel('Observed') 177 self.trend.plot(ax=axes[1], legend=False) 178 axes[1].set_ylabel('Trend') 179 self.seasonal.plot(ax=axes[2], legend=False) 180 axes[2].set_ylabel('Seasonal') 181 self.resid.plot(ax=axes[3], legend=False) 182 axes[3].set_ylabel('Residual') 183 else: 184 axes[0].plot(self.observed) 185 axes[0].set_ylabel('Observed') 186 axes[1].plot(self.trend) 187 axes[1].set_ylabel('Trend') 188 axes[2].plot(self.seasonal) 189 axes[2].set_ylabel('Seasonal') 190 axes[3].plot(self.resid) 191 axes[3].set_ylabel('Residual') 192 axes[3].set_xlabel('Time') 193 axes[3].set_xlim(0, self.nobs) 194 195 fig.tight_layout() 196 return fig 197 198 199 if __name__ == "__main__": 200 x = np.array([-50, 175, 149, 214, 247, 237, 225, 329, 729, 809, 201 530, 489, 540, 457, 195, 176, 337, 239, 128, 102, 202 232, 429, 3, 98, 43, -141, -77, -13, 125, 361, -45, 184]) 203 results = seasonal_decompose(x, freq=4) 204 205 from pandas import DataFrame, DatetimeIndex 206 data = DataFrame(x, DatetimeIndex(start='1/1/1951', 207 periods=len(x), 208 freq='Q')) 209 210 res = seasonal_decompose(data) 211 212 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/statsmodels/tsa/seasonal.py b/statsmodels/tsa/seasonal.py --- a/statsmodels/tsa/seasonal.py +++ b/statsmodels/tsa/seasonal.py @@ -34,12 +34,18 @@ k, n = np.linalg.lstsq( np.c_[np.arange(front, front_last), np.ones(front_last - front)], trend[front:front_last])[0] - trend[:front] = (np.arange(0, front) * np.c_[k] + np.c_[n]).T + extra = (np.arange(0, front) * np.c_[k] + np.c_[n]).T + if trend.ndim == 1: + extra = extra.squeeze() + trend[:front] = extra k, n = np.linalg.lstsq( np.c_[np.arange(back_first, back), np.ones(back - back_first)], trend[back_first:back])[0] - trend[back + 1:] = (np.arange(back + 1, trend.shape[0]) * np.c_[k] + np.c_[n]).T + extra = (np.arange(back + 1, trend.shape[0]) * np.c_[k] + np.c_[n]).T + if trend.ndim == 1: + extra = extra.squeeze() + trend[back + 1:] = extra return trend
{"golden_diff": "diff --git a/statsmodels/tsa/seasonal.py b/statsmodels/tsa/seasonal.py\n--- a/statsmodels/tsa/seasonal.py\n+++ b/statsmodels/tsa/seasonal.py\n@@ -34,12 +34,18 @@\n k, n = np.linalg.lstsq(\n np.c_[np.arange(front, front_last), np.ones(front_last - front)],\n trend[front:front_last])[0]\n- trend[:front] = (np.arange(0, front) * np.c_[k] + np.c_[n]).T\n+ extra = (np.arange(0, front) * np.c_[k] + np.c_[n]).T\n+ if trend.ndim == 1:\n+ extra = extra.squeeze()\n+ trend[:front] = extra\n \n k, n = np.linalg.lstsq(\n np.c_[np.arange(back_first, back), np.ones(back - back_first)],\n trend[back_first:back])[0]\n- trend[back + 1:] = (np.arange(back + 1, trend.shape[0]) * np.c_[k] + np.c_[n]).T\n+ extra = (np.arange(back + 1, trend.shape[0]) * np.c_[k] + np.c_[n]).T\n+ if trend.ndim == 1:\n+ extra = extra.squeeze()\n+ trend[back + 1:] = extra\n \n return trend\n", "issue": "Issue in removing nan's from the trend variable\nHi,\r\nI am trying to use your code given in the link: https://github.com/statsmodels/statsmodels/blob/master/statsmodels/tsa/seasonal.py\r\nto remove the nan's which occur due to the moving averages. When i put the variable extrapolate_trend >0 then i get a value error saying: \r\nValueError: could not broadcast input array from shape (26,1) into shape (26)\r\nI want to extrapolate the trend values for a time series at a week level\n", "before_files": [{"content": "\"\"\"\nSeasonal Decomposition by Moving Averages\n\"\"\"\nfrom statsmodels.compat.python import lmap, range, iteritems\nimport numpy as np\nfrom pandas.core.nanops import nanmean as pd_nanmean\nfrom .filters._utils import (_maybe_get_pandas_wrapper_freq,\n _maybe_get_pandas_wrapper)\nfrom .filters.filtertools import convolution_filter\nfrom statsmodels.tsa.tsatools import freq_to_period\n\n\ndef seasonal_mean(x, freq):\n \"\"\"\n Return means for each period in x. freq is an int that gives the\n number of periods per cycle. E.g., 12 for monthly. NaNs are ignored\n in the mean.\n \"\"\"\n return np.array([pd_nanmean(x[i::freq], axis=0) for i in range(freq)])\n\n\ndef _extrapolate_trend(trend, npoints):\n \"\"\"\n Replace nan values on trend's end-points with least-squares extrapolated\n values with regression considering npoints closest defined points.\n \"\"\"\n front = next(i for i, vals in enumerate(trend)\n if not np.any(np.isnan(vals)))\n back = trend.shape[0] - 1 - next(i for i, vals in enumerate(trend[::-1])\n if not np.any(np.isnan(vals)))\n front_last = min(front + npoints, back)\n back_first = max(front, back - npoints)\n\n k, n = np.linalg.lstsq(\n np.c_[np.arange(front, front_last), np.ones(front_last - front)],\n trend[front:front_last])[0]\n trend[:front] = (np.arange(0, front) * np.c_[k] + np.c_[n]).T\n\n k, n = np.linalg.lstsq(\n np.c_[np.arange(back_first, back), np.ones(back - back_first)],\n trend[back_first:back])[0]\n trend[back + 1:] = (np.arange(back + 1, trend.shape[0]) * np.c_[k] + np.c_[n]).T\n\n return trend\n\n\ndef seasonal_decompose(x, model=\"additive\", filt=None, freq=None, two_sided=True,\n extrapolate_trend=0):\n \"\"\"\n Seasonal decomposition using moving averages\n\n Parameters\n ----------\n x : array-like\n Time series. If 2d, individual series are in columns.\n model : str {\"additive\", \"multiplicative\"}\n Type of seasonal component. Abbreviations are accepted.\n filt : array-like\n The filter coefficients for filtering out the seasonal component.\n The concrete moving average method used in filtering is determined by two_sided.\n freq : int, optional\n Frequency of the series. Must be used if x is not a pandas object.\n Overrides default periodicity of x if x is a pandas\n object with a timeseries index.\n two_sided : bool\n The moving average method used in filtering.\n If True (default), a centered moving average is computed using the filt.\n If False, the filter coefficients are for past values only.\n extrapolate_trend : int or 'freq', optional\n If set to > 0, the trend resulting from the convolution is\n linear least-squares extrapolated on both ends (or the single one\n if two_sided is False) considering this many (+1) closest points.\n If set to 'freq', use `freq` closest points. Setting this parameter\n results in no NaN values in trend or resid components.\n\n Returns\n -------\n results : obj\n A object with seasonal, trend, and resid attributes.\n\n Notes\n -----\n This is a naive decomposition. More sophisticated methods should\n be preferred.\n\n The additive model is Y[t] = T[t] + S[t] + e[t]\n\n The multiplicative model is Y[t] = T[t] * S[t] * e[t]\n\n The seasonal component is first removed by applying a convolution\n filter to the data. The average of this smoothed series for each\n period is the returned seasonal component.\n\n See Also\n --------\n statsmodels.tsa.filters.bk_filter.bkfilter\n statsmodels.tsa.filters.cf_filter.xffilter\n statsmodels.tsa.filters.hp_filter.hpfilter\n statsmodels.tsa.filters.convolution_filter\n \"\"\"\n if freq is None:\n _pandas_wrapper, pfreq = _maybe_get_pandas_wrapper_freq(x)\n else:\n _pandas_wrapper = _maybe_get_pandas_wrapper(x)\n pfreq = None\n x = np.asanyarray(x).squeeze()\n nobs = len(x)\n\n if not np.all(np.isfinite(x)):\n raise ValueError(\"This function does not handle missing values\")\n if model.startswith('m'):\n if np.any(x <= 0):\n raise ValueError(\"Multiplicative seasonality is not appropriate \"\n \"for zero and negative values\")\n\n if freq is None:\n if pfreq is not None:\n pfreq = freq_to_period(pfreq)\n freq = pfreq\n else:\n raise ValueError(\"You must specify a freq or x must be a \"\n \"pandas object with a timeseries index with \"\n \"a freq not set to None\")\n\n if filt is None:\n if freq % 2 == 0: # split weights at ends\n filt = np.array([.5] + [1] * (freq - 1) + [.5]) / freq\n else:\n filt = np.repeat(1./freq, freq)\n\n nsides = int(two_sided) + 1\n trend = convolution_filter(x, filt, nsides)\n\n if extrapolate_trend == 'freq':\n extrapolate_trend = freq - 1\n\n if extrapolate_trend > 0:\n trend = _extrapolate_trend(trend, extrapolate_trend + 1)\n\n if model.startswith('m'):\n detrended = x / trend\n else:\n detrended = x - trend\n\n period_averages = seasonal_mean(detrended, freq)\n\n if model.startswith('m'):\n period_averages /= np.mean(period_averages, axis=0)\n else:\n period_averages -= np.mean(period_averages, axis=0)\n\n seasonal = np.tile(period_averages.T, nobs // freq + 1).T[:nobs]\n\n if model.startswith('m'):\n resid = x / seasonal / trend\n else:\n resid = detrended - seasonal\n\n results = lmap(_pandas_wrapper, [seasonal, trend, resid, x])\n return DecomposeResult(seasonal=results[0], trend=results[1],\n resid=results[2], observed=results[3])\n\n\nclass DecomposeResult(object):\n def __init__(self, **kwargs):\n for key, value in iteritems(kwargs):\n setattr(self, key, value)\n self.nobs = len(self.observed)\n\n def plot(self):\n from statsmodels.graphics.utils import _import_mpl\n plt = _import_mpl()\n fig, axes = plt.subplots(4, 1, sharex=True)\n if hasattr(self.observed, 'plot'): # got pandas use it\n self.observed.plot(ax=axes[0], legend=False)\n axes[0].set_ylabel('Observed')\n self.trend.plot(ax=axes[1], legend=False)\n axes[1].set_ylabel('Trend')\n self.seasonal.plot(ax=axes[2], legend=False)\n axes[2].set_ylabel('Seasonal')\n self.resid.plot(ax=axes[3], legend=False)\n axes[3].set_ylabel('Residual')\n else:\n axes[0].plot(self.observed)\n axes[0].set_ylabel('Observed')\n axes[1].plot(self.trend)\n axes[1].set_ylabel('Trend')\n axes[2].plot(self.seasonal)\n axes[2].set_ylabel('Seasonal')\n axes[3].plot(self.resid)\n axes[3].set_ylabel('Residual')\n axes[3].set_xlabel('Time')\n axes[3].set_xlim(0, self.nobs)\n\n fig.tight_layout()\n return fig\n\n\nif __name__ == \"__main__\":\n x = np.array([-50, 175, 149, 214, 247, 237, 225, 329, 729, 809,\n 530, 489, 540, 457, 195, 176, 337, 239, 128, 102,\n 232, 429, 3, 98, 43, -141, -77, -13, 125, 361, -45, 184])\n results = seasonal_decompose(x, freq=4)\n\n from pandas import DataFrame, DatetimeIndex\n data = DataFrame(x, DatetimeIndex(start='1/1/1951',\n periods=len(x),\n freq='Q'))\n\n res = seasonal_decompose(data)\n\n", "path": "statsmodels/tsa/seasonal.py"}], "after_files": [{"content": "\"\"\"\nSeasonal Decomposition by Moving Averages\n\"\"\"\nfrom statsmodels.compat.python import lmap, range, iteritems\nimport numpy as np\nfrom pandas.core.nanops import nanmean as pd_nanmean\nfrom .filters._utils import (_maybe_get_pandas_wrapper_freq,\n _maybe_get_pandas_wrapper)\nfrom .filters.filtertools import convolution_filter\nfrom statsmodels.tsa.tsatools import freq_to_period\n\n\ndef seasonal_mean(x, freq):\n \"\"\"\n Return means for each period in x. freq is an int that gives the\n number of periods per cycle. E.g., 12 for monthly. NaNs are ignored\n in the mean.\n \"\"\"\n return np.array([pd_nanmean(x[i::freq], axis=0) for i in range(freq)])\n\n\ndef _extrapolate_trend(trend, npoints):\n \"\"\"\n Replace nan values on trend's end-points with least-squares extrapolated\n values with regression considering npoints closest defined points.\n \"\"\"\n front = next(i for i, vals in enumerate(trend)\n if not np.any(np.isnan(vals)))\n back = trend.shape[0] - 1 - next(i for i, vals in enumerate(trend[::-1])\n if not np.any(np.isnan(vals)))\n front_last = min(front + npoints, back)\n back_first = max(front, back - npoints)\n\n k, n = np.linalg.lstsq(\n np.c_[np.arange(front, front_last), np.ones(front_last - front)],\n trend[front:front_last])[0]\n extra = (np.arange(0, front) * np.c_[k] + np.c_[n]).T\n if trend.ndim == 1:\n extra = extra.squeeze()\n trend[:front] = extra\n\n k, n = np.linalg.lstsq(\n np.c_[np.arange(back_first, back), np.ones(back - back_first)],\n trend[back_first:back])[0]\n extra = (np.arange(back + 1, trend.shape[0]) * np.c_[k] + np.c_[n]).T\n if trend.ndim == 1:\n extra = extra.squeeze()\n trend[back + 1:] = extra\n\n return trend\n\n\ndef seasonal_decompose(x, model=\"additive\", filt=None, freq=None, two_sided=True,\n extrapolate_trend=0):\n \"\"\"\n Seasonal decomposition using moving averages\n\n Parameters\n ----------\n x : array-like\n Time series. If 2d, individual series are in columns.\n model : str {\"additive\", \"multiplicative\"}\n Type of seasonal component. Abbreviations are accepted.\n filt : array-like\n The filter coefficients for filtering out the seasonal component.\n The concrete moving average method used in filtering is determined by two_sided.\n freq : int, optional\n Frequency of the series. Must be used if x is not a pandas object.\n Overrides default periodicity of x if x is a pandas\n object with a timeseries index.\n two_sided : bool\n The moving average method used in filtering.\n If True (default), a centered moving average is computed using the filt.\n If False, the filter coefficients are for past values only.\n extrapolate_trend : int or 'freq', optional\n If set to > 0, the trend resulting from the convolution is\n linear least-squares extrapolated on both ends (or the single one\n if two_sided is False) considering this many (+1) closest points.\n If set to 'freq', use `freq` closest points. Setting this parameter\n results in no NaN values in trend or resid components.\n\n Returns\n -------\n results : obj\n A object with seasonal, trend, and resid attributes.\n\n Notes\n -----\n This is a naive decomposition. More sophisticated methods should\n be preferred.\n\n The additive model is Y[t] = T[t] + S[t] + e[t]\n\n The multiplicative model is Y[t] = T[t] * S[t] * e[t]\n\n The seasonal component is first removed by applying a convolution\n filter to the data. The average of this smoothed series for each\n period is the returned seasonal component.\n\n See Also\n --------\n statsmodels.tsa.filters.bk_filter.bkfilter\n statsmodels.tsa.filters.cf_filter.xffilter\n statsmodels.tsa.filters.hp_filter.hpfilter\n statsmodels.tsa.filters.convolution_filter\n \"\"\"\n if freq is None:\n _pandas_wrapper, pfreq = _maybe_get_pandas_wrapper_freq(x)\n else:\n _pandas_wrapper = _maybe_get_pandas_wrapper(x)\n pfreq = None\n x = np.asanyarray(x).squeeze()\n nobs = len(x)\n\n if not np.all(np.isfinite(x)):\n raise ValueError(\"This function does not handle missing values\")\n if model.startswith('m'):\n if np.any(x <= 0):\n raise ValueError(\"Multiplicative seasonality is not appropriate \"\n \"for zero and negative values\")\n\n if freq is None:\n if pfreq is not None:\n pfreq = freq_to_period(pfreq)\n freq = pfreq\n else:\n raise ValueError(\"You must specify a freq or x must be a \"\n \"pandas object with a timeseries index with \"\n \"a freq not set to None\")\n\n if filt is None:\n if freq % 2 == 0: # split weights at ends\n filt = np.array([.5] + [1] * (freq - 1) + [.5]) / freq\n else:\n filt = np.repeat(1./freq, freq)\n\n nsides = int(two_sided) + 1\n trend = convolution_filter(x, filt, nsides)\n\n if extrapolate_trend == 'freq':\n extrapolate_trend = freq - 1\n\n if extrapolate_trend > 0:\n trend = _extrapolate_trend(trend, extrapolate_trend + 1)\n\n if model.startswith('m'):\n detrended = x / trend\n else:\n detrended = x - trend\n\n period_averages = seasonal_mean(detrended, freq)\n\n if model.startswith('m'):\n period_averages /= np.mean(period_averages, axis=0)\n else:\n period_averages -= np.mean(period_averages, axis=0)\n\n seasonal = np.tile(period_averages.T, nobs // freq + 1).T[:nobs]\n\n if model.startswith('m'):\n resid = x / seasonal / trend\n else:\n resid = detrended - seasonal\n\n results = lmap(_pandas_wrapper, [seasonal, trend, resid, x])\n return DecomposeResult(seasonal=results[0], trend=results[1],\n resid=results[2], observed=results[3])\n\n\nclass DecomposeResult(object):\n def __init__(self, **kwargs):\n for key, value in iteritems(kwargs):\n setattr(self, key, value)\n self.nobs = len(self.observed)\n\n def plot(self):\n from statsmodels.graphics.utils import _import_mpl\n plt = _import_mpl()\n fig, axes = plt.subplots(4, 1, sharex=True)\n if hasattr(self.observed, 'plot'): # got pandas use it\n self.observed.plot(ax=axes[0], legend=False)\n axes[0].set_ylabel('Observed')\n self.trend.plot(ax=axes[1], legend=False)\n axes[1].set_ylabel('Trend')\n self.seasonal.plot(ax=axes[2], legend=False)\n axes[2].set_ylabel('Seasonal')\n self.resid.plot(ax=axes[3], legend=False)\n axes[3].set_ylabel('Residual')\n else:\n axes[0].plot(self.observed)\n axes[0].set_ylabel('Observed')\n axes[1].plot(self.trend)\n axes[1].set_ylabel('Trend')\n axes[2].plot(self.seasonal)\n axes[2].set_ylabel('Seasonal')\n axes[3].plot(self.resid)\n axes[3].set_ylabel('Residual')\n axes[3].set_xlabel('Time')\n axes[3].set_xlim(0, self.nobs)\n\n fig.tight_layout()\n return fig\n\n\nif __name__ == \"__main__\":\n x = np.array([-50, 175, 149, 214, 247, 237, 225, 329, 729, 809,\n 530, 489, 540, 457, 195, 176, 337, 239, 128, 102,\n 232, 429, 3, 98, 43, -141, -77, -13, 125, 361, -45, 184])\n results = seasonal_decompose(x, freq=4)\n\n from pandas import DataFrame, DatetimeIndex\n data = DataFrame(x, DatetimeIndex(start='1/1/1951',\n periods=len(x),\n freq='Q'))\n\n res = seasonal_decompose(data)\n\n", "path": "statsmodels/tsa/seasonal.py"}]}
2,929
314
gh_patches_debug_16228
rasdani/github-patches
git_diff
mesonbuild__meson-6606
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- .hgtags included in dist archive **Describe the bug** When running `ninja dist` the resulting `tar.xz` includes the `.hgtags` files that distros (namely debian) will mark as a packaging error. **To Reproduce** Run `ninja dist` on a mercurial repository that has at least 1 tag. **Expected behavior** That the archive does not have the `.hgtags` file in it. I just did an `hg archive` and unfortunately saw that `.hgtags` was there as well. **system parameters** * Is this a [cross build](https://mesonbuild.com/Cross-compilation.html) or just a plain native build (for the same computer)? Native * what operating system (e.g. MacOS Catalina, Windows 10, CentOS 8.0, Ubuntu 18.04, etc.) Linux, Debian Unstable * what Python version are you using e.g. 3.8.0: 3.7.6 * what `meson --version`: 0.53.1 * what `ninja --version` if it's a Ninja build: 1.9.0 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mesonbuild/mdist.py` Content: ``` 1 # Copyright 2017 The Meson development team 2 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 7 # http://www.apache.org/licenses/LICENSE-2.0 8 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 16 import gzip 17 import os 18 import sys 19 import shutil 20 import subprocess 21 import hashlib 22 import json 23 from glob import glob 24 from pathlib import Path 25 from mesonbuild.environment import detect_ninja 26 from mesonbuild.mesonlib import windows_proof_rmtree, MesonException 27 from mesonbuild.wrap import wrap 28 from mesonbuild import mlog, build 29 30 archive_choices = ['gztar', 'xztar', 'zip'] 31 archive_extension = {'gztar': '.tar.gz', 32 'xztar': '.tar.xz', 33 'zip': '.zip'} 34 35 def add_arguments(parser): 36 parser.add_argument('-C', default='.', dest='wd', 37 help='directory to cd into before running') 38 parser.add_argument('--formats', default='xztar', 39 help='Comma separated list of archive types to create.') 40 parser.add_argument('--include-subprojects', action='store_true', 41 help='Include source code of subprojects that have been used for the build.') 42 43 44 def create_hash(fname): 45 hashname = fname + '.sha256sum' 46 m = hashlib.sha256() 47 m.update(open(fname, 'rb').read()) 48 with open(hashname, 'w') as f: 49 f.write('%s %s\n' % (m.hexdigest(), os.path.basename(fname))) 50 51 52 def del_gitfiles(dirname): 53 for f in glob(os.path.join(dirname, '.git*')): 54 if os.path.isdir(f) and not os.path.islink(f): 55 windows_proof_rmtree(f) 56 else: 57 os.unlink(f) 58 59 def process_submodules(dirname): 60 module_file = os.path.join(dirname, '.gitmodules') 61 if not os.path.exists(module_file): 62 return 63 subprocess.check_call(['git', 'submodule', 'update', '--init', '--recursive'], cwd=dirname) 64 for line in open(module_file): 65 line = line.strip() 66 if '=' not in line: 67 continue 68 k, v = line.split('=', 1) 69 k = k.strip() 70 v = v.strip() 71 if k != 'path': 72 continue 73 del_gitfiles(os.path.join(dirname, v)) 74 75 76 def run_dist_scripts(src_root, bld_root, dist_root, dist_scripts): 77 assert(os.path.isabs(dist_root)) 78 env = os.environ.copy() 79 env['MESON_DIST_ROOT'] = dist_root 80 env['MESON_SOURCE_ROOT'] = src_root 81 env['MESON_BUILD_ROOT'] = bld_root 82 for d in dist_scripts: 83 script = d['exe'] 84 args = d['args'] 85 name = ' '.join(script + args) 86 print('Running custom dist script {!r}'.format(name)) 87 try: 88 rc = subprocess.call(script + args, env=env) 89 if rc != 0: 90 sys.exit('Dist script errored out') 91 except OSError: 92 print('Failed to run dist script {!r}'.format(name)) 93 sys.exit(1) 94 95 def is_git(src_root): 96 _git = os.path.join(src_root, '.git') 97 return os.path.isdir(_git) or os.path.isfile(_git) 98 99 def git_have_dirty_index(src_root): 100 '''Check whether there are uncommitted changes in git''' 101 ret = subprocess.call(['git', '-C', src_root, 'diff-index', '--quiet', 'HEAD']) 102 return ret == 1 103 104 def git_clone(src_root, distdir): 105 if git_have_dirty_index(src_root): 106 mlog.warning('Repository has uncommitted changes that will not be included in the dist tarball') 107 if os.path.exists(distdir): 108 shutil.rmtree(distdir) 109 os.makedirs(distdir) 110 subprocess.check_call(['git', 'clone', '--shared', src_root, distdir]) 111 process_submodules(distdir) 112 del_gitfiles(distdir) 113 114 def create_dist_git(dist_name, archives, src_root, bld_root, dist_sub, dist_scripts, subprojects): 115 distdir = os.path.join(dist_sub, dist_name) 116 git_clone(src_root, distdir) 117 for path in subprojects: 118 sub_src_root = os.path.join(src_root, path) 119 sub_distdir = os.path.join(distdir, path) 120 if os.path.exists(sub_distdir): 121 continue 122 if is_git(sub_src_root): 123 git_clone(sub_src_root, sub_distdir) 124 else: 125 shutil.copytree(sub_src_root, sub_distdir) 126 run_dist_scripts(src_root, bld_root, distdir, dist_scripts) 127 output_names = [] 128 for a in archives: 129 compressed_name = distdir + archive_extension[a] 130 shutil.make_archive(distdir, a, root_dir=dist_sub, base_dir=dist_name) 131 output_names.append(compressed_name) 132 shutil.rmtree(distdir) 133 return output_names 134 135 def is_hg(src_root): 136 return os.path.isdir(os.path.join(src_root, '.hg')) 137 138 def hg_have_dirty_index(src_root): 139 '''Check whether there are uncommitted changes in hg''' 140 out = subprocess.check_output(['hg', '-R', src_root, 'summary']) 141 return b'commit: (clean)' not in out 142 143 def create_dist_hg(dist_name, archives, src_root, bld_root, dist_sub, dist_scripts): 144 if hg_have_dirty_index(src_root): 145 mlog.warning('Repository has uncommitted changes that will not be included in the dist tarball') 146 if dist_scripts: 147 mlog.warning('dist scripts are not supported in Mercurial projects') 148 149 os.makedirs(dist_sub, exist_ok=True) 150 tarname = os.path.join(dist_sub, dist_name + '.tar') 151 xzname = tarname + '.xz' 152 gzname = tarname + '.gz' 153 zipname = os.path.join(dist_sub, dist_name + '.zip') 154 subprocess.check_call(['hg', 'archive', '-R', src_root, '-S', '-t', 'tar', tarname]) 155 output_names = [] 156 if 'xztar' in archives: 157 import lzma 158 with lzma.open(xzname, 'wb') as xf, open(tarname, 'rb') as tf: 159 shutil.copyfileobj(tf, xf) 160 output_names.append(xzname) 161 if 'gztar' in archives: 162 with gzip.open(gzname, 'wb') as zf, open(tarname, 'rb') as tf: 163 shutil.copyfileobj(tf, zf) 164 output_names.append(gzname) 165 os.unlink(tarname) 166 if 'zip' in archives: 167 subprocess.check_call(['hg', 'archive', '-R', src_root, '-S', '-t', 'zip', zipname]) 168 output_names.append(zipname) 169 return output_names 170 171 172 def check_dist(packagename, meson_command, extra_meson_args, bld_root, privdir): 173 print('Testing distribution package %s' % packagename) 174 unpackdir = os.path.join(privdir, 'dist-unpack') 175 builddir = os.path.join(privdir, 'dist-build') 176 installdir = os.path.join(privdir, 'dist-install') 177 for p in (unpackdir, builddir, installdir): 178 if os.path.exists(p): 179 shutil.rmtree(p) 180 os.mkdir(p) 181 ninja_bin = detect_ninja() 182 try: 183 shutil.unpack_archive(packagename, unpackdir) 184 unpacked_files = glob(os.path.join(unpackdir, '*')) 185 assert(len(unpacked_files) == 1) 186 unpacked_src_dir = unpacked_files[0] 187 with open(os.path.join(bld_root, 'meson-info', 'intro-buildoptions.json')) as boptions: 188 meson_command += ['-D{name}={value}'.format(**o) for o in json.load(boptions) 189 if o['name'] not in ['backend', 'install_umask']] 190 meson_command += extra_meson_args 191 if subprocess.call(meson_command + ['--backend=ninja', unpacked_src_dir, builddir]) != 0: 192 print('Running Meson on distribution package failed') 193 return 1 194 if subprocess.call([ninja_bin], cwd=builddir) != 0: 195 print('Compiling the distribution package failed') 196 return 1 197 if subprocess.call([ninja_bin, 'test'], cwd=builddir) != 0: 198 print('Running unit tests on the distribution package failed') 199 return 1 200 myenv = os.environ.copy() 201 myenv['DESTDIR'] = installdir 202 if subprocess.call([ninja_bin, 'install'], cwd=builddir, env=myenv) != 0: 203 print('Installing the distribution package failed') 204 return 1 205 finally: 206 shutil.rmtree(unpackdir) 207 shutil.rmtree(builddir) 208 shutil.rmtree(installdir) 209 print('Distribution package %s tested' % packagename) 210 return 0 211 212 def determine_archives_to_generate(options): 213 result = [] 214 for i in options.formats.split(','): 215 if i not in archive_choices: 216 sys.exit('Value "{}" not one of permitted values {}.'.format(i, archive_choices)) 217 result.append(i) 218 if len(i) == 0: 219 sys.exit('No archive types specified.') 220 return result 221 222 def run(options): 223 options.wd = os.path.abspath(options.wd) 224 buildfile = Path(options.wd) / 'meson-private' / 'build.dat' 225 if not buildfile.is_file(): 226 raise MesonException('Directory {!r} does not seem to be a Meson build directory.'.format(options.wd)) 227 b = build.load(options.wd) 228 # This import must be load delayed, otherwise it will get the default 229 # value of None. 230 from mesonbuild.mesonlib import meson_command 231 src_root = b.environment.source_dir 232 bld_root = b.environment.build_dir 233 priv_dir = os.path.join(bld_root, 'meson-private') 234 dist_sub = os.path.join(bld_root, 'meson-dist') 235 236 dist_name = b.project_name + '-' + b.project_version 237 238 archives = determine_archives_to_generate(options) 239 240 subprojects = [] 241 extra_meson_args = [] 242 if options.include_subprojects: 243 subproject_dir = os.path.join(src_root, b.subproject_dir) 244 for sub in b.subprojects: 245 _, directory = wrap.get_directory(subproject_dir, sub) 246 subprojects.append(os.path.join(b.subproject_dir, directory)) 247 extra_meson_args.append('-Dwrap_mode=nodownload') 248 249 if is_git(src_root): 250 names = create_dist_git(dist_name, archives, src_root, bld_root, dist_sub, b.dist_scripts, subprojects) 251 elif is_hg(src_root): 252 if subprojects: 253 print('--include-subprojects option currently not supported with Mercurial') 254 return 1 255 names = create_dist_hg(dist_name, archives, src_root, bld_root, dist_sub, b.dist_scripts) 256 else: 257 print('Dist currently only works with Git or Mercurial repos') 258 return 1 259 if names is None: 260 return 1 261 # Check only one. 262 rc = check_dist(names[0], meson_command, extra_meson_args, bld_root, priv_dir) 263 if rc == 0: 264 for name in names: 265 create_hash(name) 266 return rc 267 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mesonbuild/mdist.py b/mesonbuild/mdist.py --- a/mesonbuild/mdist.py +++ b/mesonbuild/mdist.py @@ -151,7 +151,16 @@ xzname = tarname + '.xz' gzname = tarname + '.gz' zipname = os.path.join(dist_sub, dist_name + '.zip') - subprocess.check_call(['hg', 'archive', '-R', src_root, '-S', '-t', 'tar', tarname]) + # Note that -X interprets relative paths using the current working + # directory, not the repository root, so this must be an absolute path: + # https://bz.mercurial-scm.org/show_bug.cgi?id=6267 + # + # .hg[a-z]* is used instead of .hg* to keep .hg_archival.txt, which may + # be useful to link the tarball to the Mercurial revision for either + # manual inspection or in case any code interprets it for a --version or + # similar. + subprocess.check_call(['hg', 'archive', '-R', src_root, '-S', '-t', 'tar', + '-X', src_root + '/.hg[a-z]*', tarname]) output_names = [] if 'xztar' in archives: import lzma
{"golden_diff": "diff --git a/mesonbuild/mdist.py b/mesonbuild/mdist.py\n--- a/mesonbuild/mdist.py\n+++ b/mesonbuild/mdist.py\n@@ -151,7 +151,16 @@\n xzname = tarname + '.xz'\n gzname = tarname + '.gz'\n zipname = os.path.join(dist_sub, dist_name + '.zip')\n- subprocess.check_call(['hg', 'archive', '-R', src_root, '-S', '-t', 'tar', tarname])\n+ # Note that -X interprets relative paths using the current working\n+ # directory, not the repository root, so this must be an absolute path:\n+ # https://bz.mercurial-scm.org/show_bug.cgi?id=6267\n+ #\n+ # .hg[a-z]* is used instead of .hg* to keep .hg_archival.txt, which may\n+ # be useful to link the tarball to the Mercurial revision for either\n+ # manual inspection or in case any code interprets it for a --version or\n+ # similar.\n+ subprocess.check_call(['hg', 'archive', '-R', src_root, '-S', '-t', 'tar',\n+ '-X', src_root + '/.hg[a-z]*', tarname])\n output_names = []\n if 'xztar' in archives:\n import lzma\n", "issue": ".hgtags included in dist archive\n**Describe the bug**\r\n\r\nWhen running `ninja dist` the resulting `tar.xz` includes the `.hgtags` files that distros (namely debian) will mark as a packaging error.\r\n\r\n**To Reproduce**\r\n\r\nRun `ninja dist` on a mercurial repository that has at least 1 tag.\r\n\r\n**Expected behavior**\r\nThat the archive does not have the `.hgtags` file in it. I just did an `hg archive` and unfortunately saw that `.hgtags` was there as well.\r\n\r\n**system parameters**\r\n* Is this a [cross build](https://mesonbuild.com/Cross-compilation.html) or just a plain native build (for the same computer)? Native\r\n* what operating system (e.g. MacOS Catalina, Windows 10, CentOS 8.0, Ubuntu 18.04, etc.) Linux, Debian Unstable\r\n* what Python version are you using e.g. 3.8.0: 3.7.6\r\n* what `meson --version`: 0.53.1\r\n* what `ninja --version` if it's a Ninja build: 1.9.0\r\n\n", "before_files": [{"content": "# Copyright 2017 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport gzip\nimport os\nimport sys\nimport shutil\nimport subprocess\nimport hashlib\nimport json\nfrom glob import glob\nfrom pathlib import Path\nfrom mesonbuild.environment import detect_ninja\nfrom mesonbuild.mesonlib import windows_proof_rmtree, MesonException\nfrom mesonbuild.wrap import wrap\nfrom mesonbuild import mlog, build\n\narchive_choices = ['gztar', 'xztar', 'zip']\narchive_extension = {'gztar': '.tar.gz',\n 'xztar': '.tar.xz',\n 'zip': '.zip'}\n\ndef add_arguments(parser):\n parser.add_argument('-C', default='.', dest='wd',\n help='directory to cd into before running')\n parser.add_argument('--formats', default='xztar',\n help='Comma separated list of archive types to create.')\n parser.add_argument('--include-subprojects', action='store_true',\n help='Include source code of subprojects that have been used for the build.')\n\n\ndef create_hash(fname):\n hashname = fname + '.sha256sum'\n m = hashlib.sha256()\n m.update(open(fname, 'rb').read())\n with open(hashname, 'w') as f:\n f.write('%s %s\\n' % (m.hexdigest(), os.path.basename(fname)))\n\n\ndef del_gitfiles(dirname):\n for f in glob(os.path.join(dirname, '.git*')):\n if os.path.isdir(f) and not os.path.islink(f):\n windows_proof_rmtree(f)\n else:\n os.unlink(f)\n\ndef process_submodules(dirname):\n module_file = os.path.join(dirname, '.gitmodules')\n if not os.path.exists(module_file):\n return\n subprocess.check_call(['git', 'submodule', 'update', '--init', '--recursive'], cwd=dirname)\n for line in open(module_file):\n line = line.strip()\n if '=' not in line:\n continue\n k, v = line.split('=', 1)\n k = k.strip()\n v = v.strip()\n if k != 'path':\n continue\n del_gitfiles(os.path.join(dirname, v))\n\n\ndef run_dist_scripts(src_root, bld_root, dist_root, dist_scripts):\n assert(os.path.isabs(dist_root))\n env = os.environ.copy()\n env['MESON_DIST_ROOT'] = dist_root\n env['MESON_SOURCE_ROOT'] = src_root\n env['MESON_BUILD_ROOT'] = bld_root\n for d in dist_scripts:\n script = d['exe']\n args = d['args']\n name = ' '.join(script + args)\n print('Running custom dist script {!r}'.format(name))\n try:\n rc = subprocess.call(script + args, env=env)\n if rc != 0:\n sys.exit('Dist script errored out')\n except OSError:\n print('Failed to run dist script {!r}'.format(name))\n sys.exit(1)\n\ndef is_git(src_root):\n _git = os.path.join(src_root, '.git')\n return os.path.isdir(_git) or os.path.isfile(_git)\n\ndef git_have_dirty_index(src_root):\n '''Check whether there are uncommitted changes in git'''\n ret = subprocess.call(['git', '-C', src_root, 'diff-index', '--quiet', 'HEAD'])\n return ret == 1\n\ndef git_clone(src_root, distdir):\n if git_have_dirty_index(src_root):\n mlog.warning('Repository has uncommitted changes that will not be included in the dist tarball')\n if os.path.exists(distdir):\n shutil.rmtree(distdir)\n os.makedirs(distdir)\n subprocess.check_call(['git', 'clone', '--shared', src_root, distdir])\n process_submodules(distdir)\n del_gitfiles(distdir)\n\ndef create_dist_git(dist_name, archives, src_root, bld_root, dist_sub, dist_scripts, subprojects):\n distdir = os.path.join(dist_sub, dist_name)\n git_clone(src_root, distdir)\n for path in subprojects:\n sub_src_root = os.path.join(src_root, path)\n sub_distdir = os.path.join(distdir, path)\n if os.path.exists(sub_distdir):\n continue\n if is_git(sub_src_root):\n git_clone(sub_src_root, sub_distdir)\n else:\n shutil.copytree(sub_src_root, sub_distdir)\n run_dist_scripts(src_root, bld_root, distdir, dist_scripts)\n output_names = []\n for a in archives:\n compressed_name = distdir + archive_extension[a]\n shutil.make_archive(distdir, a, root_dir=dist_sub, base_dir=dist_name)\n output_names.append(compressed_name)\n shutil.rmtree(distdir)\n return output_names\n\ndef is_hg(src_root):\n return os.path.isdir(os.path.join(src_root, '.hg'))\n\ndef hg_have_dirty_index(src_root):\n '''Check whether there are uncommitted changes in hg'''\n out = subprocess.check_output(['hg', '-R', src_root, 'summary'])\n return b'commit: (clean)' not in out\n\ndef create_dist_hg(dist_name, archives, src_root, bld_root, dist_sub, dist_scripts):\n if hg_have_dirty_index(src_root):\n mlog.warning('Repository has uncommitted changes that will not be included in the dist tarball')\n if dist_scripts:\n mlog.warning('dist scripts are not supported in Mercurial projects')\n\n os.makedirs(dist_sub, exist_ok=True)\n tarname = os.path.join(dist_sub, dist_name + '.tar')\n xzname = tarname + '.xz'\n gzname = tarname + '.gz'\n zipname = os.path.join(dist_sub, dist_name + '.zip')\n subprocess.check_call(['hg', 'archive', '-R', src_root, '-S', '-t', 'tar', tarname])\n output_names = []\n if 'xztar' in archives:\n import lzma\n with lzma.open(xzname, 'wb') as xf, open(tarname, 'rb') as tf:\n shutil.copyfileobj(tf, xf)\n output_names.append(xzname)\n if 'gztar' in archives:\n with gzip.open(gzname, 'wb') as zf, open(tarname, 'rb') as tf:\n shutil.copyfileobj(tf, zf)\n output_names.append(gzname)\n os.unlink(tarname)\n if 'zip' in archives:\n subprocess.check_call(['hg', 'archive', '-R', src_root, '-S', '-t', 'zip', zipname])\n output_names.append(zipname)\n return output_names\n\n\ndef check_dist(packagename, meson_command, extra_meson_args, bld_root, privdir):\n print('Testing distribution package %s' % packagename)\n unpackdir = os.path.join(privdir, 'dist-unpack')\n builddir = os.path.join(privdir, 'dist-build')\n installdir = os.path.join(privdir, 'dist-install')\n for p in (unpackdir, builddir, installdir):\n if os.path.exists(p):\n shutil.rmtree(p)\n os.mkdir(p)\n ninja_bin = detect_ninja()\n try:\n shutil.unpack_archive(packagename, unpackdir)\n unpacked_files = glob(os.path.join(unpackdir, '*'))\n assert(len(unpacked_files) == 1)\n unpacked_src_dir = unpacked_files[0]\n with open(os.path.join(bld_root, 'meson-info', 'intro-buildoptions.json')) as boptions:\n meson_command += ['-D{name}={value}'.format(**o) for o in json.load(boptions)\n if o['name'] not in ['backend', 'install_umask']]\n meson_command += extra_meson_args\n if subprocess.call(meson_command + ['--backend=ninja', unpacked_src_dir, builddir]) != 0:\n print('Running Meson on distribution package failed')\n return 1\n if subprocess.call([ninja_bin], cwd=builddir) != 0:\n print('Compiling the distribution package failed')\n return 1\n if subprocess.call([ninja_bin, 'test'], cwd=builddir) != 0:\n print('Running unit tests on the distribution package failed')\n return 1\n myenv = os.environ.copy()\n myenv['DESTDIR'] = installdir\n if subprocess.call([ninja_bin, 'install'], cwd=builddir, env=myenv) != 0:\n print('Installing the distribution package failed')\n return 1\n finally:\n shutil.rmtree(unpackdir)\n shutil.rmtree(builddir)\n shutil.rmtree(installdir)\n print('Distribution package %s tested' % packagename)\n return 0\n\ndef determine_archives_to_generate(options):\n result = []\n for i in options.formats.split(','):\n if i not in archive_choices:\n sys.exit('Value \"{}\" not one of permitted values {}.'.format(i, archive_choices))\n result.append(i)\n if len(i) == 0:\n sys.exit('No archive types specified.')\n return result\n\ndef run(options):\n options.wd = os.path.abspath(options.wd)\n buildfile = Path(options.wd) / 'meson-private' / 'build.dat'\n if not buildfile.is_file():\n raise MesonException('Directory {!r} does not seem to be a Meson build directory.'.format(options.wd))\n b = build.load(options.wd)\n # This import must be load delayed, otherwise it will get the default\n # value of None.\n from mesonbuild.mesonlib import meson_command\n src_root = b.environment.source_dir\n bld_root = b.environment.build_dir\n priv_dir = os.path.join(bld_root, 'meson-private')\n dist_sub = os.path.join(bld_root, 'meson-dist')\n\n dist_name = b.project_name + '-' + b.project_version\n\n archives = determine_archives_to_generate(options)\n\n subprojects = []\n extra_meson_args = []\n if options.include_subprojects:\n subproject_dir = os.path.join(src_root, b.subproject_dir)\n for sub in b.subprojects:\n _, directory = wrap.get_directory(subproject_dir, sub)\n subprojects.append(os.path.join(b.subproject_dir, directory))\n extra_meson_args.append('-Dwrap_mode=nodownload')\n\n if is_git(src_root):\n names = create_dist_git(dist_name, archives, src_root, bld_root, dist_sub, b.dist_scripts, subprojects)\n elif is_hg(src_root):\n if subprojects:\n print('--include-subprojects option currently not supported with Mercurial')\n return 1\n names = create_dist_hg(dist_name, archives, src_root, bld_root, dist_sub, b.dist_scripts)\n else:\n print('Dist currently only works with Git or Mercurial repos')\n return 1\n if names is None:\n return 1\n # Check only one.\n rc = check_dist(names[0], meson_command, extra_meson_args, bld_root, priv_dir)\n if rc == 0:\n for name in names:\n create_hash(name)\n return rc\n", "path": "mesonbuild/mdist.py"}], "after_files": [{"content": "# Copyright 2017 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport gzip\nimport os\nimport sys\nimport shutil\nimport subprocess\nimport hashlib\nimport json\nfrom glob import glob\nfrom pathlib import Path\nfrom mesonbuild.environment import detect_ninja\nfrom mesonbuild.mesonlib import windows_proof_rmtree, MesonException\nfrom mesonbuild.wrap import wrap\nfrom mesonbuild import mlog, build\n\narchive_choices = ['gztar', 'xztar', 'zip']\narchive_extension = {'gztar': '.tar.gz',\n 'xztar': '.tar.xz',\n 'zip': '.zip'}\n\ndef add_arguments(parser):\n parser.add_argument('-C', default='.', dest='wd',\n help='directory to cd into before running')\n parser.add_argument('--formats', default='xztar',\n help='Comma separated list of archive types to create.')\n parser.add_argument('--include-subprojects', action='store_true',\n help='Include source code of subprojects that have been used for the build.')\n\n\ndef create_hash(fname):\n hashname = fname + '.sha256sum'\n m = hashlib.sha256()\n m.update(open(fname, 'rb').read())\n with open(hashname, 'w') as f:\n f.write('%s %s\\n' % (m.hexdigest(), os.path.basename(fname)))\n\n\ndef del_gitfiles(dirname):\n for f in glob(os.path.join(dirname, '.git*')):\n if os.path.isdir(f) and not os.path.islink(f):\n windows_proof_rmtree(f)\n else:\n os.unlink(f)\n\ndef process_submodules(dirname):\n module_file = os.path.join(dirname, '.gitmodules')\n if not os.path.exists(module_file):\n return\n subprocess.check_call(['git', 'submodule', 'update', '--init', '--recursive'], cwd=dirname)\n for line in open(module_file):\n line = line.strip()\n if '=' not in line:\n continue\n k, v = line.split('=', 1)\n k = k.strip()\n v = v.strip()\n if k != 'path':\n continue\n del_gitfiles(os.path.join(dirname, v))\n\n\ndef run_dist_scripts(src_root, bld_root, dist_root, dist_scripts):\n assert(os.path.isabs(dist_root))\n env = os.environ.copy()\n env['MESON_DIST_ROOT'] = dist_root\n env['MESON_SOURCE_ROOT'] = src_root\n env['MESON_BUILD_ROOT'] = bld_root\n for d in dist_scripts:\n script = d['exe']\n args = d['args']\n name = ' '.join(script + args)\n print('Running custom dist script {!r}'.format(name))\n try:\n rc = subprocess.call(script + args, env=env)\n if rc != 0:\n sys.exit('Dist script errored out')\n except OSError:\n print('Failed to run dist script {!r}'.format(name))\n sys.exit(1)\n\ndef is_git(src_root):\n _git = os.path.join(src_root, '.git')\n return os.path.isdir(_git) or os.path.isfile(_git)\n\ndef git_have_dirty_index(src_root):\n '''Check whether there are uncommitted changes in git'''\n ret = subprocess.call(['git', '-C', src_root, 'diff-index', '--quiet', 'HEAD'])\n return ret == 1\n\ndef git_clone(src_root, distdir):\n if git_have_dirty_index(src_root):\n mlog.warning('Repository has uncommitted changes that will not be included in the dist tarball')\n if os.path.exists(distdir):\n shutil.rmtree(distdir)\n os.makedirs(distdir)\n subprocess.check_call(['git', 'clone', '--shared', src_root, distdir])\n process_submodules(distdir)\n del_gitfiles(distdir)\n\ndef create_dist_git(dist_name, archives, src_root, bld_root, dist_sub, dist_scripts, subprojects):\n distdir = os.path.join(dist_sub, dist_name)\n git_clone(src_root, distdir)\n for path in subprojects:\n sub_src_root = os.path.join(src_root, path)\n sub_distdir = os.path.join(distdir, path)\n if os.path.exists(sub_distdir):\n continue\n if is_git(sub_src_root):\n git_clone(sub_src_root, sub_distdir)\n else:\n shutil.copytree(sub_src_root, sub_distdir)\n run_dist_scripts(src_root, bld_root, distdir, dist_scripts)\n output_names = []\n for a in archives:\n compressed_name = distdir + archive_extension[a]\n shutil.make_archive(distdir, a, root_dir=dist_sub, base_dir=dist_name)\n output_names.append(compressed_name)\n shutil.rmtree(distdir)\n return output_names\n\ndef is_hg(src_root):\n return os.path.isdir(os.path.join(src_root, '.hg'))\n\ndef hg_have_dirty_index(src_root):\n '''Check whether there are uncommitted changes in hg'''\n out = subprocess.check_output(['hg', '-R', src_root, 'summary'])\n return b'commit: (clean)' not in out\n\ndef create_dist_hg(dist_name, archives, src_root, bld_root, dist_sub, dist_scripts):\n if hg_have_dirty_index(src_root):\n mlog.warning('Repository has uncommitted changes that will not be included in the dist tarball')\n if dist_scripts:\n mlog.warning('dist scripts are not supported in Mercurial projects')\n\n os.makedirs(dist_sub, exist_ok=True)\n tarname = os.path.join(dist_sub, dist_name + '.tar')\n xzname = tarname + '.xz'\n gzname = tarname + '.gz'\n zipname = os.path.join(dist_sub, dist_name + '.zip')\n # Note that -X interprets relative paths using the current working\n # directory, not the repository root, so this must be an absolute path:\n # https://bz.mercurial-scm.org/show_bug.cgi?id=6267\n #\n # .hg[a-z]* is used instead of .hg* to keep .hg_archival.txt, which may\n # be useful to link the tarball to the Mercurial revision for either\n # manual inspection or in case any code interprets it for a --version or\n # similar.\n subprocess.check_call(['hg', 'archive', '-R', src_root, '-S', '-t', 'tar',\n '-X', src_root + '/.hg[a-z]*', tarname])\n output_names = []\n if 'xztar' in archives:\n import lzma\n with lzma.open(xzname, 'wb') as xf, open(tarname, 'rb') as tf:\n shutil.copyfileobj(tf, xf)\n output_names.append(xzname)\n if 'gztar' in archives:\n with gzip.open(gzname, 'wb') as zf, open(tarname, 'rb') as tf:\n shutil.copyfileobj(tf, zf)\n output_names.append(gzname)\n os.unlink(tarname)\n if 'zip' in archives:\n subprocess.check_call(['hg', 'archive', '-R', src_root, '-S', '-t', 'zip', zipname])\n output_names.append(zipname)\n return output_names\n\n\ndef check_dist(packagename, meson_command, extra_meson_args, bld_root, privdir):\n print('Testing distribution package %s' % packagename)\n unpackdir = os.path.join(privdir, 'dist-unpack')\n builddir = os.path.join(privdir, 'dist-build')\n installdir = os.path.join(privdir, 'dist-install')\n for p in (unpackdir, builddir, installdir):\n if os.path.exists(p):\n shutil.rmtree(p)\n os.mkdir(p)\n ninja_bin = detect_ninja()\n try:\n shutil.unpack_archive(packagename, unpackdir)\n unpacked_files = glob(os.path.join(unpackdir, '*'))\n assert(len(unpacked_files) == 1)\n unpacked_src_dir = unpacked_files[0]\n with open(os.path.join(bld_root, 'meson-info', 'intro-buildoptions.json')) as boptions:\n meson_command += ['-D{name}={value}'.format(**o) for o in json.load(boptions)\n if o['name'] not in ['backend', 'install_umask']]\n meson_command += extra_meson_args\n if subprocess.call(meson_command + ['--backend=ninja', unpacked_src_dir, builddir]) != 0:\n print('Running Meson on distribution package failed')\n return 1\n if subprocess.call([ninja_bin], cwd=builddir) != 0:\n print('Compiling the distribution package failed')\n return 1\n if subprocess.call([ninja_bin, 'test'], cwd=builddir) != 0:\n print('Running unit tests on the distribution package failed')\n return 1\n myenv = os.environ.copy()\n myenv['DESTDIR'] = installdir\n if subprocess.call([ninja_bin, 'install'], cwd=builddir, env=myenv) != 0:\n print('Installing the distribution package failed')\n return 1\n finally:\n shutil.rmtree(unpackdir)\n shutil.rmtree(builddir)\n shutil.rmtree(installdir)\n print('Distribution package %s tested' % packagename)\n return 0\n\ndef determine_archives_to_generate(options):\n result = []\n for i in options.formats.split(','):\n if i not in archive_choices:\n sys.exit('Value \"{}\" not one of permitted values {}.'.format(i, archive_choices))\n result.append(i)\n if len(i) == 0:\n sys.exit('No archive types specified.')\n return result\n\ndef run(options):\n options.wd = os.path.abspath(options.wd)\n buildfile = Path(options.wd) / 'meson-private' / 'build.dat'\n if not buildfile.is_file():\n raise MesonException('Directory {!r} does not seem to be a Meson build directory.'.format(options.wd))\n b = build.load(options.wd)\n # This import must be load delayed, otherwise it will get the default\n # value of None.\n from mesonbuild.mesonlib import meson_command\n src_root = b.environment.source_dir\n bld_root = b.environment.build_dir\n priv_dir = os.path.join(bld_root, 'meson-private')\n dist_sub = os.path.join(bld_root, 'meson-dist')\n\n dist_name = b.project_name + '-' + b.project_version\n\n archives = determine_archives_to_generate(options)\n\n subprojects = []\n extra_meson_args = []\n if options.include_subprojects:\n subproject_dir = os.path.join(src_root, b.subproject_dir)\n for sub in b.subprojects:\n _, directory = wrap.get_directory(subproject_dir, sub)\n subprojects.append(os.path.join(b.subproject_dir, directory))\n extra_meson_args.append('-Dwrap_mode=nodownload')\n\n if is_git(src_root):\n names = create_dist_git(dist_name, archives, src_root, bld_root, dist_sub, b.dist_scripts, subprojects)\n elif is_hg(src_root):\n if subprojects:\n print('--include-subprojects option currently not supported with Mercurial')\n return 1\n names = create_dist_hg(dist_name, archives, src_root, bld_root, dist_sub, b.dist_scripts)\n else:\n print('Dist currently only works with Git or Mercurial repos')\n return 1\n if names is None:\n return 1\n # Check only one.\n rc = check_dist(names[0], meson_command, extra_meson_args, bld_root, priv_dir)\n if rc == 0:\n for name in names:\n create_hash(name)\n return rc\n", "path": "mesonbuild/mdist.py"}]}
3,794
310
gh_patches_debug_28711
rasdani/github-patches
git_diff
Mailu__Mailu-877
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- command documentation uses `_` where `-` should be used https://mailu.io/1.6/cli.html shows commands ``` alias alias_delete user user_import user_delete config_update ``` where `flask mailu` shows the following subcommands: ``` Commands: admin Create an admin user advertise Advertise this server against statistic services. alias Create an alias alias-delete delete alias config-update sync configuration with data from YAML-formatted stdin domain setlimits Set domain limits setmanager Make a user manager of a domain user Create a user user-delete delete user user-import Import a user along with password hash. ``` I think the documentation must be updated to use `-` rather than `_`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `core/admin/mailu/manage.py` Content: ``` 1 from mailu import models 2 3 from flask import current_app as app 4 from flask import cli as flask_cli 5 6 import flask 7 import os 8 import socket 9 import uuid 10 import click 11 12 13 db = models.db 14 15 16 @click.group() 17 def mailu(cls=flask_cli.FlaskGroup): 18 """ Mailu command line 19 """ 20 21 22 @mailu.command() 23 @flask_cli.with_appcontext 24 def advertise(): 25 """ Advertise this server against statistic services. 26 """ 27 if os.path.isfile(app.config["INSTANCE_ID_PATH"]): 28 with open(app.config["INSTANCE_ID_PATH"], "r") as handle: 29 instance_id = handle.read() 30 else: 31 instance_id = str(uuid.uuid4()) 32 with open(app.config["INSTANCE_ID_PATH"], "w") as handle: 33 handle.write(instance_id) 34 if not app.config["DISABLE_STATISTICS"]: 35 try: 36 socket.gethostbyname(app.config["STATS_ENDPOINT"].format(instance_id)) 37 except: 38 pass 39 40 41 @mailu.command() 42 @click.argument('localpart') 43 @click.argument('domain_name') 44 @click.argument('password') 45 @flask_cli.with_appcontext 46 def admin(localpart, domain_name, password): 47 """ Create an admin user 48 """ 49 domain = models.Domain.query.get(domain_name) 50 if not domain: 51 domain = models.Domain(name=domain_name) 52 db.session.add(domain) 53 user = models.User( 54 localpart=localpart, 55 domain=domain, 56 global_admin=True 57 ) 58 user.set_password(password) 59 db.session.add(user) 60 db.session.commit() 61 62 63 @mailu.command() 64 @click.argument('localpart') 65 @click.argument('domain_name') 66 @click.argument('password') 67 @click.argument('hash_scheme') 68 @flask_cli.with_appcontext 69 def user(localpart, domain_name, password, hash_scheme=None): 70 """ Create a user 71 """ 72 if hash_scheme is None: 73 hash_scheme = app.config['PASSWORD_SCHEME'] 74 domain = models.Domain.query.get(domain_name) 75 if not domain: 76 domain = models.Domain(name=domain_name) 77 db.session.add(domain) 78 user = models.User( 79 localpart=localpart, 80 domain=domain, 81 global_admin=False 82 ) 83 user.set_password(password, hash_scheme=hash_scheme) 84 db.session.add(user) 85 db.session.commit() 86 87 88 @mailu.command() 89 @click.option('-n', '--domain_name') 90 @click.option('-u', '--max_users') 91 @click.option('-a', '--max_aliases') 92 @click.option('-q', '--max_quota_bytes') 93 @flask_cli.with_appcontext 94 def domain(domain_name, max_users=-1, max_aliases=-1, max_quota_bytes=0): 95 domain = models.Domain.query.get(domain_name) 96 if not domain: 97 domain = models.Domain(name=domain_name) 98 db.session.add(domain) 99 db.session.commit() 100 101 102 @mailu.command() 103 @click.argument('localpart') 104 @click.argument('domain_name') 105 @click.argument('password_hash') 106 @click.argument('hash_scheme') 107 @flask_cli.with_appcontext 108 def user_import(localpart, domain_name, password_hash, hash_scheme = None): 109 """ Import a user along with password hash. 110 """ 111 if hash_scheme is None: 112 hash_scheme = app.config['PASSWORD_SCHEME'] 113 domain = models.Domain.query.get(domain_name) 114 if not domain: 115 domain = models.Domain(name=domain_name) 116 db.session.add(domain) 117 user = models.User( 118 localpart=localpart, 119 domain=domain, 120 global_admin=False 121 ) 122 user.set_password(password_hash, hash_scheme=hash_scheme, raw=True) 123 db.session.add(user) 124 db.session.commit() 125 126 127 @mailu.command() 128 @click.option('-v', '--verbose') 129 @click.option('-d', '--delete_objects') 130 @flask_cli.with_appcontext 131 def config_update(verbose=False, delete_objects=False): 132 """sync configuration with data from YAML-formatted stdin""" 133 import yaml 134 import sys 135 new_config = yaml.load(sys.stdin) 136 # print new_config 137 domains = new_config.get('domains', []) 138 tracked_domains = set() 139 for domain_config in domains: 140 if verbose: 141 print(str(domain_config)) 142 domain_name = domain_config['name'] 143 max_users = domain_config.get('max_users', -1) 144 max_aliases = domain_config.get('max_aliases', -1) 145 max_quota_bytes = domain_config.get('max_quota_bytes', 0) 146 tracked_domains.add(domain_name) 147 domain = models.Domain.query.get(domain_name) 148 if not domain: 149 domain = models.Domain(name=domain_name, 150 max_users=max_users, 151 max_aliases=max_aliases, 152 max_quota_bytes=max_quota_bytes) 153 db.session.add(domain) 154 print("Added " + str(domain_config)) 155 else: 156 domain.max_users = max_users 157 domain.max_aliases = max_aliases 158 domain.max_quota_bytes = max_quota_bytes 159 db.session.add(domain) 160 print("Updated " + str(domain_config)) 161 162 users = new_config.get('users', []) 163 tracked_users = set() 164 user_optional_params = ('comment', 'quota_bytes', 'global_admin', 165 'enable_imap', 'enable_pop', 'forward_enabled', 166 'forward_destination', 'reply_enabled', 167 'reply_subject', 'reply_body', 'displayed_name', 168 'spam_enabled', 'email', 'spam_threshold') 169 for user_config in users: 170 if verbose: 171 print(str(user_config)) 172 localpart = user_config['localpart'] 173 domain_name = user_config['domain'] 174 password_hash = user_config.get('password_hash', None) 175 hash_scheme = user_config.get('hash_scheme', None) 176 domain = models.Domain.query.get(domain_name) 177 email = '{0}@{1}'.format(localpart, domain_name) 178 optional_params = {} 179 for k in user_optional_params: 180 if k in user_config: 181 optional_params[k] = user_config[k] 182 if not domain: 183 domain = models.Domain(name=domain_name) 184 db.session.add(domain) 185 user = models.User.query.get(email) 186 tracked_users.add(email) 187 tracked_domains.add(domain_name) 188 if not user: 189 user = models.User( 190 localpart=localpart, 191 domain=domain, 192 **optional_params 193 ) 194 else: 195 for k in optional_params: 196 setattr(user, k, optional_params[k]) 197 user.set_password(password_hash, hash_scheme=hash_scheme, raw=True) 198 db.session.add(user) 199 200 aliases = new_config.get('aliases', []) 201 tracked_aliases = set() 202 for alias_config in aliases: 203 if verbose: 204 print(str(alias_config)) 205 localpart = alias_config['localpart'] 206 domain_name = alias_config['domain'] 207 if type(alias_config['destination']) is str: 208 destination = alias_config['destination'].split(',') 209 else: 210 destination = alias_config['destination'] 211 wildcard = alias_config.get('wildcard', False) 212 domain = models.Domain.query.get(domain_name) 213 email = '{0}@{1}'.format(localpart, domain_name) 214 if not domain: 215 domain = models.Domain(name=domain_name) 216 db.session.add(domain) 217 alias = models.Alias.query.get(email) 218 tracked_aliases.add(email) 219 tracked_domains.add(domain_name) 220 if not alias: 221 alias = models.Alias( 222 localpart=localpart, 223 domain=domain, 224 wildcard=wildcard, 225 destination=destination, 226 email=email 227 ) 228 else: 229 alias.destination = destination 230 alias.wildcard = wildcard 231 db.session.add(alias) 232 233 db.session.commit() 234 235 managers = new_config.get('managers', []) 236 # tracked_managers=set() 237 for manager_config in managers: 238 if verbose: 239 print(str(manager_config)) 240 domain_name = manager_config['domain'] 241 user_name = manager_config['user'] 242 domain = models.Domain.query.get(domain_name) 243 manageruser = models.User.query.get(user_name + '@' + domain_name) 244 if manageruser not in domain.managers: 245 domain.managers.append(manageruser) 246 db.session.add(domain) 247 248 db.session.commit() 249 250 if delete_objects: 251 for user in db.session.query(models.User).all(): 252 if not (user.email in tracked_users): 253 if verbose: 254 print("Deleting user: " + str(user.email)) 255 db.session.delete(user) 256 for alias in db.session.query(models.Alias).all(): 257 if not (alias.email in tracked_aliases): 258 if verbose: 259 print("Deleting alias: " + str(alias.email)) 260 db.session.delete(alias) 261 for domain in db.session.query(models.Domain).all(): 262 if not (domain.name in tracked_domains): 263 if verbose: 264 print("Deleting domain: " + str(domain.name)) 265 db.session.delete(domain) 266 db.session.commit() 267 268 269 @mailu.command() 270 @click.argument('email') 271 @flask_cli.with_appcontext 272 def user_delete(email): 273 """delete user""" 274 user = models.User.query.get(email) 275 if user: 276 db.session.delete(user) 277 db.session.commit() 278 279 280 @mailu.command() 281 @click.argument('email') 282 @flask_cli.with_appcontext 283 def alias_delete(email): 284 """delete alias""" 285 alias = models.Alias.query.get(email) 286 if alias: 287 db.session.delete(alias) 288 db.session.commit() 289 290 291 @mailu.command() 292 @click.argument('localpart') 293 @click.argument('domain_name') 294 @click.argument('destination') 295 @click.option('-w', '--wildcard', is_flag=True) 296 @flask_cli.with_appcontext 297 def alias(localpart, domain_name, destination, wildcard=False): 298 """ Create an alias 299 """ 300 domain = models.Domain.query.get(domain_name) 301 if not domain: 302 domain = models.Domain(name=domain_name) 303 db.session.add(domain) 304 alias = models.Alias( 305 localpart=localpart, 306 domain=domain, 307 wildcard=wildcard, 308 destination=destination.split(','), 309 email="%s@%s" % (localpart, domain_name) 310 ) 311 db.session.add(alias) 312 db.session.commit() 313 314 315 @mailu.command() 316 @click.argument('domain_name') 317 @click.argument('max_users') 318 @click.argument('max_aliases') 319 @click.argument('max_quota_bytes') 320 @flask_cli.with_appcontext 321 def setlimits(domain_name, max_users, max_aliases, max_quota_bytes): 322 """ Set domain limits 323 """ 324 domain = models.Domain.query.get(domain_name) 325 domain.max_users = max_users 326 domain.max_aliases = max_aliases 327 domain.max_quota_bytes = max_quota_bytes 328 db.session.add(domain) 329 db.session.commit() 330 331 332 @mailu.command() 333 @click.argument('domain_name') 334 @click.argument('user_name') 335 @flask_cli.with_appcontext 336 def setmanager(domain_name, user_name='manager'): 337 """ Make a user manager of a domain 338 """ 339 domain = models.Domain.query.get(domain_name) 340 manageruser = models.User.query.get(user_name + '@' + domain_name) 341 domain.managers.append(manageruser) 342 db.session.add(domain) 343 db.session.commit() 344 345 346 if __name__ == '__main__': 347 cli() 348 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/core/admin/mailu/manage.py b/core/admin/mailu/manage.py --- a/core/admin/mailu/manage.py +++ b/core/admin/mailu/manage.py @@ -64,7 +64,7 @@ @click.argument('localpart') @click.argument('domain_name') @click.argument('password') [email protected]('hash_scheme') [email protected]('hash_scheme', required=False) @flask_cli.with_appcontext def user(localpart, domain_name, password, hash_scheme=None): """ Create a user @@ -86,12 +86,14 @@ @mailu.command() [email protected]('-n', '--domain_name') [email protected]('-u', '--max_users') [email protected]('-a', '--max_aliases') [email protected]('-q', '--max_quota_bytes') [email protected]('domain_name') [email protected]('-u', '--max-users') [email protected]('-a', '--max-aliases') [email protected]('-q', '--max-quota-bytes') @flask_cli.with_appcontext def domain(domain_name, max_users=-1, max_aliases=-1, max_quota_bytes=0): + """ Create a domain + """ domain = models.Domain.query.get(domain_name) if not domain: domain = models.Domain(name=domain_name) @@ -126,7 +128,7 @@ @mailu.command() @click.option('-v', '--verbose') [email protected]('-d', '--delete_objects') [email protected]('-d', '--delete-objects') @flask_cli.with_appcontext def config_update(verbose=False, delete_objects=False): """sync configuration with data from YAML-formatted stdin"""
{"golden_diff": "diff --git a/core/admin/mailu/manage.py b/core/admin/mailu/manage.py\n--- a/core/admin/mailu/manage.py\n+++ b/core/admin/mailu/manage.py\n@@ -64,7 +64,7 @@\n @click.argument('localpart')\n @click.argument('domain_name')\n @click.argument('password')\[email protected]('hash_scheme')\[email protected]('hash_scheme', required=False)\n @flask_cli.with_appcontext\n def user(localpart, domain_name, password, hash_scheme=None):\n \"\"\" Create a user\n@@ -86,12 +86,14 @@\n \n \n @mailu.command()\[email protected]('-n', '--domain_name')\[email protected]('-u', '--max_users')\[email protected]('-a', '--max_aliases')\[email protected]('-q', '--max_quota_bytes')\[email protected]('domain_name')\[email protected]('-u', '--max-users')\[email protected]('-a', '--max-aliases')\[email protected]('-q', '--max-quota-bytes')\n @flask_cli.with_appcontext\n def domain(domain_name, max_users=-1, max_aliases=-1, max_quota_bytes=0):\n+ \"\"\" Create a domain\n+ \"\"\"\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name)\n@@ -126,7 +128,7 @@\n \n @mailu.command()\n @click.option('-v', '--verbose')\[email protected]('-d', '--delete_objects')\[email protected]('-d', '--delete-objects')\n @flask_cli.with_appcontext\n def config_update(verbose=False, delete_objects=False):\n \"\"\"sync configuration with data from YAML-formatted stdin\"\"\"\n", "issue": "command documentation uses `_` where `-` should be used\nhttps://mailu.io/1.6/cli.html shows commands\r\n\r\n```\r\nalias\r\nalias_delete\r\nuser\r\nuser_import\r\nuser_delete\r\nconfig_update\r\n```\r\n\r\nwhere `flask mailu` shows the following subcommands:\r\n\r\n```\r\nCommands:\r\n admin Create an admin user\r\n advertise Advertise this server against statistic services.\r\n alias Create an alias\r\n alias-delete delete alias\r\n config-update sync configuration with data from YAML-formatted stdin\r\n domain\r\n setlimits Set domain limits\r\n setmanager Make a user manager of a domain\r\n user Create a user\r\n user-delete delete user\r\n user-import Import a user along with password hash.\r\n```\r\n\r\nI think the documentation must be updated to use `-` rather than `_`.\n", "before_files": [{"content": "from mailu import models\n\nfrom flask import current_app as app\nfrom flask import cli as flask_cli\n\nimport flask\nimport os\nimport socket\nimport uuid\nimport click\n\n\ndb = models.db\n\n\[email protected]()\ndef mailu(cls=flask_cli.FlaskGroup):\n \"\"\" Mailu command line\n \"\"\"\n\n\[email protected]()\n@flask_cli.with_appcontext\ndef advertise():\n \"\"\" Advertise this server against statistic services.\n \"\"\"\n if os.path.isfile(app.config[\"INSTANCE_ID_PATH\"]):\n with open(app.config[\"INSTANCE_ID_PATH\"], \"r\") as handle:\n instance_id = handle.read()\n else:\n instance_id = str(uuid.uuid4())\n with open(app.config[\"INSTANCE_ID_PATH\"], \"w\") as handle:\n handle.write(instance_id)\n if not app.config[\"DISABLE_STATISTICS\"]:\n try:\n socket.gethostbyname(app.config[\"STATS_ENDPOINT\"].format(instance_id))\n except:\n pass\n\n\[email protected]()\[email protected]('localpart')\[email protected]('domain_name')\[email protected]('password')\n@flask_cli.with_appcontext\ndef admin(localpart, domain_name, password):\n \"\"\" Create an admin user\n \"\"\"\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n user = models.User(\n localpart=localpart,\n domain=domain,\n global_admin=True\n )\n user.set_password(password)\n db.session.add(user)\n db.session.commit()\n\n\[email protected]()\[email protected]('localpart')\[email protected]('domain_name')\[email protected]('password')\[email protected]('hash_scheme')\n@flask_cli.with_appcontext\ndef user(localpart, domain_name, password, hash_scheme=None):\n \"\"\" Create a user\n \"\"\"\n if hash_scheme is None:\n hash_scheme = app.config['PASSWORD_SCHEME']\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n user = models.User(\n localpart=localpart,\n domain=domain,\n global_admin=False\n )\n user.set_password(password, hash_scheme=hash_scheme)\n db.session.add(user)\n db.session.commit()\n\n\[email protected]()\[email protected]('-n', '--domain_name')\[email protected]('-u', '--max_users')\[email protected]('-a', '--max_aliases')\[email protected]('-q', '--max_quota_bytes')\n@flask_cli.with_appcontext\ndef domain(domain_name, max_users=-1, max_aliases=-1, max_quota_bytes=0):\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n db.session.commit()\n\n\[email protected]()\[email protected]('localpart')\[email protected]('domain_name')\[email protected]('password_hash')\[email protected]('hash_scheme')\n@flask_cli.with_appcontext\ndef user_import(localpart, domain_name, password_hash, hash_scheme = None):\n \"\"\" Import a user along with password hash.\n \"\"\"\n if hash_scheme is None:\n hash_scheme = app.config['PASSWORD_SCHEME']\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n user = models.User(\n localpart=localpart,\n domain=domain,\n global_admin=False\n )\n user.set_password(password_hash, hash_scheme=hash_scheme, raw=True)\n db.session.add(user)\n db.session.commit()\n\n\[email protected]()\[email protected]('-v', '--verbose')\[email protected]('-d', '--delete_objects')\n@flask_cli.with_appcontext\ndef config_update(verbose=False, delete_objects=False):\n \"\"\"sync configuration with data from YAML-formatted stdin\"\"\"\n import yaml\n import sys\n new_config = yaml.load(sys.stdin)\n # print new_config\n domains = new_config.get('domains', [])\n tracked_domains = set()\n for domain_config in domains:\n if verbose:\n print(str(domain_config))\n domain_name = domain_config['name']\n max_users = domain_config.get('max_users', -1)\n max_aliases = domain_config.get('max_aliases', -1)\n max_quota_bytes = domain_config.get('max_quota_bytes', 0)\n tracked_domains.add(domain_name)\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name,\n max_users=max_users,\n max_aliases=max_aliases,\n max_quota_bytes=max_quota_bytes)\n db.session.add(domain)\n print(\"Added \" + str(domain_config))\n else:\n domain.max_users = max_users\n domain.max_aliases = max_aliases\n domain.max_quota_bytes = max_quota_bytes\n db.session.add(domain)\n print(\"Updated \" + str(domain_config))\n\n users = new_config.get('users', [])\n tracked_users = set()\n user_optional_params = ('comment', 'quota_bytes', 'global_admin',\n 'enable_imap', 'enable_pop', 'forward_enabled',\n 'forward_destination', 'reply_enabled',\n 'reply_subject', 'reply_body', 'displayed_name',\n 'spam_enabled', 'email', 'spam_threshold')\n for user_config in users:\n if verbose:\n print(str(user_config))\n localpart = user_config['localpart']\n domain_name = user_config['domain']\n password_hash = user_config.get('password_hash', None)\n hash_scheme = user_config.get('hash_scheme', None)\n domain = models.Domain.query.get(domain_name)\n email = '{0}@{1}'.format(localpart, domain_name)\n optional_params = {}\n for k in user_optional_params:\n if k in user_config:\n optional_params[k] = user_config[k]\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n user = models.User.query.get(email)\n tracked_users.add(email)\n tracked_domains.add(domain_name)\n if not user:\n user = models.User(\n localpart=localpart,\n domain=domain,\n **optional_params\n )\n else:\n for k in optional_params:\n setattr(user, k, optional_params[k])\n user.set_password(password_hash, hash_scheme=hash_scheme, raw=True)\n db.session.add(user)\n\n aliases = new_config.get('aliases', [])\n tracked_aliases = set()\n for alias_config in aliases:\n if verbose:\n print(str(alias_config))\n localpart = alias_config['localpart']\n domain_name = alias_config['domain']\n if type(alias_config['destination']) is str:\n destination = alias_config['destination'].split(',')\n else:\n destination = alias_config['destination']\n wildcard = alias_config.get('wildcard', False)\n domain = models.Domain.query.get(domain_name)\n email = '{0}@{1}'.format(localpart, domain_name)\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n alias = models.Alias.query.get(email)\n tracked_aliases.add(email)\n tracked_domains.add(domain_name)\n if not alias:\n alias = models.Alias(\n localpart=localpart,\n domain=domain,\n wildcard=wildcard,\n destination=destination,\n email=email\n )\n else:\n alias.destination = destination\n alias.wildcard = wildcard\n db.session.add(alias)\n\n db.session.commit()\n\n managers = new_config.get('managers', [])\n # tracked_managers=set()\n for manager_config in managers:\n if verbose:\n print(str(manager_config))\n domain_name = manager_config['domain']\n user_name = manager_config['user']\n domain = models.Domain.query.get(domain_name)\n manageruser = models.User.query.get(user_name + '@' + domain_name)\n if manageruser not in domain.managers:\n domain.managers.append(manageruser)\n db.session.add(domain)\n\n db.session.commit()\n\n if delete_objects:\n for user in db.session.query(models.User).all():\n if not (user.email in tracked_users):\n if verbose:\n print(\"Deleting user: \" + str(user.email))\n db.session.delete(user)\n for alias in db.session.query(models.Alias).all():\n if not (alias.email in tracked_aliases):\n if verbose:\n print(\"Deleting alias: \" + str(alias.email))\n db.session.delete(alias)\n for domain in db.session.query(models.Domain).all():\n if not (domain.name in tracked_domains):\n if verbose:\n print(\"Deleting domain: \" + str(domain.name))\n db.session.delete(domain)\n db.session.commit()\n\n\[email protected]()\[email protected]('email')\n@flask_cli.with_appcontext\ndef user_delete(email):\n \"\"\"delete user\"\"\"\n user = models.User.query.get(email)\n if user:\n db.session.delete(user)\n db.session.commit()\n\n\[email protected]()\[email protected]('email')\n@flask_cli.with_appcontext\ndef alias_delete(email):\n \"\"\"delete alias\"\"\"\n alias = models.Alias.query.get(email)\n if alias:\n db.session.delete(alias)\n db.session.commit()\n\n\[email protected]()\[email protected]('localpart')\[email protected]('domain_name')\[email protected]('destination')\[email protected]('-w', '--wildcard', is_flag=True)\n@flask_cli.with_appcontext\ndef alias(localpart, domain_name, destination, wildcard=False):\n \"\"\" Create an alias\n \"\"\"\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n alias = models.Alias(\n localpart=localpart,\n domain=domain,\n wildcard=wildcard,\n destination=destination.split(','),\n email=\"%s@%s\" % (localpart, domain_name)\n )\n db.session.add(alias)\n db.session.commit()\n\n\[email protected]()\[email protected]('domain_name')\[email protected]('max_users')\[email protected]('max_aliases')\[email protected]('max_quota_bytes')\n@flask_cli.with_appcontext\ndef setlimits(domain_name, max_users, max_aliases, max_quota_bytes):\n \"\"\" Set domain limits\n \"\"\"\n domain = models.Domain.query.get(domain_name)\n domain.max_users = max_users\n domain.max_aliases = max_aliases\n domain.max_quota_bytes = max_quota_bytes\n db.session.add(domain)\n db.session.commit()\n\n\[email protected]()\[email protected]('domain_name')\[email protected]('user_name')\n@flask_cli.with_appcontext\ndef setmanager(domain_name, user_name='manager'):\n \"\"\" Make a user manager of a domain\n \"\"\"\n domain = models.Domain.query.get(domain_name)\n manageruser = models.User.query.get(user_name + '@' + domain_name)\n domain.managers.append(manageruser)\n db.session.add(domain)\n db.session.commit()\n\n\nif __name__ == '__main__':\n cli()\n", "path": "core/admin/mailu/manage.py"}], "after_files": [{"content": "from mailu import models\n\nfrom flask import current_app as app\nfrom flask import cli as flask_cli\n\nimport flask\nimport os\nimport socket\nimport uuid\nimport click\n\n\ndb = models.db\n\n\[email protected]()\ndef mailu(cls=flask_cli.FlaskGroup):\n \"\"\" Mailu command line\n \"\"\"\n\n\[email protected]()\n@flask_cli.with_appcontext\ndef advertise():\n \"\"\" Advertise this server against statistic services.\n \"\"\"\n if os.path.isfile(app.config[\"INSTANCE_ID_PATH\"]):\n with open(app.config[\"INSTANCE_ID_PATH\"], \"r\") as handle:\n instance_id = handle.read()\n else:\n instance_id = str(uuid.uuid4())\n with open(app.config[\"INSTANCE_ID_PATH\"], \"w\") as handle:\n handle.write(instance_id)\n if not app.config[\"DISABLE_STATISTICS\"]:\n try:\n socket.gethostbyname(app.config[\"STATS_ENDPOINT\"].format(instance_id))\n except:\n pass\n\n\[email protected]()\[email protected]('localpart')\[email protected]('domain_name')\[email protected]('password')\n@flask_cli.with_appcontext\ndef admin(localpart, domain_name, password):\n \"\"\" Create an admin user\n \"\"\"\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n user = models.User(\n localpart=localpart,\n domain=domain,\n global_admin=True\n )\n user.set_password(password)\n db.session.add(user)\n db.session.commit()\n\n\[email protected]()\[email protected]('localpart')\[email protected]('domain_name')\[email protected]('password')\[email protected]('hash_scheme', required=False)\n@flask_cli.with_appcontext\ndef user(localpart, domain_name, password, hash_scheme=None):\n \"\"\" Create a user\n \"\"\"\n if hash_scheme is None:\n hash_scheme = app.config['PASSWORD_SCHEME']\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n user = models.User(\n localpart=localpart,\n domain=domain,\n global_admin=False\n )\n user.set_password(password, hash_scheme=hash_scheme)\n db.session.add(user)\n db.session.commit()\n\n\[email protected]()\[email protected]('domain_name')\[email protected]('-u', '--max-users')\[email protected]('-a', '--max-aliases')\[email protected]('-q', '--max-quota-bytes')\n@flask_cli.with_appcontext\ndef domain(domain_name, max_users=-1, max_aliases=-1, max_quota_bytes=0):\n \"\"\" Create a domain\n \"\"\"\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n db.session.commit()\n\n\[email protected]()\[email protected]('localpart')\[email protected]('domain_name')\[email protected]('password_hash')\[email protected]('hash_scheme')\n@flask_cli.with_appcontext\ndef user_import(localpart, domain_name, password_hash, hash_scheme = None):\n \"\"\" Import a user along with password hash.\n \"\"\"\n if hash_scheme is None:\n hash_scheme = app.config['PASSWORD_SCHEME']\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n user = models.User(\n localpart=localpart,\n domain=domain,\n global_admin=False\n )\n user.set_password(password_hash, hash_scheme=hash_scheme, raw=True)\n db.session.add(user)\n db.session.commit()\n\n\[email protected]()\[email protected]('-v', '--verbose')\[email protected]('-d', '--delete-objects')\n@flask_cli.with_appcontext\ndef config_update(verbose=False, delete_objects=False):\n \"\"\"sync configuration with data from YAML-formatted stdin\"\"\"\n import yaml\n import sys\n new_config = yaml.load(sys.stdin)\n # print new_config\n domains = new_config.get('domains', [])\n tracked_domains = set()\n for domain_config in domains:\n if verbose:\n print(str(domain_config))\n domain_name = domain_config['name']\n max_users = domain_config.get('max_users', -1)\n max_aliases = domain_config.get('max_aliases', -1)\n max_quota_bytes = domain_config.get('max_quota_bytes', 0)\n tracked_domains.add(domain_name)\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name,\n max_users=max_users,\n max_aliases=max_aliases,\n max_quota_bytes=max_quota_bytes)\n db.session.add(domain)\n print(\"Added \" + str(domain_config))\n else:\n domain.max_users = max_users\n domain.max_aliases = max_aliases\n domain.max_quota_bytes = max_quota_bytes\n db.session.add(domain)\n print(\"Updated \" + str(domain_config))\n\n users = new_config.get('users', [])\n tracked_users = set()\n user_optional_params = ('comment', 'quota_bytes', 'global_admin',\n 'enable_imap', 'enable_pop', 'forward_enabled',\n 'forward_destination', 'reply_enabled',\n 'reply_subject', 'reply_body', 'displayed_name',\n 'spam_enabled', 'email', 'spam_threshold')\n for user_config in users:\n if verbose:\n print(str(user_config))\n localpart = user_config['localpart']\n domain_name = user_config['domain']\n password_hash = user_config.get('password_hash', None)\n hash_scheme = user_config.get('hash_scheme', None)\n domain = models.Domain.query.get(domain_name)\n email = '{0}@{1}'.format(localpart, domain_name)\n optional_params = {}\n for k in user_optional_params:\n if k in user_config:\n optional_params[k] = user_config[k]\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n user = models.User.query.get(email)\n tracked_users.add(email)\n tracked_domains.add(domain_name)\n if not user:\n user = models.User(\n localpart=localpart,\n domain=domain,\n **optional_params\n )\n else:\n for k in optional_params:\n setattr(user, k, optional_params[k])\n user.set_password(password_hash, hash_scheme=hash_scheme, raw=True)\n db.session.add(user)\n\n aliases = new_config.get('aliases', [])\n tracked_aliases = set()\n for alias_config in aliases:\n if verbose:\n print(str(alias_config))\n localpart = alias_config['localpart']\n domain_name = alias_config['domain']\n if type(alias_config['destination']) is str:\n destination = alias_config['destination'].split(',')\n else:\n destination = alias_config['destination']\n wildcard = alias_config.get('wildcard', False)\n domain = models.Domain.query.get(domain_name)\n email = '{0}@{1}'.format(localpart, domain_name)\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n alias = models.Alias.query.get(email)\n tracked_aliases.add(email)\n tracked_domains.add(domain_name)\n if not alias:\n alias = models.Alias(\n localpart=localpart,\n domain=domain,\n wildcard=wildcard,\n destination=destination,\n email=email\n )\n else:\n alias.destination = destination\n alias.wildcard = wildcard\n db.session.add(alias)\n\n db.session.commit()\n\n managers = new_config.get('managers', [])\n # tracked_managers=set()\n for manager_config in managers:\n if verbose:\n print(str(manager_config))\n domain_name = manager_config['domain']\n user_name = manager_config['user']\n domain = models.Domain.query.get(domain_name)\n manageruser = models.User.query.get(user_name + '@' + domain_name)\n if manageruser not in domain.managers:\n domain.managers.append(manageruser)\n db.session.add(domain)\n\n db.session.commit()\n\n if delete_objects:\n for user in db.session.query(models.User).all():\n if not (user.email in tracked_users):\n if verbose:\n print(\"Deleting user: \" + str(user.email))\n db.session.delete(user)\n for alias in db.session.query(models.Alias).all():\n if not (alias.email in tracked_aliases):\n if verbose:\n print(\"Deleting alias: \" + str(alias.email))\n db.session.delete(alias)\n for domain in db.session.query(models.Domain).all():\n if not (domain.name in tracked_domains):\n if verbose:\n print(\"Deleting domain: \" + str(domain.name))\n db.session.delete(domain)\n db.session.commit()\n\n\[email protected]()\[email protected]('email')\n@flask_cli.with_appcontext\ndef user_delete(email):\n \"\"\"delete user\"\"\"\n user = models.User.query.get(email)\n if user:\n db.session.delete(user)\n db.session.commit()\n\n\[email protected]()\[email protected]('email')\n@flask_cli.with_appcontext\ndef alias_delete(email):\n \"\"\"delete alias\"\"\"\n alias = models.Alias.query.get(email)\n if alias:\n db.session.delete(alias)\n db.session.commit()\n\n\[email protected]()\[email protected]('localpart')\[email protected]('domain_name')\[email protected]('destination')\[email protected]('-w', '--wildcard', is_flag=True)\n@flask_cli.with_appcontext\ndef alias(localpart, domain_name, destination, wildcard=False):\n \"\"\" Create an alias\n \"\"\"\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n alias = models.Alias(\n localpart=localpart,\n domain=domain,\n wildcard=wildcard,\n destination=destination.split(','),\n email=\"%s@%s\" % (localpart, domain_name)\n )\n db.session.add(alias)\n db.session.commit()\n\n\[email protected]()\[email protected]('domain_name')\[email protected]('max_users')\[email protected]('max_aliases')\[email protected]('max_quota_bytes')\n@flask_cli.with_appcontext\ndef setlimits(domain_name, max_users, max_aliases, max_quota_bytes):\n \"\"\" Set domain limits\n \"\"\"\n domain = models.Domain.query.get(domain_name)\n domain.max_users = max_users\n domain.max_aliases = max_aliases\n domain.max_quota_bytes = max_quota_bytes\n db.session.add(domain)\n db.session.commit()\n\n\[email protected]()\[email protected]('domain_name')\[email protected]('user_name')\n@flask_cli.with_appcontext\ndef setmanager(domain_name, user_name='manager'):\n \"\"\" Make a user manager of a domain\n \"\"\"\n domain = models.Domain.query.get(domain_name)\n manageruser = models.User.query.get(user_name + '@' + domain_name)\n domain.managers.append(manageruser)\n db.session.add(domain)\n db.session.commit()\n\n\nif __name__ == '__main__':\n cli()\n", "path": "core/admin/mailu/manage.py"}]}
3,738
371
gh_patches_debug_29365
rasdani/github-patches
git_diff
archlinux__archinstall-1840
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- The parallel downloads page is shown half in English, half in another language if a language other than English is selected I checked every language that has this page translated and it happens on every single one. ### Example: ![French-example](https://user-images.githubusercontent.com/126529524/232617481-5f4f06ac-7359-4f61-96f2-6e7cdbeda770.png) ### Another one: ![Russian-example](https://user-images.githubusercontent.com/126529524/232617561-9463fc23-3223-4814-af8c-586dc1638ab3.png) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `archinstall/lib/interactions/general_conf.py` Content: ``` 1 from __future__ import annotations 2 3 import pathlib 4 from typing import List, Any, Optional, TYPE_CHECKING 5 6 from ..locale import list_timezones, list_keyboard_languages 7 from ..menu import MenuSelectionType, Menu, TextInput 8 from ..output import warn 9 from ..packages.packages import validate_package_list 10 from ..storage import storage 11 from ..translationhandler import Language 12 13 if TYPE_CHECKING: 14 _: Any 15 16 17 def ask_ntp(preset: bool = True) -> bool: 18 prompt = str(_('Would you like to use automatic time synchronization (NTP) with the default time servers?\n')) 19 prompt += str(_('Hardware time and other post-configuration steps might be required in order for NTP to work.\nFor more information, please check the Arch wiki')) 20 if preset: 21 preset_val = Menu.yes() 22 else: 23 preset_val = Menu.no() 24 choice = Menu(prompt, Menu.yes_no(), skip=False, preset_values=preset_val, default_option=Menu.yes()).run() 25 26 return False if choice.value == Menu.no() else True 27 28 29 def ask_hostname(preset: str = '') -> str: 30 while True: 31 hostname = TextInput( 32 str(_('Desired hostname for the installation: ')), 33 preset 34 ).run().strip() 35 36 if hostname: 37 return hostname 38 39 40 def ask_for_a_timezone(preset: Optional[str] = None) -> Optional[str]: 41 timezones = list_timezones() 42 default = 'UTC' 43 44 choice = Menu( 45 _('Select a timezone'), 46 list(timezones), 47 preset_values=preset, 48 default_option=default 49 ).run() 50 51 match choice.type_: 52 case MenuSelectionType.Skip: return preset 53 case MenuSelectionType.Selection: return choice.single_value 54 55 return None 56 57 58 def ask_for_audio_selection(desktop: bool = True, preset: Optional[str] = None) -> Optional[str]: 59 no_audio = str(_('No audio server')) 60 choices = ['pipewire', 'pulseaudio'] if desktop else ['pipewire', 'pulseaudio', no_audio] 61 default = 'pipewire' if desktop else no_audio 62 63 choice = Menu(_('Choose an audio server'), choices, preset_values=preset, default_option=default).run() 64 65 match choice.type_: 66 case MenuSelectionType.Skip: return preset 67 case MenuSelectionType.Selection: return choice.single_value 68 69 return None 70 71 72 def select_language(preset: Optional[str] = None) -> Optional[str]: 73 """ 74 Asks the user to select a language 75 Usually this is combined with :ref:`archinstall.list_keyboard_languages`. 76 77 :return: The language/dictionary key of the selected language 78 :rtype: str 79 """ 80 kb_lang = list_keyboard_languages() 81 # sort alphabetically and then by length 82 sorted_kb_lang = sorted(sorted(list(kb_lang)), key=len) 83 84 choice = Menu( 85 _('Select keyboard layout'), 86 sorted_kb_lang, 87 preset_values=preset, 88 sort=False 89 ).run() 90 91 match choice.type_: 92 case MenuSelectionType.Skip: return preset 93 case MenuSelectionType.Selection: return choice.single_value 94 95 return None 96 97 98 def select_archinstall_language(languages: List[Language], preset: Language) -> Language: 99 # these are the displayed language names which can either be 100 # the english name of a language or, if present, the 101 # name of the language in its own language 102 options = {lang.display_name: lang for lang in languages} 103 104 title = 'NOTE: If a language can not displayed properly, a proper font must be set manually in the console.\n' 105 title += 'All available fonts can be found in "/usr/share/kbd/consolefonts"\n' 106 title += 'e.g. setfont LatGrkCyr-8x16 (to display latin/greek/cyrillic characters)\n' 107 108 choice = Menu( 109 title, 110 list(options.keys()), 111 default_option=preset.display_name, 112 preview_size=0.5 113 ).run() 114 115 match choice.type_: 116 case MenuSelectionType.Skip: return preset 117 case MenuSelectionType.Selection: return options[choice.single_value] 118 119 raise ValueError('Language selection not handled') 120 121 122 def ask_additional_packages_to_install(preset: List[str] = []) -> List[str]: 123 # Additional packages (with some light weight error handling for invalid package names) 124 print(_('Only packages such as base, base-devel, linux, linux-firmware, efibootmgr and optional profile packages are installed.')) 125 print(_('If you desire a web browser, such as firefox or chromium, you may specify it in the following prompt.')) 126 127 def read_packages(p: List = []) -> list: 128 display = ' '.join(p) 129 input_packages = TextInput(_('Write additional packages to install (space separated, leave blank to skip): '), display).run().strip() 130 return input_packages.split() if input_packages else [] 131 132 preset = preset if preset else [] 133 packages = read_packages(preset) 134 135 if not storage['arguments']['offline'] and not storage['arguments']['no_pkg_lookups']: 136 while True: 137 if len(packages): 138 # Verify packages that were given 139 print(_("Verifying that additional packages exist (this might take a few seconds)")) 140 valid, invalid = validate_package_list(packages) 141 142 if invalid: 143 warn(f"Some packages could not be found in the repository: {invalid}") 144 packages = read_packages(valid) 145 continue 146 break 147 148 return packages 149 150 151 def add_number_of_parrallel_downloads(input_number :Optional[int] = None) -> Optional[int]: 152 max_downloads = 5 153 print(_(f"This option enables the number of parallel downloads that can occur during installation")) 154 print(_(f"Enter the number of parallel downloads to be enabled.\n (Enter a value between 1 to {max_downloads})\nNote:")) 155 print(_(f" - Maximum value : {max_downloads} ( Allows {max_downloads} parallel downloads, allows {max_downloads+1} downloads at a time )")) 156 print(_(f" - Minimum value : 1 ( Allows 1 parallel download, allows 2 downloads at a time )")) 157 print(_(f" - Disable/Default : 0 ( Disables parallel downloading, allows only 1 download at a time )")) 158 159 while True: 160 try: 161 input_number = int(TextInput(_("[Default value: 0] > ")).run().strip() or 0) 162 if input_number <= 0: 163 input_number = 0 164 elif input_number > max_downloads: 165 input_number = max_downloads 166 break 167 except: 168 print(_(f"Invalid input! Try again with a valid input [1 to {max_downloads}, or 0 to disable]")) 169 170 pacman_conf_path = pathlib.Path("/etc/pacman.conf") 171 with pacman_conf_path.open() as f: 172 pacman_conf = f.read().split("\n") 173 174 with pacman_conf_path.open("w") as fwrite: 175 for line in pacman_conf: 176 if "ParallelDownloads" in line: 177 fwrite.write(f"ParallelDownloads = {input_number+1}\n") if not input_number == 0 else fwrite.write("#ParallelDownloads = 0\n") 178 else: 179 fwrite.write(f"{line}\n") 180 181 return input_number 182 183 184 def select_additional_repositories(preset: List[str]) -> List[str]: 185 """ 186 Allows the user to select additional repositories (multilib, and testing) if desired. 187 188 :return: The string as a selected repository 189 :rtype: string 190 """ 191 192 repositories = ["multilib", "testing"] 193 194 choice = Menu( 195 _('Choose which optional additional repositories to enable'), 196 repositories, 197 sort=False, 198 multi=True, 199 preset_values=preset, 200 allow_reset=True 201 ).run() 202 203 match choice.type_: 204 case MenuSelectionType.Skip: return preset 205 case MenuSelectionType.Reset: return [] 206 case MenuSelectionType.Selection: return choice.single_value 207 208 return [] 209 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/archinstall/lib/interactions/general_conf.py b/archinstall/lib/interactions/general_conf.py --- a/archinstall/lib/interactions/general_conf.py +++ b/archinstall/lib/interactions/general_conf.py @@ -151,10 +151,10 @@ def add_number_of_parrallel_downloads(input_number :Optional[int] = None) -> Optional[int]: max_downloads = 5 print(_(f"This option enables the number of parallel downloads that can occur during installation")) - print(_(f"Enter the number of parallel downloads to be enabled.\n (Enter a value between 1 to {max_downloads})\nNote:")) - print(_(f" - Maximum value : {max_downloads} ( Allows {max_downloads} parallel downloads, allows {max_downloads+1} downloads at a time )")) - print(_(f" - Minimum value : 1 ( Allows 1 parallel download, allows 2 downloads at a time )")) - print(_(f" - Disable/Default : 0 ( Disables parallel downloading, allows only 1 download at a time )")) + print(str(_("Enter the number of parallel downloads to be enabled.\n (Enter a value between 1 to {})\nNote:")).format(max_downloads)) + print(str(_(" - Maximum value : {} ( Allows {} parallel downloads, allows {} downloads at a time )")).format(max_downloads, max_downloads, max_downloads + 1)) + print(_(" - Minimum value : 1 ( Allows 1 parallel download, allows 2 downloads at a time )")) + print(_(" - Disable/Default : 0 ( Disables parallel downloading, allows only 1 download at a time )")) while True: try: @@ -165,7 +165,7 @@ input_number = max_downloads break except: - print(_(f"Invalid input! Try again with a valid input [1 to {max_downloads}, or 0 to disable]")) + print(str(_("Invalid input! Try again with a valid input [1 to {}, or 0 to disable]")).format(max_downloads)) pacman_conf_path = pathlib.Path("/etc/pacman.conf") with pacman_conf_path.open() as f:
{"golden_diff": "diff --git a/archinstall/lib/interactions/general_conf.py b/archinstall/lib/interactions/general_conf.py\n--- a/archinstall/lib/interactions/general_conf.py\n+++ b/archinstall/lib/interactions/general_conf.py\n@@ -151,10 +151,10 @@\n def add_number_of_parrallel_downloads(input_number :Optional[int] = None) -> Optional[int]:\n \tmax_downloads = 5\n \tprint(_(f\"This option enables the number of parallel downloads that can occur during installation\"))\n-\tprint(_(f\"Enter the number of parallel downloads to be enabled.\\n (Enter a value between 1 to {max_downloads})\\nNote:\"))\n-\tprint(_(f\" - Maximum value : {max_downloads} ( Allows {max_downloads} parallel downloads, allows {max_downloads+1} downloads at a time )\"))\n-\tprint(_(f\" - Minimum value : 1 ( Allows 1 parallel download, allows 2 downloads at a time )\"))\n-\tprint(_(f\" - Disable/Default : 0 ( Disables parallel downloading, allows only 1 download at a time )\"))\n+\tprint(str(_(\"Enter the number of parallel downloads to be enabled.\\n (Enter a value between 1 to {})\\nNote:\")).format(max_downloads))\n+\tprint(str(_(\" - Maximum value : {} ( Allows {} parallel downloads, allows {} downloads at a time )\")).format(max_downloads, max_downloads, max_downloads + 1))\n+\tprint(_(\" - Minimum value : 1 ( Allows 1 parallel download, allows 2 downloads at a time )\"))\n+\tprint(_(\" - Disable/Default : 0 ( Disables parallel downloading, allows only 1 download at a time )\"))\n \n \twhile True:\n \t\ttry:\n@@ -165,7 +165,7 @@\n \t\t\t\tinput_number = max_downloads\n \t\t\tbreak\n \t\texcept:\n-\t\t\tprint(_(f\"Invalid input! Try again with a valid input [1 to {max_downloads}, or 0 to disable]\"))\n+\t\t\tprint(str(_(\"Invalid input! Try again with a valid input [1 to {}, or 0 to disable]\")).format(max_downloads))\n \n \tpacman_conf_path = pathlib.Path(\"/etc/pacman.conf\")\n \twith pacman_conf_path.open() as f:\n", "issue": "The parallel downloads page is shown half in English, half in another language if a language other than English is selected\nI checked every language that has this page translated and it happens on every single one.\r\n### Example:\r\n![French-example](https://user-images.githubusercontent.com/126529524/232617481-5f4f06ac-7359-4f61-96f2-6e7cdbeda770.png)\r\n\r\n### Another one:\r\n![Russian-example](https://user-images.githubusercontent.com/126529524/232617561-9463fc23-3223-4814-af8c-586dc1638ab3.png)\r\n\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport pathlib\nfrom typing import List, Any, Optional, TYPE_CHECKING\n\nfrom ..locale import list_timezones, list_keyboard_languages\nfrom ..menu import MenuSelectionType, Menu, TextInput\nfrom ..output import warn\nfrom ..packages.packages import validate_package_list\nfrom ..storage import storage\nfrom ..translationhandler import Language\n\nif TYPE_CHECKING:\n\t_: Any\n\n\ndef ask_ntp(preset: bool = True) -> bool:\n\tprompt = str(_('Would you like to use automatic time synchronization (NTP) with the default time servers?\\n'))\n\tprompt += str(_('Hardware time and other post-configuration steps might be required in order for NTP to work.\\nFor more information, please check the Arch wiki'))\n\tif preset:\n\t\tpreset_val = Menu.yes()\n\telse:\n\t\tpreset_val = Menu.no()\n\tchoice = Menu(prompt, Menu.yes_no(), skip=False, preset_values=preset_val, default_option=Menu.yes()).run()\n\n\treturn False if choice.value == Menu.no() else True\n\n\ndef ask_hostname(preset: str = '') -> str:\n\twhile True:\n\t\thostname = TextInput(\n\t\t\tstr(_('Desired hostname for the installation: ')),\n\t\t\tpreset\n\t\t).run().strip()\n\n\t\tif hostname:\n\t\t\treturn hostname\n\n\ndef ask_for_a_timezone(preset: Optional[str] = None) -> Optional[str]:\n\ttimezones = list_timezones()\n\tdefault = 'UTC'\n\n\tchoice = Menu(\n\t\t_('Select a timezone'),\n\t\tlist(timezones),\n\t\tpreset_values=preset,\n\t\tdefault_option=default\n\t).run()\n\n\tmatch choice.type_:\n\t\tcase MenuSelectionType.Skip: return preset\n\t\tcase MenuSelectionType.Selection: return choice.single_value\n\n\treturn None\n\n\ndef ask_for_audio_selection(desktop: bool = True, preset: Optional[str] = None) -> Optional[str]:\n\tno_audio = str(_('No audio server'))\n\tchoices = ['pipewire', 'pulseaudio'] if desktop else ['pipewire', 'pulseaudio', no_audio]\n\tdefault = 'pipewire' if desktop else no_audio\n\n\tchoice = Menu(_('Choose an audio server'), choices, preset_values=preset, default_option=default).run()\n\n\tmatch choice.type_:\n\t\tcase MenuSelectionType.Skip: return preset\n\t\tcase MenuSelectionType.Selection: return choice.single_value\n\n\treturn None\n\n\ndef select_language(preset: Optional[str] = None) -> Optional[str]:\n\t\"\"\"\n\tAsks the user to select a language\n\tUsually this is combined with :ref:`archinstall.list_keyboard_languages`.\n\n\t:return: The language/dictionary key of the selected language\n\t:rtype: str\n\t\"\"\"\n\tkb_lang = list_keyboard_languages()\n\t# sort alphabetically and then by length\n\tsorted_kb_lang = sorted(sorted(list(kb_lang)), key=len)\n\n\tchoice = Menu(\n\t\t_('Select keyboard layout'),\n\t\tsorted_kb_lang,\n\t\tpreset_values=preset,\n\t\tsort=False\n\t).run()\n\n\tmatch choice.type_:\n\t\tcase MenuSelectionType.Skip: return preset\n\t\tcase MenuSelectionType.Selection: return choice.single_value\n\n\treturn None\n\n\ndef select_archinstall_language(languages: List[Language], preset: Language) -> Language:\n\t# these are the displayed language names which can either be\n\t# the english name of a language or, if present, the\n\t# name of the language in its own language\n\toptions = {lang.display_name: lang for lang in languages}\n\n\ttitle = 'NOTE: If a language can not displayed properly, a proper font must be set manually in the console.\\n'\n\ttitle += 'All available fonts can be found in \"/usr/share/kbd/consolefonts\"\\n'\n\ttitle += 'e.g. setfont LatGrkCyr-8x16 (to display latin/greek/cyrillic characters)\\n'\n\n\tchoice = Menu(\n\t\ttitle,\n\t\tlist(options.keys()),\n\t\tdefault_option=preset.display_name,\n\t\tpreview_size=0.5\n\t).run()\n\n\tmatch choice.type_:\n\t\tcase MenuSelectionType.Skip: return preset\n\t\tcase MenuSelectionType.Selection: return options[choice.single_value]\n\n\traise ValueError('Language selection not handled')\n\n\ndef ask_additional_packages_to_install(preset: List[str] = []) -> List[str]:\n\t# Additional packages (with some light weight error handling for invalid package names)\n\tprint(_('Only packages such as base, base-devel, linux, linux-firmware, efibootmgr and optional profile packages are installed.'))\n\tprint(_('If you desire a web browser, such as firefox or chromium, you may specify it in the following prompt.'))\n\n\tdef read_packages(p: List = []) -> list:\n\t\tdisplay = ' '.join(p)\n\t\tinput_packages = TextInput(_('Write additional packages to install (space separated, leave blank to skip): '), display).run().strip()\n\t\treturn input_packages.split() if input_packages else []\n\n\tpreset = preset if preset else []\n\tpackages = read_packages(preset)\n\n\tif not storage['arguments']['offline'] and not storage['arguments']['no_pkg_lookups']:\n\t\twhile True:\n\t\t\tif len(packages):\n\t\t\t\t# Verify packages that were given\n\t\t\t\tprint(_(\"Verifying that additional packages exist (this might take a few seconds)\"))\n\t\t\t\tvalid, invalid = validate_package_list(packages)\n\n\t\t\t\tif invalid:\n\t\t\t\t\twarn(f\"Some packages could not be found in the repository: {invalid}\")\n\t\t\t\t\tpackages = read_packages(valid)\n\t\t\t\t\tcontinue\n\t\t\tbreak\n\n\treturn packages\n\n\ndef add_number_of_parrallel_downloads(input_number :Optional[int] = None) -> Optional[int]:\n\tmax_downloads = 5\n\tprint(_(f\"This option enables the number of parallel downloads that can occur during installation\"))\n\tprint(_(f\"Enter the number of parallel downloads to be enabled.\\n (Enter a value between 1 to {max_downloads})\\nNote:\"))\n\tprint(_(f\" - Maximum value : {max_downloads} ( Allows {max_downloads} parallel downloads, allows {max_downloads+1} downloads at a time )\"))\n\tprint(_(f\" - Minimum value : 1 ( Allows 1 parallel download, allows 2 downloads at a time )\"))\n\tprint(_(f\" - Disable/Default : 0 ( Disables parallel downloading, allows only 1 download at a time )\"))\n\n\twhile True:\n\t\ttry:\n\t\t\tinput_number = int(TextInput(_(\"[Default value: 0] > \")).run().strip() or 0)\n\t\t\tif input_number <= 0:\n\t\t\t\tinput_number = 0\n\t\t\telif input_number > max_downloads:\n\t\t\t\tinput_number = max_downloads\n\t\t\tbreak\n\t\texcept:\n\t\t\tprint(_(f\"Invalid input! Try again with a valid input [1 to {max_downloads}, or 0 to disable]\"))\n\n\tpacman_conf_path = pathlib.Path(\"/etc/pacman.conf\")\n\twith pacman_conf_path.open() as f:\n\t\tpacman_conf = f.read().split(\"\\n\")\n\n\twith pacman_conf_path.open(\"w\") as fwrite:\n\t\tfor line in pacman_conf:\n\t\t\tif \"ParallelDownloads\" in line:\n\t\t\t\tfwrite.write(f\"ParallelDownloads = {input_number+1}\\n\") if not input_number == 0 else fwrite.write(\"#ParallelDownloads = 0\\n\")\n\t\t\telse:\n\t\t\t\tfwrite.write(f\"{line}\\n\")\n\n\treturn input_number\n\n\ndef select_additional_repositories(preset: List[str]) -> List[str]:\n\t\"\"\"\n\tAllows the user to select additional repositories (multilib, and testing) if desired.\n\n\t:return: The string as a selected repository\n\t:rtype: string\n\t\"\"\"\n\n\trepositories = [\"multilib\", \"testing\"]\n\n\tchoice = Menu(\n\t\t_('Choose which optional additional repositories to enable'),\n\t\trepositories,\n\t\tsort=False,\n\t\tmulti=True,\n\t\tpreset_values=preset,\n\t\tallow_reset=True\n\t).run()\n\n\tmatch choice.type_:\n\t\tcase MenuSelectionType.Skip: return preset\n\t\tcase MenuSelectionType.Reset: return []\n\t\tcase MenuSelectionType.Selection: return choice.single_value\n\n\treturn []\n", "path": "archinstall/lib/interactions/general_conf.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport pathlib\nfrom typing import List, Any, Optional, TYPE_CHECKING\n\nfrom ..locale import list_timezones, list_keyboard_languages\nfrom ..menu import MenuSelectionType, Menu, TextInput\nfrom ..output import warn\nfrom ..packages.packages import validate_package_list\nfrom ..storage import storage\nfrom ..translationhandler import Language\n\nif TYPE_CHECKING:\n\t_: Any\n\n\ndef ask_ntp(preset: bool = True) -> bool:\n\tprompt = str(_('Would you like to use automatic time synchronization (NTP) with the default time servers?\\n'))\n\tprompt += str(_('Hardware time and other post-configuration steps might be required in order for NTP to work.\\nFor more information, please check the Arch wiki'))\n\tif preset:\n\t\tpreset_val = Menu.yes()\n\telse:\n\t\tpreset_val = Menu.no()\n\tchoice = Menu(prompt, Menu.yes_no(), skip=False, preset_values=preset_val, default_option=Menu.yes()).run()\n\n\treturn False if choice.value == Menu.no() else True\n\n\ndef ask_hostname(preset: str = '') -> str:\n\twhile True:\n\t\thostname = TextInput(\n\t\t\tstr(_('Desired hostname for the installation: ')),\n\t\t\tpreset\n\t\t).run().strip()\n\n\t\tif hostname:\n\t\t\treturn hostname\n\n\ndef ask_for_a_timezone(preset: Optional[str] = None) -> Optional[str]:\n\ttimezones = list_timezones()\n\tdefault = 'UTC'\n\n\tchoice = Menu(\n\t\t_('Select a timezone'),\n\t\tlist(timezones),\n\t\tpreset_values=preset,\n\t\tdefault_option=default\n\t).run()\n\n\tmatch choice.type_:\n\t\tcase MenuSelectionType.Skip: return preset\n\t\tcase MenuSelectionType.Selection: return choice.single_value\n\n\treturn None\n\n\ndef ask_for_audio_selection(desktop: bool = True, preset: Optional[str] = None) -> Optional[str]:\n\tno_audio = str(_('No audio server'))\n\tchoices = ['pipewire', 'pulseaudio'] if desktop else ['pipewire', 'pulseaudio', no_audio]\n\tdefault = 'pipewire' if desktop else no_audio\n\n\tchoice = Menu(_('Choose an audio server'), choices, preset_values=preset, default_option=default).run()\n\n\tmatch choice.type_:\n\t\tcase MenuSelectionType.Skip: return preset\n\t\tcase MenuSelectionType.Selection: return choice.single_value\n\n\treturn None\n\n\ndef select_language(preset: Optional[str] = None) -> Optional[str]:\n\t\"\"\"\n\tAsks the user to select a language\n\tUsually this is combined with :ref:`archinstall.list_keyboard_languages`.\n\n\t:return: The language/dictionary key of the selected language\n\t:rtype: str\n\t\"\"\"\n\tkb_lang = list_keyboard_languages()\n\t# sort alphabetically and then by length\n\tsorted_kb_lang = sorted(sorted(list(kb_lang)), key=len)\n\n\tchoice = Menu(\n\t\t_('Select keyboard layout'),\n\t\tsorted_kb_lang,\n\t\tpreset_values=preset,\n\t\tsort=False\n\t).run()\n\n\tmatch choice.type_:\n\t\tcase MenuSelectionType.Skip: return preset\n\t\tcase MenuSelectionType.Selection: return choice.single_value\n\n\treturn None\n\n\ndef select_archinstall_language(languages: List[Language], preset: Language) -> Language:\n\t# these are the displayed language names which can either be\n\t# the english name of a language or, if present, the\n\t# name of the language in its own language\n\toptions = {lang.display_name: lang for lang in languages}\n\n\ttitle = 'NOTE: If a language can not displayed properly, a proper font must be set manually in the console.\\n'\n\ttitle += 'All available fonts can be found in \"/usr/share/kbd/consolefonts\"\\n'\n\ttitle += 'e.g. setfont LatGrkCyr-8x16 (to display latin/greek/cyrillic characters)\\n'\n\n\tchoice = Menu(\n\t\ttitle,\n\t\tlist(options.keys()),\n\t\tdefault_option=preset.display_name,\n\t\tpreview_size=0.5\n\t).run()\n\n\tmatch choice.type_:\n\t\tcase MenuSelectionType.Skip: return preset\n\t\tcase MenuSelectionType.Selection: return options[choice.single_value]\n\n\traise ValueError('Language selection not handled')\n\n\ndef ask_additional_packages_to_install(preset: List[str] = []) -> List[str]:\n\t# Additional packages (with some light weight error handling for invalid package names)\n\tprint(_('Only packages such as base, base-devel, linux, linux-firmware, efibootmgr and optional profile packages are installed.'))\n\tprint(_('If you desire a web browser, such as firefox or chromium, you may specify it in the following prompt.'))\n\n\tdef read_packages(p: List = []) -> list:\n\t\tdisplay = ' '.join(p)\n\t\tinput_packages = TextInput(_('Write additional packages to install (space separated, leave blank to skip): '), display).run().strip()\n\t\treturn input_packages.split() if input_packages else []\n\n\tpreset = preset if preset else []\n\tpackages = read_packages(preset)\n\n\tif not storage['arguments']['offline'] and not storage['arguments']['no_pkg_lookups']:\n\t\twhile True:\n\t\t\tif len(packages):\n\t\t\t\t# Verify packages that were given\n\t\t\t\tprint(_(\"Verifying that additional packages exist (this might take a few seconds)\"))\n\t\t\t\tvalid, invalid = validate_package_list(packages)\n\n\t\t\t\tif invalid:\n\t\t\t\t\twarn(f\"Some packages could not be found in the repository: {invalid}\")\n\t\t\t\t\tpackages = read_packages(valid)\n\t\t\t\t\tcontinue\n\t\t\tbreak\n\n\treturn packages\n\n\ndef add_number_of_parrallel_downloads(input_number :Optional[int] = None) -> Optional[int]:\n\tmax_downloads = 5\n\tprint(_(f\"This option enables the number of parallel downloads that can occur during installation\"))\n\tprint(str(_(\"Enter the number of parallel downloads to be enabled.\\n (Enter a value between 1 to {})\\nNote:\")).format(max_downloads))\n\tprint(str(_(\" - Maximum value : {} ( Allows {} parallel downloads, allows {} downloads at a time )\")).format(max_downloads, max_downloads, max_downloads + 1))\n\tprint(_(\" - Minimum value : 1 ( Allows 1 parallel download, allows 2 downloads at a time )\"))\n\tprint(_(\" - Disable/Default : 0 ( Disables parallel downloading, allows only 1 download at a time )\"))\n\n\twhile True:\n\t\ttry:\n\t\t\tinput_number = int(TextInput(_(\"[Default value: 0] > \")).run().strip() or 0)\n\t\t\tif input_number <= 0:\n\t\t\t\tinput_number = 0\n\t\t\telif input_number > max_downloads:\n\t\t\t\tinput_number = max_downloads\n\t\t\tbreak\n\t\texcept:\n\t\t\tprint(str(_(\"Invalid input! Try again with a valid input [1 to {}, or 0 to disable]\")).format(max_downloads))\n\n\tpacman_conf_path = pathlib.Path(\"/etc/pacman.conf\")\n\twith pacman_conf_path.open() as f:\n\t\tpacman_conf = f.read().split(\"\\n\")\n\n\twith pacman_conf_path.open(\"w\") as fwrite:\n\t\tfor line in pacman_conf:\n\t\t\tif \"ParallelDownloads\" in line:\n\t\t\t\tfwrite.write(f\"ParallelDownloads = {input_number+1}\\n\") if not input_number == 0 else fwrite.write(\"#ParallelDownloads = 0\\n\")\n\t\t\telse:\n\t\t\t\tfwrite.write(f\"{line}\\n\")\n\n\treturn input_number\n\n\ndef select_additional_repositories(preset: List[str]) -> List[str]:\n\t\"\"\"\n\tAllows the user to select additional repositories (multilib, and testing) if desired.\n\n\t:return: The string as a selected repository\n\t:rtype: string\n\t\"\"\"\n\n\trepositories = [\"multilib\", \"testing\"]\n\n\tchoice = Menu(\n\t\t_('Choose which optional additional repositories to enable'),\n\t\trepositories,\n\t\tsort=False,\n\t\tmulti=True,\n\t\tpreset_values=preset,\n\t\tallow_reset=True\n\t).run()\n\n\tmatch choice.type_:\n\t\tcase MenuSelectionType.Skip: return preset\n\t\tcase MenuSelectionType.Reset: return []\n\t\tcase MenuSelectionType.Selection: return choice.single_value\n\n\treturn []\n", "path": "archinstall/lib/interactions/general_conf.py"}]}
2,754
494
gh_patches_debug_18110
rasdani/github-patches
git_diff
goauthentik__authentik-1514
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ldap group membership not updated **Is your feature request related to a problem? Please describe.** The ldap syncing code assumes group membership is based on DN. In our ldap tree this is unfortunately not the case. Instead the group membership is based on CN. So while users and groups are synced, the group memberships are not established in authentik's database. **Describe the solution you'd like** I need the 'group membership field' to be matched against the CN of a user instead of it's fully qualified DN. **Additional context** Example pseudo ldif for groups: ``` dn: cn=group-1,ou=groups,o=example,c=ch objectClass: posixGroup cn: group-1 memberUid: user-1 memberUid: user-2 memberUid: user-3 dn: cn=group-2,ou=groups,o=example,c=ch objectClass: posixGroup cn: group-2 memberUid: user-1 memberUid: user-2 ``` And for users: ``` dn: cn=user-1,ou=users,o=example,c=ch objectClass: posixAccount cn: user-1 ou: group-1 ou: group-2 dn: cn=user-2,ou=users,o=example,c=ch objectClass: posixAccount cn: user-2 ou: group-1 ou: group-2 dn: cn=user-3,ou=users,o=example,c=ch objectClass: posixAccount cn: user-3 ou: group-1 ``` If think for the current authentik sync code to work the groups would have to look like this: ``` dn: cn=group-1,ou=groups,o=example,c=ch objectClass: posixGroup cn: group-1 memberUid: user-1,ou=users,o=example,c=ch memberUid: user-2,ou=users,o=example,c=ch memberUid: user-3,ou=users,o=example,c=ch dn: cn=group-2,ou=groups,o=example,c=ch objectClass: posixGroup cn: group-2 memberUid: user-1,ou=users,o=example,c=ch memberUid: user-2,ou=users,o=example,c=ch ``` But changing this is beyond my control. This is the OpenLDAP directory tree of a large University. I'm not sure if this setup and therefor also the problem is common or unique to our deployment. I guess for the membership mapping to work I'd need the code that authentik uses to lookup group memberships in ./authentik/sources/ldap/sync/membership.py to look something like this: ``` LDAP_UNIQUENESS = "ldap_uniq" users = User.objects.filter( Q(**{f"attributes__{LDAP_UNIQUENESS}__in": members}) | Q( **{ f"attributes__{LDAP_UNIQUENESS}__isnull": True, "ak_groups__in": [ak_group], } ) ) ``` Instead of the current: ``` LDAP_DISTINGUISHED_NAME = "distinguishedName" users = User.objects.filter( Q(**{f"attributes__{LDAP_DISTINGUISHED_NAME}__in": members}) | Q( **{ f"attributes__{LDAP_DISTINGUISHED_NAME}__isnull": True, "ak_groups__in": [ak_group], } ) ) ``` I'm happy to provide a PR after discussing the right approach on how to make this configurable. ldap group membership not updated **Is your feature request related to a problem? Please describe.** The ldap syncing code assumes group membership is based on DN. In our ldap tree this is unfortunately not the case. Instead the group membership is based on CN. So while users and groups are synced, the group memberships are not established in authentik's database. **Describe the solution you'd like** I need the 'group membership field' to be matched against the CN of a user instead of it's fully qualified DN. **Additional context** Example pseudo ldif for groups: ``` dn: cn=group-1,ou=groups,o=example,c=ch objectClass: posixGroup cn: group-1 memberUid: user-1 memberUid: user-2 memberUid: user-3 dn: cn=group-2,ou=groups,o=example,c=ch objectClass: posixGroup cn: group-2 memberUid: user-1 memberUid: user-2 ``` And for users: ``` dn: cn=user-1,ou=users,o=example,c=ch objectClass: posixAccount cn: user-1 ou: group-1 ou: group-2 dn: cn=user-2,ou=users,o=example,c=ch objectClass: posixAccount cn: user-2 ou: group-1 ou: group-2 dn: cn=user-3,ou=users,o=example,c=ch objectClass: posixAccount cn: user-3 ou: group-1 ``` If think for the current authentik sync code to work the groups would have to look like this: ``` dn: cn=group-1,ou=groups,o=example,c=ch objectClass: posixGroup cn: group-1 memberUid: user-1,ou=users,o=example,c=ch memberUid: user-2,ou=users,o=example,c=ch memberUid: user-3,ou=users,o=example,c=ch dn: cn=group-2,ou=groups,o=example,c=ch objectClass: posixGroup cn: group-2 memberUid: user-1,ou=users,o=example,c=ch memberUid: user-2,ou=users,o=example,c=ch ``` But changing this is beyond my control. This is the OpenLDAP directory tree of a large University. I'm not sure if this setup and therefor also the problem is common or unique to our deployment. I guess for the membership mapping to work I'd need the code that authentik uses to lookup group memberships in ./authentik/sources/ldap/sync/membership.py to look something like this: ``` LDAP_UNIQUENESS = "ldap_uniq" users = User.objects.filter( Q(**{f"attributes__{LDAP_UNIQUENESS}__in": members}) | Q( **{ f"attributes__{LDAP_UNIQUENESS}__isnull": True, "ak_groups__in": [ak_group], } ) ) ``` Instead of the current: ``` LDAP_DISTINGUISHED_NAME = "distinguishedName" users = User.objects.filter( Q(**{f"attributes__{LDAP_DISTINGUISHED_NAME}__in": members}) | Q( **{ f"attributes__{LDAP_DISTINGUISHED_NAME}__isnull": True, "ak_groups__in": [ak_group], } ) ) ``` I'm happy to provide a PR after discussing the right approach on how to make this configurable. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `authentik/sources/ldap/sync/membership.py` Content: ``` 1 """Sync LDAP Users and groups into authentik""" 2 from typing import Any, Optional 3 4 import ldap3 5 import ldap3.core.exceptions 6 from django.db.models import Q 7 8 from authentik.core.models import Group, User 9 from authentik.sources.ldap.auth import LDAP_DISTINGUISHED_NAME 10 from authentik.sources.ldap.models import LDAPSource 11 from authentik.sources.ldap.sync.base import LDAP_UNIQUENESS, BaseLDAPSynchronizer 12 13 14 class MembershipLDAPSynchronizer(BaseLDAPSynchronizer): 15 """Sync LDAP Users and groups into authentik""" 16 17 group_cache: dict[str, Group] 18 19 def __init__(self, source: LDAPSource): 20 super().__init__(source) 21 self.group_cache: dict[str, Group] = {} 22 23 def sync(self) -> int: 24 """Iterate over all Users and assign Groups using memberOf Field""" 25 groups = self._source.connection.extend.standard.paged_search( 26 search_base=self.base_dn_groups, 27 search_filter=self._source.group_object_filter, 28 search_scope=ldap3.SUBTREE, 29 attributes=[ 30 self._source.group_membership_field, 31 self._source.object_uniqueness_field, 32 LDAP_DISTINGUISHED_NAME, 33 ], 34 ) 35 membership_count = 0 36 for group in groups: 37 members = group.get("attributes", {}).get(self._source.group_membership_field, []) 38 ak_group = self.get_group(group) 39 if not ak_group: 40 continue 41 42 users = User.objects.filter( 43 Q(**{f"attributes__{LDAP_DISTINGUISHED_NAME}__in": members}) 44 | Q( 45 **{ 46 f"attributes__{LDAP_DISTINGUISHED_NAME}__isnull": True, 47 "ak_groups__in": [ak_group], 48 } 49 ) 50 ) 51 membership_count += 1 52 membership_count += users.count() 53 ak_group.users.set(users) 54 ak_group.save() 55 self._logger.debug("Successfully updated group membership") 56 return membership_count 57 58 def get_group(self, group_dict: dict[str, Any]) -> Optional[Group]: 59 """Check if we fetched the group already, and if not cache it for later""" 60 group_dn = group_dict.get("attributes", {}).get(LDAP_DISTINGUISHED_NAME, []) 61 group_uniq = group_dict.get("attributes", {}).get(self._source.object_uniqueness_field, []) 62 # group_uniq might be a single string or an array with (hopefully) a single string 63 if isinstance(group_uniq, list): 64 if len(group_uniq) < 1: 65 self.message( 66 f"Group does not have a uniqueness attribute: '{group_dn}'", 67 group=group_dn, 68 ) 69 return None 70 group_uniq = group_uniq[0] 71 if group_uniq not in self.group_cache: 72 groups = Group.objects.filter(**{f"attributes__{LDAP_UNIQUENESS}": group_uniq}) 73 if not groups.exists(): 74 self.message( 75 f"Group does not exist in our DB yet, run sync_groups first: '{group_dn}'", 76 group=group_dn, 77 ) 78 return None 79 self.group_cache[group_uniq] = groups.first() 80 return self.group_cache[group_uniq] 81 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/authentik/sources/ldap/sync/membership.py b/authentik/sources/ldap/sync/membership.py --- a/authentik/sources/ldap/sync/membership.py +++ b/authentik/sources/ldap/sync/membership.py @@ -39,11 +39,17 @@ if not ak_group: continue + membership_mapping_attribute = LDAP_DISTINGUISHED_NAME + if self._source.group_membership_field == "memberUid": + # If memberships are based on the posixGroup's 'memberUid' + # attribute we use the RDN instead of the FDN to lookup members. + membership_mapping_attribute = LDAP_UNIQUENESS + users = User.objects.filter( - Q(**{f"attributes__{LDAP_DISTINGUISHED_NAME}__in": members}) + Q(**{f"attributes__{membership_mapping_attribute}__in": members}) | Q( **{ - f"attributes__{LDAP_DISTINGUISHED_NAME}__isnull": True, + f"attributes__{membership_mapping_attribute}__isnull": True, "ak_groups__in": [ak_group], } )
{"golden_diff": "diff --git a/authentik/sources/ldap/sync/membership.py b/authentik/sources/ldap/sync/membership.py\n--- a/authentik/sources/ldap/sync/membership.py\n+++ b/authentik/sources/ldap/sync/membership.py\n@@ -39,11 +39,17 @@\n if not ak_group:\n continue\n \n+ membership_mapping_attribute = LDAP_DISTINGUISHED_NAME\n+ if self._source.group_membership_field == \"memberUid\":\n+ # If memberships are based on the posixGroup's 'memberUid'\n+ # attribute we use the RDN instead of the FDN to lookup members.\n+ membership_mapping_attribute = LDAP_UNIQUENESS\n+\n users = User.objects.filter(\n- Q(**{f\"attributes__{LDAP_DISTINGUISHED_NAME}__in\": members})\n+ Q(**{f\"attributes__{membership_mapping_attribute}__in\": members})\n | Q(\n **{\n- f\"attributes__{LDAP_DISTINGUISHED_NAME}__isnull\": True,\n+ f\"attributes__{membership_mapping_attribute}__isnull\": True,\n \"ak_groups__in\": [ak_group],\n }\n )\n", "issue": "ldap group membership not updated\n**Is your feature request related to a problem? Please describe.**\r\nThe ldap syncing code assumes group membership is based on DN.\r\nIn our ldap tree this is unfortunately not the case. Instead the group membership is based on CN.\r\nSo while users and groups are synced, the group memberships are not established in authentik's database.\r\n\r\n**Describe the solution you'd like**\r\nI need the 'group membership field' to be matched against the CN of a user instead of it's fully qualified DN.\r\n\r\n**Additional context**\r\nExample pseudo ldif for groups:\r\n```\r\ndn: cn=group-1,ou=groups,o=example,c=ch\r\nobjectClass: posixGroup\r\ncn: group-1\r\nmemberUid: user-1\r\nmemberUid: user-2\r\nmemberUid: user-3\r\n\r\ndn: cn=group-2,ou=groups,o=example,c=ch\r\nobjectClass: posixGroup\r\ncn: group-2\r\nmemberUid: user-1\r\nmemberUid: user-2\r\n```\r\nAnd for users:\r\n\r\n```\r\ndn: cn=user-1,ou=users,o=example,c=ch\r\nobjectClass: posixAccount\r\ncn: user-1\r\nou: group-1\r\nou: group-2\r\n\r\ndn: cn=user-2,ou=users,o=example,c=ch\r\nobjectClass: posixAccount\r\ncn: user-2\r\nou: group-1\r\nou: group-2\r\n\r\ndn: cn=user-3,ou=users,o=example,c=ch\r\nobjectClass: posixAccount\r\ncn: user-3\r\nou: group-1\r\n```\r\n\r\nIf think for the current authentik sync code to work the groups would have to look like this:\r\n```\r\ndn: cn=group-1,ou=groups,o=example,c=ch\r\nobjectClass: posixGroup\r\ncn: group-1\r\nmemberUid: user-1,ou=users,o=example,c=ch\r\nmemberUid: user-2,ou=users,o=example,c=ch\r\nmemberUid: user-3,ou=users,o=example,c=ch\r\n\r\ndn: cn=group-2,ou=groups,o=example,c=ch\r\nobjectClass: posixGroup\r\ncn: group-2\r\nmemberUid: user-1,ou=users,o=example,c=ch\r\nmemberUid: user-2,ou=users,o=example,c=ch\r\n```\r\n\r\nBut changing this is beyond my control.\r\n\r\nThis is the OpenLDAP directory tree of a large University. I'm not sure if this setup and therefor also the problem is common or unique to our deployment.\r\n\r\nI guess for the membership mapping to work I'd need the code that authentik uses to lookup group memberships in ./authentik/sources/ldap/sync/membership.py to look something like this:\r\n\r\n```\r\nLDAP_UNIQUENESS = \"ldap_uniq\"\r\n\r\n users = User.objects.filter(\r\n Q(**{f\"attributes__{LDAP_UNIQUENESS}__in\": members})\r\n | Q(\r\n **{\r\n f\"attributes__{LDAP_UNIQUENESS}__isnull\": True,\r\n \"ak_groups__in\": [ak_group],\r\n }\r\n )\r\n )\r\n\r\n```\r\n\r\nInstead of the current:\r\n\r\n```\r\nLDAP_DISTINGUISHED_NAME = \"distinguishedName\"\r\n\r\n users = User.objects.filter(\r\n Q(**{f\"attributes__{LDAP_DISTINGUISHED_NAME}__in\": members})\r\n | Q(\r\n **{\r\n f\"attributes__{LDAP_DISTINGUISHED_NAME}__isnull\": True,\r\n \"ak_groups__in\": [ak_group],\r\n }\r\n )\r\n )\r\n\r\n```\r\n\r\nI'm happy to provide a PR after discussing the right approach on how to make this configurable.\nldap group membership not updated\n**Is your feature request related to a problem? Please describe.**\r\nThe ldap syncing code assumes group membership is based on DN.\r\nIn our ldap tree this is unfortunately not the case. Instead the group membership is based on CN.\r\nSo while users and groups are synced, the group memberships are not established in authentik's database.\r\n\r\n**Describe the solution you'd like**\r\nI need the 'group membership field' to be matched against the CN of a user instead of it's fully qualified DN.\r\n\r\n**Additional context**\r\nExample pseudo ldif for groups:\r\n```\r\ndn: cn=group-1,ou=groups,o=example,c=ch\r\nobjectClass: posixGroup\r\ncn: group-1\r\nmemberUid: user-1\r\nmemberUid: user-2\r\nmemberUid: user-3\r\n\r\ndn: cn=group-2,ou=groups,o=example,c=ch\r\nobjectClass: posixGroup\r\ncn: group-2\r\nmemberUid: user-1\r\nmemberUid: user-2\r\n```\r\nAnd for users:\r\n\r\n```\r\ndn: cn=user-1,ou=users,o=example,c=ch\r\nobjectClass: posixAccount\r\ncn: user-1\r\nou: group-1\r\nou: group-2\r\n\r\ndn: cn=user-2,ou=users,o=example,c=ch\r\nobjectClass: posixAccount\r\ncn: user-2\r\nou: group-1\r\nou: group-2\r\n\r\ndn: cn=user-3,ou=users,o=example,c=ch\r\nobjectClass: posixAccount\r\ncn: user-3\r\nou: group-1\r\n```\r\n\r\nIf think for the current authentik sync code to work the groups would have to look like this:\r\n```\r\ndn: cn=group-1,ou=groups,o=example,c=ch\r\nobjectClass: posixGroup\r\ncn: group-1\r\nmemberUid: user-1,ou=users,o=example,c=ch\r\nmemberUid: user-2,ou=users,o=example,c=ch\r\nmemberUid: user-3,ou=users,o=example,c=ch\r\n\r\ndn: cn=group-2,ou=groups,o=example,c=ch\r\nobjectClass: posixGroup\r\ncn: group-2\r\nmemberUid: user-1,ou=users,o=example,c=ch\r\nmemberUid: user-2,ou=users,o=example,c=ch\r\n```\r\n\r\nBut changing this is beyond my control.\r\n\r\nThis is the OpenLDAP directory tree of a large University. I'm not sure if this setup and therefor also the problem is common or unique to our deployment.\r\n\r\nI guess for the membership mapping to work I'd need the code that authentik uses to lookup group memberships in ./authentik/sources/ldap/sync/membership.py to look something like this:\r\n\r\n```\r\nLDAP_UNIQUENESS = \"ldap_uniq\"\r\n\r\n users = User.objects.filter(\r\n Q(**{f\"attributes__{LDAP_UNIQUENESS}__in\": members})\r\n | Q(\r\n **{\r\n f\"attributes__{LDAP_UNIQUENESS}__isnull\": True,\r\n \"ak_groups__in\": [ak_group],\r\n }\r\n )\r\n )\r\n\r\n```\r\n\r\nInstead of the current:\r\n\r\n```\r\nLDAP_DISTINGUISHED_NAME = \"distinguishedName\"\r\n\r\n users = User.objects.filter(\r\n Q(**{f\"attributes__{LDAP_DISTINGUISHED_NAME}__in\": members})\r\n | Q(\r\n **{\r\n f\"attributes__{LDAP_DISTINGUISHED_NAME}__isnull\": True,\r\n \"ak_groups__in\": [ak_group],\r\n }\r\n )\r\n )\r\n\r\n```\r\n\r\nI'm happy to provide a PR after discussing the right approach on how to make this configurable.\n", "before_files": [{"content": "\"\"\"Sync LDAP Users and groups into authentik\"\"\"\nfrom typing import Any, Optional\n\nimport ldap3\nimport ldap3.core.exceptions\nfrom django.db.models import Q\n\nfrom authentik.core.models import Group, User\nfrom authentik.sources.ldap.auth import LDAP_DISTINGUISHED_NAME\nfrom authentik.sources.ldap.models import LDAPSource\nfrom authentik.sources.ldap.sync.base import LDAP_UNIQUENESS, BaseLDAPSynchronizer\n\n\nclass MembershipLDAPSynchronizer(BaseLDAPSynchronizer):\n \"\"\"Sync LDAP Users and groups into authentik\"\"\"\n\n group_cache: dict[str, Group]\n\n def __init__(self, source: LDAPSource):\n super().__init__(source)\n self.group_cache: dict[str, Group] = {}\n\n def sync(self) -> int:\n \"\"\"Iterate over all Users and assign Groups using memberOf Field\"\"\"\n groups = self._source.connection.extend.standard.paged_search(\n search_base=self.base_dn_groups,\n search_filter=self._source.group_object_filter,\n search_scope=ldap3.SUBTREE,\n attributes=[\n self._source.group_membership_field,\n self._source.object_uniqueness_field,\n LDAP_DISTINGUISHED_NAME,\n ],\n )\n membership_count = 0\n for group in groups:\n members = group.get(\"attributes\", {}).get(self._source.group_membership_field, [])\n ak_group = self.get_group(group)\n if not ak_group:\n continue\n\n users = User.objects.filter(\n Q(**{f\"attributes__{LDAP_DISTINGUISHED_NAME}__in\": members})\n | Q(\n **{\n f\"attributes__{LDAP_DISTINGUISHED_NAME}__isnull\": True,\n \"ak_groups__in\": [ak_group],\n }\n )\n )\n membership_count += 1\n membership_count += users.count()\n ak_group.users.set(users)\n ak_group.save()\n self._logger.debug(\"Successfully updated group membership\")\n return membership_count\n\n def get_group(self, group_dict: dict[str, Any]) -> Optional[Group]:\n \"\"\"Check if we fetched the group already, and if not cache it for later\"\"\"\n group_dn = group_dict.get(\"attributes\", {}).get(LDAP_DISTINGUISHED_NAME, [])\n group_uniq = group_dict.get(\"attributes\", {}).get(self._source.object_uniqueness_field, [])\n # group_uniq might be a single string or an array with (hopefully) a single string\n if isinstance(group_uniq, list):\n if len(group_uniq) < 1:\n self.message(\n f\"Group does not have a uniqueness attribute: '{group_dn}'\",\n group=group_dn,\n )\n return None\n group_uniq = group_uniq[0]\n if group_uniq not in self.group_cache:\n groups = Group.objects.filter(**{f\"attributes__{LDAP_UNIQUENESS}\": group_uniq})\n if not groups.exists():\n self.message(\n f\"Group does not exist in our DB yet, run sync_groups first: '{group_dn}'\",\n group=group_dn,\n )\n return None\n self.group_cache[group_uniq] = groups.first()\n return self.group_cache[group_uniq]\n", "path": "authentik/sources/ldap/sync/membership.py"}], "after_files": [{"content": "\"\"\"Sync LDAP Users and groups into authentik\"\"\"\nfrom typing import Any, Optional\n\nimport ldap3\nimport ldap3.core.exceptions\nfrom django.db.models import Q\n\nfrom authentik.core.models import Group, User\nfrom authentik.sources.ldap.auth import LDAP_DISTINGUISHED_NAME\nfrom authentik.sources.ldap.models import LDAPSource\nfrom authentik.sources.ldap.sync.base import LDAP_UNIQUENESS, BaseLDAPSynchronizer\n\n\nclass MembershipLDAPSynchronizer(BaseLDAPSynchronizer):\n \"\"\"Sync LDAP Users and groups into authentik\"\"\"\n\n group_cache: dict[str, Group]\n\n def __init__(self, source: LDAPSource):\n super().__init__(source)\n self.group_cache: dict[str, Group] = {}\n\n def sync(self) -> int:\n \"\"\"Iterate over all Users and assign Groups using memberOf Field\"\"\"\n groups = self._source.connection.extend.standard.paged_search(\n search_base=self.base_dn_groups,\n search_filter=self._source.group_object_filter,\n search_scope=ldap3.SUBTREE,\n attributes=[\n self._source.group_membership_field,\n self._source.object_uniqueness_field,\n LDAP_DISTINGUISHED_NAME,\n ],\n )\n membership_count = 0\n for group in groups:\n members = group.get(\"attributes\", {}).get(self._source.group_membership_field, [])\n ak_group = self.get_group(group)\n if not ak_group:\n continue\n\n membership_mapping_attribute = LDAP_DISTINGUISHED_NAME\n if self._source.group_membership_field == \"memberUid\":\n # If memberships are based on the posixGroup's 'memberUid'\n # attribute we use the RDN instead of the FDN to lookup members.\n membership_mapping_attribute = LDAP_UNIQUENESS\n\n users = User.objects.filter(\n Q(**{f\"attributes__{membership_mapping_attribute}__in\": members})\n | Q(\n **{\n f\"attributes__{membership_mapping_attribute}__isnull\": True,\n \"ak_groups__in\": [ak_group],\n }\n )\n )\n membership_count += 1\n membership_count += users.count()\n ak_group.users.set(users)\n ak_group.save()\n self._logger.debug(\"Successfully updated group membership\")\n return membership_count\n\n def get_group(self, group_dict: dict[str, Any]) -> Optional[Group]:\n \"\"\"Check if we fetched the group already, and if not cache it for later\"\"\"\n group_dn = group_dict.get(\"attributes\", {}).get(LDAP_DISTINGUISHED_NAME, [])\n group_uniq = group_dict.get(\"attributes\", {}).get(self._source.object_uniqueness_field, [])\n # group_uniq might be a single string or an array with (hopefully) a single string\n if isinstance(group_uniq, list):\n if len(group_uniq) < 1:\n self.message(\n f\"Group does not have a uniqueness attribute: '{group_dn}'\",\n group=group_dn,\n )\n return None\n group_uniq = group_uniq[0]\n if group_uniq not in self.group_cache:\n groups = Group.objects.filter(**{f\"attributes__{LDAP_UNIQUENESS}\": group_uniq})\n if not groups.exists():\n self.message(\n f\"Group does not exist in our DB yet, run sync_groups first: '{group_dn}'\",\n group=group_dn,\n )\n return None\n self.group_cache[group_uniq] = groups.first()\n return self.group_cache[group_uniq]\n", "path": "authentik/sources/ldap/sync/membership.py"}]}
2,698
265
gh_patches_debug_27085
rasdani/github-patches
git_diff
fossasia__open-event-server-2825
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Event does not show up on "manage events" page when it is a copy When the user copies an event and edits it, it does not show up on the event management page. ![screenshot from 2017-01-01 14-01-06](https://cloud.githubusercontent.com/assets/1583873/21581272/d8591f9e-d02a-11e6-8325-2c707636ee5d.png) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `app/helpers/wizard/clone.py` Content: ``` 1 from sqlalchemy.orm import make_transient 2 3 from app.helpers.data import save_to_db 4 from app.helpers.data_getter import DataGetter 5 from app.models import db 6 7 8 def clone_row(row, event_id=None): 9 db.session.expunge(row) 10 make_transient(row) 11 row.id = None 12 if event_id: 13 row.event_id = event_id 14 save_to_db(row) 15 db.session.flush() 16 return row 17 18 19 def create_event_copy(event_id): 20 old_event = DataGetter.get_event(event_id) 21 event = clone_row(old_event) 22 event.name = "Copy of " + event.name 23 event.state = "Draft" 24 save_to_db(event) 25 26 sponsors_old = DataGetter.get_sponsors(event_id).all() 27 tracks_old = DataGetter.get_tracks(event_id).all() 28 microlocations_old = DataGetter.get_microlocations(event_id).all() 29 call_for_paper_old = DataGetter.get_call_for_papers(event_id).first() 30 social_links = DataGetter.get_social_links_by_event_id(event_id).all() 31 custom_forms = DataGetter.get_custom_form_elements(event_id) 32 33 for social_link in social_links: 34 clone_row(social_link, event.id) 35 36 for sponsor in sponsors_old: 37 clone_row(sponsor, event.id) 38 39 for track in tracks_old: 40 clone_row(track, event.id) 41 42 for microlocation in microlocations_old: 43 clone_row(microlocation, event.id) 44 45 if call_for_paper_old: 46 clone_row(call_for_paper_old, event.id) 47 48 if custom_forms: 49 clone_row(custom_forms, event.id) 50 51 return event 52 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/app/helpers/wizard/clone.py b/app/helpers/wizard/clone.py --- a/app/helpers/wizard/clone.py +++ b/app/helpers/wizard/clone.py @@ -1,8 +1,13 @@ from sqlalchemy.orm import make_transient +from flask.ext import login from app.helpers.data import save_to_db from app.helpers.data_getter import DataGetter from app.models import db +from app.models.users_events_roles import UsersEventsRoles +from app.models.role import Role +from app.models.email_notifications import EmailNotification +from app.models.user import ORGANIZER def clone_row(row, event_id=None): @@ -23,6 +28,17 @@ event.state = "Draft" save_to_db(event) + role = Role.query.filter_by(name=ORGANIZER).first() + uer = UsersEventsRoles(login.current_user, event, role) + if save_to_db(uer, "Event saved"): + new_email_notification_setting = EmailNotification(next_event=1, + new_paper=1, + session_schedule=1, + session_accept_reject=1, + user_id=login.current_user.id, + event_id=event.id) + save_to_db(new_email_notification_setting, "EmailSetting Saved") + sponsors_old = DataGetter.get_sponsors(event_id).all() tracks_old = DataGetter.get_tracks(event_id).all() microlocations_old = DataGetter.get_microlocations(event_id).all()
{"golden_diff": "diff --git a/app/helpers/wizard/clone.py b/app/helpers/wizard/clone.py\n--- a/app/helpers/wizard/clone.py\n+++ b/app/helpers/wizard/clone.py\n@@ -1,8 +1,13 @@\n from sqlalchemy.orm import make_transient\n+from flask.ext import login\n \n from app.helpers.data import save_to_db\n from app.helpers.data_getter import DataGetter\n from app.models import db\n+from app.models.users_events_roles import UsersEventsRoles\n+from app.models.role import Role\n+from app.models.email_notifications import EmailNotification\n+from app.models.user import ORGANIZER\n \n \n def clone_row(row, event_id=None):\n@@ -23,6 +28,17 @@\n event.state = \"Draft\"\n save_to_db(event)\n \n+ role = Role.query.filter_by(name=ORGANIZER).first()\n+ uer = UsersEventsRoles(login.current_user, event, role)\n+ if save_to_db(uer, \"Event saved\"):\n+ new_email_notification_setting = EmailNotification(next_event=1,\n+ new_paper=1,\n+ session_schedule=1,\n+ session_accept_reject=1,\n+ user_id=login.current_user.id,\n+ event_id=event.id)\n+ save_to_db(new_email_notification_setting, \"EmailSetting Saved\")\n+\n sponsors_old = DataGetter.get_sponsors(event_id).all()\n tracks_old = DataGetter.get_tracks(event_id).all()\n microlocations_old = DataGetter.get_microlocations(event_id).all()\n", "issue": "Event does not show up on \"manage events\" page when it is a copy\nWhen the user copies an event and edits it, it does not show up on the event management page.\r\n\r\n![screenshot from 2017-01-01 14-01-06](https://cloud.githubusercontent.com/assets/1583873/21581272/d8591f9e-d02a-11e6-8325-2c707636ee5d.png)\r\n\n", "before_files": [{"content": "from sqlalchemy.orm import make_transient\n\nfrom app.helpers.data import save_to_db\nfrom app.helpers.data_getter import DataGetter\nfrom app.models import db\n\n\ndef clone_row(row, event_id=None):\n db.session.expunge(row)\n make_transient(row)\n row.id = None\n if event_id:\n row.event_id = event_id\n save_to_db(row)\n db.session.flush()\n return row\n\n\ndef create_event_copy(event_id):\n old_event = DataGetter.get_event(event_id)\n event = clone_row(old_event)\n event.name = \"Copy of \" + event.name\n event.state = \"Draft\"\n save_to_db(event)\n\n sponsors_old = DataGetter.get_sponsors(event_id).all()\n tracks_old = DataGetter.get_tracks(event_id).all()\n microlocations_old = DataGetter.get_microlocations(event_id).all()\n call_for_paper_old = DataGetter.get_call_for_papers(event_id).first()\n social_links = DataGetter.get_social_links_by_event_id(event_id).all()\n custom_forms = DataGetter.get_custom_form_elements(event_id)\n\n for social_link in social_links:\n clone_row(social_link, event.id)\n\n for sponsor in sponsors_old:\n clone_row(sponsor, event.id)\n\n for track in tracks_old:\n clone_row(track, event.id)\n\n for microlocation in microlocations_old:\n clone_row(microlocation, event.id)\n\n if call_for_paper_old:\n clone_row(call_for_paper_old, event.id)\n\n if custom_forms:\n clone_row(custom_forms, event.id)\n\n return event\n", "path": "app/helpers/wizard/clone.py"}], "after_files": [{"content": "from sqlalchemy.orm import make_transient\nfrom flask.ext import login\n\nfrom app.helpers.data import save_to_db\nfrom app.helpers.data_getter import DataGetter\nfrom app.models import db\nfrom app.models.users_events_roles import UsersEventsRoles\nfrom app.models.role import Role\nfrom app.models.email_notifications import EmailNotification\nfrom app.models.user import ORGANIZER\n\n\ndef clone_row(row, event_id=None):\n db.session.expunge(row)\n make_transient(row)\n row.id = None\n if event_id:\n row.event_id = event_id\n save_to_db(row)\n db.session.flush()\n return row\n\n\ndef create_event_copy(event_id):\n old_event = DataGetter.get_event(event_id)\n event = clone_row(old_event)\n event.name = \"Copy of \" + event.name\n event.state = \"Draft\"\n save_to_db(event)\n\n role = Role.query.filter_by(name=ORGANIZER).first()\n uer = UsersEventsRoles(login.current_user, event, role)\n if save_to_db(uer, \"Event saved\"):\n new_email_notification_setting = EmailNotification(next_event=1,\n new_paper=1,\n session_schedule=1,\n session_accept_reject=1,\n user_id=login.current_user.id,\n event_id=event.id)\n save_to_db(new_email_notification_setting, \"EmailSetting Saved\")\n\n sponsors_old = DataGetter.get_sponsors(event_id).all()\n tracks_old = DataGetter.get_tracks(event_id).all()\n microlocations_old = DataGetter.get_microlocations(event_id).all()\n call_for_paper_old = DataGetter.get_call_for_papers(event_id).first()\n social_links = DataGetter.get_social_links_by_event_id(event_id).all()\n custom_forms = DataGetter.get_custom_form_elements(event_id)\n\n for social_link in social_links:\n clone_row(social_link, event.id)\n\n for sponsor in sponsors_old:\n clone_row(sponsor, event.id)\n\n for track in tracks_old:\n clone_row(track, event.id)\n\n for microlocation in microlocations_old:\n clone_row(microlocation, event.id)\n\n if call_for_paper_old:\n clone_row(call_for_paper_old, event.id)\n\n if custom_forms:\n clone_row(custom_forms, event.id)\n\n return event\n", "path": "app/helpers/wizard/clone.py"}]}
825
324
gh_patches_debug_6751
rasdani/github-patches
git_diff
mne-tools__mne-bids-1135
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- doc build fails: "tut-ieeg-localize" moved Our doc build is failing with a warning because the reference to `tut-ieeg-localize` cannot be found anymore. Was this something in MNE-Python that has now moved? @alexrockhill do you know more? :-) > /home/circleci/project/doc/auto_examples/convert_ieeg_to_bids.rst:1083: WARNING: undefined label: 'tut-ieeg-localize' https://app.circleci.com/pipelines/github/mne-tools/mne-bids/5179/workflows/d0b5cc9b-cda4-436b-9e96-1edeea651d21/jobs/7347 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `doc/conf.py` Content: ``` 1 """Configure details for documentation with sphinx.""" 2 import os 3 import sys 4 from datetime import date 5 6 import sphinx_gallery # noqa: F401 7 from sphinx_gallery.sorting import ExampleTitleSortKey 8 9 import mne_bids 10 11 12 # If extensions (or modules to document with autodoc) are in another directory, 13 # add these directories to sys.path here. If the directory is relative to the 14 # documentation root, use os.path.abspath to make it absolute, like shown here. 15 curdir = os.path.dirname(__file__) 16 sys.path.append(os.path.abspath(os.path.join(curdir, '..', 'mne_bids'))) 17 sys.path.append(os.path.abspath(os.path.join(curdir, 'sphinxext'))) 18 19 20 # -- General configuration ------------------------------------------------ 21 22 # If your documentation needs a minimal Sphinx version, state it here. 23 # 24 # needs_sphinx = '1.0' 25 26 # Add any Sphinx extension module names here, as strings. They can be 27 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 28 # ones. 29 extensions = [ 30 'sphinx.ext.githubpages', 31 'sphinx.ext.autodoc', 32 'sphinx.ext.mathjax', 33 'sphinx.ext.viewcode', 34 'sphinx.ext.autosummary', 35 'sphinx.ext.doctest', 36 'sphinx.ext.intersphinx', 37 'sphinx_gallery.gen_gallery', 38 'numpydoc', 39 'sphinx_copybutton', 40 'gen_cli', # custom extension, see ./sphinxext/gen_cli.py 41 'gh_substitutions', # custom extension, see ./sphinxext/gh_substitutions.py 42 ] 43 44 # configure sphinx-copybutton 45 copybutton_prompt_text = r">>> |\.\.\. |\$ " 46 copybutton_prompt_is_regexp = True 47 48 # configure numpydoc 49 numpydoc_xref_param_type = True 50 numpydoc_class_members_toctree = False 51 numpydoc_attributes_as_param_list = True 52 numpydoc_xref_aliases = { 53 'BIDSPath': ':class:`BIDSPath <mne_bids.BIDSPath>`', 54 'path-like': ':term:`path-like <mne:path-like>`', 55 'array-like': ':term:`array_like <numpy:array_like>`', 56 'int': ':class:`int <python:int>`', 57 'bool': ':class:`bool <python:bool>`', 58 'float': ':class:`float <python:float>`', 59 'list': ':class:`list <python:list>`', 60 'tuple': ':class:`tuple <python:tuple>`', 61 'NibabelImageObject': 'nibabel.spatialimages.SpatialImage', 62 } 63 numpydoc_xref_ignore = { 64 # words 65 'instance', 'instances', 'of' 66 } 67 68 69 # generate autosummary even if no references 70 autosummary_generate = True 71 autodoc_default_options = {'inherited-members': None} 72 default_role = 'autolink' # XXX silently allows bad syntax, someone should fix 73 74 # configure linkcheck 75 # https://sphinx-doc.org/en/master/usage/configuration.html?#options-for-the-linkcheck-builder 76 linkcheck_retries = 2 77 linkcheck_rate_limit_timeout = 15.0 78 linkcheck_ignore = [ 79 r'https://www.researchgate.net/profile/.*', 80 ] 81 82 # The suffix(es) of source filenames. 83 # You can specify multiple suffix as a list of string: 84 # 85 # source_suffix = ['.rst', '.md'] 86 source_suffix = '.rst' 87 88 # The master toctree document. 89 master_doc = 'index' 90 91 # General information about the project. 92 project = u'MNE-BIDS' 93 td = date.today() 94 copyright = u'2017-%s, MNE Developers. Last updated on %s' % (td.year, 95 td.isoformat()) 96 97 author = u'MNE Developers' 98 99 # The version info for the project you're documenting, acts as replacement for 100 # |version| and |release|, also used in various other places throughout the 101 # built documents. 102 # 103 # The short X.Y version. 104 version = mne_bids.__version__ 105 # The full version, including alpha/beta/rc tags. 106 release = version 107 108 # List of patterns, relative to source directory, that match files and 109 # directories to ignore when looking for source files. 110 # This patterns also effect to html_static_path and html_extra_path 111 exclude_patterns = ['auto_examples/index.rst', '_build', 'Thumbs.db', 112 '.DS_Store'] 113 114 # HTML options (e.g., theme) 115 html_show_sourcelink = False 116 html_copy_source = False 117 118 html_theme = 'pydata_sphinx_theme' 119 120 # Add any paths that contain templates here, relative to this directory. 121 templates_path = ['_templates'] 122 html_static_path = ['_static'] 123 html_css_files = ['style.css'] 124 125 # Theme options are theme-specific and customize the look and feel of a theme 126 # further. For a list of options available for each theme, see the 127 # documentation. 128 html_theme_options = { 129 'icon_links': [ 130 dict(name='GitHub', 131 url='https://github.com/mne-tools/mne-bids', 132 icon='fab fa-github-square'), 133 dict(name='Discourse', 134 url='https://mne.discourse.group/tags/mne-bids', 135 icon='fab fa-discourse'), 136 ], 137 'icon_links_label': 'Quick Links', # for screen reader 138 'use_edit_page_button': False, 139 'navigation_with_keys': False, 140 'show_toc_level': 1, 141 'navbar_end': ['version-switcher', 'navbar-icon-links'], 142 'analytics': dict(google_analytics_id='G-C8SH9E98QC'), 143 } 144 145 html_context = { 146 'versions_dropdown': { 147 'dev': 'v0.13 (devel)', 148 'stable': 'v0.12 (stable)', 149 'v0.11': 'v0.11', 150 'v0.10': 'v0.10', 151 'v0.9': 'v0.9', 152 'v0.8': 'v0.8', 153 'v0.7': 'v0.7', 154 'v0.6': 'v0.6', 155 'v0.5': 'v0.5', 156 'v0.4': 'v0.4', 157 'v0.3': 'v0.3', 158 'v0.2': 'v0.2', 159 'v0.1': 'v0.1', 160 }, 161 } 162 163 html_sidebars = {} 164 165 # Example configuration for intersphinx: refer to the Python standard library. 166 intersphinx_mapping = { 167 'python': ('https://docs.python.org/3', None), 168 'mne': ('https://mne.tools/dev', None), 169 'numpy': ('https://numpy.org/devdocs', None), 170 'scipy': ('https://scipy.github.io/devdocs', None), 171 'matplotlib': ('https://matplotlib.org', None), 172 'nilearn': ('http://nilearn.github.io/stable', None), 173 'pandas': ('https://pandas.pydata.org/pandas-docs/dev', None), 174 'nibabel': ('https://nipy.org/nibabel', None), 175 } 176 intersphinx_timeout = 5 177 178 # Resolve binder filepath_prefix. From the docs: 179 # "A prefix to append to the filepath in the Binder links. You should use this 180 # if you will store your built documentation in a sub-folder of a repository, 181 # instead of in the root." 182 # we will store dev docs in a `dev` subdirectory and all other docs in a 183 # directory "v" + version_str. E.g., "v0.3" 184 if 'dev' in version: 185 filepath_prefix = 'dev' 186 else: 187 filepath_prefix = 'v{}'.format(version) 188 189 sphinx_gallery_conf = { 190 'doc_module': 'mne_bids', 191 'reference_url': { 192 'mne_bids': None, 193 }, 194 'backreferences_dir': 'generated', 195 'examples_dirs': '../examples', 196 'within_subsection_order': ExampleTitleSortKey, 197 'gallery_dirs': 'auto_examples', 198 'filename_pattern': '^((?!sgskip).)*$', 199 'binder': { 200 # Required keys 201 'org': 'mne-tools', 202 'repo': 'mne-bids', 203 'branch': 'gh-pages', # noqa: E501 Can be any branch, tag, or commit hash. Use a branch that hosts your docs. 204 'binderhub_url': 'https://mybinder.org', # noqa: E501 Any URL of a binderhub deployment. Must be full URL (e.g. https://mybinder.org). 205 'filepath_prefix': filepath_prefix, # noqa: E501 A prefix to prepend to any filepaths in Binder links. 206 'dependencies': [ 207 '../test_requirements.txt', 208 './requirements.txt', 209 ], 210 } 211 } 212 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/doc/conf.py b/doc/conf.py --- a/doc/conf.py +++ b/doc/conf.py @@ -166,6 +166,7 @@ intersphinx_mapping = { 'python': ('https://docs.python.org/3', None), 'mne': ('https://mne.tools/dev', None), + 'mne-gui-addons': ('https://mne.tools/mne-gui-addons', None), 'numpy': ('https://numpy.org/devdocs', None), 'scipy': ('https://scipy.github.io/devdocs', None), 'matplotlib': ('https://matplotlib.org', None),
{"golden_diff": "diff --git a/doc/conf.py b/doc/conf.py\n--- a/doc/conf.py\n+++ b/doc/conf.py\n@@ -166,6 +166,7 @@\n intersphinx_mapping = {\n 'python': ('https://docs.python.org/3', None),\n 'mne': ('https://mne.tools/dev', None),\n+ 'mne-gui-addons': ('https://mne.tools/mne-gui-addons', None),\n 'numpy': ('https://numpy.org/devdocs', None),\n 'scipy': ('https://scipy.github.io/devdocs', None),\n 'matplotlib': ('https://matplotlib.org', None),\n", "issue": "doc build fails: \"tut-ieeg-localize\" moved\nOur doc build is failing with a warning because the reference to `tut-ieeg-localize` cannot be found anymore. Was this something in MNE-Python that has now moved? @alexrockhill do you know more? :-)\r\n\r\n> /home/circleci/project/doc/auto_examples/convert_ieeg_to_bids.rst:1083: WARNING: undefined label: 'tut-ieeg-localize'\r\n\r\nhttps://app.circleci.com/pipelines/github/mne-tools/mne-bids/5179/workflows/d0b5cc9b-cda4-436b-9e96-1edeea651d21/jobs/7347\n", "before_files": [{"content": "\"\"\"Configure details for documentation with sphinx.\"\"\"\nimport os\nimport sys\nfrom datetime import date\n\nimport sphinx_gallery # noqa: F401\nfrom sphinx_gallery.sorting import ExampleTitleSortKey\n\nimport mne_bids\n\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\ncurdir = os.path.dirname(__file__)\nsys.path.append(os.path.abspath(os.path.join(curdir, '..', 'mne_bids')))\nsys.path.append(os.path.abspath(os.path.join(curdir, 'sphinxext')))\n\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.githubpages',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.doctest',\n 'sphinx.ext.intersphinx',\n 'sphinx_gallery.gen_gallery',\n 'numpydoc',\n 'sphinx_copybutton',\n 'gen_cli', # custom extension, see ./sphinxext/gen_cli.py\n 'gh_substitutions', # custom extension, see ./sphinxext/gh_substitutions.py\n]\n\n# configure sphinx-copybutton\ncopybutton_prompt_text = r\">>> |\\.\\.\\. |\\$ \"\ncopybutton_prompt_is_regexp = True\n\n# configure numpydoc\nnumpydoc_xref_param_type = True\nnumpydoc_class_members_toctree = False\nnumpydoc_attributes_as_param_list = True\nnumpydoc_xref_aliases = {\n 'BIDSPath': ':class:`BIDSPath <mne_bids.BIDSPath>`',\n 'path-like': ':term:`path-like <mne:path-like>`',\n 'array-like': ':term:`array_like <numpy:array_like>`',\n 'int': ':class:`int <python:int>`',\n 'bool': ':class:`bool <python:bool>`',\n 'float': ':class:`float <python:float>`',\n 'list': ':class:`list <python:list>`',\n 'tuple': ':class:`tuple <python:tuple>`',\n 'NibabelImageObject': 'nibabel.spatialimages.SpatialImage',\n}\nnumpydoc_xref_ignore = {\n # words\n 'instance', 'instances', 'of'\n}\n\n\n# generate autosummary even if no references\nautosummary_generate = True\nautodoc_default_options = {'inherited-members': None}\ndefault_role = 'autolink' # XXX silently allows bad syntax, someone should fix\n\n# configure linkcheck\n# https://sphinx-doc.org/en/master/usage/configuration.html?#options-for-the-linkcheck-builder\nlinkcheck_retries = 2\nlinkcheck_rate_limit_timeout = 15.0\nlinkcheck_ignore = [\n r'https://www.researchgate.net/profile/.*',\n]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'MNE-BIDS'\ntd = date.today()\ncopyright = u'2017-%s, MNE Developers. Last updated on %s' % (td.year,\n td.isoformat())\n\nauthor = u'MNE Developers'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = mne_bids.__version__\n# The full version, including alpha/beta/rc tags.\nrelease = version\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = ['auto_examples/index.rst', '_build', 'Thumbs.db',\n '.DS_Store']\n\n# HTML options (e.g., theme)\nhtml_show_sourcelink = False\nhtml_copy_source = False\n\nhtml_theme = 'pydata_sphinx_theme'\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\nhtml_static_path = ['_static']\nhtml_css_files = ['style.css']\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\n 'icon_links': [\n dict(name='GitHub',\n url='https://github.com/mne-tools/mne-bids',\n icon='fab fa-github-square'),\n dict(name='Discourse',\n url='https://mne.discourse.group/tags/mne-bids',\n icon='fab fa-discourse'),\n ],\n 'icon_links_label': 'Quick Links', # for screen reader\n 'use_edit_page_button': False,\n 'navigation_with_keys': False,\n 'show_toc_level': 1,\n 'navbar_end': ['version-switcher', 'navbar-icon-links'],\n 'analytics': dict(google_analytics_id='G-C8SH9E98QC'),\n}\n\nhtml_context = {\n 'versions_dropdown': {\n 'dev': 'v0.13 (devel)',\n 'stable': 'v0.12 (stable)',\n 'v0.11': 'v0.11',\n 'v0.10': 'v0.10',\n 'v0.9': 'v0.9',\n 'v0.8': 'v0.8',\n 'v0.7': 'v0.7',\n 'v0.6': 'v0.6',\n 'v0.5': 'v0.5',\n 'v0.4': 'v0.4',\n 'v0.3': 'v0.3',\n 'v0.2': 'v0.2',\n 'v0.1': 'v0.1',\n },\n}\n\nhtml_sidebars = {}\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3', None),\n 'mne': ('https://mne.tools/dev', None),\n 'numpy': ('https://numpy.org/devdocs', None),\n 'scipy': ('https://scipy.github.io/devdocs', None),\n 'matplotlib': ('https://matplotlib.org', None),\n 'nilearn': ('http://nilearn.github.io/stable', None),\n 'pandas': ('https://pandas.pydata.org/pandas-docs/dev', None),\n 'nibabel': ('https://nipy.org/nibabel', None),\n}\nintersphinx_timeout = 5\n\n# Resolve binder filepath_prefix. From the docs:\n# \"A prefix to append to the filepath in the Binder links. You should use this\n# if you will store your built documentation in a sub-folder of a repository,\n# instead of in the root.\"\n# we will store dev docs in a `dev` subdirectory and all other docs in a\n# directory \"v\" + version_str. E.g., \"v0.3\"\nif 'dev' in version:\n filepath_prefix = 'dev'\nelse:\n filepath_prefix = 'v{}'.format(version)\n\nsphinx_gallery_conf = {\n 'doc_module': 'mne_bids',\n 'reference_url': {\n 'mne_bids': None,\n },\n 'backreferences_dir': 'generated',\n 'examples_dirs': '../examples',\n 'within_subsection_order': ExampleTitleSortKey,\n 'gallery_dirs': 'auto_examples',\n 'filename_pattern': '^((?!sgskip).)*$',\n 'binder': {\n # Required keys\n 'org': 'mne-tools',\n 'repo': 'mne-bids',\n 'branch': 'gh-pages', # noqa: E501 Can be any branch, tag, or commit hash. Use a branch that hosts your docs.\n 'binderhub_url': 'https://mybinder.org', # noqa: E501 Any URL of a binderhub deployment. Must be full URL (e.g. https://mybinder.org).\n 'filepath_prefix': filepath_prefix, # noqa: E501 A prefix to prepend to any filepaths in Binder links.\n 'dependencies': [\n '../test_requirements.txt',\n './requirements.txt',\n ],\n }\n}\n", "path": "doc/conf.py"}], "after_files": [{"content": "\"\"\"Configure details for documentation with sphinx.\"\"\"\nimport os\nimport sys\nfrom datetime import date\n\nimport sphinx_gallery # noqa: F401\nfrom sphinx_gallery.sorting import ExampleTitleSortKey\n\nimport mne_bids\n\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\ncurdir = os.path.dirname(__file__)\nsys.path.append(os.path.abspath(os.path.join(curdir, '..', 'mne_bids')))\nsys.path.append(os.path.abspath(os.path.join(curdir, 'sphinxext')))\n\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.githubpages',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.doctest',\n 'sphinx.ext.intersphinx',\n 'sphinx_gallery.gen_gallery',\n 'numpydoc',\n 'sphinx_copybutton',\n 'gen_cli', # custom extension, see ./sphinxext/gen_cli.py\n 'gh_substitutions', # custom extension, see ./sphinxext/gh_substitutions.py\n]\n\n# configure sphinx-copybutton\ncopybutton_prompt_text = r\">>> |\\.\\.\\. |\\$ \"\ncopybutton_prompt_is_regexp = True\n\n# configure numpydoc\nnumpydoc_xref_param_type = True\nnumpydoc_class_members_toctree = False\nnumpydoc_attributes_as_param_list = True\nnumpydoc_xref_aliases = {\n 'BIDSPath': ':class:`BIDSPath <mne_bids.BIDSPath>`',\n 'path-like': ':term:`path-like <mne:path-like>`',\n 'array-like': ':term:`array_like <numpy:array_like>`',\n 'int': ':class:`int <python:int>`',\n 'bool': ':class:`bool <python:bool>`',\n 'float': ':class:`float <python:float>`',\n 'list': ':class:`list <python:list>`',\n 'tuple': ':class:`tuple <python:tuple>`',\n 'NibabelImageObject': 'nibabel.spatialimages.SpatialImage',\n}\nnumpydoc_xref_ignore = {\n # words\n 'instance', 'instances', 'of'\n}\n\n\n# generate autosummary even if no references\nautosummary_generate = True\nautodoc_default_options = {'inherited-members': None}\ndefault_role = 'autolink' # XXX silently allows bad syntax, someone should fix\n\n# configure linkcheck\n# https://sphinx-doc.org/en/master/usage/configuration.html?#options-for-the-linkcheck-builder\nlinkcheck_retries = 2\nlinkcheck_rate_limit_timeout = 15.0\nlinkcheck_ignore = [\n r'https://www.researchgate.net/profile/.*',\n]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'MNE-BIDS'\ntd = date.today()\ncopyright = u'2017-%s, MNE Developers. Last updated on %s' % (td.year,\n td.isoformat())\n\nauthor = u'MNE Developers'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = mne_bids.__version__\n# The full version, including alpha/beta/rc tags.\nrelease = version\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = ['auto_examples/index.rst', '_build', 'Thumbs.db',\n '.DS_Store']\n\n# HTML options (e.g., theme)\nhtml_show_sourcelink = False\nhtml_copy_source = False\n\nhtml_theme = 'pydata_sphinx_theme'\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\nhtml_static_path = ['_static']\nhtml_css_files = ['style.css']\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\n 'icon_links': [\n dict(name='GitHub',\n url='https://github.com/mne-tools/mne-bids',\n icon='fab fa-github-square'),\n dict(name='Discourse',\n url='https://mne.discourse.group/tags/mne-bids',\n icon='fab fa-discourse'),\n ],\n 'icon_links_label': 'Quick Links', # for screen reader\n 'use_edit_page_button': False,\n 'navigation_with_keys': False,\n 'show_toc_level': 1,\n 'navbar_end': ['version-switcher', 'navbar-icon-links'],\n 'analytics': dict(google_analytics_id='G-C8SH9E98QC'),\n}\n\nhtml_context = {\n 'versions_dropdown': {\n 'dev': 'v0.13 (devel)',\n 'stable': 'v0.12 (stable)',\n 'v0.11': 'v0.11',\n 'v0.10': 'v0.10',\n 'v0.9': 'v0.9',\n 'v0.8': 'v0.8',\n 'v0.7': 'v0.7',\n 'v0.6': 'v0.6',\n 'v0.5': 'v0.5',\n 'v0.4': 'v0.4',\n 'v0.3': 'v0.3',\n 'v0.2': 'v0.2',\n 'v0.1': 'v0.1',\n },\n}\n\nhtml_sidebars = {}\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3', None),\n 'mne': ('https://mne.tools/dev', None),\n 'mne-gui-addons': ('https://mne.tools/mne-gui-addons', None),\n 'numpy': ('https://numpy.org/devdocs', None),\n 'scipy': ('https://scipy.github.io/devdocs', None),\n 'matplotlib': ('https://matplotlib.org', None),\n 'nilearn': ('http://nilearn.github.io/stable', None),\n 'pandas': ('https://pandas.pydata.org/pandas-docs/dev', None),\n 'nibabel': ('https://nipy.org/nibabel', None),\n}\nintersphinx_timeout = 5\n\n# Resolve binder filepath_prefix. From the docs:\n# \"A prefix to append to the filepath in the Binder links. You should use this\n# if you will store your built documentation in a sub-folder of a repository,\n# instead of in the root.\"\n# we will store dev docs in a `dev` subdirectory and all other docs in a\n# directory \"v\" + version_str. E.g., \"v0.3\"\nif 'dev' in version:\n filepath_prefix = 'dev'\nelse:\n filepath_prefix = 'v{}'.format(version)\n\nsphinx_gallery_conf = {\n 'doc_module': 'mne_bids',\n 'reference_url': {\n 'mne_bids': None,\n },\n 'backreferences_dir': 'generated',\n 'examples_dirs': '../examples',\n 'within_subsection_order': ExampleTitleSortKey,\n 'gallery_dirs': 'auto_examples',\n 'filename_pattern': '^((?!sgskip).)*$',\n 'binder': {\n # Required keys\n 'org': 'mne-tools',\n 'repo': 'mne-bids',\n 'branch': 'gh-pages', # noqa: E501 Can be any branch, tag, or commit hash. Use a branch that hosts your docs.\n 'binderhub_url': 'https://mybinder.org', # noqa: E501 Any URL of a binderhub deployment. Must be full URL (e.g. https://mybinder.org).\n 'filepath_prefix': filepath_prefix, # noqa: E501 A prefix to prepend to any filepaths in Binder links.\n 'dependencies': [\n '../test_requirements.txt',\n './requirements.txt',\n ],\n }\n}\n", "path": "doc/conf.py"}]}
2,882
139
gh_patches_debug_31040
rasdani/github-patches
git_diff
pyinstaller__pyinstaller-4372
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- distutils not included with latest virtualenv (16.4.0) This was already reported in #4031. The issue was closed without a fix so I'm creating this one. **With virtualenv 16.4.0, pyinstaller reports :** ``` 3583 INFO: distutils: retargeting to non-venv dir '/usr/lib64/python3.6/distutils/__init__.py' ``` and then during "Loading module hook" sequence, the `hook-distutils.py` is missing and distutils modules are not included into the final executable binary. When executing the binary the error is: ``` ModuleNotFoundError: No module named 'distutils' [10373] Failed to execute script <name here> ``` **With virtualenv 16.1.0, pyinstaller reports :** ``` 3157 INFO: Processing pre-find module path hook distutils 5053 INFO: Loading module hook "hook-distutils.py"... ``` and distutils modules are included into the final executable binary. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `PyInstaller/hooks/pre_find_module_path/hook-distutils.py` Content: ``` 1 #----------------------------------------------------------------------------- 2 # Copyright (c) 2005-2019, PyInstaller Development Team. 3 # 4 # Distributed under the terms of the GNU General Public License with exception 5 # for distributing bootloader. 6 # 7 # The full license is in the file COPYING.txt, distributed with this software. 8 #----------------------------------------------------------------------------- 9 10 """ 11 `distutils`-specific pre-find module path hook. 12 13 When run from within a venv (virtual environment), this hook changes the 14 `__path__` of the `distutils` package to that of the system-wide rather than 15 venv-specific `distutils` package. While the former is suitable for freezing, 16 the latter is intended for use _only_ from within venvs. 17 """ 18 19 20 import distutils 21 import os 22 23 from PyInstaller.utils.hooks import logger 24 25 26 def pre_find_module_path(api): 27 # Absolute path of the system-wide "distutils" package when run from within 28 # a venv or None otherwise. 29 distutils_dir = getattr(distutils, 'distutils_path', None) 30 if distutils_dir is not None: 31 # Find this package in its parent directory. 32 api.search_dirs = [os.path.dirname(distutils_dir)] 33 logger.info('distutils: retargeting to non-venv dir %r' % distutils_dir) 34 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/PyInstaller/hooks/pre_find_module_path/hook-distutils.py b/PyInstaller/hooks/pre_find_module_path/hook-distutils.py --- a/PyInstaller/hooks/pre_find_module_path/hook-distutils.py +++ b/PyInstaller/hooks/pre_find_module_path/hook-distutils.py @@ -1,11 +1,11 @@ -#----------------------------------------------------------------------------- +# ----------------------------------------------------------------------------- # Copyright (c) 2005-2019, PyInstaller Development Team. # # Distributed under the terms of the GNU General Public License with exception # for distributing bootloader. # # The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- +# ----------------------------------------------------------------------------- """ `distutils`-specific pre-find module path hook. @@ -26,8 +26,16 @@ def pre_find_module_path(api): # Absolute path of the system-wide "distutils" package when run from within # a venv or None otherwise. - distutils_dir = getattr(distutils, 'distutils_path', None) - if distutils_dir is not None: + + # opcode is not a virtualenv module, so we can use it to find the stdlib. + # Technique taken from virtualenv's "distutils" package detection at + # https://github.com/pypa/virtualenv/blob/16.3.0/virtualenv_embedded/distutils-init.py#L5 + import opcode + + system_module_path = os.path.normpath(os.path.dirname(opcode.__file__)) + loaded_module_path = os.path.normpath(os.path.dirname(distutils.__file__)) + if system_module_path != loaded_module_path: # Find this package in its parent directory. - api.search_dirs = [os.path.dirname(distutils_dir)] - logger.info('distutils: retargeting to non-venv dir %r' % distutils_dir) + api.search_dirs = [system_module_path] + logger.info('distutils: retargeting to non-venv dir %r', + system_module_path)
{"golden_diff": "diff --git a/PyInstaller/hooks/pre_find_module_path/hook-distutils.py b/PyInstaller/hooks/pre_find_module_path/hook-distutils.py\n--- a/PyInstaller/hooks/pre_find_module_path/hook-distutils.py\n+++ b/PyInstaller/hooks/pre_find_module_path/hook-distutils.py\n@@ -1,11 +1,11 @@\n-#-----------------------------------------------------------------------------\n+# -----------------------------------------------------------------------------\n # Copyright (c) 2005-2019, PyInstaller Development Team.\n #\n # Distributed under the terms of the GNU General Public License with exception\n # for distributing bootloader.\n #\n # The full license is in the file COPYING.txt, distributed with this software.\n-#-----------------------------------------------------------------------------\n+# -----------------------------------------------------------------------------\n \n \"\"\"\n `distutils`-specific pre-find module path hook.\n@@ -26,8 +26,16 @@\n def pre_find_module_path(api):\n # Absolute path of the system-wide \"distutils\" package when run from within\n # a venv or None otherwise.\n- distutils_dir = getattr(distutils, 'distutils_path', None)\n- if distutils_dir is not None:\n+\n+ # opcode is not a virtualenv module, so we can use it to find the stdlib.\n+ # Technique taken from virtualenv's \"distutils\" package detection at\n+ # https://github.com/pypa/virtualenv/blob/16.3.0/virtualenv_embedded/distutils-init.py#L5\n+ import opcode\n+\n+ system_module_path = os.path.normpath(os.path.dirname(opcode.__file__))\n+ loaded_module_path = os.path.normpath(os.path.dirname(distutils.__file__))\n+ if system_module_path != loaded_module_path:\n # Find this package in its parent directory.\n- api.search_dirs = [os.path.dirname(distutils_dir)]\n- logger.info('distutils: retargeting to non-venv dir %r' % distutils_dir)\n+ api.search_dirs = [system_module_path]\n+ logger.info('distutils: retargeting to non-venv dir %r',\n+ system_module_path)\n", "issue": "distutils not included with latest virtualenv (16.4.0)\nThis was already reported in #4031. The issue was closed without a fix so I'm creating this one.\r\n\r\n**With virtualenv 16.4.0, pyinstaller reports :**\r\n\r\n```\r\n3583 INFO: distutils: retargeting to non-venv dir '/usr/lib64/python3.6/distutils/__init__.py'\r\n```\r\nand then during \"Loading module hook\" sequence, the `hook-distutils.py` is missing and distutils modules are not included into the final executable binary.\r\n\r\nWhen executing the binary the error is:\r\n\r\n```\r\nModuleNotFoundError: No module named 'distutils'\r\n[10373] Failed to execute script <name here>\r\n```\r\n\r\n**With virtualenv 16.1.0, pyinstaller reports :**\r\n\r\n```\r\n3157 INFO: Processing pre-find module path hook distutils\r\n5053 INFO: Loading module hook \"hook-distutils.py\"...\r\n```\r\n\r\nand distutils modules are included into the final executable binary.\r\n\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2019, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n\"\"\"\n`distutils`-specific pre-find module path hook.\n\nWhen run from within a venv (virtual environment), this hook changes the\n`__path__` of the `distutils` package to that of the system-wide rather than\nvenv-specific `distutils` package. While the former is suitable for freezing,\nthe latter is intended for use _only_ from within venvs.\n\"\"\"\n\n\nimport distutils\nimport os\n\nfrom PyInstaller.utils.hooks import logger\n\n\ndef pre_find_module_path(api):\n # Absolute path of the system-wide \"distutils\" package when run from within\n # a venv or None otherwise.\n distutils_dir = getattr(distutils, 'distutils_path', None)\n if distutils_dir is not None:\n # Find this package in its parent directory.\n api.search_dirs = [os.path.dirname(distutils_dir)]\n logger.info('distutils: retargeting to non-venv dir %r' % distutils_dir)\n", "path": "PyInstaller/hooks/pre_find_module_path/hook-distutils.py"}], "after_files": [{"content": "# -----------------------------------------------------------------------------\n# Copyright (c) 2005-2019, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n# -----------------------------------------------------------------------------\n\n\"\"\"\n`distutils`-specific pre-find module path hook.\n\nWhen run from within a venv (virtual environment), this hook changes the\n`__path__` of the `distutils` package to that of the system-wide rather than\nvenv-specific `distutils` package. While the former is suitable for freezing,\nthe latter is intended for use _only_ from within venvs.\n\"\"\"\n\n\nimport distutils\nimport os\n\nfrom PyInstaller.utils.hooks import logger\n\n\ndef pre_find_module_path(api):\n # Absolute path of the system-wide \"distutils\" package when run from within\n # a venv or None otherwise.\n\n # opcode is not a virtualenv module, so we can use it to find the stdlib.\n # Technique taken from virtualenv's \"distutils\" package detection at\n # https://github.com/pypa/virtualenv/blob/16.3.0/virtualenv_embedded/distutils-init.py#L5\n import opcode\n\n system_module_path = os.path.normpath(os.path.dirname(opcode.__file__))\n loaded_module_path = os.path.normpath(os.path.dirname(distutils.__file__))\n if system_module_path != loaded_module_path:\n # Find this package in its parent directory.\n api.search_dirs = [system_module_path]\n logger.info('distutils: retargeting to non-venv dir %r',\n system_module_path)\n", "path": "PyInstaller/hooks/pre_find_module_path/hook-distutils.py"}]}
835
444
gh_patches_debug_14392
rasdani/github-patches
git_diff
pre-commit__pre-commit-216
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- pre-commit potentially uses the wrong `virtualenv` when building environments It should use `sys.executable, '-m', 'virtualenv'` instead of `'virtualenv'` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pre_commit/languages/python.py` Content: ``` 1 from __future__ import unicode_literals 2 3 import contextlib 4 import distutils.spawn 5 import os 6 7 import virtualenv 8 9 from pre_commit.languages import helpers 10 from pre_commit.util import clean_path_on_failure 11 12 13 ENVIRONMENT_DIR = 'py_env' 14 15 16 class PythonEnv(helpers.Environment): 17 @property 18 def env_prefix(self): 19 return ". '{{prefix}}{0}activate' &&".format( 20 virtualenv.path_locations( 21 ENVIRONMENT_DIR, 22 )[-1].rstrip(os.sep) + os.sep, 23 'activate', 24 ) 25 26 27 @contextlib.contextmanager 28 def in_env(repo_cmd_runner): 29 yield PythonEnv(repo_cmd_runner) 30 31 32 def norm_version(version): 33 if os.name == 'nt': # pragma: no cover (windows) 34 if not distutils.spawn.find_executable(version): 35 # The default place for python on windows is: 36 # C:\PythonXX\python.exe 37 version = r'C:\{0}\python.exe'.format(version.replace('.', '')) 38 return version 39 40 41 def install_environment(repo_cmd_runner, version='default'): 42 assert repo_cmd_runner.exists('setup.py') 43 44 # Install a virtualenv 45 with clean_path_on_failure(repo_cmd_runner.path(ENVIRONMENT_DIR)): 46 venv_cmd = ['virtualenv', '{{prefix}}{0}'.format(ENVIRONMENT_DIR)] 47 if version != 'default': 48 venv_cmd.extend(['-p', norm_version(version)]) 49 repo_cmd_runner.run(venv_cmd) 50 with in_env(repo_cmd_runner) as env: 51 env.run("cd '{prefix}' && pip install .") 52 53 54 def run_hook(repo_cmd_runner, hook, file_args): 55 with in_env(repo_cmd_runner) as env: 56 return helpers.run_hook(env, hook, file_args) 57 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pre_commit/languages/python.py b/pre_commit/languages/python.py --- a/pre_commit/languages/python.py +++ b/pre_commit/languages/python.py @@ -3,6 +3,7 @@ import contextlib import distutils.spawn import os +import sys import virtualenv @@ -43,7 +44,10 @@ # Install a virtualenv with clean_path_on_failure(repo_cmd_runner.path(ENVIRONMENT_DIR)): - venv_cmd = ['virtualenv', '{{prefix}}{0}'.format(ENVIRONMENT_DIR)] + venv_cmd = [ + sys.executable, '-m', 'virtualenv', + '{{prefix}}{0}'.format(ENVIRONMENT_DIR) + ] if version != 'default': venv_cmd.extend(['-p', norm_version(version)]) repo_cmd_runner.run(venv_cmd)
{"golden_diff": "diff --git a/pre_commit/languages/python.py b/pre_commit/languages/python.py\n--- a/pre_commit/languages/python.py\n+++ b/pre_commit/languages/python.py\n@@ -3,6 +3,7 @@\n import contextlib\n import distutils.spawn\n import os\n+import sys\n \n import virtualenv\n \n@@ -43,7 +44,10 @@\n \n # Install a virtualenv\n with clean_path_on_failure(repo_cmd_runner.path(ENVIRONMENT_DIR)):\n- venv_cmd = ['virtualenv', '{{prefix}}{0}'.format(ENVIRONMENT_DIR)]\n+ venv_cmd = [\n+ sys.executable, '-m', 'virtualenv',\n+ '{{prefix}}{0}'.format(ENVIRONMENT_DIR)\n+ ]\n if version != 'default':\n venv_cmd.extend(['-p', norm_version(version)])\n repo_cmd_runner.run(venv_cmd)\n", "issue": "pre-commit potentially uses the wrong `virtualenv` when building environments\nIt should use `sys.executable, '-m', 'virtualenv'` instead of `'virtualenv'`\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport distutils.spawn\nimport os\n\nimport virtualenv\n\nfrom pre_commit.languages import helpers\nfrom pre_commit.util import clean_path_on_failure\n\n\nENVIRONMENT_DIR = 'py_env'\n\n\nclass PythonEnv(helpers.Environment):\n @property\n def env_prefix(self):\n return \". '{{prefix}}{0}activate' &&\".format(\n virtualenv.path_locations(\n ENVIRONMENT_DIR,\n )[-1].rstrip(os.sep) + os.sep,\n 'activate',\n )\n\n\[email protected]\ndef in_env(repo_cmd_runner):\n yield PythonEnv(repo_cmd_runner)\n\n\ndef norm_version(version):\n if os.name == 'nt': # pragma: no cover (windows)\n if not distutils.spawn.find_executable(version):\n # The default place for python on windows is:\n # C:\\PythonXX\\python.exe\n version = r'C:\\{0}\\python.exe'.format(version.replace('.', ''))\n return version\n\n\ndef install_environment(repo_cmd_runner, version='default'):\n assert repo_cmd_runner.exists('setup.py')\n\n # Install a virtualenv\n with clean_path_on_failure(repo_cmd_runner.path(ENVIRONMENT_DIR)):\n venv_cmd = ['virtualenv', '{{prefix}}{0}'.format(ENVIRONMENT_DIR)]\n if version != 'default':\n venv_cmd.extend(['-p', norm_version(version)])\n repo_cmd_runner.run(venv_cmd)\n with in_env(repo_cmd_runner) as env:\n env.run(\"cd '{prefix}' && pip install .\")\n\n\ndef run_hook(repo_cmd_runner, hook, file_args):\n with in_env(repo_cmd_runner) as env:\n return helpers.run_hook(env, hook, file_args)\n", "path": "pre_commit/languages/python.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport distutils.spawn\nimport os\nimport sys\n\nimport virtualenv\n\nfrom pre_commit.languages import helpers\nfrom pre_commit.util import clean_path_on_failure\n\n\nENVIRONMENT_DIR = 'py_env'\n\n\nclass PythonEnv(helpers.Environment):\n @property\n def env_prefix(self):\n return \". '{{prefix}}{0}activate' &&\".format(\n virtualenv.path_locations(\n ENVIRONMENT_DIR,\n )[-1].rstrip(os.sep) + os.sep,\n 'activate',\n )\n\n\[email protected]\ndef in_env(repo_cmd_runner):\n yield PythonEnv(repo_cmd_runner)\n\n\ndef norm_version(version):\n if os.name == 'nt': # pragma: no cover (windows)\n if not distutils.spawn.find_executable(version):\n # The default place for python on windows is:\n # C:\\PythonXX\\python.exe\n version = r'C:\\{0}\\python.exe'.format(version.replace('.', ''))\n return version\n\n\ndef install_environment(repo_cmd_runner, version='default'):\n assert repo_cmd_runner.exists('setup.py')\n\n # Install a virtualenv\n with clean_path_on_failure(repo_cmd_runner.path(ENVIRONMENT_DIR)):\n venv_cmd = [\n sys.executable, '-m', 'virtualenv',\n '{{prefix}}{0}'.format(ENVIRONMENT_DIR)\n ]\n if version != 'default':\n venv_cmd.extend(['-p', norm_version(version)])\n repo_cmd_runner.run(venv_cmd)\n with in_env(repo_cmd_runner) as env:\n env.run(\"cd '{prefix}' && pip install .\")\n\n\ndef run_hook(repo_cmd_runner, hook, file_args):\n with in_env(repo_cmd_runner) as env:\n return helpers.run_hook(env, hook, file_args)\n", "path": "pre_commit/languages/python.py"}]}
780
191
gh_patches_debug_30909
rasdani/github-patches
git_diff
ephios-dev__ephios-1012
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Improve explanation for page slugs Users did not understand what the page slug means. We should provide a help text, either directly or in the docs. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ephios/plugins/pages/models.py` Content: ``` 1 from django.db import models 2 from django.utils.translation import gettext_lazy as _ 3 4 5 class Page(models.Model): 6 title = models.CharField(verbose_name=_("Title"), max_length=250) 7 content = models.TextField(_("Content"), blank=True) 8 slug = models.SlugField(_("Slug"), max_length=250, unique=True) 9 show_in_footer = models.BooleanField(_("Show in footer"), default=False) 10 publicly_visible = models.BooleanField(_("Publicly visible"), default=False) 11 12 def __str__(self): 13 return str(self.title) 14 15 class Meta: 16 verbose_name = "Page" 17 verbose_name_plural = "Pages" 18 ``` Path: `ephios/plugins/pages/views.py` Content: ``` 1 from django.contrib import messages 2 from django.contrib.auth.views import redirect_to_login 3 from django.urls import reverse 4 from django.utils.translation import gettext as _ 5 from django.views.generic import CreateView, DeleteView, DetailView, ListView, UpdateView 6 7 from ephios.extra.mixins import CustomPermissionRequiredMixin 8 from ephios.plugins.pages.models import Page 9 10 11 class PageListView(CustomPermissionRequiredMixin, ListView): 12 model = Page 13 permission_required = "pages.add_page" 14 15 16 class PageView(DetailView): 17 model = Page 18 19 def setup(self, request, *args, **kwargs): 20 super().setup(request, *args, **kwargs) 21 self.object = self.get_object() 22 23 def dispatch(self, request, *args, **kwargs): 24 if not request.user.is_authenticated and not self.object.publicly_visible: 25 return redirect_to_login(self.request.get_full_path()) 26 return super().dispatch(request, *args, **kwargs) 27 28 29 class PageCreateView(CustomPermissionRequiredMixin, CreateView): 30 model = Page 31 permission_required = "pages.add_page" 32 fields = ["title", "content", "slug", "show_in_footer", "publicly_visible"] 33 34 def get_success_url(self): 35 messages.success(self.request, _("Page saved successfully.")) 36 return reverse("pages:settings_page_list") 37 38 39 class PageUpdateView(CustomPermissionRequiredMixin, UpdateView): 40 model = Page 41 permission_required = "pages.change_page" 42 fields = ["title", "content", "slug", "show_in_footer", "publicly_visible"] 43 44 def get_success_url(self): 45 messages.success(self.request, _("Page saved successfully.")) 46 return reverse("pages:settings_page_list") 47 48 49 class PageDeleteView(CustomPermissionRequiredMixin, DeleteView): 50 model = Page 51 permission_required = "pages.delete_page" 52 53 def get_success_url(self): 54 messages.info(self.request, _("Page deleted successfully.")) 55 return reverse("pages:settings_page_list") 56 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ephios/plugins/pages/models.py b/ephios/plugins/pages/models.py --- a/ephios/plugins/pages/models.py +++ b/ephios/plugins/pages/models.py @@ -5,7 +5,12 @@ class Page(models.Model): title = models.CharField(verbose_name=_("Title"), max_length=250) content = models.TextField(_("Content"), blank=True) - slug = models.SlugField(_("Slug"), max_length=250, unique=True) + slug = models.SlugField( + _("URL slug"), + help_text=_("The slug is used to generate the page's URL."), + max_length=250, + unique=True, + ) show_in_footer = models.BooleanField(_("Show in footer"), default=False) publicly_visible = models.BooleanField(_("Publicly visible"), default=False) diff --git a/ephios/plugins/pages/views.py b/ephios/plugins/pages/views.py --- a/ephios/plugins/pages/views.py +++ b/ephios/plugins/pages/views.py @@ -29,7 +29,7 @@ class PageCreateView(CustomPermissionRequiredMixin, CreateView): model = Page permission_required = "pages.add_page" - fields = ["title", "content", "slug", "show_in_footer", "publicly_visible"] + fields = ["title", "slug", "content", "show_in_footer", "publicly_visible"] def get_success_url(self): messages.success(self.request, _("Page saved successfully.")) @@ -39,7 +39,7 @@ class PageUpdateView(CustomPermissionRequiredMixin, UpdateView): model = Page permission_required = "pages.change_page" - fields = ["title", "content", "slug", "show_in_footer", "publicly_visible"] + fields = ["title", "slug", "content", "show_in_footer", "publicly_visible"] def get_success_url(self): messages.success(self.request, _("Page saved successfully."))
{"golden_diff": "diff --git a/ephios/plugins/pages/models.py b/ephios/plugins/pages/models.py\n--- a/ephios/plugins/pages/models.py\n+++ b/ephios/plugins/pages/models.py\n@@ -5,7 +5,12 @@\n class Page(models.Model):\n title = models.CharField(verbose_name=_(\"Title\"), max_length=250)\n content = models.TextField(_(\"Content\"), blank=True)\n- slug = models.SlugField(_(\"Slug\"), max_length=250, unique=True)\n+ slug = models.SlugField(\n+ _(\"URL slug\"),\n+ help_text=_(\"The slug is used to generate the page's URL.\"),\n+ max_length=250,\n+ unique=True,\n+ )\n show_in_footer = models.BooleanField(_(\"Show in footer\"), default=False)\n publicly_visible = models.BooleanField(_(\"Publicly visible\"), default=False)\n \ndiff --git a/ephios/plugins/pages/views.py b/ephios/plugins/pages/views.py\n--- a/ephios/plugins/pages/views.py\n+++ b/ephios/plugins/pages/views.py\n@@ -29,7 +29,7 @@\n class PageCreateView(CustomPermissionRequiredMixin, CreateView):\n model = Page\n permission_required = \"pages.add_page\"\n- fields = [\"title\", \"content\", \"slug\", \"show_in_footer\", \"publicly_visible\"]\n+ fields = [\"title\", \"slug\", \"content\", \"show_in_footer\", \"publicly_visible\"]\n \n def get_success_url(self):\n messages.success(self.request, _(\"Page saved successfully.\"))\n@@ -39,7 +39,7 @@\n class PageUpdateView(CustomPermissionRequiredMixin, UpdateView):\n model = Page\n permission_required = \"pages.change_page\"\n- fields = [\"title\", \"content\", \"slug\", \"show_in_footer\", \"publicly_visible\"]\n+ fields = [\"title\", \"slug\", \"content\", \"show_in_footer\", \"publicly_visible\"]\n \n def get_success_url(self):\n messages.success(self.request, _(\"Page saved successfully.\"))\n", "issue": "Improve explanation for page slugs\nUsers did not understand what the page slug means. We should provide a help text, either directly or in the docs.\n", "before_files": [{"content": "from django.db import models\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass Page(models.Model):\n title = models.CharField(verbose_name=_(\"Title\"), max_length=250)\n content = models.TextField(_(\"Content\"), blank=True)\n slug = models.SlugField(_(\"Slug\"), max_length=250, unique=True)\n show_in_footer = models.BooleanField(_(\"Show in footer\"), default=False)\n publicly_visible = models.BooleanField(_(\"Publicly visible\"), default=False)\n\n def __str__(self):\n return str(self.title)\n\n class Meta:\n verbose_name = \"Page\"\n verbose_name_plural = \"Pages\"\n", "path": "ephios/plugins/pages/models.py"}, {"content": "from django.contrib import messages\nfrom django.contrib.auth.views import redirect_to_login\nfrom django.urls import reverse\nfrom django.utils.translation import gettext as _\nfrom django.views.generic import CreateView, DeleteView, DetailView, ListView, UpdateView\n\nfrom ephios.extra.mixins import CustomPermissionRequiredMixin\nfrom ephios.plugins.pages.models import Page\n\n\nclass PageListView(CustomPermissionRequiredMixin, ListView):\n model = Page\n permission_required = \"pages.add_page\"\n\n\nclass PageView(DetailView):\n model = Page\n\n def setup(self, request, *args, **kwargs):\n super().setup(request, *args, **kwargs)\n self.object = self.get_object()\n\n def dispatch(self, request, *args, **kwargs):\n if not request.user.is_authenticated and not self.object.publicly_visible:\n return redirect_to_login(self.request.get_full_path())\n return super().dispatch(request, *args, **kwargs)\n\n\nclass PageCreateView(CustomPermissionRequiredMixin, CreateView):\n model = Page\n permission_required = \"pages.add_page\"\n fields = [\"title\", \"content\", \"slug\", \"show_in_footer\", \"publicly_visible\"]\n\n def get_success_url(self):\n messages.success(self.request, _(\"Page saved successfully.\"))\n return reverse(\"pages:settings_page_list\")\n\n\nclass PageUpdateView(CustomPermissionRequiredMixin, UpdateView):\n model = Page\n permission_required = \"pages.change_page\"\n fields = [\"title\", \"content\", \"slug\", \"show_in_footer\", \"publicly_visible\"]\n\n def get_success_url(self):\n messages.success(self.request, _(\"Page saved successfully.\"))\n return reverse(\"pages:settings_page_list\")\n\n\nclass PageDeleteView(CustomPermissionRequiredMixin, DeleteView):\n model = Page\n permission_required = \"pages.delete_page\"\n\n def get_success_url(self):\n messages.info(self.request, _(\"Page deleted successfully.\"))\n return reverse(\"pages:settings_page_list\")\n", "path": "ephios/plugins/pages/views.py"}], "after_files": [{"content": "from django.db import models\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass Page(models.Model):\n title = models.CharField(verbose_name=_(\"Title\"), max_length=250)\n content = models.TextField(_(\"Content\"), blank=True)\n slug = models.SlugField(\n _(\"URL slug\"),\n help_text=_(\"The slug is used to generate the page's URL.\"),\n max_length=250,\n unique=True,\n )\n show_in_footer = models.BooleanField(_(\"Show in footer\"), default=False)\n publicly_visible = models.BooleanField(_(\"Publicly visible\"), default=False)\n\n def __str__(self):\n return str(self.title)\n\n class Meta:\n verbose_name = \"Page\"\n verbose_name_plural = \"Pages\"\n", "path": "ephios/plugins/pages/models.py"}, {"content": "from django.contrib import messages\nfrom django.contrib.auth.views import redirect_to_login\nfrom django.urls import reverse\nfrom django.utils.translation import gettext as _\nfrom django.views.generic import CreateView, DeleteView, DetailView, ListView, UpdateView\n\nfrom ephios.extra.mixins import CustomPermissionRequiredMixin\nfrom ephios.plugins.pages.models import Page\n\n\nclass PageListView(CustomPermissionRequiredMixin, ListView):\n model = Page\n permission_required = \"pages.add_page\"\n\n\nclass PageView(DetailView):\n model = Page\n\n def setup(self, request, *args, **kwargs):\n super().setup(request, *args, **kwargs)\n self.object = self.get_object()\n\n def dispatch(self, request, *args, **kwargs):\n if not request.user.is_authenticated and not self.object.publicly_visible:\n return redirect_to_login(self.request.get_full_path())\n return super().dispatch(request, *args, **kwargs)\n\n\nclass PageCreateView(CustomPermissionRequiredMixin, CreateView):\n model = Page\n permission_required = \"pages.add_page\"\n fields = [\"title\", \"slug\", \"content\", \"show_in_footer\", \"publicly_visible\"]\n\n def get_success_url(self):\n messages.success(self.request, _(\"Page saved successfully.\"))\n return reverse(\"pages:settings_page_list\")\n\n\nclass PageUpdateView(CustomPermissionRequiredMixin, UpdateView):\n model = Page\n permission_required = \"pages.change_page\"\n fields = [\"title\", \"slug\", \"content\", \"show_in_footer\", \"publicly_visible\"]\n\n def get_success_url(self):\n messages.success(self.request, _(\"Page saved successfully.\"))\n return reverse(\"pages:settings_page_list\")\n\n\nclass PageDeleteView(CustomPermissionRequiredMixin, DeleteView):\n model = Page\n permission_required = \"pages.delete_page\"\n\n def get_success_url(self):\n messages.info(self.request, _(\"Page deleted successfully.\"))\n return reverse(\"pages:settings_page_list\")\n", "path": "ephios/plugins/pages/views.py"}]}
989
431
gh_patches_debug_4988
rasdani/github-patches
git_diff
deis__deis-2513
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- make discovery-url is doesn't work . shows no rule to create discovery-url .stop! i have search the old issues,it didn't solve the problem. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docs/conf.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # 3 # deis documentation build configuration file, created by 4 # sphinx-quickstart on Fri Jul 26 12:12:00 2013. 5 # 6 # This file is execfile()d with the current directory set to its containing dir. 7 # 8 # Note that not all possible configuration values are present in this 9 # autogenerated file. 10 # 11 # All configuration values have a default; values that are commented out 12 # serve to show the default. 13 14 import os 15 import sys 16 17 # If extensions (or modules to document with autodoc) are in another directory, 18 # add these directories to sys.path here. If the directory is relative to the 19 # documentation root, use os.path.abspath to make it absolute, like shown here. 20 21 # Some hackery here to get deis.py to be importable as client.deis 22 open(os.path.join('..', '__init__.py'), 'a') 23 sys.path.insert(0, os.path.abspath(os.path.join('..'))) 24 sys.path.insert(0, os.path.abspath(os.path.join('..', 'controller'))) 25 # create local_settings.py for SECRET_KEY if necessary 26 local_settings_path = os.path.abspath( 27 os.path.join('..', 'controller', 'deis', 'local_settings.py')) 28 if not os.path.exists(local_settings_path): 29 with open(local_settings_path, 'w') as local_settings: 30 local_settings.write("SECRET_KEY = 'DummySecretKey'\n") 31 # set up Django 32 os.environ['DJANGO_SETTINGS_MODULE'] = 'deis.settings' 33 from django.conf import settings # noqa 34 35 # -- General configuration ----------------------------------------------------- 36 37 # If your documentation needs a minimal Sphinx version, state it here. 38 #needs_sphinx = '1.0' 39 40 # Add any Sphinx extension module names here, as strings. They can be extensions 41 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 42 extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary', 43 'sphinx.ext.viewcode'] 44 45 # default flags for auto-generated python code documetation 46 autodoc_default_flags = ['members', 'undoc-members'] 47 48 # Add any paths that contain templates here, relative to this directory. 49 templates_path = ['_templates'] 50 51 # The suffix of source filenames. 52 source_suffix = '.rst' 53 54 # The encoding of source files. 55 #source_encoding = 'utf-8-sig' 56 57 # The master toctree document. 58 master_doc = 'toctree' 59 60 # General information about the project. 61 project = u'deis' 62 copyright = u'2013, OpDemand LLC' 63 64 # The version info for the project you're documenting, acts as replacement for 65 # |version| and |release|, also used in various other places throughout the 66 # built documents. 67 # 68 from deis import __version__ 69 70 # The short X.Y version. 71 version = __version__.rsplit('.', 1)[0] 72 # The full version, including alpha/beta/rc tags. 73 release = __version__ 74 75 # The language for content autogenerated by Sphinx. Refer to documentation 76 # for a list of supported languages. 77 #language = None 78 79 # There are two options for replacing |today|: either, you set today to some 80 # non-false value, then it is used: 81 #today = '' 82 # Else, today_fmt is used as the format for a strftime call. 83 #today_fmt = '%B %d, %Y' 84 85 # List of patterns, relative to source directory, that match files and 86 # directories to ignore when looking for source files. 87 exclude_patterns = ['_build', 'venv'] 88 89 # The reST default role (used for this markup: `text`) to use for all documents. 90 #default_role = None 91 92 # If true, '()' will be appended to :func: etc. cross-reference text. 93 #add_function_parentheses = True 94 95 # If true, the current module name will be prepended to all description 96 # unit titles (such as .. function::). 97 #add_module_names = True 98 99 # If true, sectionauthor and moduleauthor directives will be shown in the 100 # output. They are ignored by default. 101 #show_authors = False 102 103 # The name of the Pygments (syntax highlighting) style to use. 104 pygments_style = 'sphinx' 105 106 # A list of ignored prefixes for module index sorting. 107 #modindex_common_prefix = [] 108 109 # If true, keep warnings as "system message" paragraphs in the built documents. 110 #keep_warnings = False 111 112 113 # -- Options for HTML output --------------------------------------------------- 114 115 # The theme to use for HTML and HTML Help pages. See the documentation for 116 # a list of builtin themes. 117 html_theme = 'deis' 118 119 # Theme options are theme-specific and customize the look and feel of a theme 120 # further. For a list of options available for each theme, see the 121 # documentation. 122 #html_theme_options = {} 123 124 # Add any paths that contain custom themes here, relative to this directory. 125 html_theme_path = ['theme'] 126 127 # The name for this set of Sphinx documents. If None, it defaults to 128 # "<project> v<release> documentation". 129 #html_title = None 130 131 # A shorter title for the navigation bar. Default is the same as html_title. 132 #html_short_title = None 133 134 # The name of an image file (relative to this directory) to place at the top 135 # of the sidebar. 136 #html_logo = None 137 138 # The name of an image file (within the static path) to use as favicon of the 139 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 140 # pixels large. 141 #html_favicon = None 142 143 # Add any paths that contain custom static files (such as style sheets) here, 144 # relative to this directory. They are copied after the builtin static files, 145 # so a file named "default.css" will overwrite the builtin "default.css". 146 html_static_path = ['../controller/web/static'] 147 148 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 149 # using the given strftime format. 150 #html_last_updated_fmt = '%b %d, %Y' 151 152 # If true, SmartyPants will be used to convert quotes and dashes to 153 # typographically correct entities. 154 html_use_smartypants = True 155 156 html_add_permalinks = True 157 158 # Custom sidebar templates, maps document names to template names. 159 #html_sidebars = {} 160 161 # Additional templates that should be rendered to pages, maps page names to 162 # template names. 163 #html_additional_pages = {} 164 165 # If false, no module index is generated. 166 #html_domain_indices = True 167 168 # If false, no index is generated. 169 #html_use_index = True 170 171 # If true, the index is split into individual pages for each letter. 172 #html_split_index = False 173 174 # If true, links to the reST sources are added to the pages. 175 #html_show_sourcelink = True 176 177 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 178 #html_show_sphinx = True 179 180 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 181 #html_show_copyright = True 182 183 # If true, an OpenSearch description file will be output, and all pages will 184 # contain a <link> tag referring to it. The value of this option must be the 185 # base URL from which the finished HTML is served. 186 #html_use_opensearch = '' 187 188 # This is the file name suffix for HTML files (e.g. ".xhtml"). 189 #html_file_suffix = None 190 191 # Output file base name for HTML help builder. 192 htmlhelp_basename = 'deisdoc' 193 194 195 # -- Options for LaTeX output -------------------------------------------------- 196 197 latex_elements = { 198 # The paper size ('letterpaper' or 'a4paper'). 199 #'papersize': 'letterpaper', 200 201 # The font size ('10pt', '11pt' or '12pt'). 202 #'pointsize': '10pt', 203 204 # Additional stuff for the LaTeX preamble. 205 #'preamble': '', 206 } 207 208 # Grouping the document tree into LaTeX files. List of tuples 209 # (source start file, target name, title, author, documentclass [howto/manual]). 210 latex_documents = [ 211 ('index', 'deis.tex', u'deis Documentation', 212 u'Author', 'manual'), 213 ] 214 215 # The name of an image file (relative to this directory) to place at the top of 216 # the title page. 217 #latex_logo = None 218 219 # For "manual" documents, if this is true, then toplevel headings are parts, 220 # not chapters. 221 #latex_use_parts = False 222 223 # If true, show page references after internal links. 224 #latex_show_pagerefs = False 225 226 # If true, show URL addresses after external links. 227 #latex_show_urls = False 228 229 # Documents to append as an appendix to all manuals. 230 #latex_appendices = [] 231 232 # If false, no module index is generated. 233 #latex_domain_indices = True 234 235 236 # -- Options for manual page output -------------------------------------------- 237 238 # One entry per manual page. List of tuples 239 # (source start file, name, description, authors, manual section). 240 man_pages = [ 241 ('index', 'deis', u'deis Documentation', 242 [u'Author'], 1) 243 ] 244 245 # If true, show URL addresses after external links. 246 #man_show_urls = False 247 248 249 # -- Options for Texinfo output ------------------------------------------------ 250 251 # Grouping the document tree into Texinfo files. List of tuples 252 # (source start file, target name, title, author, 253 # dir menu entry, description, category) 254 texinfo_documents = [ 255 ('index', 'deis', u'deis Documentation', 256 u'Author', 'deis', 'One line description of project.', 257 'Miscellaneous'), 258 ] 259 260 # Documents to append as an appendix to all manuals. 261 #texinfo_appendices = [] 262 263 # If false, no module index is generated. 264 #texinfo_domain_indices = True 265 266 # How to display URL addresses: 'footnote', 'no', or 'inline'. 267 #texinfo_show_urls = 'footnote' 268 269 # If true, do not generate a @detailmenu in the "Top" node's menu. 270 #texinfo_no_detailmenu = False 271 272 273 # -- Options for Epub output --------------------------------------------------- 274 275 # Bibliographic Dublin Core info. 276 epub_title = u'deis' 277 epub_author = u'OpDemand LLC' 278 epub_publisher = u'OpDemand LLC' 279 epub_copyright = u'2013, OpDemand LLC' 280 281 # The language of the text. It defaults to the language option 282 # or en if the language is not set. 283 #epub_language = '' 284 285 # The scheme of the identifier. Typical schemes are ISBN or URL. 286 #epub_scheme = '' 287 288 # The unique identifier of the text. This can be a ISBN number 289 # or the project homepage. 290 #epub_identifier = '' 291 292 # A unique identification for the text. 293 #epub_uid = '' 294 295 # A tuple containing the cover image and cover page html template filenames. 296 #epub_cover = () 297 298 # A sequence of (type, uri, title) tuples for the guide element of content.opf. 299 #epub_guide = () 300 301 # HTML files that should be inserted before the pages created by sphinx. 302 # The format is a list of tuples containing the path and title. 303 #epub_pre_files = [] 304 305 # HTML files shat should be inserted after the pages created by sphinx. 306 # The format is a list of tuples containing the path and title. 307 #epub_post_files = [] 308 309 # A list of files that should not be packed into the epub file. 310 #epub_exclude_files = [] 311 312 # The depth of the table of contents in toc.ncx. 313 #epub_tocdepth = 3 314 315 # Allow duplicate toc entries. 316 #epub_tocdup = True 317 318 # Fix unsupported image types using the PIL. 319 #epub_fix_images = False 320 321 # Scale large images. 322 #epub_max_image_width = 0 323 324 # If 'no', URL addresses will not be shown. 325 #epub_show_urls = 'inline' 326 327 # If false, no index is generated. 328 #epub_use_index = True 329 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -84,7 +84,7 @@ # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. -exclude_patterns = ['_build', 'venv'] +exclude_patterns = ['_build', 'venv', '**/_*.rst'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -84,7 +84,7 @@\n \n # List of patterns, relative to source directory, that match files and\n # directories to ignore when looking for source files.\n-exclude_patterns = ['_build', 'venv']\n+exclude_patterns = ['_build', 'venv', '**/_*.rst']\n \n # The reST default role (used for this markup: `text`) to use for all documents.\n #default_role = None\n", "issue": "make discovery-url is doesn't work .\nshows no rule to create discovery-url .stop!\ni have search the old issues,it didn't solve the problem.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# deis documentation build configuration file, created by\n# sphinx-quickstart on Fri Jul 26 12:12:00 2013.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport os\nimport sys\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\n# Some hackery here to get deis.py to be importable as client.deis\nopen(os.path.join('..', '__init__.py'), 'a')\nsys.path.insert(0, os.path.abspath(os.path.join('..')))\nsys.path.insert(0, os.path.abspath(os.path.join('..', 'controller')))\n# create local_settings.py for SECRET_KEY if necessary\nlocal_settings_path = os.path.abspath(\n os.path.join('..', 'controller', 'deis', 'local_settings.py'))\nif not os.path.exists(local_settings_path):\n with open(local_settings_path, 'w') as local_settings:\n local_settings.write(\"SECRET_KEY = 'DummySecretKey'\\n\")\n# set up Django\nos.environ['DJANGO_SETTINGS_MODULE'] = 'deis.settings'\nfrom django.conf import settings # noqa\n\n# -- General configuration -----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary',\n 'sphinx.ext.viewcode']\n\n# default flags for auto-generated python code documetation\nautodoc_default_flags = ['members', 'undoc-members']\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'toctree'\n\n# General information about the project.\nproject = u'deis'\ncopyright = u'2013, OpDemand LLC'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\nfrom deis import __version__\n\n# The short X.Y version.\nversion = __version__.rsplit('.', 1)[0]\n# The full version, including alpha/beta/rc tags.\nrelease = __version__\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build', 'venv']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'deis'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\nhtml_theme_path = ['theme']\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['../controller/web/static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\nhtml_use_smartypants = True\n\nhtml_add_permalinks = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'deisdoc'\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n ('index', 'deis.tex', u'deis Documentation',\n u'Author', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'deis', u'deis Documentation',\n [u'Author'], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'deis', u'deis Documentation',\n u'Author', 'deis', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n\n\n# -- Options for Epub output ---------------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = u'deis'\nepub_author = u'OpDemand LLC'\nepub_publisher = u'OpDemand LLC'\nepub_copyright = u'2013, OpDemand LLC'\n\n# The language of the text. It defaults to the language option\n# or en if the language is not set.\n#epub_language = ''\n\n# The scheme of the identifier. Typical schemes are ISBN or URL.\n#epub_scheme = ''\n\n# The unique identifier of the text. This can be a ISBN number\n# or the project homepage.\n#epub_identifier = ''\n\n# A unique identification for the text.\n#epub_uid = ''\n\n# A tuple containing the cover image and cover page html template filenames.\n#epub_cover = ()\n\n# A sequence of (type, uri, title) tuples for the guide element of content.opf.\n#epub_guide = ()\n\n# HTML files that should be inserted before the pages created by sphinx.\n# The format is a list of tuples containing the path and title.\n#epub_pre_files = []\n\n# HTML files shat should be inserted after the pages created by sphinx.\n# The format is a list of tuples containing the path and title.\n#epub_post_files = []\n\n# A list of files that should not be packed into the epub file.\n#epub_exclude_files = []\n\n# The depth of the table of contents in toc.ncx.\n#epub_tocdepth = 3\n\n# Allow duplicate toc entries.\n#epub_tocdup = True\n\n# Fix unsupported image types using the PIL.\n#epub_fix_images = False\n\n# Scale large images.\n#epub_max_image_width = 0\n\n# If 'no', URL addresses will not be shown.\n#epub_show_urls = 'inline'\n\n# If false, no index is generated.\n#epub_use_index = True\n", "path": "docs/conf.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# deis documentation build configuration file, created by\n# sphinx-quickstart on Fri Jul 26 12:12:00 2013.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport os\nimport sys\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\n# Some hackery here to get deis.py to be importable as client.deis\nopen(os.path.join('..', '__init__.py'), 'a')\nsys.path.insert(0, os.path.abspath(os.path.join('..')))\nsys.path.insert(0, os.path.abspath(os.path.join('..', 'controller')))\n# create local_settings.py for SECRET_KEY if necessary\nlocal_settings_path = os.path.abspath(\n os.path.join('..', 'controller', 'deis', 'local_settings.py'))\nif not os.path.exists(local_settings_path):\n with open(local_settings_path, 'w') as local_settings:\n local_settings.write(\"SECRET_KEY = 'DummySecretKey'\\n\")\n# set up Django\nos.environ['DJANGO_SETTINGS_MODULE'] = 'deis.settings'\nfrom django.conf import settings # noqa\n\n# -- General configuration -----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary',\n 'sphinx.ext.viewcode']\n\n# default flags for auto-generated python code documetation\nautodoc_default_flags = ['members', 'undoc-members']\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'toctree'\n\n# General information about the project.\nproject = u'deis'\ncopyright = u'2013, OpDemand LLC'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\nfrom deis import __version__\n\n# The short X.Y version.\nversion = __version__.rsplit('.', 1)[0]\n# The full version, including alpha/beta/rc tags.\nrelease = __version__\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build', 'venv', '**/_*.rst']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'deis'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\nhtml_theme_path = ['theme']\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['../controller/web/static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\nhtml_use_smartypants = True\n\nhtml_add_permalinks = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'deisdoc'\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n ('index', 'deis.tex', u'deis Documentation',\n u'Author', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'deis', u'deis Documentation',\n [u'Author'], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'deis', u'deis Documentation',\n u'Author', 'deis', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n\n\n# -- Options for Epub output ---------------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = u'deis'\nepub_author = u'OpDemand LLC'\nepub_publisher = u'OpDemand LLC'\nepub_copyright = u'2013, OpDemand LLC'\n\n# The language of the text. It defaults to the language option\n# or en if the language is not set.\n#epub_language = ''\n\n# The scheme of the identifier. Typical schemes are ISBN or URL.\n#epub_scheme = ''\n\n# The unique identifier of the text. This can be a ISBN number\n# or the project homepage.\n#epub_identifier = ''\n\n# A unique identification for the text.\n#epub_uid = ''\n\n# A tuple containing the cover image and cover page html template filenames.\n#epub_cover = ()\n\n# A sequence of (type, uri, title) tuples for the guide element of content.opf.\n#epub_guide = ()\n\n# HTML files that should be inserted before the pages created by sphinx.\n# The format is a list of tuples containing the path and title.\n#epub_pre_files = []\n\n# HTML files shat should be inserted after the pages created by sphinx.\n# The format is a list of tuples containing the path and title.\n#epub_post_files = []\n\n# A list of files that should not be packed into the epub file.\n#epub_exclude_files = []\n\n# The depth of the table of contents in toc.ncx.\n#epub_tocdepth = 3\n\n# Allow duplicate toc entries.\n#epub_tocdup = True\n\n# Fix unsupported image types using the PIL.\n#epub_fix_images = False\n\n# Scale large images.\n#epub_max_image_width = 0\n\n# If 'no', URL addresses will not be shown.\n#epub_show_urls = 'inline'\n\n# If false, no index is generated.\n#epub_use_index = True\n", "path": "docs/conf.py"}]}
3,805
119
gh_patches_debug_12459
rasdani/github-patches
git_diff
azavea__raster-vision-497
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Unit Tests: `task` Directory --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `rastervision/utils/files.py` Content: ``` 1 import os 2 import shutil 3 from threading import Timer 4 import logging 5 6 from google.protobuf import json_format 7 8 from rastervision.filesystem.filesystem import FileSystem 9 from rastervision.filesystem.filesystem import ProtobufParseException 10 from rastervision.filesystem.local_filesystem import make_dir 11 12 log = logging.getLogger(__name__) 13 14 15 def get_local_path(uri, download_dir, fs=None): 16 """Convert a URI into a corresponding local path. 17 18 If a uri is local, return it. If it's remote, we generate a path for it 19 within download_dir. For an S3 path of form s3://<bucket>/<key>, the path 20 is <download_dir>/s3/<bucket>/<key>. 21 22 Args: 23 uri: (string) URI of file 24 download_dir: (string) path to directory 25 fs: Optional FileSystem to use 26 27 Returns: 28 (string) a local path 29 """ 30 if uri is None: 31 return None 32 33 if not fs: 34 fs = FileSystem.get_file_system(uri, 'r') 35 path = fs.local_path(uri, download_dir) 36 37 return path 38 39 40 def sync_to_dir(src_dir_uri, dest_dir_uri, delete=False, fs=None): 41 """Synchronize a local to a local or remote directory. 42 43 Transfers files from source to destination directories so that the 44 destination has all the source files. If delete is True, also delete 45 files in the destination to match those in the source directory. 46 47 Args: 48 src_dir_uri: (string) URI of local source directory 49 dest_dir_uri: (string) URI of destination directory 50 delete: (bool) 51 fs: Optional FileSystem to use for destination 52 """ 53 if not fs: 54 fs = FileSystem.get_file_system(dest_dir_uri, 'w') 55 fs.sync_to_dir(src_dir_uri, dest_dir_uri, delete=delete) 56 57 58 def sync_from_dir(src_dir_uri, dest_dir_uri, delete=False, fs=None): 59 """Synchronize a local or remote directory to a local directory. 60 61 Transfers files from source to destination directories so that the 62 destination has all the source files. If delete is True, also delete 63 files in the destination to match those in the source directory. 64 65 Args: 66 src_dir_uri: (string) URI of source directory 67 dest_dir_uri: (string) URI of local destination directory 68 delete: (bool) 69 fs: Optional FileSystem to use 70 """ 71 if not fs: 72 fs = FileSystem.get_file_system(src_dir_uri, 'r') 73 fs.sync_from_dir(src_dir_uri, dest_dir_uri, delete=delete) 74 75 76 def start_sync(src_dir_uri, dest_dir_uri, sync_interval=600, fs=None): 77 """Start syncing a directory on a schedule. 78 79 Calls sync_to_dir on a schedule. 80 81 Args: 82 src_dir_uri: (string) Path of the local source directory 83 dest_dir_uri: (string) URI of destination directory 84 sync_interval: (int) period in seconds for syncing 85 fs: Optional FileSystem to use 86 """ 87 88 def _sync_dir(): 89 log.info('Syncing {} to {}...'.format(src_dir_uri, dest_dir_uri)) 90 sync_to_dir(src_dir_uri, dest_dir_uri, delete=False, fs=fs) 91 92 class SyncThread: 93 def __init__(self): 94 thread = Timer(sync_interval, _sync_dir) 95 thread.daemon = True 96 thread.start() 97 self.thread = thread 98 99 def __enter__(self): 100 return self.thread 101 102 def __exit__(self, type, value, traceback): 103 self.thread.cancel() 104 105 return SyncThread() 106 107 108 def download_if_needed(uri, download_dir, fs=None): 109 """Download a file into a directory if it's remote. 110 111 If uri is local, there is no need to download the file. 112 113 Args: 114 uri: (string) URI of file 115 download_dir: (string) local directory to download file into 116 fs: Optional FileSystem to use. 117 118 Returns: 119 (string) path to local file 120 121 Raises: 122 NotReadableError if URI cannot be read from 123 """ 124 if uri is None: 125 return None 126 127 if not fs: 128 fs = FileSystem.get_file_system(uri, 'r') 129 130 path = get_local_path(uri, download_dir, fs=fs) 131 make_dir(path, use_dirname=True) 132 133 if path != uri: 134 log.info('Downloading {} to {}'.format(uri, path)) 135 136 fs.copy_from(uri, path) 137 138 return path 139 140 141 def download_or_copy(uri, target_dir, fs=None): 142 """Downloads or copies a file to a directory 143 144 Args: 145 uri: (string) URI of file 146 target_dir: (string) local directory to copy file to 147 fs: Optional FileSystem to use 148 """ 149 local_path = download_if_needed(uri, target_dir, fs=fs) 150 shutil.copy(local_path, target_dir) 151 return local_path 152 153 154 def file_exists(uri, fs=None): 155 if not fs: 156 fs = FileSystem.get_file_system(uri, 'r') 157 return fs.file_exists(uri) 158 159 160 def list_paths(uri, ext=None, fs=None): 161 if uri is None: 162 return None 163 164 if not fs: 165 fs = FileSystem.get_file_system(uri, 'r') 166 167 return fs.list_paths(uri, ext=ext) 168 169 170 def upload_or_copy(src_path, dst_uri, fs=None): 171 """Upload a file if the destination is remote. 172 173 If dst_uri is local, the file is copied. 174 175 Args: 176 src_path: (string) path to source file 177 dst_uri: (string) URI of destination for file 178 fs: Optional FileSystem to use 179 Raises: 180 NotWritableError if URI cannot be written to 181 """ 182 if dst_uri is None: 183 return 184 185 if not (os.path.isfile(src_path) or os.path.isdir(src_path)): 186 raise Exception('{} does not exist.'.format(src_path)) 187 188 if not src_path == dst_uri: 189 log.info('Uploading {} to {}'.format(src_path, dst_uri)) 190 191 if not fs: 192 fs = FileSystem.get_file_system(dst_uri, 'w') 193 fs.copy_to(src_path, dst_uri) 194 195 196 def file_to_str(uri, fs=None): 197 """Download contents of text file into a string. 198 199 Args: 200 uri: (string) URI of file 201 fs: Optional FileSystem to use 202 203 Returns: 204 (string) with contents of text file 205 206 Raises: 207 NotReadableError if URI cannot be read from 208 """ 209 if not fs: 210 fs = FileSystem.get_file_system(uri, 'r') 211 return fs.read_str(uri) 212 213 214 def str_to_file(content_str, uri, fs=None): 215 """Writes string to text file. 216 217 Args: 218 content_str: string to write 219 uri: (string) URI of file to write 220 fs: Optional FileSystem to use 221 222 Raise: 223 NotWritableError if file_uri cannot be written 224 """ 225 if not fs: 226 fs = FileSystem.get_file_system(uri, 'r') 227 return fs.write_str(uri, content_str) 228 229 230 def load_json_config(uri, message, fs=None): 231 """Load a JSON-formatted protobuf config file. 232 233 Args: 234 uri: (string) URI of config file 235 message: (google.protobuf.message.Message) empty protobuf message of 236 to load the config into. The type needs to match the content of 237 uri. 238 fs: Optional FileSystem to use. 239 240 Returns: 241 the same message passed as input with fields filled in from uri 242 243 Raises: 244 ProtobufParseException if uri cannot be parsed 245 """ 246 try: 247 return json_format.Parse(file_to_str(uri, fs=fs), message) 248 except json_format.ParseError as e: 249 error_msg = ('Problem parsing protobuf file {}. '.format(uri) + 250 'You might need to run scripts/compile') 251 raise ProtobufParseException(error_msg) from e 252 253 254 def save_json_config(message, uri, fs=None): 255 """Save a protobuf object to a JSON file. 256 257 Args: 258 message: (google.protobuf.message.Message) protobuf message 259 uri: (string) URI of JSON file to write message to 260 fs: Optional FileSystem to use 261 262 Raises: 263 NotWritableError if uri cannot be written 264 """ 265 json_str = json_format.MessageToJson(message) 266 str_to_file(json_str, uri, fs=fs) 267 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/rastervision/utils/files.py b/rastervision/utils/files.py --- a/rastervision/utils/files.py +++ b/rastervision/utils/files.py @@ -73,7 +73,8 @@ fs.sync_from_dir(src_dir_uri, dest_dir_uri, delete=delete) -def start_sync(src_dir_uri, dest_dir_uri, sync_interval=600, fs=None): +def start_sync(src_dir_uri, dest_dir_uri, sync_interval=600, + fs=None): # pragma: no cover """Start syncing a directory on a schedule. Calls sync_to_dir on a schedule. @@ -157,7 +158,7 @@ return fs.file_exists(uri) -def list_paths(uri, ext=None, fs=None): +def list_paths(uri, ext='', fs=None): if uri is None: return None
{"golden_diff": "diff --git a/rastervision/utils/files.py b/rastervision/utils/files.py\n--- a/rastervision/utils/files.py\n+++ b/rastervision/utils/files.py\n@@ -73,7 +73,8 @@\n fs.sync_from_dir(src_dir_uri, dest_dir_uri, delete=delete)\n \n \n-def start_sync(src_dir_uri, dest_dir_uri, sync_interval=600, fs=None):\n+def start_sync(src_dir_uri, dest_dir_uri, sync_interval=600,\n+ fs=None): # pragma: no cover\n \"\"\"Start syncing a directory on a schedule.\n \n Calls sync_to_dir on a schedule.\n@@ -157,7 +158,7 @@\n return fs.file_exists(uri)\n \n \n-def list_paths(uri, ext=None, fs=None):\n+def list_paths(uri, ext='', fs=None):\n if uri is None:\n return None\n", "issue": "Unit Tests: `task` Directory\n\n", "before_files": [{"content": "import os\nimport shutil\nfrom threading import Timer\nimport logging\n\nfrom google.protobuf import json_format\n\nfrom rastervision.filesystem.filesystem import FileSystem\nfrom rastervision.filesystem.filesystem import ProtobufParseException\nfrom rastervision.filesystem.local_filesystem import make_dir\n\nlog = logging.getLogger(__name__)\n\n\ndef get_local_path(uri, download_dir, fs=None):\n \"\"\"Convert a URI into a corresponding local path.\n\n If a uri is local, return it. If it's remote, we generate a path for it\n within download_dir. For an S3 path of form s3://<bucket>/<key>, the path\n is <download_dir>/s3/<bucket>/<key>.\n\n Args:\n uri: (string) URI of file\n download_dir: (string) path to directory\n fs: Optional FileSystem to use\n\n Returns:\n (string) a local path\n \"\"\"\n if uri is None:\n return None\n\n if not fs:\n fs = FileSystem.get_file_system(uri, 'r')\n path = fs.local_path(uri, download_dir)\n\n return path\n\n\ndef sync_to_dir(src_dir_uri, dest_dir_uri, delete=False, fs=None):\n \"\"\"Synchronize a local to a local or remote directory.\n\n Transfers files from source to destination directories so that the\n destination has all the source files. If delete is True, also delete\n files in the destination to match those in the source directory.\n\n Args:\n src_dir_uri: (string) URI of local source directory\n dest_dir_uri: (string) URI of destination directory\n delete: (bool)\n fs: Optional FileSystem to use for destination\n \"\"\"\n if not fs:\n fs = FileSystem.get_file_system(dest_dir_uri, 'w')\n fs.sync_to_dir(src_dir_uri, dest_dir_uri, delete=delete)\n\n\ndef sync_from_dir(src_dir_uri, dest_dir_uri, delete=False, fs=None):\n \"\"\"Synchronize a local or remote directory to a local directory.\n\n Transfers files from source to destination directories so that the\n destination has all the source files. If delete is True, also delete\n files in the destination to match those in the source directory.\n\n Args:\n src_dir_uri: (string) URI of source directory\n dest_dir_uri: (string) URI of local destination directory\n delete: (bool)\n fs: Optional FileSystem to use\n \"\"\"\n if not fs:\n fs = FileSystem.get_file_system(src_dir_uri, 'r')\n fs.sync_from_dir(src_dir_uri, dest_dir_uri, delete=delete)\n\n\ndef start_sync(src_dir_uri, dest_dir_uri, sync_interval=600, fs=None):\n \"\"\"Start syncing a directory on a schedule.\n\n Calls sync_to_dir on a schedule.\n\n Args:\n src_dir_uri: (string) Path of the local source directory\n dest_dir_uri: (string) URI of destination directory\n sync_interval: (int) period in seconds for syncing\n fs: Optional FileSystem to use\n \"\"\"\n\n def _sync_dir():\n log.info('Syncing {} to {}...'.format(src_dir_uri, dest_dir_uri))\n sync_to_dir(src_dir_uri, dest_dir_uri, delete=False, fs=fs)\n\n class SyncThread:\n def __init__(self):\n thread = Timer(sync_interval, _sync_dir)\n thread.daemon = True\n thread.start()\n self.thread = thread\n\n def __enter__(self):\n return self.thread\n\n def __exit__(self, type, value, traceback):\n self.thread.cancel()\n\n return SyncThread()\n\n\ndef download_if_needed(uri, download_dir, fs=None):\n \"\"\"Download a file into a directory if it's remote.\n\n If uri is local, there is no need to download the file.\n\n Args:\n uri: (string) URI of file\n download_dir: (string) local directory to download file into\n fs: Optional FileSystem to use.\n\n Returns:\n (string) path to local file\n\n Raises:\n NotReadableError if URI cannot be read from\n \"\"\"\n if uri is None:\n return None\n\n if not fs:\n fs = FileSystem.get_file_system(uri, 'r')\n\n path = get_local_path(uri, download_dir, fs=fs)\n make_dir(path, use_dirname=True)\n\n if path != uri:\n log.info('Downloading {} to {}'.format(uri, path))\n\n fs.copy_from(uri, path)\n\n return path\n\n\ndef download_or_copy(uri, target_dir, fs=None):\n \"\"\"Downloads or copies a file to a directory\n\n Args:\n uri: (string) URI of file\n target_dir: (string) local directory to copy file to\n fs: Optional FileSystem to use\n \"\"\"\n local_path = download_if_needed(uri, target_dir, fs=fs)\n shutil.copy(local_path, target_dir)\n return local_path\n\n\ndef file_exists(uri, fs=None):\n if not fs:\n fs = FileSystem.get_file_system(uri, 'r')\n return fs.file_exists(uri)\n\n\ndef list_paths(uri, ext=None, fs=None):\n if uri is None:\n return None\n\n if not fs:\n fs = FileSystem.get_file_system(uri, 'r')\n\n return fs.list_paths(uri, ext=ext)\n\n\ndef upload_or_copy(src_path, dst_uri, fs=None):\n \"\"\"Upload a file if the destination is remote.\n\n If dst_uri is local, the file is copied.\n\n Args:\n src_path: (string) path to source file\n dst_uri: (string) URI of destination for file\n fs: Optional FileSystem to use\n Raises:\n NotWritableError if URI cannot be written to\n \"\"\"\n if dst_uri is None:\n return\n\n if not (os.path.isfile(src_path) or os.path.isdir(src_path)):\n raise Exception('{} does not exist.'.format(src_path))\n\n if not src_path == dst_uri:\n log.info('Uploading {} to {}'.format(src_path, dst_uri))\n\n if not fs:\n fs = FileSystem.get_file_system(dst_uri, 'w')\n fs.copy_to(src_path, dst_uri)\n\n\ndef file_to_str(uri, fs=None):\n \"\"\"Download contents of text file into a string.\n\n Args:\n uri: (string) URI of file\n fs: Optional FileSystem to use\n\n Returns:\n (string) with contents of text file\n\n Raises:\n NotReadableError if URI cannot be read from\n \"\"\"\n if not fs:\n fs = FileSystem.get_file_system(uri, 'r')\n return fs.read_str(uri)\n\n\ndef str_to_file(content_str, uri, fs=None):\n \"\"\"Writes string to text file.\n\n Args:\n content_str: string to write\n uri: (string) URI of file to write\n fs: Optional FileSystem to use\n\n Raise:\n NotWritableError if file_uri cannot be written\n \"\"\"\n if not fs:\n fs = FileSystem.get_file_system(uri, 'r')\n return fs.write_str(uri, content_str)\n\n\ndef load_json_config(uri, message, fs=None):\n \"\"\"Load a JSON-formatted protobuf config file.\n\n Args:\n uri: (string) URI of config file\n message: (google.protobuf.message.Message) empty protobuf message of\n to load the config into. The type needs to match the content of\n uri.\n fs: Optional FileSystem to use.\n\n Returns:\n the same message passed as input with fields filled in from uri\n\n Raises:\n ProtobufParseException if uri cannot be parsed\n \"\"\"\n try:\n return json_format.Parse(file_to_str(uri, fs=fs), message)\n except json_format.ParseError as e:\n error_msg = ('Problem parsing protobuf file {}. '.format(uri) +\n 'You might need to run scripts/compile')\n raise ProtobufParseException(error_msg) from e\n\n\ndef save_json_config(message, uri, fs=None):\n \"\"\"Save a protobuf object to a JSON file.\n\n Args:\n message: (google.protobuf.message.Message) protobuf message\n uri: (string) URI of JSON file to write message to\n fs: Optional FileSystem to use\n\n Raises:\n NotWritableError if uri cannot be written\n \"\"\"\n json_str = json_format.MessageToJson(message)\n str_to_file(json_str, uri, fs=fs)\n", "path": "rastervision/utils/files.py"}], "after_files": [{"content": "import os\nimport shutil\nfrom threading import Timer\nimport logging\n\nfrom google.protobuf import json_format\n\nfrom rastervision.filesystem.filesystem import FileSystem\nfrom rastervision.filesystem.filesystem import ProtobufParseException\nfrom rastervision.filesystem.local_filesystem import make_dir\n\nlog = logging.getLogger(__name__)\n\n\ndef get_local_path(uri, download_dir, fs=None):\n \"\"\"Convert a URI into a corresponding local path.\n\n If a uri is local, return it. If it's remote, we generate a path for it\n within download_dir. For an S3 path of form s3://<bucket>/<key>, the path\n is <download_dir>/s3/<bucket>/<key>.\n\n Args:\n uri: (string) URI of file\n download_dir: (string) path to directory\n fs: Optional FileSystem to use\n\n Returns:\n (string) a local path\n \"\"\"\n if uri is None:\n return None\n\n if not fs:\n fs = FileSystem.get_file_system(uri, 'r')\n path = fs.local_path(uri, download_dir)\n\n return path\n\n\ndef sync_to_dir(src_dir_uri, dest_dir_uri, delete=False, fs=None):\n \"\"\"Synchronize a local to a local or remote directory.\n\n Transfers files from source to destination directories so that the\n destination has all the source files. If delete is True, also delete\n files in the destination to match those in the source directory.\n\n Args:\n src_dir_uri: (string) URI of local source directory\n dest_dir_uri: (string) URI of destination directory\n delete: (bool)\n fs: Optional FileSystem to use for destination\n \"\"\"\n if not fs:\n fs = FileSystem.get_file_system(dest_dir_uri, 'w')\n fs.sync_to_dir(src_dir_uri, dest_dir_uri, delete=delete)\n\n\ndef sync_from_dir(src_dir_uri, dest_dir_uri, delete=False, fs=None):\n \"\"\"Synchronize a local or remote directory to a local directory.\n\n Transfers files from source to destination directories so that the\n destination has all the source files. If delete is True, also delete\n files in the destination to match those in the source directory.\n\n Args:\n src_dir_uri: (string) URI of source directory\n dest_dir_uri: (string) URI of local destination directory\n delete: (bool)\n fs: Optional FileSystem to use\n \"\"\"\n if not fs:\n fs = FileSystem.get_file_system(src_dir_uri, 'r')\n fs.sync_from_dir(src_dir_uri, dest_dir_uri, delete=delete)\n\n\ndef start_sync(src_dir_uri, dest_dir_uri, sync_interval=600,\n fs=None): # pragma: no cover\n \"\"\"Start syncing a directory on a schedule.\n\n Calls sync_to_dir on a schedule.\n\n Args:\n src_dir_uri: (string) Path of the local source directory\n dest_dir_uri: (string) URI of destination directory\n sync_interval: (int) period in seconds for syncing\n fs: Optional FileSystem to use\n \"\"\"\n\n def _sync_dir():\n log.info('Syncing {} to {}...'.format(src_dir_uri, dest_dir_uri))\n sync_to_dir(src_dir_uri, dest_dir_uri, delete=False, fs=fs)\n\n class SyncThread:\n def __init__(self):\n thread = Timer(sync_interval, _sync_dir)\n thread.daemon = True\n thread.start()\n self.thread = thread\n\n def __enter__(self):\n return self.thread\n\n def __exit__(self, type, value, traceback):\n self.thread.cancel()\n\n return SyncThread()\n\n\ndef download_if_needed(uri, download_dir, fs=None):\n \"\"\"Download a file into a directory if it's remote.\n\n If uri is local, there is no need to download the file.\n\n Args:\n uri: (string) URI of file\n download_dir: (string) local directory to download file into\n fs: Optional FileSystem to use.\n\n Returns:\n (string) path to local file\n\n Raises:\n NotReadableError if URI cannot be read from\n \"\"\"\n if uri is None:\n return None\n\n if not fs:\n fs = FileSystem.get_file_system(uri, 'r')\n\n path = get_local_path(uri, download_dir, fs=fs)\n make_dir(path, use_dirname=True)\n\n if path != uri:\n log.info('Downloading {} to {}'.format(uri, path))\n\n fs.copy_from(uri, path)\n\n return path\n\n\ndef download_or_copy(uri, target_dir, fs=None):\n \"\"\"Downloads or copies a file to a directory\n\n Args:\n uri: (string) URI of file\n target_dir: (string) local directory to copy file to\n fs: Optional FileSystem to use\n \"\"\"\n local_path = download_if_needed(uri, target_dir, fs=fs)\n shutil.copy(local_path, target_dir)\n return local_path\n\n\ndef file_exists(uri, fs=None):\n if not fs:\n fs = FileSystem.get_file_system(uri, 'r')\n return fs.file_exists(uri)\n\n\ndef list_paths(uri, ext='', fs=None):\n if uri is None:\n return None\n\n if not fs:\n fs = FileSystem.get_file_system(uri, 'r')\n\n return fs.list_paths(uri, ext=ext)\n\n\ndef upload_or_copy(src_path, dst_uri, fs=None):\n \"\"\"Upload a file if the destination is remote.\n\n If dst_uri is local, the file is copied.\n\n Args:\n src_path: (string) path to source file\n dst_uri: (string) URI of destination for file\n fs: Optional FileSystem to use\n Raises:\n NotWritableError if URI cannot be written to\n \"\"\"\n if dst_uri is None:\n return\n\n if not (os.path.isfile(src_path) or os.path.isdir(src_path)):\n raise Exception('{} does not exist.'.format(src_path))\n\n if not src_path == dst_uri:\n log.info('Uploading {} to {}'.format(src_path, dst_uri))\n\n if not fs:\n fs = FileSystem.get_file_system(dst_uri, 'w')\n fs.copy_to(src_path, dst_uri)\n\n\ndef file_to_str(uri, fs=None):\n \"\"\"Download contents of text file into a string.\n\n Args:\n uri: (string) URI of file\n fs: Optional FileSystem to use\n\n Returns:\n (string) with contents of text file\n\n Raises:\n NotReadableError if URI cannot be read from\n \"\"\"\n if not fs:\n fs = FileSystem.get_file_system(uri, 'r')\n return fs.read_str(uri)\n\n\ndef str_to_file(content_str, uri, fs=None):\n \"\"\"Writes string to text file.\n\n Args:\n content_str: string to write\n uri: (string) URI of file to write\n fs: Optional FileSystem to use\n\n Raise:\n NotWritableError if file_uri cannot be written\n \"\"\"\n if not fs:\n fs = FileSystem.get_file_system(uri, 'r')\n return fs.write_str(uri, content_str)\n\n\ndef load_json_config(uri, message, fs=None):\n \"\"\"Load a JSON-formatted protobuf config file.\n\n Args:\n uri: (string) URI of config file\n message: (google.protobuf.message.Message) empty protobuf message of\n to load the config into. The type needs to match the content of\n uri.\n fs: Optional FileSystem to use.\n\n Returns:\n the same message passed as input with fields filled in from uri\n\n Raises:\n ProtobufParseException if uri cannot be parsed\n \"\"\"\n try:\n return json_format.Parse(file_to_str(uri, fs=fs), message)\n except json_format.ParseError as e:\n error_msg = ('Problem parsing protobuf file {}. '.format(uri) +\n 'You might need to run scripts/compile')\n raise ProtobufParseException(error_msg) from e\n\n\ndef save_json_config(message, uri, fs=None):\n \"\"\"Save a protobuf object to a JSON file.\n\n Args:\n message: (google.protobuf.message.Message) protobuf message\n uri: (string) URI of JSON file to write message to\n fs: Optional FileSystem to use\n\n Raises:\n NotWritableError if uri cannot be written\n \"\"\"\n json_str = json_format.MessageToJson(message)\n str_to_file(json_str, uri, fs=fs)\n", "path": "rastervision/utils/files.py"}]}
2,795
196
gh_patches_debug_22015
rasdani/github-patches
git_diff
cloud-custodian__cloud-custodian-6195
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- docs - value_from examples in value filter the value_from source docs aren't making it through to the documentation page on value filter, those examples should ideally be there as its a common integration technique with external vocabularies. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docs/source/conf.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # 3 # Cloud Custodian documentation build configuration file, created by 4 # sphinx-quickstart on Mon Dec 21 08:34:24 2015. 5 # 6 # This file is execfile()d with the current directory set to its 7 # containing dir. 8 # 9 # Note that not all possible configuration values are present in this 10 # autogenerated file. 11 # 12 # All configuration values have a default; values that are commented out 13 # serve to show the default. 14 15 import sys 16 import os 17 import shlex 18 import sphinx_rtd_theme 19 20 # If extensions (or modules to document with autodoc) are in another directory, 21 # add these directories to sys.path here. If the directory is relative to the 22 # documentation root, use os.path.abspath to make it absolute, like shown here. 23 sys.path.insert(0, os.path.abspath('../..')) 24 25 # -- General configuration ------------------------------------------------ 26 27 # If your documentation needs a minimal Sphinx version, state it here. 28 #needs_sphinx = '1.0' 29 30 # Add any Sphinx extension module names here, as strings. They can be 31 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 32 # ones. 33 extensions = [ 34 'sphinx.ext.autodoc', 35 'sphinx.ext.viewcode', 36 'c7n_sphinxext.docgen', 37 'recommonmark', 38 'sphinx_markdown_tables' 39 ] 40 41 # Add any paths that contain templates here, relative to this directory. 42 #templates_path = ['_templates'] 43 44 # The suffix(es) of source filenames. 45 # You can specify multiple suffix as a list of string: 46 source_suffix = ['.rst', '.md'] 47 48 49 # The encoding of source files. 50 #source_encoding = 'utf-8-sig' 51 52 # The master toctree document. 53 master_doc = 'index' 54 55 # General information about the project. 56 project = u'Cloud Custodian' 57 author = u'Kapil Thangavelu' 58 59 # The version info for the project you're documenting, acts as replacement for 60 # |version| and |release|, also used in various other places throughout the 61 # built documents. 62 # 63 # The short X.Y version. 64 # version = u'1.0' 65 # The full version, including alpha/beta/rc tags. 66 # release = u'1.0' 67 68 # The language for content autogenerated by Sphinx. Refer to documentation 69 # for a list of supported languages. 70 # 71 # This is also used if you do content translation via gettext catalogs. 72 # Usually you set "language" from the command line for these cases. 73 language = None 74 75 # There are two options for replacing |today|: either, you set today to some 76 # non-false value, then it is used: 77 #today = '' 78 # Else, today_fmt is used as the format for a strftime call. 79 #today_fmt = '%B %d, %Y' 80 81 # List of patterns, relative to source directory, that match files and 82 # directories to ignore when looking for source files. 83 exclude_patterns = [] 84 85 # The reST default role (used for this markup: `text`) to use for all 86 # documents. 87 #default_role = None 88 89 # If true, '()' will be appended to :func: etc. cross-reference text. 90 #add_function_parentheses = True 91 92 # If true, the current module name will be prepended to all description 93 # unit titles (such as .. function::). 94 #add_module_names = True 95 96 # If true, sectionauthor and moduleauthor directives will be shown in the 97 # output. They are ignored by default. 98 #show_authors = False 99 100 # The name of the Pygments (syntax highlighting) style to use. 101 pygments_style = 'sphinx' 102 103 # A list of ignored prefixes for module index sorting. 104 #modindex_common_prefix = [] 105 106 # If true, keep warnings as "system message" paragraphs in the built documents. 107 #keep_warnings = False 108 109 # If true, `todo` and `todoList` produce output, else they produce nothing. 110 todo_include_todos = False 111 112 113 # -- Options for HTML output ---------------------------------------------- 114 115 # The theme to use for HTML and HTML Help pages. See the documentation for 116 # a list of builtin themes. 117 html_theme = 'sphinx_rtd_theme' 118 119 # Theme options are theme-specific and customize the look and feel of a theme 120 # further. For a list of options available for each theme, see the 121 # documentation. 122 html_theme_options = { 123 'prev_next_buttons_location': 'both', 124 'style_external_links': True, 125 'analytics_id': "UA-162730326-1", 126 # Toc options 127 'collapse_navigation': False, 128 'sticky_navigation': True, 129 'includehidden': True, 130 'titles_only': False 131 } 132 133 # Add any paths that contain custom themes here, relative to this directory. 134 #html_theme_path = sphinx_bootstrap_theme.get_html_theme_path() 135 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] 136 137 # The name for this set of Sphinx documents. If None, it defaults to 138 # "<project> v<release> documentation". 139 #html_title = None 140 141 # A shorter title for the navigation bar. Default is the same as html_title. 142 #html_short_title = None 143 144 # The name of an image file (relative to this directory) to place at the top 145 # of the sidebar. 146 #html_logo = 'c1_labs.png' 147 148 # The name of an image file (within the static path) to use as favicon of the 149 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 150 # pixels large. 151 html_favicon = 'icon.ico' 152 153 # Add any paths that contain custom static files (such as style sheets) here, 154 # relative to this directory. They are copied after the builtin static files, 155 # so a file named "default.css" will overwrite the builtin "default.css". 156 html_static_path = ['_static'] 157 158 # Add any extra paths that contain custom files (such as robots.txt or 159 # .htaccess) here, relative to this directory. These files are copied 160 # directly to the root of the documentation. 161 #html_extra_path = [] 162 163 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 164 # using the given strftime format. 165 #html_last_updated_fmt = '%b %d, %Y' 166 167 # If true, SmartyPants will be used to convert quotes and dashes to 168 # typographically correct entities. 169 #html_use_smartypants = True 170 171 # Custom sidebar templates, maps document names to template names. 172 #html_sidebars = {} 173 174 # Additional templates that should be rendered to pages, maps page names to 175 # template names. 176 #html_additional_pages = {} 177 178 # If false, no module index is generated. 179 #html_domain_indices = True 180 181 # If false, no index is generated. 182 #html_use_index = True 183 184 # If true, the index is split into individual pages for each letter. 185 #html_split_index = False 186 187 # If true, links to the reST sources are added to the pages. 188 html_show_sourcelink = False 189 190 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 191 #html_show_sphinx = True 192 193 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 194 #html_show_copyright = True 195 196 # If true, an OpenSearch description file will be output, and all pages will 197 # contain a <link> tag referring to it. The value of this option must be the 198 # base URL from which the finished HTML is served. 199 #html_use_opensearch = '' 200 201 # This is the file name suffix for HTML files (e.g. ".xhtml"). 202 #html_file_suffix = None 203 204 # Language to be used for generating the HTML full-text search index. 205 # Sphinx supports the following languages: 206 # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' 207 # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' 208 #html_search_language = 'en' 209 210 # A dictionary with options for the search language support, empty by default. 211 # Now only 'ja' uses this config value 212 #html_search_options = {'type': 'default'} 213 214 # The name of a javascript file (relative to the configuration directory) that 215 # implements a search results scorer. If empty, the default will be used. 216 #html_search_scorer = 'scorer.js' 217 218 # Output file base name for HTML help builder. 219 htmlhelp_basename = 'CloudCustodiandoc' 220 221 # -- Options for LaTeX output --------------------------------------------- 222 223 latex_elements = { 224 # The paper size ('letterpaper' or 'a4paper'). 225 #'papersize': 'letterpaper', 226 227 # The font size ('10pt', '11pt' or '12pt'). 228 #'pointsize': '10pt', 229 230 # Additional stuff for the LaTeX preamble. 231 #'preamble': '', 232 233 # Latex figure (float) alignment 234 #'figure_align': 'htbp', 235 } 236 237 # Grouping the document tree into LaTeX files. List of tuples 238 # (source start file, target name, title, 239 # author, documentclass [howto, manual, or own class]). 240 latex_documents = [ 241 (master_doc, 'CloudCustodian.tex', u'Cloud Custodian Documentation', 242 u'Kapil Thangavelu', 'manual'), 243 ] 244 245 # The name of an image file (relative to this directory) to place at the top of 246 # the title page. 247 #latex_logo = None 248 249 # For "manual" documents, if this is true, then toplevel headings are parts, 250 # not chapters. 251 #latex_use_parts = False 252 253 # If true, show page references after internal links. 254 #latex_show_pagerefs = False 255 256 # If true, show URL addresses after external links. 257 #latex_show_urls = False 258 259 # Documents to append as an appendix to all manuals. 260 #latex_appendices = [] 261 262 # If false, no module index is generated. 263 #latex_domain_indices = True 264 265 266 # -- Options for manual page output --------------------------------------- 267 268 # One entry per manual page. List of tuples 269 # (source start file, name, description, authors, manual section). 270 man_pages = [ 271 (master_doc, 'cloudCustodian', u'Cloud Custodian Documentation', 272 [author], 1) 273 ] 274 275 # If true, show URL addresses after external links. 276 #man_show_urls = False 277 278 279 # -- Options for Texinfo output ------------------------------------------- 280 281 # Grouping the document tree into Texinfo files. List of tuples 282 # (source start file, target name, title, author, 283 # dir menu entry, description, category) 284 texinfo_documents = [ 285 (master_doc, 'CloudCustodian', u'Cloud Custodian Documentation', 286 author, 'CloudCustodian', 'One line description of project.', 287 'Miscellaneous'), 288 ] 289 290 # Documents to append as an appendix to all manuals. 291 #texinfo_appendices = [] 292 293 # If false, no module index is generated. 294 #texinfo_domain_indices = True 295 296 # How to display URL addresses: 'footnote', 'no', or 'inline'. 297 #texinfo_show_urls = 'footnote' 298 299 # If true, do not generate a @detailmenu in the "Top" node's menu. 300 #texinfo_no_detailmenu = False 301 302 # This adds in our expand/collapse JS/CSS 303 def setup(app): 304 app.add_javascript('js/expand.js') 305 app.add_stylesheet('css/expand.css') 306 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/source/conf.py b/docs/source/conf.py --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -16,6 +16,7 @@ import os import shlex import sphinx_rtd_theme +from sphinx.ext import autodoc # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the @@ -38,6 +39,17 @@ 'sphinx_markdown_tables' ] +# Extract only a classes docstrings +class DocsonlyMethodDocumenter(autodoc.MethodDocumenter): + objtype = "doconly" + content_indent = "" + + def format_signature(self, **kwargs): + return "" + + def add_directive_header(self, sig: str): + return None + # Add any paths that contain templates here, relative to this directory. #templates_path = ['_templates'] @@ -303,3 +315,4 @@ def setup(app): app.add_javascript('js/expand.js') app.add_stylesheet('css/expand.css') + app.add_autodocumenter(DocsonlyMethodDocumenter)
{"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -16,6 +16,7 @@\n import os\n import shlex\n import sphinx_rtd_theme\n+from sphinx.ext import autodoc\n \n # If extensions (or modules to document with autodoc) are in another directory,\n # add these directories to sys.path here. If the directory is relative to the\n@@ -38,6 +39,17 @@\n 'sphinx_markdown_tables'\n ]\n \n+# Extract only a classes docstrings\n+class DocsonlyMethodDocumenter(autodoc.MethodDocumenter):\n+ objtype = \"doconly\"\n+ content_indent = \"\"\n+\n+ def format_signature(self, **kwargs):\n+ return \"\"\n+\n+ def add_directive_header(self, sig: str):\n+ return None\n+\n # Add any paths that contain templates here, relative to this directory.\n #templates_path = ['_templates']\n \n@@ -303,3 +315,4 @@\n def setup(app):\n app.add_javascript('js/expand.js')\n app.add_stylesheet('css/expand.css')\n+ app.add_autodocumenter(DocsonlyMethodDocumenter)\n", "issue": "docs - value_from examples in value filter\nthe value_from source docs aren't making it through to the documentation page on value filter, those examples should ideally be there as its a common integration technique with external vocabularies.\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Cloud Custodian documentation build configuration file, created by\n# sphinx-quickstart on Mon Dec 21 08:34:24 2015.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nimport os\nimport shlex\nimport sphinx_rtd_theme\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath('../..'))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.viewcode',\n 'c7n_sphinxext.docgen',\n 'recommonmark',\n 'sphinx_markdown_tables'\n]\n\n# Add any paths that contain templates here, relative to this directory.\n#templates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\nsource_suffix = ['.rst', '.md']\n\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'Cloud Custodian'\nauthor = u'Kapil Thangavelu'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\n# version = u'1.0'\n# The full version, including alpha/beta/rc tags.\n# release = u'1.0'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = []\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\n 'prev_next_buttons_location': 'both',\n 'style_external_links': True,\n 'analytics_id': \"UA-162730326-1\",\n # Toc options\n 'collapse_navigation': False,\n 'sticky_navigation': True,\n 'includehidden': True,\n 'titles_only': False\n}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = 'c1_labs.png'\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\nhtml_favicon = 'icon.ico'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n#html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\nhtml_show_sourcelink = False\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'\n#html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# Now only 'ja' uses this config value\n#html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. If empty, the default will be used.\n#html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'CloudCustodiandoc'\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n\n# Latex figure (float) alignment\n#'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'CloudCustodian.tex', u'Cloud Custodian Documentation',\n u'Kapil Thangavelu', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'cloudCustodian', u'Cloud Custodian Documentation',\n [author], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'CloudCustodian', u'Cloud Custodian Documentation',\n author, 'CloudCustodian', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n\n# This adds in our expand/collapse JS/CSS\ndef setup(app):\n app.add_javascript('js/expand.js')\n app.add_stylesheet('css/expand.css')\n", "path": "docs/source/conf.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Cloud Custodian documentation build configuration file, created by\n# sphinx-quickstart on Mon Dec 21 08:34:24 2015.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nimport os\nimport shlex\nimport sphinx_rtd_theme\nfrom sphinx.ext import autodoc\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath('../..'))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.viewcode',\n 'c7n_sphinxext.docgen',\n 'recommonmark',\n 'sphinx_markdown_tables'\n]\n\n# Extract only a classes docstrings\nclass DocsonlyMethodDocumenter(autodoc.MethodDocumenter):\n objtype = \"doconly\"\n content_indent = \"\"\n\n def format_signature(self, **kwargs):\n return \"\"\n\n def add_directive_header(self, sig: str):\n return None\n\n# Add any paths that contain templates here, relative to this directory.\n#templates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\nsource_suffix = ['.rst', '.md']\n\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'Cloud Custodian'\nauthor = u'Kapil Thangavelu'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\n# version = u'1.0'\n# The full version, including alpha/beta/rc tags.\n# release = u'1.0'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = []\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\n 'prev_next_buttons_location': 'both',\n 'style_external_links': True,\n 'analytics_id': \"UA-162730326-1\",\n # Toc options\n 'collapse_navigation': False,\n 'sticky_navigation': True,\n 'includehidden': True,\n 'titles_only': False\n}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = 'c1_labs.png'\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\nhtml_favicon = 'icon.ico'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n#html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\nhtml_show_sourcelink = False\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'\n#html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# Now only 'ja' uses this config value\n#html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. If empty, the default will be used.\n#html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'CloudCustodiandoc'\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n\n# Latex figure (float) alignment\n#'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'CloudCustodian.tex', u'Cloud Custodian Documentation',\n u'Kapil Thangavelu', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'cloudCustodian', u'Cloud Custodian Documentation',\n [author], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'CloudCustodian', u'Cloud Custodian Documentation',\n author, 'CloudCustodian', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n\n# This adds in our expand/collapse JS/CSS\ndef setup(app):\n app.add_javascript('js/expand.js')\n app.add_stylesheet('css/expand.css')\n app.add_autodocumenter(DocsonlyMethodDocumenter)\n", "path": "docs/source/conf.py"}]}
3,626
275
gh_patches_debug_15187
rasdani/github-patches
git_diff
vispy__vispy-1362
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- examples/tutorial/app/simple_wx.py issue ``` Traceback (most recent call last): File "simple_wx.py", line 58, in <module> frame = TestFrame() File "simple_wx.py", line 49, in __init__ self.canvas = Canvas(app="wx", parent=self, show=True) File "simple_wx.py", line 20, in __init__ app.Canvas.__init__(self, *args, **kwargs) File "/home/eldar/src/vispy/vispy/app/canvas.py", line 208, in __init__ self.set_current() File "/home/eldar/src/vispy/vispy/app/canvas.py", line 406, in set_current self._backend._vispy_set_current() File "/home/eldar/src/vispy/vispy/app/backends/_wx.py", line 302, in _vispy_set_current self.SetCurrent(self._gl_context) wx._core.wxAssertionError: C++ assertion "xid" failed at /home/eldar/src/wx/wxPython_Phoenix/wxPython-4.0.0b2/ext/wxWidgets/src/unix/glx11.cpp(194) in SetCurrent(): window must be shown ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `examples/tutorial/app/simple_wx.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # vispy: testskip 3 # Copyright (c) 2015, Vispy Development Team. 4 # Distributed under the (new) BSD License. See LICENSE.txt for more info. 5 """ 6 This is a very minimal example that opens a window and makes the background 7 color to change from black to white to black ... 8 9 The wx backend is used to embed the canvas in a simple wx Frame with 10 a menubar. 11 """ 12 13 import wx 14 import math 15 from vispy import app, gloo 16 17 18 class Canvas(app.Canvas): 19 def __init__(self, *args, **kwargs): 20 app.Canvas.__init__(self, *args, **kwargs) 21 self._timer = app.Timer('auto', connect=self.on_timer, start=True) 22 self.tick = 0 23 24 def on_draw(self, event): 25 gloo.clear(color=True) 26 27 def on_timer(self, event): 28 self.tick += 1 / 60.0 29 c = abs(math.sin(self.tick)) 30 gloo.set_clear_color((c, c, c, 1)) 31 self.update() 32 33 def stop_timer(self): 34 self._timer.stop() 35 36 37 class TestFrame(wx.Frame): 38 def __init__(self): 39 wx.Frame.__init__(self, None, -1, "Vispy Test", 40 wx.DefaultPosition, size=(500, 500)) 41 42 MenuBar = wx.MenuBar() 43 file_menu = wx.Menu() 44 file_menu.Append(wx.ID_EXIT, "&Quit") 45 self.Bind(wx.EVT_MENU, self.on_quit, id=wx.ID_EXIT) 46 MenuBar.Append(file_menu, "&File") 47 self.SetMenuBar(MenuBar) 48 49 self.canvas = Canvas(app="wx", parent=self, show=True) 50 51 def on_quit(self, event): 52 self.canvas.stop_timer() 53 self.Close(True) 54 55 56 if __name__ == '__main__': 57 myapp = wx.App(0) 58 frame = TestFrame() 59 frame.Show(True) 60 myapp.MainLoop() 61 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/examples/tutorial/app/simple_wx.py b/examples/tutorial/app/simple_wx.py --- a/examples/tutorial/app/simple_wx.py +++ b/examples/tutorial/app/simple_wx.py @@ -43,15 +43,20 @@ file_menu = wx.Menu() file_menu.Append(wx.ID_EXIT, "&Quit") self.Bind(wx.EVT_MENU, self.on_quit, id=wx.ID_EXIT) + self.Bind(wx.EVT_SHOW, self.on_show) MenuBar.Append(file_menu, "&File") self.SetMenuBar(MenuBar) - self.canvas = Canvas(app="wx", parent=self, show=True) + self.canvas = Canvas(app="wx", parent=self) def on_quit(self, event): self.canvas.stop_timer() self.Close(True) + def on_show(self, event): + self.canvas.show() + event.Skip() + if __name__ == '__main__': myapp = wx.App(0)
{"golden_diff": "diff --git a/examples/tutorial/app/simple_wx.py b/examples/tutorial/app/simple_wx.py\n--- a/examples/tutorial/app/simple_wx.py\n+++ b/examples/tutorial/app/simple_wx.py\n@@ -43,15 +43,20 @@\n file_menu = wx.Menu()\n file_menu.Append(wx.ID_EXIT, \"&Quit\")\n self.Bind(wx.EVT_MENU, self.on_quit, id=wx.ID_EXIT)\n+ self.Bind(wx.EVT_SHOW, self.on_show)\n MenuBar.Append(file_menu, \"&File\")\n self.SetMenuBar(MenuBar)\n \n- self.canvas = Canvas(app=\"wx\", parent=self, show=True)\n+ self.canvas = Canvas(app=\"wx\", parent=self)\n \n def on_quit(self, event):\n self.canvas.stop_timer()\n self.Close(True)\n \n+ def on_show(self, event):\n+ self.canvas.show()\n+ event.Skip()\n+\n \n if __name__ == '__main__':\n myapp = wx.App(0)\n", "issue": "examples/tutorial/app/simple_wx.py issue\n```\r\nTraceback (most recent call last):\r\n File \"simple_wx.py\", line 58, in <module>\r\n frame = TestFrame()\r\n File \"simple_wx.py\", line 49, in __init__\r\n self.canvas = Canvas(app=\"wx\", parent=self, show=True)\r\n File \"simple_wx.py\", line 20, in __init__\r\n app.Canvas.__init__(self, *args, **kwargs)\r\n File \"/home/eldar/src/vispy/vispy/app/canvas.py\", line 208, in __init__\r\n self.set_current()\r\n File \"/home/eldar/src/vispy/vispy/app/canvas.py\", line 406, in set_current\r\n self._backend._vispy_set_current()\r\n File \"/home/eldar/src/vispy/vispy/app/backends/_wx.py\", line 302, in _vispy_set_current\r\n self.SetCurrent(self._gl_context)\r\nwx._core.wxAssertionError: C++ assertion \"xid\" failed at /home/eldar/src/wx/wxPython_Phoenix/wxPython-4.0.0b2/ext/wxWidgets/src/unix/glx11.cpp(194) in SetCurrent(): window must be shown\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# vispy: testskip\n# Copyright (c) 2015, Vispy Development Team.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\"\"\"\nThis is a very minimal example that opens a window and makes the background\ncolor to change from black to white to black ...\n\nThe wx backend is used to embed the canvas in a simple wx Frame with\na menubar.\n\"\"\"\n\nimport wx\nimport math\nfrom vispy import app, gloo\n\n\nclass Canvas(app.Canvas):\n def __init__(self, *args, **kwargs):\n app.Canvas.__init__(self, *args, **kwargs)\n self._timer = app.Timer('auto', connect=self.on_timer, start=True)\n self.tick = 0\n\n def on_draw(self, event):\n gloo.clear(color=True)\n\n def on_timer(self, event):\n self.tick += 1 / 60.0\n c = abs(math.sin(self.tick))\n gloo.set_clear_color((c, c, c, 1))\n self.update()\n\n def stop_timer(self):\n self._timer.stop()\n\n\nclass TestFrame(wx.Frame):\n def __init__(self):\n wx.Frame.__init__(self, None, -1, \"Vispy Test\",\n wx.DefaultPosition, size=(500, 500))\n\n MenuBar = wx.MenuBar()\n file_menu = wx.Menu()\n file_menu.Append(wx.ID_EXIT, \"&Quit\")\n self.Bind(wx.EVT_MENU, self.on_quit, id=wx.ID_EXIT)\n MenuBar.Append(file_menu, \"&File\")\n self.SetMenuBar(MenuBar)\n\n self.canvas = Canvas(app=\"wx\", parent=self, show=True)\n\n def on_quit(self, event):\n self.canvas.stop_timer()\n self.Close(True)\n\n\nif __name__ == '__main__':\n myapp = wx.App(0)\n frame = TestFrame()\n frame.Show(True)\n myapp.MainLoop()\n", "path": "examples/tutorial/app/simple_wx.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# vispy: testskip\n# Copyright (c) 2015, Vispy Development Team.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\"\"\"\nThis is a very minimal example that opens a window and makes the background\ncolor to change from black to white to black ...\n\nThe wx backend is used to embed the canvas in a simple wx Frame with\na menubar.\n\"\"\"\n\nimport wx\nimport math\nfrom vispy import app, gloo\n\n\nclass Canvas(app.Canvas):\n def __init__(self, *args, **kwargs):\n app.Canvas.__init__(self, *args, **kwargs)\n self._timer = app.Timer('auto', connect=self.on_timer, start=True)\n self.tick = 0\n\n def on_draw(self, event):\n gloo.clear(color=True)\n\n def on_timer(self, event):\n self.tick += 1 / 60.0\n c = abs(math.sin(self.tick))\n gloo.set_clear_color((c, c, c, 1))\n self.update()\n\n def stop_timer(self):\n self._timer.stop()\n\n\nclass TestFrame(wx.Frame):\n def __init__(self):\n wx.Frame.__init__(self, None, -1, \"Vispy Test\",\n wx.DefaultPosition, size=(500, 500))\n\n MenuBar = wx.MenuBar()\n file_menu = wx.Menu()\n file_menu.Append(wx.ID_EXIT, \"&Quit\")\n self.Bind(wx.EVT_MENU, self.on_quit, id=wx.ID_EXIT)\n self.Bind(wx.EVT_SHOW, self.on_show)\n MenuBar.Append(file_menu, \"&File\")\n self.SetMenuBar(MenuBar)\n\n self.canvas = Canvas(app=\"wx\", parent=self)\n\n def on_quit(self, event):\n self.canvas.stop_timer()\n self.Close(True)\n\n def on_show(self, event):\n self.canvas.show()\n event.Skip()\n\n\nif __name__ == '__main__':\n myapp = wx.App(0)\n frame = TestFrame()\n frame.Show(True)\n myapp.MainLoop()\n", "path": "examples/tutorial/app/simple_wx.py"}]}
1,097
207
gh_patches_debug_975
rasdani/github-patches
git_diff
PennyLaneAI__pennylane-2947
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [BUG] `qml.equal` ignore in-place inversion Currently, we have: ``` >>> qml.equal(qml.RX(1.0, wires=0), qml.RX(1.0, wires=0).inv()) True ``` If two operations are inverses of each other, they should not be equal. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pennylane/ops/functions/equal.py` Content: ``` 1 # Copyright 2018-2021 Xanadu Quantum Technologies Inc. 2 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 7 # http://www.apache.org/licenses/LICENSE-2.0 8 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 """ 15 This module contains the qml.equal function. 16 """ 17 # pylint: disable=too-many-arguments,too-many-return-statements 18 import pennylane as qml 19 from pennylane.operation import Operator 20 21 22 def equal( 23 op1: Operator, 24 op2: Operator, 25 check_interface=True, 26 check_trainability=True, 27 rtol=1e-5, 28 atol=1e-9, 29 ): 30 r"""Function for determining operator equality. 31 32 Args: 33 op1 (.Operator): First operator to compare 34 op2 (.Operator): Second operator to compare 35 check_interface (bool, optional): Whether to compare interfaces. Default: `True` 36 check_trainability (bool, optional): Whether to compare trainability status. Default: `True` 37 rtol (float, optional): Relative tolerance for parameters 38 atol (float, optional): Absolute tolerance for parameters 39 40 Returns: 41 bool: `True` if the operators are equal, else `False` 42 43 **Example** 44 45 Given two operators, ``qml.equal`` determines their equality: 46 47 >>> op1 = qml.RX(np.array(.12), wires=0) 48 >>> op2 = qml.RY(np.array(1.23), wires=0) 49 >>> qml.equal(op1, op1), qml.equal(op1, op2) 50 True False 51 52 .. details:: 53 :title: Usage Details 54 55 You can use the optional arguments to get more specific results. 56 57 Consider the following comparisons: 58 59 >>> op1 = qml.RX(torch.tensor(1.2), wires=0) 60 >>> op2 = qml.RX(jax.numpy.array(1.2), wires=0) 61 >>> qml.equal(op1, op2) 62 False 63 64 >>> qml.equal(op1, op2, check_interface=False, check_trainability=False) 65 True 66 67 >>> op3 = qml.RX(np.array(1.2, requires_grad=True), wires=0) 68 >>> op4 = qml.RX(np.array(1.2, requires_grad=False), wires=0) 69 >>> qml.equal(op3, op4) 70 False 71 72 >>> qml.equal(op3, op4, check_trainability=False) 73 True 74 """ 75 if op1.__class__ is not op2.__class__ or op1.arithmetic_depth != op2.arithmetic_depth: 76 return False 77 if op1.arithmetic_depth > 0: 78 raise NotImplementedError( 79 "Comparison of operators with an arithmetic depth larger than 0 is not yet implemented." 80 ) 81 if not all( 82 qml.math.allclose(d1, d2, rtol=rtol, atol=atol) for d1, d2 in zip(op1.data, op2.data) 83 ): 84 return False 85 if op1.wires != op2.wires: 86 return False 87 for kwarg in op1.hyperparameters: 88 if op1.hyperparameters[kwarg] != op2.hyperparameters[kwarg]: 89 return False 90 91 if check_trainability: 92 for params_1, params_2 in zip(op1.data, op2.data): 93 if qml.math.requires_grad(params_1) != qml.math.requires_grad(params_2): 94 return False 95 96 if check_interface: 97 for params_1, params_2 in zip(op1.data, op2.data): 98 if qml.math.get_interface(params_1) != qml.math.get_interface(params_2): 99 return False 100 101 return True 102 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pennylane/ops/functions/equal.py b/pennylane/ops/functions/equal.py --- a/pennylane/ops/functions/equal.py +++ b/pennylane/ops/functions/equal.py @@ -98,4 +98,4 @@ if qml.math.get_interface(params_1) != qml.math.get_interface(params_2): return False - return True + return getattr(op1, "inverse", False) == getattr(op2, "inverse", False)
{"golden_diff": "diff --git a/pennylane/ops/functions/equal.py b/pennylane/ops/functions/equal.py\n--- a/pennylane/ops/functions/equal.py\n+++ b/pennylane/ops/functions/equal.py\n@@ -98,4 +98,4 @@\n if qml.math.get_interface(params_1) != qml.math.get_interface(params_2):\n return False\n \n- return True\n+ return getattr(op1, \"inverse\", False) == getattr(op2, \"inverse\", False)\n", "issue": "[BUG] `qml.equal` ignore in-place inversion\nCurrently, we have:\r\n```\r\n>>> qml.equal(qml.RX(1.0, wires=0), qml.RX(1.0, wires=0).inv())\r\nTrue\r\n```\r\n\r\nIf two operations are inverses of each other, they should not be equal.\n", "before_files": [{"content": "# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis module contains the qml.equal function.\n\"\"\"\n# pylint: disable=too-many-arguments,too-many-return-statements\nimport pennylane as qml\nfrom pennylane.operation import Operator\n\n\ndef equal(\n op1: Operator,\n op2: Operator,\n check_interface=True,\n check_trainability=True,\n rtol=1e-5,\n atol=1e-9,\n):\n r\"\"\"Function for determining operator equality.\n\n Args:\n op1 (.Operator): First operator to compare\n op2 (.Operator): Second operator to compare\n check_interface (bool, optional): Whether to compare interfaces. Default: `True`\n check_trainability (bool, optional): Whether to compare trainability status. Default: `True`\n rtol (float, optional): Relative tolerance for parameters\n atol (float, optional): Absolute tolerance for parameters\n\n Returns:\n bool: `True` if the operators are equal, else `False`\n\n **Example**\n\n Given two operators, ``qml.equal`` determines their equality:\n\n >>> op1 = qml.RX(np.array(.12), wires=0)\n >>> op2 = qml.RY(np.array(1.23), wires=0)\n >>> qml.equal(op1, op1), qml.equal(op1, op2)\n True False\n\n .. details::\n :title: Usage Details\n\n You can use the optional arguments to get more specific results.\n\n Consider the following comparisons:\n\n >>> op1 = qml.RX(torch.tensor(1.2), wires=0)\n >>> op2 = qml.RX(jax.numpy.array(1.2), wires=0)\n >>> qml.equal(op1, op2)\n False\n\n >>> qml.equal(op1, op2, check_interface=False, check_trainability=False)\n True\n\n >>> op3 = qml.RX(np.array(1.2, requires_grad=True), wires=0)\n >>> op4 = qml.RX(np.array(1.2, requires_grad=False), wires=0)\n >>> qml.equal(op3, op4)\n False\n\n >>> qml.equal(op3, op4, check_trainability=False)\n True\n \"\"\"\n if op1.__class__ is not op2.__class__ or op1.arithmetic_depth != op2.arithmetic_depth:\n return False\n if op1.arithmetic_depth > 0:\n raise NotImplementedError(\n \"Comparison of operators with an arithmetic depth larger than 0 is not yet implemented.\"\n )\n if not all(\n qml.math.allclose(d1, d2, rtol=rtol, atol=atol) for d1, d2 in zip(op1.data, op2.data)\n ):\n return False\n if op1.wires != op2.wires:\n return False\n for kwarg in op1.hyperparameters:\n if op1.hyperparameters[kwarg] != op2.hyperparameters[kwarg]:\n return False\n\n if check_trainability:\n for params_1, params_2 in zip(op1.data, op2.data):\n if qml.math.requires_grad(params_1) != qml.math.requires_grad(params_2):\n return False\n\n if check_interface:\n for params_1, params_2 in zip(op1.data, op2.data):\n if qml.math.get_interface(params_1) != qml.math.get_interface(params_2):\n return False\n\n return True\n", "path": "pennylane/ops/functions/equal.py"}], "after_files": [{"content": "# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis module contains the qml.equal function.\n\"\"\"\n# pylint: disable=too-many-arguments,too-many-return-statements\nimport pennylane as qml\nfrom pennylane.operation import Operator\n\n\ndef equal(\n op1: Operator,\n op2: Operator,\n check_interface=True,\n check_trainability=True,\n rtol=1e-5,\n atol=1e-9,\n):\n r\"\"\"Function for determining operator equality.\n\n Args:\n op1 (.Operator): First operator to compare\n op2 (.Operator): Second operator to compare\n check_interface (bool, optional): Whether to compare interfaces. Default: `True`\n check_trainability (bool, optional): Whether to compare trainability status. Default: `True`\n rtol (float, optional): Relative tolerance for parameters\n atol (float, optional): Absolute tolerance for parameters\n\n Returns:\n bool: `True` if the operators are equal, else `False`\n\n **Example**\n\n Given two operators, ``qml.equal`` determines their equality:\n\n >>> op1 = qml.RX(np.array(.12), wires=0)\n >>> op2 = qml.RY(np.array(1.23), wires=0)\n >>> qml.equal(op1, op1), qml.equal(op1, op2)\n True False\n\n .. details::\n :title: Usage Details\n\n You can use the optional arguments to get more specific results.\n\n Consider the following comparisons:\n\n >>> op1 = qml.RX(torch.tensor(1.2), wires=0)\n >>> op2 = qml.RX(jax.numpy.array(1.2), wires=0)\n >>> qml.equal(op1, op2)\n False\n\n >>> qml.equal(op1, op2, check_interface=False, check_trainability=False)\n True\n\n >>> op3 = qml.RX(np.array(1.2, requires_grad=True), wires=0)\n >>> op4 = qml.RX(np.array(1.2, requires_grad=False), wires=0)\n >>> qml.equal(op3, op4)\n False\n\n >>> qml.equal(op3, op4, check_trainability=False)\n True\n \"\"\"\n if op1.__class__ is not op2.__class__ or op1.arithmetic_depth != op2.arithmetic_depth:\n return False\n if op1.arithmetic_depth > 0:\n raise NotImplementedError(\n \"Comparison of operators with an arithmetic depth larger than 0 is not yet implemented.\"\n )\n if not all(\n qml.math.allclose(d1, d2, rtol=rtol, atol=atol) for d1, d2 in zip(op1.data, op2.data)\n ):\n return False\n if op1.wires != op2.wires:\n return False\n for kwarg in op1.hyperparameters:\n if op1.hyperparameters[kwarg] != op2.hyperparameters[kwarg]:\n return False\n\n if check_trainability:\n for params_1, params_2 in zip(op1.data, op2.data):\n if qml.math.requires_grad(params_1) != qml.math.requires_grad(params_2):\n return False\n\n if check_interface:\n for params_1, params_2 in zip(op1.data, op2.data):\n if qml.math.get_interface(params_1) != qml.math.get_interface(params_2):\n return False\n\n return getattr(op1, \"inverse\", False) == getattr(op2, \"inverse\", False)\n", "path": "pennylane/ops/functions/equal.py"}]}
1,438
116
gh_patches_debug_2308
rasdani/github-patches
git_diff
nonebot__nonebot2-1166
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Bug: MessageTemplate无法对数字字符等类型进行格式化 **描述问题:** 上游issue:https://github.com/mnixry/nonebot-plugin-gocqhttp/issues/281 **如何复现?** 在一个消息模板中格式化数字 **期望的结果** 不报错 **故障原因** 推测原因如下 在函数`Message.__iadd__`中,如果相加类型错误会抛出`ValueError`异常: https://github.com/nonebot/nonebot2/blob/549a37b172ceeedac183fb48b58fe13a16eb56cd/nonebot/internal/adapter/message.py#L181-L190 在`MessageTemplate._add`中,只对一般类型相加出错的`TypeError`做了处理: https://github.com/nonebot/nonebot2/blob/549a37b172ceeedac183fb48b58fe13a16eb56cd/nonebot/internal/adapter/template.py#L180-L184 因此导致了该错误 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `nonebot/internal/adapter/message.py` Content: ``` 1 import abc 2 from copy import deepcopy 3 from dataclasses import field, asdict, dataclass 4 from typing import ( 5 Any, 6 Dict, 7 List, 8 Type, 9 Tuple, 10 Union, 11 Generic, 12 TypeVar, 13 Iterable, 14 Optional, 15 overload, 16 ) 17 18 from pydantic import parse_obj_as 19 20 from .template import MessageTemplate 21 22 T = TypeVar("T") 23 TMS = TypeVar("TMS", bound="MessageSegment") 24 TM = TypeVar("TM", bound="Message") 25 26 27 @dataclass 28 class MessageSegment(abc.ABC, Generic[TM]): 29 """消息段基类""" 30 31 type: str 32 """消息段类型""" 33 data: Dict[str, Any] = field(default_factory=dict) 34 """消息段数据""" 35 36 @classmethod 37 @abc.abstractmethod 38 def get_message_class(cls) -> Type[TM]: 39 """获取消息数组类型""" 40 raise NotImplementedError 41 42 @abc.abstractmethod 43 def __str__(self) -> str: 44 """该消息段所代表的 str,在命令匹配部分使用""" 45 raise NotImplementedError 46 47 def __len__(self) -> int: 48 return len(str(self)) 49 50 def __ne__(self: T, other: T) -> bool: 51 return not self == other 52 53 def __add__(self: TMS, other: Union[str, TMS, Iterable[TMS]]) -> TM: 54 return self.get_message_class()(self) + other 55 56 def __radd__(self: TMS, other: Union[str, TMS, Iterable[TMS]]) -> TM: 57 return self.get_message_class()(other) + self 58 59 @classmethod 60 def __get_validators__(cls): 61 yield cls._validate 62 63 @classmethod 64 def _validate(cls, value): 65 if isinstance(value, cls): 66 return value 67 if not isinstance(value, dict): 68 raise ValueError(f"Expected dict for MessageSegment, got {type(value)}") 69 if "type" not in value: 70 raise ValueError( 71 f"Expected dict with 'type' for MessageSegment, got {value}" 72 ) 73 return cls(type=value["type"], data=value.get("data", {})) 74 75 def get(self, key: str, default: Any = None): 76 return asdict(self).get(key, default) 77 78 def keys(self): 79 return asdict(self).keys() 80 81 def values(self): 82 return asdict(self).values() 83 84 def items(self): 85 return asdict(self).items() 86 87 def copy(self: T) -> T: 88 return deepcopy(self) 89 90 @abc.abstractmethod 91 def is_text(self) -> bool: 92 """当前消息段是否为纯文本""" 93 raise NotImplementedError 94 95 96 class Message(List[TMS], abc.ABC): 97 """消息数组 98 99 参数: 100 message: 消息内容 101 """ 102 103 def __init__( 104 self, 105 message: Union[str, None, Iterable[TMS], TMS] = None, 106 ): 107 super().__init__() 108 if message is None: 109 return 110 elif isinstance(message, str): 111 self.extend(self._construct(message)) 112 elif isinstance(message, MessageSegment): 113 self.append(message) 114 elif isinstance(message, Iterable): 115 self.extend(message) 116 else: 117 self.extend(self._construct(message)) # pragma: no cover 118 119 @classmethod 120 def template(cls: Type[TM], format_string: Union[str, TM]) -> MessageTemplate[TM]: 121 """创建消息模板。 122 123 用法和 `str.format` 大致相同, 但是可以输出消息对象, 并且支持以 `Message` 对象作为消息模板 124 125 并且提供了拓展的格式化控制符, 可以用适用于该消息类型的 `MessageSegment` 的工厂方法创建消息 126 127 参数: 128 format_string: 格式化模板 129 130 返回: 131 消息格式化器 132 """ 133 return MessageTemplate(format_string, cls) 134 135 @classmethod 136 @abc.abstractmethod 137 def get_segment_class(cls) -> Type[TMS]: 138 """获取消息段类型""" 139 raise NotImplementedError 140 141 def __str__(self) -> str: 142 return "".join(str(seg) for seg in self) 143 144 @classmethod 145 def __get_validators__(cls): 146 yield cls._validate 147 148 @classmethod 149 def _validate(cls, value): 150 if isinstance(value, cls): 151 return value 152 elif isinstance(value, Message): 153 raise ValueError(f"Type {type(value)} can not be converted to {cls}") 154 elif isinstance(value, str): 155 pass 156 elif isinstance(value, dict): 157 value = parse_obj_as(cls.get_segment_class(), value) 158 elif isinstance(value, Iterable): 159 value = [parse_obj_as(cls.get_segment_class(), v) for v in value] 160 else: 161 raise ValueError( 162 f"Expected str, dict or iterable for Message, got {type(value)}" 163 ) 164 return cls(value) 165 166 @staticmethod 167 @abc.abstractmethod 168 def _construct(msg: str) -> Iterable[TMS]: 169 """构造消息数组""" 170 raise NotImplementedError 171 172 def __add__(self: TM, other: Union[str, TMS, Iterable[TMS]]) -> TM: 173 result = self.copy() 174 result += other 175 return result 176 177 def __radd__(self: TM, other: Union[str, TMS, Iterable[TMS]]) -> TM: 178 result = self.__class__(other) 179 return result + self 180 181 def __iadd__(self: TM, other: Union[str, TMS, Iterable[TMS]]) -> TM: 182 if isinstance(other, str): 183 self.extend(self._construct(other)) 184 elif isinstance(other, MessageSegment): 185 self.append(other) 186 elif isinstance(other, Iterable): 187 self.extend(other) 188 else: 189 raise ValueError(f"Unsupported type: {type(other)}") # pragma: no cover 190 return self 191 192 @overload 193 def __getitem__(self: TM, __args: str) -> TM: 194 """ 195 参数: 196 __args: 消息段类型 197 198 返回: 199 所有类型为 `__args` 的消息段 200 """ 201 202 @overload 203 def __getitem__(self, __args: Tuple[str, int]) -> TMS: 204 """ 205 参数: 206 __args: 消息段类型和索引 207 208 返回: 209 类型为 `__args[0]` 的消息段第 `__args[1]` 个 210 """ 211 212 @overload 213 def __getitem__(self: TM, __args: Tuple[str, slice]) -> TM: 214 """ 215 参数: 216 __args: 消息段类型和切片 217 218 返回: 219 类型为 `__args[0]` 的消息段切片 `__args[1]` 220 """ 221 222 @overload 223 def __getitem__(self, __args: int) -> TMS: 224 """ 225 参数: 226 __args: 索引 227 228 返回: 229 第 `__args` 个消息段 230 """ 231 232 @overload 233 def __getitem__(self: TM, __args: slice) -> TM: 234 """ 235 参数: 236 __args: 切片 237 238 返回: 239 消息切片 `__args` 240 """ 241 242 def __getitem__( 243 self: TM, 244 args: Union[ 245 str, 246 Tuple[str, int], 247 Tuple[str, slice], 248 int, 249 slice, 250 ], 251 ) -> Union[TMS, TM]: 252 arg1, arg2 = args if isinstance(args, tuple) else (args, None) 253 if isinstance(arg1, int) and arg2 is None: 254 return super().__getitem__(arg1) 255 elif isinstance(arg1, slice) and arg2 is None: 256 return self.__class__(super().__getitem__(arg1)) 257 elif isinstance(arg1, str) and arg2 is None: 258 return self.__class__(seg for seg in self if seg.type == arg1) 259 elif isinstance(arg1, str) and isinstance(arg2, int): 260 return [seg for seg in self if seg.type == arg1][arg2] 261 elif isinstance(arg1, str) and isinstance(arg2, slice): 262 return self.__class__([seg for seg in self if seg.type == arg1][arg2]) 263 else: 264 raise ValueError("Incorrect arguments to slice") # pragma: no cover 265 266 def index(self, value: Union[TMS, str], *args) -> int: 267 if isinstance(value, str): 268 first_segment = next((seg for seg in self if seg.type == value), None) 269 if first_segment is None: 270 raise ValueError(f"Segment with type {value} is not in message") 271 return super().index(first_segment, *args) 272 return super().index(value, *args) 273 274 def get(self: TM, type_: str, count: Optional[int] = None) -> TM: 275 if count is None: 276 return self[type_] 277 278 iterator, filtered = ( 279 seg for seg in self if seg.type == type_ 280 ), self.__class__() 281 for _ in range(count): 282 seg = next(iterator, None) 283 if seg is None: 284 break 285 filtered.append(seg) 286 return filtered 287 288 def count(self, value: Union[TMS, str]) -> int: 289 return len(self[value]) if isinstance(value, str) else super().count(value) 290 291 def append(self: TM, obj: Union[str, TMS]) -> TM: 292 """添加一个消息段到消息数组末尾。 293 294 参数: 295 obj: 要添加的消息段 296 """ 297 if isinstance(obj, MessageSegment): 298 super().append(obj) 299 elif isinstance(obj, str): 300 self.extend(self._construct(obj)) 301 else: 302 raise ValueError(f"Unexpected type: {type(obj)} {obj}") # pragma: no cover 303 return self 304 305 def extend(self: TM, obj: Union[TM, Iterable[TMS]]) -> TM: 306 """拼接一个消息数组或多个消息段到消息数组末尾。 307 308 参数: 309 obj: 要添加的消息数组 310 """ 311 for segment in obj: 312 self.append(segment) 313 return self 314 315 def copy(self: TM) -> TM: 316 return deepcopy(self) 317 318 def extract_plain_text(self) -> str: 319 """提取消息内纯文本消息""" 320 321 return "".join(str(seg) for seg in self if seg.is_text()) 322 323 324 __autodoc__ = { 325 "MessageSegment.__str__": True, 326 "MessageSegment.__add__": True, 327 "Message.__getitem__": True, 328 "Message._construct": True, 329 } 330 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/nonebot/internal/adapter/message.py b/nonebot/internal/adapter/message.py --- a/nonebot/internal/adapter/message.py +++ b/nonebot/internal/adapter/message.py @@ -186,7 +186,7 @@ elif isinstance(other, Iterable): self.extend(other) else: - raise ValueError(f"Unsupported type: {type(other)}") # pragma: no cover + raise TypeError(f"Unsupported type {type(other)!r}") return self @overload
{"golden_diff": "diff --git a/nonebot/internal/adapter/message.py b/nonebot/internal/adapter/message.py\n--- a/nonebot/internal/adapter/message.py\n+++ b/nonebot/internal/adapter/message.py\n@@ -186,7 +186,7 @@\n elif isinstance(other, Iterable):\n self.extend(other)\n else:\n- raise ValueError(f\"Unsupported type: {type(other)}\") # pragma: no cover\n+ raise TypeError(f\"Unsupported type {type(other)!r}\")\n return self\n \n @overload\n", "issue": "Bug: MessageTemplate\u65e0\u6cd5\u5bf9\u6570\u5b57\u5b57\u7b26\u7b49\u7c7b\u578b\u8fdb\u884c\u683c\u5f0f\u5316\n**\u63cf\u8ff0\u95ee\u9898\uff1a**\r\n\r\n\u4e0a\u6e38issue\uff1ahttps://github.com/mnixry/nonebot-plugin-gocqhttp/issues/281\r\n\r\n**\u5982\u4f55\u590d\u73b0\uff1f**\r\n\r\n\u5728\u4e00\u4e2a\u6d88\u606f\u6a21\u677f\u4e2d\u683c\u5f0f\u5316\u6570\u5b57\r\n\r\n**\u671f\u671b\u7684\u7ed3\u679c**\r\n\r\n\u4e0d\u62a5\u9519\r\n\r\n**\u6545\u969c\u539f\u56e0**\r\n\r\n\u63a8\u6d4b\u539f\u56e0\u5982\u4e0b\r\n\r\n\u5728\u51fd\u6570`Message.__iadd__`\u4e2d\uff0c\u5982\u679c\u76f8\u52a0\u7c7b\u578b\u9519\u8bef\u4f1a\u629b\u51fa`ValueError`\u5f02\u5e38\uff1a\r\nhttps://github.com/nonebot/nonebot2/blob/549a37b172ceeedac183fb48b58fe13a16eb56cd/nonebot/internal/adapter/message.py#L181-L190\r\n\r\n\u5728`MessageTemplate._add`\u4e2d\uff0c\u53ea\u5bf9\u4e00\u822c\u7c7b\u578b\u76f8\u52a0\u51fa\u9519\u7684`TypeError`\u505a\u4e86\u5904\u7406\uff1a\r\nhttps://github.com/nonebot/nonebot2/blob/549a37b172ceeedac183fb48b58fe13a16eb56cd/nonebot/internal/adapter/template.py#L180-L184\r\n\r\n\u56e0\u6b64\u5bfc\u81f4\u4e86\u8be5\u9519\u8bef\n", "before_files": [{"content": "import abc\nfrom copy import deepcopy\nfrom dataclasses import field, asdict, dataclass\nfrom typing import (\n Any,\n Dict,\n List,\n Type,\n Tuple,\n Union,\n Generic,\n TypeVar,\n Iterable,\n Optional,\n overload,\n)\n\nfrom pydantic import parse_obj_as\n\nfrom .template import MessageTemplate\n\nT = TypeVar(\"T\")\nTMS = TypeVar(\"TMS\", bound=\"MessageSegment\")\nTM = TypeVar(\"TM\", bound=\"Message\")\n\n\n@dataclass\nclass MessageSegment(abc.ABC, Generic[TM]):\n \"\"\"\u6d88\u606f\u6bb5\u57fa\u7c7b\"\"\"\n\n type: str\n \"\"\"\u6d88\u606f\u6bb5\u7c7b\u578b\"\"\"\n data: Dict[str, Any] = field(default_factory=dict)\n \"\"\"\u6d88\u606f\u6bb5\u6570\u636e\"\"\"\n\n @classmethod\n @abc.abstractmethod\n def get_message_class(cls) -> Type[TM]:\n \"\"\"\u83b7\u53d6\u6d88\u606f\u6570\u7ec4\u7c7b\u578b\"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def __str__(self) -> str:\n \"\"\"\u8be5\u6d88\u606f\u6bb5\u6240\u4ee3\u8868\u7684 str\uff0c\u5728\u547d\u4ee4\u5339\u914d\u90e8\u5206\u4f7f\u7528\"\"\"\n raise NotImplementedError\n\n def __len__(self) -> int:\n return len(str(self))\n\n def __ne__(self: T, other: T) -> bool:\n return not self == other\n\n def __add__(self: TMS, other: Union[str, TMS, Iterable[TMS]]) -> TM:\n return self.get_message_class()(self) + other\n\n def __radd__(self: TMS, other: Union[str, TMS, Iterable[TMS]]) -> TM:\n return self.get_message_class()(other) + self\n\n @classmethod\n def __get_validators__(cls):\n yield cls._validate\n\n @classmethod\n def _validate(cls, value):\n if isinstance(value, cls):\n return value\n if not isinstance(value, dict):\n raise ValueError(f\"Expected dict for MessageSegment, got {type(value)}\")\n if \"type\" not in value:\n raise ValueError(\n f\"Expected dict with 'type' for MessageSegment, got {value}\"\n )\n return cls(type=value[\"type\"], data=value.get(\"data\", {}))\n\n def get(self, key: str, default: Any = None):\n return asdict(self).get(key, default)\n\n def keys(self):\n return asdict(self).keys()\n\n def values(self):\n return asdict(self).values()\n\n def items(self):\n return asdict(self).items()\n\n def copy(self: T) -> T:\n return deepcopy(self)\n\n @abc.abstractmethod\n def is_text(self) -> bool:\n \"\"\"\u5f53\u524d\u6d88\u606f\u6bb5\u662f\u5426\u4e3a\u7eaf\u6587\u672c\"\"\"\n raise NotImplementedError\n\n\nclass Message(List[TMS], abc.ABC):\n \"\"\"\u6d88\u606f\u6570\u7ec4\n\n \u53c2\u6570:\n message: \u6d88\u606f\u5185\u5bb9\n \"\"\"\n\n def __init__(\n self,\n message: Union[str, None, Iterable[TMS], TMS] = None,\n ):\n super().__init__()\n if message is None:\n return\n elif isinstance(message, str):\n self.extend(self._construct(message))\n elif isinstance(message, MessageSegment):\n self.append(message)\n elif isinstance(message, Iterable):\n self.extend(message)\n else:\n self.extend(self._construct(message)) # pragma: no cover\n\n @classmethod\n def template(cls: Type[TM], format_string: Union[str, TM]) -> MessageTemplate[TM]:\n \"\"\"\u521b\u5efa\u6d88\u606f\u6a21\u677f\u3002\n\n \u7528\u6cd5\u548c `str.format` \u5927\u81f4\u76f8\u540c, \u4f46\u662f\u53ef\u4ee5\u8f93\u51fa\u6d88\u606f\u5bf9\u8c61, \u5e76\u4e14\u652f\u6301\u4ee5 `Message` \u5bf9\u8c61\u4f5c\u4e3a\u6d88\u606f\u6a21\u677f\n\n \u5e76\u4e14\u63d0\u4f9b\u4e86\u62d3\u5c55\u7684\u683c\u5f0f\u5316\u63a7\u5236\u7b26, \u53ef\u4ee5\u7528\u9002\u7528\u4e8e\u8be5\u6d88\u606f\u7c7b\u578b\u7684 `MessageSegment` \u7684\u5de5\u5382\u65b9\u6cd5\u521b\u5efa\u6d88\u606f\n\n \u53c2\u6570:\n format_string: \u683c\u5f0f\u5316\u6a21\u677f\n\n \u8fd4\u56de:\n \u6d88\u606f\u683c\u5f0f\u5316\u5668\n \"\"\"\n return MessageTemplate(format_string, cls)\n\n @classmethod\n @abc.abstractmethod\n def get_segment_class(cls) -> Type[TMS]:\n \"\"\"\u83b7\u53d6\u6d88\u606f\u6bb5\u7c7b\u578b\"\"\"\n raise NotImplementedError\n\n def __str__(self) -> str:\n return \"\".join(str(seg) for seg in self)\n\n @classmethod\n def __get_validators__(cls):\n yield cls._validate\n\n @classmethod\n def _validate(cls, value):\n if isinstance(value, cls):\n return value\n elif isinstance(value, Message):\n raise ValueError(f\"Type {type(value)} can not be converted to {cls}\")\n elif isinstance(value, str):\n pass\n elif isinstance(value, dict):\n value = parse_obj_as(cls.get_segment_class(), value)\n elif isinstance(value, Iterable):\n value = [parse_obj_as(cls.get_segment_class(), v) for v in value]\n else:\n raise ValueError(\n f\"Expected str, dict or iterable for Message, got {type(value)}\"\n )\n return cls(value)\n\n @staticmethod\n @abc.abstractmethod\n def _construct(msg: str) -> Iterable[TMS]:\n \"\"\"\u6784\u9020\u6d88\u606f\u6570\u7ec4\"\"\"\n raise NotImplementedError\n\n def __add__(self: TM, other: Union[str, TMS, Iterable[TMS]]) -> TM:\n result = self.copy()\n result += other\n return result\n\n def __radd__(self: TM, other: Union[str, TMS, Iterable[TMS]]) -> TM:\n result = self.__class__(other)\n return result + self\n\n def __iadd__(self: TM, other: Union[str, TMS, Iterable[TMS]]) -> TM:\n if isinstance(other, str):\n self.extend(self._construct(other))\n elif isinstance(other, MessageSegment):\n self.append(other)\n elif isinstance(other, Iterable):\n self.extend(other)\n else:\n raise ValueError(f\"Unsupported type: {type(other)}\") # pragma: no cover\n return self\n\n @overload\n def __getitem__(self: TM, __args: str) -> TM:\n \"\"\"\n \u53c2\u6570:\n __args: \u6d88\u606f\u6bb5\u7c7b\u578b\n\n \u8fd4\u56de:\n \u6240\u6709\u7c7b\u578b\u4e3a `__args` \u7684\u6d88\u606f\u6bb5\n \"\"\"\n\n @overload\n def __getitem__(self, __args: Tuple[str, int]) -> TMS:\n \"\"\"\n \u53c2\u6570:\n __args: \u6d88\u606f\u6bb5\u7c7b\u578b\u548c\u7d22\u5f15\n\n \u8fd4\u56de:\n \u7c7b\u578b\u4e3a `__args[0]` \u7684\u6d88\u606f\u6bb5\u7b2c `__args[1]` \u4e2a\n \"\"\"\n\n @overload\n def __getitem__(self: TM, __args: Tuple[str, slice]) -> TM:\n \"\"\"\n \u53c2\u6570:\n __args: \u6d88\u606f\u6bb5\u7c7b\u578b\u548c\u5207\u7247\n\n \u8fd4\u56de:\n \u7c7b\u578b\u4e3a `__args[0]` \u7684\u6d88\u606f\u6bb5\u5207\u7247 `__args[1]`\n \"\"\"\n\n @overload\n def __getitem__(self, __args: int) -> TMS:\n \"\"\"\n \u53c2\u6570:\n __args: \u7d22\u5f15\n\n \u8fd4\u56de:\n \u7b2c `__args` \u4e2a\u6d88\u606f\u6bb5\n \"\"\"\n\n @overload\n def __getitem__(self: TM, __args: slice) -> TM:\n \"\"\"\n \u53c2\u6570:\n __args: \u5207\u7247\n\n \u8fd4\u56de:\n \u6d88\u606f\u5207\u7247 `__args`\n \"\"\"\n\n def __getitem__(\n self: TM,\n args: Union[\n str,\n Tuple[str, int],\n Tuple[str, slice],\n int,\n slice,\n ],\n ) -> Union[TMS, TM]:\n arg1, arg2 = args if isinstance(args, tuple) else (args, None)\n if isinstance(arg1, int) and arg2 is None:\n return super().__getitem__(arg1)\n elif isinstance(arg1, slice) and arg2 is None:\n return self.__class__(super().__getitem__(arg1))\n elif isinstance(arg1, str) and arg2 is None:\n return self.__class__(seg for seg in self if seg.type == arg1)\n elif isinstance(arg1, str) and isinstance(arg2, int):\n return [seg for seg in self if seg.type == arg1][arg2]\n elif isinstance(arg1, str) and isinstance(arg2, slice):\n return self.__class__([seg for seg in self if seg.type == arg1][arg2])\n else:\n raise ValueError(\"Incorrect arguments to slice\") # pragma: no cover\n\n def index(self, value: Union[TMS, str], *args) -> int:\n if isinstance(value, str):\n first_segment = next((seg for seg in self if seg.type == value), None)\n if first_segment is None:\n raise ValueError(f\"Segment with type {value} is not in message\")\n return super().index(first_segment, *args)\n return super().index(value, *args)\n\n def get(self: TM, type_: str, count: Optional[int] = None) -> TM:\n if count is None:\n return self[type_]\n\n iterator, filtered = (\n seg for seg in self if seg.type == type_\n ), self.__class__()\n for _ in range(count):\n seg = next(iterator, None)\n if seg is None:\n break\n filtered.append(seg)\n return filtered\n\n def count(self, value: Union[TMS, str]) -> int:\n return len(self[value]) if isinstance(value, str) else super().count(value)\n\n def append(self: TM, obj: Union[str, TMS]) -> TM:\n \"\"\"\u6dfb\u52a0\u4e00\u4e2a\u6d88\u606f\u6bb5\u5230\u6d88\u606f\u6570\u7ec4\u672b\u5c3e\u3002\n\n \u53c2\u6570:\n obj: \u8981\u6dfb\u52a0\u7684\u6d88\u606f\u6bb5\n \"\"\"\n if isinstance(obj, MessageSegment):\n super().append(obj)\n elif isinstance(obj, str):\n self.extend(self._construct(obj))\n else:\n raise ValueError(f\"Unexpected type: {type(obj)} {obj}\") # pragma: no cover\n return self\n\n def extend(self: TM, obj: Union[TM, Iterable[TMS]]) -> TM:\n \"\"\"\u62fc\u63a5\u4e00\u4e2a\u6d88\u606f\u6570\u7ec4\u6216\u591a\u4e2a\u6d88\u606f\u6bb5\u5230\u6d88\u606f\u6570\u7ec4\u672b\u5c3e\u3002\n\n \u53c2\u6570:\n obj: \u8981\u6dfb\u52a0\u7684\u6d88\u606f\u6570\u7ec4\n \"\"\"\n for segment in obj:\n self.append(segment)\n return self\n\n def copy(self: TM) -> TM:\n return deepcopy(self)\n\n def extract_plain_text(self) -> str:\n \"\"\"\u63d0\u53d6\u6d88\u606f\u5185\u7eaf\u6587\u672c\u6d88\u606f\"\"\"\n\n return \"\".join(str(seg) for seg in self if seg.is_text())\n\n\n__autodoc__ = {\n \"MessageSegment.__str__\": True,\n \"MessageSegment.__add__\": True,\n \"Message.__getitem__\": True,\n \"Message._construct\": True,\n}\n", "path": "nonebot/internal/adapter/message.py"}], "after_files": [{"content": "import abc\nfrom copy import deepcopy\nfrom dataclasses import field, asdict, dataclass\nfrom typing import (\n Any,\n Dict,\n List,\n Type,\n Tuple,\n Union,\n Generic,\n TypeVar,\n Iterable,\n Optional,\n overload,\n)\n\nfrom pydantic import parse_obj_as\n\nfrom .template import MessageTemplate\n\nT = TypeVar(\"T\")\nTMS = TypeVar(\"TMS\", bound=\"MessageSegment\")\nTM = TypeVar(\"TM\", bound=\"Message\")\n\n\n@dataclass\nclass MessageSegment(abc.ABC, Generic[TM]):\n \"\"\"\u6d88\u606f\u6bb5\u57fa\u7c7b\"\"\"\n\n type: str\n \"\"\"\u6d88\u606f\u6bb5\u7c7b\u578b\"\"\"\n data: Dict[str, Any] = field(default_factory=dict)\n \"\"\"\u6d88\u606f\u6bb5\u6570\u636e\"\"\"\n\n @classmethod\n @abc.abstractmethod\n def get_message_class(cls) -> Type[TM]:\n \"\"\"\u83b7\u53d6\u6d88\u606f\u6570\u7ec4\u7c7b\u578b\"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def __str__(self) -> str:\n \"\"\"\u8be5\u6d88\u606f\u6bb5\u6240\u4ee3\u8868\u7684 str\uff0c\u5728\u547d\u4ee4\u5339\u914d\u90e8\u5206\u4f7f\u7528\"\"\"\n raise NotImplementedError\n\n def __len__(self) -> int:\n return len(str(self))\n\n def __ne__(self: T, other: T) -> bool:\n return not self == other\n\n def __add__(self: TMS, other: Union[str, TMS, Iterable[TMS]]) -> TM:\n return self.get_message_class()(self) + other\n\n def __radd__(self: TMS, other: Union[str, TMS, Iterable[TMS]]) -> TM:\n return self.get_message_class()(other) + self\n\n @classmethod\n def __get_validators__(cls):\n yield cls._validate\n\n @classmethod\n def _validate(cls, value):\n if isinstance(value, cls):\n return value\n if not isinstance(value, dict):\n raise ValueError(f\"Expected dict for MessageSegment, got {type(value)}\")\n if \"type\" not in value:\n raise ValueError(\n f\"Expected dict with 'type' for MessageSegment, got {value}\"\n )\n return cls(type=value[\"type\"], data=value.get(\"data\", {}))\n\n def get(self, key: str, default: Any = None):\n return asdict(self).get(key, default)\n\n def keys(self):\n return asdict(self).keys()\n\n def values(self):\n return asdict(self).values()\n\n def items(self):\n return asdict(self).items()\n\n def copy(self: T) -> T:\n return deepcopy(self)\n\n @abc.abstractmethod\n def is_text(self) -> bool:\n \"\"\"\u5f53\u524d\u6d88\u606f\u6bb5\u662f\u5426\u4e3a\u7eaf\u6587\u672c\"\"\"\n raise NotImplementedError\n\n\nclass Message(List[TMS], abc.ABC):\n \"\"\"\u6d88\u606f\u6570\u7ec4\n\n \u53c2\u6570:\n message: \u6d88\u606f\u5185\u5bb9\n \"\"\"\n\n def __init__(\n self,\n message: Union[str, None, Iterable[TMS], TMS] = None,\n ):\n super().__init__()\n if message is None:\n return\n elif isinstance(message, str):\n self.extend(self._construct(message))\n elif isinstance(message, MessageSegment):\n self.append(message)\n elif isinstance(message, Iterable):\n self.extend(message)\n else:\n self.extend(self._construct(message)) # pragma: no cover\n\n @classmethod\n def template(cls: Type[TM], format_string: Union[str, TM]) -> MessageTemplate[TM]:\n \"\"\"\u521b\u5efa\u6d88\u606f\u6a21\u677f\u3002\n\n \u7528\u6cd5\u548c `str.format` \u5927\u81f4\u76f8\u540c, \u4f46\u662f\u53ef\u4ee5\u8f93\u51fa\u6d88\u606f\u5bf9\u8c61, \u5e76\u4e14\u652f\u6301\u4ee5 `Message` \u5bf9\u8c61\u4f5c\u4e3a\u6d88\u606f\u6a21\u677f\n\n \u5e76\u4e14\u63d0\u4f9b\u4e86\u62d3\u5c55\u7684\u683c\u5f0f\u5316\u63a7\u5236\u7b26, \u53ef\u4ee5\u7528\u9002\u7528\u4e8e\u8be5\u6d88\u606f\u7c7b\u578b\u7684 `MessageSegment` \u7684\u5de5\u5382\u65b9\u6cd5\u521b\u5efa\u6d88\u606f\n\n \u53c2\u6570:\n format_string: \u683c\u5f0f\u5316\u6a21\u677f\n\n \u8fd4\u56de:\n \u6d88\u606f\u683c\u5f0f\u5316\u5668\n \"\"\"\n return MessageTemplate(format_string, cls)\n\n @classmethod\n @abc.abstractmethod\n def get_segment_class(cls) -> Type[TMS]:\n \"\"\"\u83b7\u53d6\u6d88\u606f\u6bb5\u7c7b\u578b\"\"\"\n raise NotImplementedError\n\n def __str__(self) -> str:\n return \"\".join(str(seg) for seg in self)\n\n @classmethod\n def __get_validators__(cls):\n yield cls._validate\n\n @classmethod\n def _validate(cls, value):\n if isinstance(value, cls):\n return value\n elif isinstance(value, Message):\n raise ValueError(f\"Type {type(value)} can not be converted to {cls}\")\n elif isinstance(value, str):\n pass\n elif isinstance(value, dict):\n value = parse_obj_as(cls.get_segment_class(), value)\n elif isinstance(value, Iterable):\n value = [parse_obj_as(cls.get_segment_class(), v) for v in value]\n else:\n raise ValueError(\n f\"Expected str, dict or iterable for Message, got {type(value)}\"\n )\n return cls(value)\n\n @staticmethod\n @abc.abstractmethod\n def _construct(msg: str) -> Iterable[TMS]:\n \"\"\"\u6784\u9020\u6d88\u606f\u6570\u7ec4\"\"\"\n raise NotImplementedError\n\n def __add__(self: TM, other: Union[str, TMS, Iterable[TMS]]) -> TM:\n result = self.copy()\n result += other\n return result\n\n def __radd__(self: TM, other: Union[str, TMS, Iterable[TMS]]) -> TM:\n result = self.__class__(other)\n return result + self\n\n def __iadd__(self: TM, other: Union[str, TMS, Iterable[TMS]]) -> TM:\n if isinstance(other, str):\n self.extend(self._construct(other))\n elif isinstance(other, MessageSegment):\n self.append(other)\n elif isinstance(other, Iterable):\n self.extend(other)\n else:\n raise TypeError(f\"Unsupported type {type(other)!r}\")\n return self\n\n @overload\n def __getitem__(self: TM, __args: str) -> TM:\n \"\"\"\n \u53c2\u6570:\n __args: \u6d88\u606f\u6bb5\u7c7b\u578b\n\n \u8fd4\u56de:\n \u6240\u6709\u7c7b\u578b\u4e3a `__args` \u7684\u6d88\u606f\u6bb5\n \"\"\"\n\n @overload\n def __getitem__(self, __args: Tuple[str, int]) -> TMS:\n \"\"\"\n \u53c2\u6570:\n __args: \u6d88\u606f\u6bb5\u7c7b\u578b\u548c\u7d22\u5f15\n\n \u8fd4\u56de:\n \u7c7b\u578b\u4e3a `__args[0]` \u7684\u6d88\u606f\u6bb5\u7b2c `__args[1]` \u4e2a\n \"\"\"\n\n @overload\n def __getitem__(self: TM, __args: Tuple[str, slice]) -> TM:\n \"\"\"\n \u53c2\u6570:\n __args: \u6d88\u606f\u6bb5\u7c7b\u578b\u548c\u5207\u7247\n\n \u8fd4\u56de:\n \u7c7b\u578b\u4e3a `__args[0]` \u7684\u6d88\u606f\u6bb5\u5207\u7247 `__args[1]`\n \"\"\"\n\n @overload\n def __getitem__(self, __args: int) -> TMS:\n \"\"\"\n \u53c2\u6570:\n __args: \u7d22\u5f15\n\n \u8fd4\u56de:\n \u7b2c `__args` \u4e2a\u6d88\u606f\u6bb5\n \"\"\"\n\n @overload\n def __getitem__(self: TM, __args: slice) -> TM:\n \"\"\"\n \u53c2\u6570:\n __args: \u5207\u7247\n\n \u8fd4\u56de:\n \u6d88\u606f\u5207\u7247 `__args`\n \"\"\"\n\n def __getitem__(\n self: TM,\n args: Union[\n str,\n Tuple[str, int],\n Tuple[str, slice],\n int,\n slice,\n ],\n ) -> Union[TMS, TM]:\n arg1, arg2 = args if isinstance(args, tuple) else (args, None)\n if isinstance(arg1, int) and arg2 is None:\n return super().__getitem__(arg1)\n elif isinstance(arg1, slice) and arg2 is None:\n return self.__class__(super().__getitem__(arg1))\n elif isinstance(arg1, str) and arg2 is None:\n return self.__class__(seg for seg in self if seg.type == arg1)\n elif isinstance(arg1, str) and isinstance(arg2, int):\n return [seg for seg in self if seg.type == arg1][arg2]\n elif isinstance(arg1, str) and isinstance(arg2, slice):\n return self.__class__([seg for seg in self if seg.type == arg1][arg2])\n else:\n raise ValueError(\"Incorrect arguments to slice\") # pragma: no cover\n\n def index(self, value: Union[TMS, str], *args) -> int:\n if isinstance(value, str):\n first_segment = next((seg for seg in self if seg.type == value), None)\n if first_segment is None:\n raise ValueError(f\"Segment with type {value} is not in message\")\n return super().index(first_segment, *args)\n return super().index(value, *args)\n\n def get(self: TM, type_: str, count: Optional[int] = None) -> TM:\n if count is None:\n return self[type_]\n\n iterator, filtered = (\n seg for seg in self if seg.type == type_\n ), self.__class__()\n for _ in range(count):\n seg = next(iterator, None)\n if seg is None:\n break\n filtered.append(seg)\n return filtered\n\n def count(self, value: Union[TMS, str]) -> int:\n return len(self[value]) if isinstance(value, str) else super().count(value)\n\n def append(self: TM, obj: Union[str, TMS]) -> TM:\n \"\"\"\u6dfb\u52a0\u4e00\u4e2a\u6d88\u606f\u6bb5\u5230\u6d88\u606f\u6570\u7ec4\u672b\u5c3e\u3002\n\n \u53c2\u6570:\n obj: \u8981\u6dfb\u52a0\u7684\u6d88\u606f\u6bb5\n \"\"\"\n if isinstance(obj, MessageSegment):\n super().append(obj)\n elif isinstance(obj, str):\n self.extend(self._construct(obj))\n else:\n raise ValueError(f\"Unexpected type: {type(obj)} {obj}\") # pragma: no cover\n return self\n\n def extend(self: TM, obj: Union[TM, Iterable[TMS]]) -> TM:\n \"\"\"\u62fc\u63a5\u4e00\u4e2a\u6d88\u606f\u6570\u7ec4\u6216\u591a\u4e2a\u6d88\u606f\u6bb5\u5230\u6d88\u606f\u6570\u7ec4\u672b\u5c3e\u3002\n\n \u53c2\u6570:\n obj: \u8981\u6dfb\u52a0\u7684\u6d88\u606f\u6570\u7ec4\n \"\"\"\n for segment in obj:\n self.append(segment)\n return self\n\n def copy(self: TM) -> TM:\n return deepcopy(self)\n\n def extract_plain_text(self) -> str:\n \"\"\"\u63d0\u53d6\u6d88\u606f\u5185\u7eaf\u6587\u672c\u6d88\u606f\"\"\"\n\n return \"\".join(str(seg) for seg in self if seg.is_text())\n\n\n__autodoc__ = {\n \"MessageSegment.__str__\": True,\n \"MessageSegment.__add__\": True,\n \"Message.__getitem__\": True,\n \"Message._construct\": True,\n}\n", "path": "nonebot/internal/adapter/message.py"}]}
3,724
117
gh_patches_debug_5263
rasdani/github-patches
git_diff
python-poetry__poetry-4420
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Throw a RuntimeError on hash mismatch in Chooser._get_links Throw a specific exception in the case of finding a matching name+version, but none of the digests for a link matching the `poetry.lock` metadata. Resolves: #2422 - [✅] Added **tests** for changed code. - [✅] Updated **documentation** for changed code. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `poetry/installation/chooser.py` Content: ``` 1 import re 2 3 from typing import List 4 from typing import Tuple 5 6 from packaging.tags import Tag 7 8 from poetry.core.packages.package import Package 9 from poetry.core.packages.utils.link import Link 10 from poetry.repositories.pool import Pool 11 from poetry.utils.env import Env 12 from poetry.utils.patterns import wheel_file_re 13 14 15 class InvalidWheelName(Exception): 16 pass 17 18 19 class Wheel(object): 20 def __init__(self, filename): # type: (str) -> None 21 wheel_info = wheel_file_re.match(filename) 22 if not wheel_info: 23 raise InvalidWheelName("{} is not a valid wheel filename.".format(filename)) 24 25 self.filename = filename 26 self.name = wheel_info.group("name").replace("_", "-") 27 self.version = wheel_info.group("ver").replace("_", "-") 28 self.build_tag = wheel_info.group("build") 29 self.pyversions = wheel_info.group("pyver").split(".") 30 self.abis = wheel_info.group("abi").split(".") 31 self.plats = wheel_info.group("plat").split(".") 32 33 self.tags = { 34 Tag(x, y, z) for x in self.pyversions for y in self.abis for z in self.plats 35 } 36 37 def get_minimum_supported_index(self, tags): 38 indexes = [tags.index(t) for t in self.tags if t in tags] 39 40 return min(indexes) if indexes else None 41 42 def is_supported_by_environment(self, env): 43 return bool(set(env.supported_tags).intersection(self.tags)) 44 45 46 class Chooser: 47 """ 48 A Chooser chooses an appropriate release archive for packages. 49 """ 50 51 def __init__(self, pool, env): # type: (Pool, Env) -> None 52 self._pool = pool 53 self._env = env 54 55 def choose_for(self, package): # type: (Package) -> Link 56 """ 57 Return the url of the selected archive for a given package. 58 """ 59 links = [] 60 for link in self._get_links(package): 61 if link.is_wheel and not Wheel(link.filename).is_supported_by_environment( 62 self._env 63 ): 64 continue 65 66 if link.ext in {".egg", ".exe", ".msi", ".rpm", ".srpm"}: 67 continue 68 69 links.append(link) 70 71 if not links: 72 raise RuntimeError( 73 "Unable to find installation candidates for {}".format(package) 74 ) 75 76 # Get the best link 77 chosen = max(links, key=lambda link: self._sort_key(package, link)) 78 if not chosen: 79 raise RuntimeError( 80 "Unable to find installation candidates for {}".format(package) 81 ) 82 83 return chosen 84 85 def _get_links(self, package): # type: (Package) -> List[Link] 86 if not package.source_type: 87 if not self._pool.has_repository("pypi"): 88 repository = self._pool.repositories[0] 89 else: 90 repository = self._pool.repository("pypi") 91 else: 92 repository = self._pool.repository(package.source_reference) 93 94 links = repository.find_links_for_package(package) 95 96 hashes = [f["hash"] for f in package.files] 97 if not hashes: 98 return links 99 100 selected_links = [] 101 for link in links: 102 if not link.hash: 103 selected_links.append(link) 104 continue 105 106 h = link.hash_name + ":" + link.hash 107 if h not in hashes: 108 continue 109 110 selected_links.append(link) 111 112 return selected_links 113 114 def _sort_key(self, package, link): # type: (Package, Link) -> Tuple 115 """ 116 Function to pass as the `key` argument to a call to sorted() to sort 117 InstallationCandidates by preference. 118 Returns a tuple such that tuples sorting as greater using Python's 119 default comparison operator are more preferred. 120 The preference is as follows: 121 First and foremost, candidates with allowed (matching) hashes are 122 always preferred over candidates without matching hashes. This is 123 because e.g. if the only candidate with an allowed hash is yanked, 124 we still want to use that candidate. 125 Second, excepting hash considerations, candidates that have been 126 yanked (in the sense of PEP 592) are always less preferred than 127 candidates that haven't been yanked. Then: 128 If not finding wheels, they are sorted by version only. 129 If finding wheels, then the sort order is by version, then: 130 1. existing installs 131 2. wheels ordered via Wheel.support_index_min(self._supported_tags) 132 3. source archives 133 If prefer_binary was set, then all wheels are sorted above sources. 134 Note: it was considered to embed this logic into the Link 135 comparison operators, but then different sdist links 136 with the same version, would have to be considered equal 137 """ 138 support_num = len(self._env.supported_tags) 139 build_tag = () 140 binary_preference = 0 141 if link.is_wheel: 142 wheel = Wheel(link.filename) 143 if not wheel.is_supported_by_environment(self._env): 144 raise RuntimeError( 145 "{} is not a supported wheel for this platform. It " 146 "can't be sorted.".format(wheel.filename) 147 ) 148 149 # TODO: Binary preference 150 pri = -(wheel.get_minimum_supported_index(self._env.supported_tags)) 151 if wheel.build_tag is not None: 152 match = re.match(r"^(\d+)(.*)$", wheel.build_tag) 153 build_tag_groups = match.groups() 154 build_tag = (int(build_tag_groups[0]), build_tag_groups[1]) 155 else: # sdist 156 pri = -support_num 157 158 has_allowed_hash = int(self._is_link_hash_allowed_for_package(link, package)) 159 160 # TODO: Proper yank value 161 yank_value = 0 162 163 return ( 164 has_allowed_hash, 165 yank_value, 166 binary_preference, 167 package.version, 168 build_tag, 169 pri, 170 ) 171 172 def _is_link_hash_allowed_for_package( 173 self, link, package 174 ): # type: (Link, Package) -> bool 175 if not link.hash: 176 return True 177 178 h = link.hash_name + ":" + link.hash 179 180 return h in {f["hash"] for f in package.files} 181 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/poetry/installation/chooser.py b/poetry/installation/chooser.py --- a/poetry/installation/chooser.py +++ b/poetry/installation/chooser.py @@ -109,6 +109,13 @@ selected_links.append(link) + if links and not selected_links: + raise RuntimeError( + "Retrieved digest for link {}({}) not in poetry.lock metadata {}".format( + link.filename, h, hashes + ) + ) + return selected_links def _sort_key(self, package, link): # type: (Package, Link) -> Tuple
{"golden_diff": "diff --git a/poetry/installation/chooser.py b/poetry/installation/chooser.py\n--- a/poetry/installation/chooser.py\n+++ b/poetry/installation/chooser.py\n@@ -109,6 +109,13 @@\n \n selected_links.append(link)\n \n+ if links and not selected_links:\n+ raise RuntimeError(\n+ \"Retrieved digest for link {}({}) not in poetry.lock metadata {}\".format(\n+ link.filename, h, hashes\n+ )\n+ )\n+\n return selected_links\n \n def _sort_key(self, package, link): # type: (Package, Link) -> Tuple\n", "issue": "Throw a RuntimeError on hash mismatch in Chooser._get_links\nThrow a specific exception in the case of finding a matching name+version, but none of the digests for a link matching the `poetry.lock` metadata.\r\n\r\nResolves: #2422\r\n\r\n- [\u2705] Added **tests** for changed code.\r\n- [\u2705] Updated **documentation** for changed code.\r\n\n", "before_files": [{"content": "import re\n\nfrom typing import List\nfrom typing import Tuple\n\nfrom packaging.tags import Tag\n\nfrom poetry.core.packages.package import Package\nfrom poetry.core.packages.utils.link import Link\nfrom poetry.repositories.pool import Pool\nfrom poetry.utils.env import Env\nfrom poetry.utils.patterns import wheel_file_re\n\n\nclass InvalidWheelName(Exception):\n pass\n\n\nclass Wheel(object):\n def __init__(self, filename): # type: (str) -> None\n wheel_info = wheel_file_re.match(filename)\n if not wheel_info:\n raise InvalidWheelName(\"{} is not a valid wheel filename.\".format(filename))\n\n self.filename = filename\n self.name = wheel_info.group(\"name\").replace(\"_\", \"-\")\n self.version = wheel_info.group(\"ver\").replace(\"_\", \"-\")\n self.build_tag = wheel_info.group(\"build\")\n self.pyversions = wheel_info.group(\"pyver\").split(\".\")\n self.abis = wheel_info.group(\"abi\").split(\".\")\n self.plats = wheel_info.group(\"plat\").split(\".\")\n\n self.tags = {\n Tag(x, y, z) for x in self.pyversions for y in self.abis for z in self.plats\n }\n\n def get_minimum_supported_index(self, tags):\n indexes = [tags.index(t) for t in self.tags if t in tags]\n\n return min(indexes) if indexes else None\n\n def is_supported_by_environment(self, env):\n return bool(set(env.supported_tags).intersection(self.tags))\n\n\nclass Chooser:\n \"\"\"\n A Chooser chooses an appropriate release archive for packages.\n \"\"\"\n\n def __init__(self, pool, env): # type: (Pool, Env) -> None\n self._pool = pool\n self._env = env\n\n def choose_for(self, package): # type: (Package) -> Link\n \"\"\"\n Return the url of the selected archive for a given package.\n \"\"\"\n links = []\n for link in self._get_links(package):\n if link.is_wheel and not Wheel(link.filename).is_supported_by_environment(\n self._env\n ):\n continue\n\n if link.ext in {\".egg\", \".exe\", \".msi\", \".rpm\", \".srpm\"}:\n continue\n\n links.append(link)\n\n if not links:\n raise RuntimeError(\n \"Unable to find installation candidates for {}\".format(package)\n )\n\n # Get the best link\n chosen = max(links, key=lambda link: self._sort_key(package, link))\n if not chosen:\n raise RuntimeError(\n \"Unable to find installation candidates for {}\".format(package)\n )\n\n return chosen\n\n def _get_links(self, package): # type: (Package) -> List[Link]\n if not package.source_type:\n if not self._pool.has_repository(\"pypi\"):\n repository = self._pool.repositories[0]\n else:\n repository = self._pool.repository(\"pypi\")\n else:\n repository = self._pool.repository(package.source_reference)\n\n links = repository.find_links_for_package(package)\n\n hashes = [f[\"hash\"] for f in package.files]\n if not hashes:\n return links\n\n selected_links = []\n for link in links:\n if not link.hash:\n selected_links.append(link)\n continue\n\n h = link.hash_name + \":\" + link.hash\n if h not in hashes:\n continue\n\n selected_links.append(link)\n\n return selected_links\n\n def _sort_key(self, package, link): # type: (Package, Link) -> Tuple\n \"\"\"\n Function to pass as the `key` argument to a call to sorted() to sort\n InstallationCandidates by preference.\n Returns a tuple such that tuples sorting as greater using Python's\n default comparison operator are more preferred.\n The preference is as follows:\n First and foremost, candidates with allowed (matching) hashes are\n always preferred over candidates without matching hashes. This is\n because e.g. if the only candidate with an allowed hash is yanked,\n we still want to use that candidate.\n Second, excepting hash considerations, candidates that have been\n yanked (in the sense of PEP 592) are always less preferred than\n candidates that haven't been yanked. Then:\n If not finding wheels, they are sorted by version only.\n If finding wheels, then the sort order is by version, then:\n 1. existing installs\n 2. wheels ordered via Wheel.support_index_min(self._supported_tags)\n 3. source archives\n If prefer_binary was set, then all wheels are sorted above sources.\n Note: it was considered to embed this logic into the Link\n comparison operators, but then different sdist links\n with the same version, would have to be considered equal\n \"\"\"\n support_num = len(self._env.supported_tags)\n build_tag = ()\n binary_preference = 0\n if link.is_wheel:\n wheel = Wheel(link.filename)\n if not wheel.is_supported_by_environment(self._env):\n raise RuntimeError(\n \"{} is not a supported wheel for this platform. It \"\n \"can't be sorted.\".format(wheel.filename)\n )\n\n # TODO: Binary preference\n pri = -(wheel.get_minimum_supported_index(self._env.supported_tags))\n if wheel.build_tag is not None:\n match = re.match(r\"^(\\d+)(.*)$\", wheel.build_tag)\n build_tag_groups = match.groups()\n build_tag = (int(build_tag_groups[0]), build_tag_groups[1])\n else: # sdist\n pri = -support_num\n\n has_allowed_hash = int(self._is_link_hash_allowed_for_package(link, package))\n\n # TODO: Proper yank value\n yank_value = 0\n\n return (\n has_allowed_hash,\n yank_value,\n binary_preference,\n package.version,\n build_tag,\n pri,\n )\n\n def _is_link_hash_allowed_for_package(\n self, link, package\n ): # type: (Link, Package) -> bool\n if not link.hash:\n return True\n\n h = link.hash_name + \":\" + link.hash\n\n return h in {f[\"hash\"] for f in package.files}\n", "path": "poetry/installation/chooser.py"}], "after_files": [{"content": "import re\n\nfrom typing import List\nfrom typing import Tuple\n\nfrom packaging.tags import Tag\n\nfrom poetry.core.packages.package import Package\nfrom poetry.core.packages.utils.link import Link\nfrom poetry.repositories.pool import Pool\nfrom poetry.utils.env import Env\nfrom poetry.utils.patterns import wheel_file_re\n\n\nclass InvalidWheelName(Exception):\n pass\n\n\nclass Wheel(object):\n def __init__(self, filename): # type: (str) -> None\n wheel_info = wheel_file_re.match(filename)\n if not wheel_info:\n raise InvalidWheelName(\"{} is not a valid wheel filename.\".format(filename))\n\n self.filename = filename\n self.name = wheel_info.group(\"name\").replace(\"_\", \"-\")\n self.version = wheel_info.group(\"ver\").replace(\"_\", \"-\")\n self.build_tag = wheel_info.group(\"build\")\n self.pyversions = wheel_info.group(\"pyver\").split(\".\")\n self.abis = wheel_info.group(\"abi\").split(\".\")\n self.plats = wheel_info.group(\"plat\").split(\".\")\n\n self.tags = {\n Tag(x, y, z) for x in self.pyversions for y in self.abis for z in self.plats\n }\n\n def get_minimum_supported_index(self, tags):\n indexes = [tags.index(t) for t in self.tags if t in tags]\n\n return min(indexes) if indexes else None\n\n def is_supported_by_environment(self, env):\n return bool(set(env.supported_tags).intersection(self.tags))\n\n\nclass Chooser:\n \"\"\"\n A Chooser chooses an appropriate release archive for packages.\n \"\"\"\n\n def __init__(self, pool, env): # type: (Pool, Env) -> None\n self._pool = pool\n self._env = env\n\n def choose_for(self, package): # type: (Package) -> Link\n \"\"\"\n Return the url of the selected archive for a given package.\n \"\"\"\n links = []\n for link in self._get_links(package):\n if link.is_wheel and not Wheel(link.filename).is_supported_by_environment(\n self._env\n ):\n continue\n\n if link.ext in {\".egg\", \".exe\", \".msi\", \".rpm\", \".srpm\"}:\n continue\n\n links.append(link)\n\n if not links:\n raise RuntimeError(\n \"Unable to find installation candidates for {}\".format(package)\n )\n\n # Get the best link\n chosen = max(links, key=lambda link: self._sort_key(package, link))\n if not chosen:\n raise RuntimeError(\n \"Unable to find installation candidates for {}\".format(package)\n )\n\n return chosen\n\n def _get_links(self, package): # type: (Package) -> List[Link]\n if not package.source_type:\n if not self._pool.has_repository(\"pypi\"):\n repository = self._pool.repositories[0]\n else:\n repository = self._pool.repository(\"pypi\")\n else:\n repository = self._pool.repository(package.source_reference)\n\n links = repository.find_links_for_package(package)\n\n hashes = [f[\"hash\"] for f in package.files]\n if not hashes:\n return links\n\n selected_links = []\n for link in links:\n if not link.hash:\n selected_links.append(link)\n continue\n\n h = link.hash_name + \":\" + link.hash\n if h not in hashes:\n continue\n\n selected_links.append(link)\n\n if links and not selected_links:\n raise RuntimeError(\n \"Retrieved digest for link {}({}) not in poetry.lock metadata {}\".format(\n link.filename, h, hashes\n )\n )\n\n return selected_links\n\n def _sort_key(self, package, link): # type: (Package, Link) -> Tuple\n \"\"\"\n Function to pass as the `key` argument to a call to sorted() to sort\n InstallationCandidates by preference.\n Returns a tuple such that tuples sorting as greater using Python's\n default comparison operator are more preferred.\n The preference is as follows:\n First and foremost, candidates with allowed (matching) hashes are\n always preferred over candidates without matching hashes. This is\n because e.g. if the only candidate with an allowed hash is yanked,\n we still want to use that candidate.\n Second, excepting hash considerations, candidates that have been\n yanked (in the sense of PEP 592) are always less preferred than\n candidates that haven't been yanked. Then:\n If not finding wheels, they are sorted by version only.\n If finding wheels, then the sort order is by version, then:\n 1. existing installs\n 2. wheels ordered via Wheel.support_index_min(self._supported_tags)\n 3. source archives\n If prefer_binary was set, then all wheels are sorted above sources.\n Note: it was considered to embed this logic into the Link\n comparison operators, but then different sdist links\n with the same version, would have to be considered equal\n \"\"\"\n support_num = len(self._env.supported_tags)\n build_tag = ()\n binary_preference = 0\n if link.is_wheel:\n wheel = Wheel(link.filename)\n if not wheel.is_supported_by_environment(self._env):\n raise RuntimeError(\n \"{} is not a supported wheel for this platform. It \"\n \"can't be sorted.\".format(wheel.filename)\n )\n\n # TODO: Binary preference\n pri = -(wheel.get_minimum_supported_index(self._env.supported_tags))\n if wheel.build_tag is not None:\n match = re.match(r\"^(\\d+)(.*)$\", wheel.build_tag)\n build_tag_groups = match.groups()\n build_tag = (int(build_tag_groups[0]), build_tag_groups[1])\n else: # sdist\n pri = -support_num\n\n has_allowed_hash = int(self._is_link_hash_allowed_for_package(link, package))\n\n # TODO: Proper yank value\n yank_value = 0\n\n return (\n has_allowed_hash,\n yank_value,\n binary_preference,\n package.version,\n build_tag,\n pri,\n )\n\n def _is_link_hash_allowed_for_package(\n self, link, package\n ): # type: (Link, Package) -> bool\n if not link.hash:\n return True\n\n h = link.hash_name + \":\" + link.hash\n\n return h in {f[\"hash\"] for f in package.files}\n", "path": "poetry/installation/chooser.py"}]}
2,126
144
gh_patches_debug_27327
rasdani/github-patches
git_diff
quantumlib__Cirq-4805
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `cirq.plot_density_matrix` outputs buggy plots (abstract art) and doesn't scale with number of qubits. **Description of the issue** `cirq.plot_density_matrix` is buggy and output's random plots (tested in a colab) and would also doesn't scale well with number of qubits. My guess is that this due to the magic numbers hardcoded in the code, which should be changed and computed on the fly based on the size of the density matrix. https://github.com/quantumlib/Cirq/blob/09a4667e3d1768fc2f1934b18ae52e5b13ecd378/cirq-core/cirq/vis/density_matrix.py#L39 **How to reproduce the issue** ```python import cirq cirq.plot_density_matrix(cirq.testing.random_density_matrix(dim=4, random_state = 12345)) ``` <details> ![image](https://user-images.githubusercontent.com/7863287/148300842-9c302e93-1437-4e88-8513-0dcb5451186a.png) </details> **Cirq version** 0.14.0.dev20220105215702 cc @AnimeshSinha1309 `cirq.plot_density_matrix` outputs buggy plots (abstract art) and doesn't scale with number of qubits. **Description of the issue** `cirq.plot_density_matrix` is buggy and output's random plots (tested in a colab) and would also doesn't scale well with number of qubits. My guess is that this due to the magic numbers hardcoded in the code, which should be changed and computed on the fly based on the size of the density matrix. https://github.com/quantumlib/Cirq/blob/09a4667e3d1768fc2f1934b18ae52e5b13ecd378/cirq-core/cirq/vis/density_matrix.py#L39 **How to reproduce the issue** ```python import cirq cirq.plot_density_matrix(cirq.testing.random_density_matrix(dim=4, random_state = 12345)) ``` <details> ![image](https://user-images.githubusercontent.com/7863287/148300842-9c302e93-1437-4e88-8513-0dcb5451186a.png) </details> **Cirq version** 0.14.0.dev20220105215702 cc @AnimeshSinha1309 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cirq-core/cirq/vis/density_matrix.py` Content: ``` 1 # Copyright 2021 The Cirq Developers 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # https://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Tool to visualize the magnitudes and phases in the density matrix""" 16 17 from typing import Optional 18 19 import numpy as np 20 import matplotlib.pyplot as plt 21 from matplotlib import lines, patches 22 23 from cirq.qis.states import validate_density_matrix 24 25 26 def _plot_element_of_density_matrix(ax, x, y, r, phase, show_rect=False, show_text=False): 27 """Plots a single element of a density matrix 28 29 Args: 30 x: x coordinate of the cell we are plotting 31 y: y coordinate of the cell we are plotting 32 r: the amplitude of the qubit in that cell 33 phase: phase of the qubit in that cell, in radians 34 show_rect: Boolean on if to show the amplitude rectangle, used for diagonal elements 35 show_text: Boolean on if to show text labels or not 36 ax: The axes to plot on 37 """ 38 # Setting up a few magic numbers for graphics 39 _half_cell_size_after_padding = (1 / 1.1) * 0.5 40 _rectangle_margin = 0.01 41 _image_opacity = 0.8 if not show_text else 0.4 42 43 circle_out = plt.Circle( 44 (x + 0.5, y + 0.5), radius=1 / _half_cell_size_after_padding, fill=False, color='#333333' 45 ) 46 circle_in = plt.Circle( 47 (x + 0.5, y + 0.5), 48 radius=r / _half_cell_size_after_padding, 49 fill=True, 50 color='IndianRed', 51 alpha=_image_opacity, 52 ) 53 line = lines.Line2D( 54 (x + 0.5, x + 0.5 + np.cos(phase) / _half_cell_size_after_padding), 55 (y + 0.5, y + 0.5 + np.sin(phase) / _half_cell_size_after_padding), 56 color='#333333', 57 alpha=_image_opacity, 58 ) 59 ax.add_artist(circle_in) 60 ax.add_artist(circle_out) 61 ax.add_artist(line) 62 if show_rect: 63 rect = patches.Rectangle( 64 (x + _rectangle_margin, y + _rectangle_margin), 65 1.0 - 2 * _rectangle_margin, 66 r * (1 - 2 * _rectangle_margin), 67 alpha=0.25, 68 ) 69 ax.add_artist(rect) 70 if show_text: 71 plt.text( 72 x + 0.5, 73 y + 0.5, 74 f"{np.round(r, decimals=2)}\n{np.round(phase * 180 / np.pi, decimals=2)} deg", 75 horizontalalignment='center', 76 verticalalignment='center', 77 ) 78 79 80 def plot_density_matrix( 81 matrix: np.ndarray, 82 ax: Optional[plt.Axes] = None, 83 *, 84 show_text: bool = False, 85 title: Optional[str] = None, 86 ) -> plt.Axes: 87 """Generates a plot for a given density matrix. 88 89 1. Each entry of the density matrix, a complex number, is plotted as an 90 Argand Diagram where the partially filled red circle represents the magnitude 91 and the line represents the phase angle, going anti-clockwise from positive x - axis. 92 2. The blue rectangles on the diagonal elements represent the probability 93 of measuring the system in state $|i\rangle$. 94 Rendering scheme is inspired from https://algassert.com/quirk 95 96 Args: 97 matrix: The density matrix to visualize 98 show_text: If true, the density matrix values are also shown as text labels 99 ax: The axes to plot on 100 title: Title of the plot 101 """ 102 plt.style.use('ggplot') 103 104 _padding_around_plot = 0.001 105 106 matrix = matrix.astype(np.complex128) 107 num_qubits = int(np.log2(matrix.shape[0])) 108 validate_density_matrix(matrix, qid_shape=(2 ** num_qubits,)) 109 110 if ax is None: 111 _, ax = plt.subplots(figsize=(10, 10)) 112 ax.set_xlim(0 - _padding_around_plot, 2 ** num_qubits + _padding_around_plot) 113 ax.set_ylim(0 - _padding_around_plot, 2 ** num_qubits + _padding_around_plot) 114 115 for i in range(matrix.shape[0]): 116 for j in range(matrix.shape[1]): 117 _plot_element_of_density_matrix( 118 ax, 119 i, 120 j, 121 np.abs(matrix[i][-j - 1]), 122 np.angle(matrix[i][-j - 1]), 123 show_rect=(i == matrix.shape[1] - j - 1), 124 show_text=show_text, 125 ) 126 127 ticks, labels = np.arange(0.5, matrix.shape[0]), [ 128 f"{'0'*(num_qubits - len(f'{i:b}'))}{i:b}" for i in range(matrix.shape[0]) 129 ] 130 ax.set_xticks(ticks) 131 ax.set_xticklabels(labels) 132 ax.set_yticks(ticks) 133 ax.set_yticklabels(reversed(labels)) 134 ax.set_facecolor('#eeeeee') 135 if title is not None: 136 ax.set_title(title) 137 return ax 138 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cirq-core/cirq/vis/density_matrix.py b/cirq-core/cirq/vis/density_matrix.py --- a/cirq-core/cirq/vis/density_matrix.py +++ b/cirq-core/cirq/vis/density_matrix.py @@ -41,18 +41,18 @@ _image_opacity = 0.8 if not show_text else 0.4 circle_out = plt.Circle( - (x + 0.5, y + 0.5), radius=1 / _half_cell_size_after_padding, fill=False, color='#333333' + (x + 0.5, y + 0.5), radius=1 * _half_cell_size_after_padding, fill=False, color='#333333' ) circle_in = plt.Circle( (x + 0.5, y + 0.5), - radius=r / _half_cell_size_after_padding, + radius=r * _half_cell_size_after_padding, fill=True, color='IndianRed', alpha=_image_opacity, ) line = lines.Line2D( - (x + 0.5, x + 0.5 + np.cos(phase) / _half_cell_size_after_padding), - (y + 0.5, y + 0.5 + np.sin(phase) / _half_cell_size_after_padding), + (x + 0.5, x + 0.5 + np.cos(phase) * _half_cell_size_after_padding), + (y + 0.5, y + 0.5 + np.sin(phase) * _half_cell_size_after_padding), color='#333333', alpha=_image_opacity, ) @@ -128,7 +128,7 @@ f"{'0'*(num_qubits - len(f'{i:b}'))}{i:b}" for i in range(matrix.shape[0]) ] ax.set_xticks(ticks) - ax.set_xticklabels(labels) + ax.set_xticklabels(labels, rotation=90) ax.set_yticks(ticks) ax.set_yticklabels(reversed(labels)) ax.set_facecolor('#eeeeee')
{"golden_diff": "diff --git a/cirq-core/cirq/vis/density_matrix.py b/cirq-core/cirq/vis/density_matrix.py\n--- a/cirq-core/cirq/vis/density_matrix.py\n+++ b/cirq-core/cirq/vis/density_matrix.py\n@@ -41,18 +41,18 @@\n _image_opacity = 0.8 if not show_text else 0.4\n \n circle_out = plt.Circle(\n- (x + 0.5, y + 0.5), radius=1 / _half_cell_size_after_padding, fill=False, color='#333333'\n+ (x + 0.5, y + 0.5), radius=1 * _half_cell_size_after_padding, fill=False, color='#333333'\n )\n circle_in = plt.Circle(\n (x + 0.5, y + 0.5),\n- radius=r / _half_cell_size_after_padding,\n+ radius=r * _half_cell_size_after_padding,\n fill=True,\n color='IndianRed',\n alpha=_image_opacity,\n )\n line = lines.Line2D(\n- (x + 0.5, x + 0.5 + np.cos(phase) / _half_cell_size_after_padding),\n- (y + 0.5, y + 0.5 + np.sin(phase) / _half_cell_size_after_padding),\n+ (x + 0.5, x + 0.5 + np.cos(phase) * _half_cell_size_after_padding),\n+ (y + 0.5, y + 0.5 + np.sin(phase) * _half_cell_size_after_padding),\n color='#333333',\n alpha=_image_opacity,\n )\n@@ -128,7 +128,7 @@\n f\"{'0'*(num_qubits - len(f'{i:b}'))}{i:b}\" for i in range(matrix.shape[0])\n ]\n ax.set_xticks(ticks)\n- ax.set_xticklabels(labels)\n+ ax.set_xticklabels(labels, rotation=90)\n ax.set_yticks(ticks)\n ax.set_yticklabels(reversed(labels))\n ax.set_facecolor('#eeeeee')\n", "issue": "`cirq.plot_density_matrix` outputs buggy plots (abstract art) and doesn't scale with number of qubits. \n**Description of the issue**\r\n`cirq.plot_density_matrix` is buggy and output's random plots (tested in a colab) and would also doesn't scale well with number of qubits. \r\n\r\nMy guess is that this due to the magic numbers hardcoded in the code, which should be changed and computed on the fly based on the size of the density matrix. \r\n\r\nhttps://github.com/quantumlib/Cirq/blob/09a4667e3d1768fc2f1934b18ae52e5b13ecd378/cirq-core/cirq/vis/density_matrix.py#L39\r\n\r\n**How to reproduce the issue**\r\n\r\n```python\r\nimport cirq\r\ncirq.plot_density_matrix(cirq.testing.random_density_matrix(dim=4, random_state = 12345))\r\n```\r\n\r\n<details>\r\n\r\n![image](https://user-images.githubusercontent.com/7863287/148300842-9c302e93-1437-4e88-8513-0dcb5451186a.png)\r\n\r\n\r\n</details>\r\n\r\n**Cirq version**\r\n0.14.0.dev20220105215702\r\n\r\n\r\ncc @AnimeshSinha1309 \n`cirq.plot_density_matrix` outputs buggy plots (abstract art) and doesn't scale with number of qubits. \n**Description of the issue**\r\n`cirq.plot_density_matrix` is buggy and output's random plots (tested in a colab) and would also doesn't scale well with number of qubits. \r\n\r\nMy guess is that this due to the magic numbers hardcoded in the code, which should be changed and computed on the fly based on the size of the density matrix. \r\n\r\nhttps://github.com/quantumlib/Cirq/blob/09a4667e3d1768fc2f1934b18ae52e5b13ecd378/cirq-core/cirq/vis/density_matrix.py#L39\r\n\r\n**How to reproduce the issue**\r\n\r\n```python\r\nimport cirq\r\ncirq.plot_density_matrix(cirq.testing.random_density_matrix(dim=4, random_state = 12345))\r\n```\r\n\r\n<details>\r\n\r\n![image](https://user-images.githubusercontent.com/7863287/148300842-9c302e93-1437-4e88-8513-0dcb5451186a.png)\r\n\r\n\r\n</details>\r\n\r\n**Cirq version**\r\n0.14.0.dev20220105215702\r\n\r\n\r\ncc @AnimeshSinha1309 \n", "before_files": [{"content": "# Copyright 2021 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tool to visualize the magnitudes and phases in the density matrix\"\"\"\n\nfrom typing import Optional\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import lines, patches\n\nfrom cirq.qis.states import validate_density_matrix\n\n\ndef _plot_element_of_density_matrix(ax, x, y, r, phase, show_rect=False, show_text=False):\n \"\"\"Plots a single element of a density matrix\n\n Args:\n x: x coordinate of the cell we are plotting\n y: y coordinate of the cell we are plotting\n r: the amplitude of the qubit in that cell\n phase: phase of the qubit in that cell, in radians\n show_rect: Boolean on if to show the amplitude rectangle, used for diagonal elements\n show_text: Boolean on if to show text labels or not\n ax: The axes to plot on\n \"\"\"\n # Setting up a few magic numbers for graphics\n _half_cell_size_after_padding = (1 / 1.1) * 0.5\n _rectangle_margin = 0.01\n _image_opacity = 0.8 if not show_text else 0.4\n\n circle_out = plt.Circle(\n (x + 0.5, y + 0.5), radius=1 / _half_cell_size_after_padding, fill=False, color='#333333'\n )\n circle_in = plt.Circle(\n (x + 0.5, y + 0.5),\n radius=r / _half_cell_size_after_padding,\n fill=True,\n color='IndianRed',\n alpha=_image_opacity,\n )\n line = lines.Line2D(\n (x + 0.5, x + 0.5 + np.cos(phase) / _half_cell_size_after_padding),\n (y + 0.5, y + 0.5 + np.sin(phase) / _half_cell_size_after_padding),\n color='#333333',\n alpha=_image_opacity,\n )\n ax.add_artist(circle_in)\n ax.add_artist(circle_out)\n ax.add_artist(line)\n if show_rect:\n rect = patches.Rectangle(\n (x + _rectangle_margin, y + _rectangle_margin),\n 1.0 - 2 * _rectangle_margin,\n r * (1 - 2 * _rectangle_margin),\n alpha=0.25,\n )\n ax.add_artist(rect)\n if show_text:\n plt.text(\n x + 0.5,\n y + 0.5,\n f\"{np.round(r, decimals=2)}\\n{np.round(phase * 180 / np.pi, decimals=2)} deg\",\n horizontalalignment='center',\n verticalalignment='center',\n )\n\n\ndef plot_density_matrix(\n matrix: np.ndarray,\n ax: Optional[plt.Axes] = None,\n *,\n show_text: bool = False,\n title: Optional[str] = None,\n) -> plt.Axes:\n \"\"\"Generates a plot for a given density matrix.\n\n 1. Each entry of the density matrix, a complex number, is plotted as an\n Argand Diagram where the partially filled red circle represents the magnitude\n and the line represents the phase angle, going anti-clockwise from positive x - axis.\n 2. The blue rectangles on the diagonal elements represent the probability\n of measuring the system in state $|i\\rangle$.\n Rendering scheme is inspired from https://algassert.com/quirk\n\n Args:\n matrix: The density matrix to visualize\n show_text: If true, the density matrix values are also shown as text labels\n ax: The axes to plot on\n title: Title of the plot\n \"\"\"\n plt.style.use('ggplot')\n\n _padding_around_plot = 0.001\n\n matrix = matrix.astype(np.complex128)\n num_qubits = int(np.log2(matrix.shape[0]))\n validate_density_matrix(matrix, qid_shape=(2 ** num_qubits,))\n\n if ax is None:\n _, ax = plt.subplots(figsize=(10, 10))\n ax.set_xlim(0 - _padding_around_plot, 2 ** num_qubits + _padding_around_plot)\n ax.set_ylim(0 - _padding_around_plot, 2 ** num_qubits + _padding_around_plot)\n\n for i in range(matrix.shape[0]):\n for j in range(matrix.shape[1]):\n _plot_element_of_density_matrix(\n ax,\n i,\n j,\n np.abs(matrix[i][-j - 1]),\n np.angle(matrix[i][-j - 1]),\n show_rect=(i == matrix.shape[1] - j - 1),\n show_text=show_text,\n )\n\n ticks, labels = np.arange(0.5, matrix.shape[0]), [\n f\"{'0'*(num_qubits - len(f'{i:b}'))}{i:b}\" for i in range(matrix.shape[0])\n ]\n ax.set_xticks(ticks)\n ax.set_xticklabels(labels)\n ax.set_yticks(ticks)\n ax.set_yticklabels(reversed(labels))\n ax.set_facecolor('#eeeeee')\n if title is not None:\n ax.set_title(title)\n return ax\n", "path": "cirq-core/cirq/vis/density_matrix.py"}], "after_files": [{"content": "# Copyright 2021 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tool to visualize the magnitudes and phases in the density matrix\"\"\"\n\nfrom typing import Optional\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import lines, patches\n\nfrom cirq.qis.states import validate_density_matrix\n\n\ndef _plot_element_of_density_matrix(ax, x, y, r, phase, show_rect=False, show_text=False):\n \"\"\"Plots a single element of a density matrix\n\n Args:\n x: x coordinate of the cell we are plotting\n y: y coordinate of the cell we are plotting\n r: the amplitude of the qubit in that cell\n phase: phase of the qubit in that cell, in radians\n show_rect: Boolean on if to show the amplitude rectangle, used for diagonal elements\n show_text: Boolean on if to show text labels or not\n ax: The axes to plot on\n \"\"\"\n # Setting up a few magic numbers for graphics\n _half_cell_size_after_padding = (1 / 1.1) * 0.5\n _rectangle_margin = 0.01\n _image_opacity = 0.8 if not show_text else 0.4\n\n circle_out = plt.Circle(\n (x + 0.5, y + 0.5), radius=1 * _half_cell_size_after_padding, fill=False, color='#333333'\n )\n circle_in = plt.Circle(\n (x + 0.5, y + 0.5),\n radius=r * _half_cell_size_after_padding,\n fill=True,\n color='IndianRed',\n alpha=_image_opacity,\n )\n line = lines.Line2D(\n (x + 0.5, x + 0.5 + np.cos(phase) * _half_cell_size_after_padding),\n (y + 0.5, y + 0.5 + np.sin(phase) * _half_cell_size_after_padding),\n color='#333333',\n alpha=_image_opacity,\n )\n ax.add_artist(circle_in)\n ax.add_artist(circle_out)\n ax.add_artist(line)\n if show_rect:\n rect = patches.Rectangle(\n (x + _rectangle_margin, y + _rectangle_margin),\n 1.0 - 2 * _rectangle_margin,\n r * (1 - 2 * _rectangle_margin),\n alpha=0.25,\n )\n ax.add_artist(rect)\n if show_text:\n plt.text(\n x + 0.5,\n y + 0.5,\n f\"{np.round(r, decimals=2)}\\n{np.round(phase * 180 / np.pi, decimals=2)} deg\",\n horizontalalignment='center',\n verticalalignment='center',\n )\n\n\ndef plot_density_matrix(\n matrix: np.ndarray,\n ax: Optional[plt.Axes] = None,\n *,\n show_text: bool = False,\n title: Optional[str] = None,\n) -> plt.Axes:\n \"\"\"Generates a plot for a given density matrix.\n\n 1. Each entry of the density matrix, a complex number, is plotted as an\n Argand Diagram where the partially filled red circle represents the magnitude\n and the line represents the phase angle, going anti-clockwise from positive x - axis.\n 2. The blue rectangles on the diagonal elements represent the probability\n of measuring the system in state $|i\\rangle$.\n Rendering scheme is inspired from https://algassert.com/quirk\n\n Args:\n matrix: The density matrix to visualize\n show_text: If true, the density matrix values are also shown as text labels\n ax: The axes to plot on\n title: Title of the plot\n \"\"\"\n plt.style.use('ggplot')\n\n _padding_around_plot = 0.001\n\n matrix = matrix.astype(np.complex128)\n num_qubits = int(np.log2(matrix.shape[0]))\n validate_density_matrix(matrix, qid_shape=(2 ** num_qubits,))\n\n if ax is None:\n _, ax = plt.subplots(figsize=(10, 10))\n ax.set_xlim(0 - _padding_around_plot, 2 ** num_qubits + _padding_around_plot)\n ax.set_ylim(0 - _padding_around_plot, 2 ** num_qubits + _padding_around_plot)\n\n for i in range(matrix.shape[0]):\n for j in range(matrix.shape[1]):\n _plot_element_of_density_matrix(\n ax,\n i,\n j,\n np.abs(matrix[i][-j - 1]),\n np.angle(matrix[i][-j - 1]),\n show_rect=(i == matrix.shape[1] - j - 1),\n show_text=show_text,\n )\n\n ticks, labels = np.arange(0.5, matrix.shape[0]), [\n f\"{'0'*(num_qubits - len(f'{i:b}'))}{i:b}\" for i in range(matrix.shape[0])\n ]\n ax.set_xticks(ticks)\n ax.set_xticklabels(labels, rotation=90)\n ax.set_yticks(ticks)\n ax.set_yticklabels(reversed(labels))\n ax.set_facecolor('#eeeeee')\n if title is not None:\n ax.set_title(title)\n return ax\n", "path": "cirq-core/cirq/vis/density_matrix.py"}]}
2,477
490
gh_patches_debug_4858
rasdani/github-patches
git_diff
Gallopsled__pwntools-752
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- 3.0.3 Release Broken It appears that the archive uploaded to PyPI does not include README.md, which is referred to by setup.py. @Idolf can you update the release to include the README? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python2 2 import glob 3 import os 4 import platform 5 import sys 6 from distutils.command.install import INSTALL_SCHEMES 7 from distutils.sysconfig import get_python_inc 8 from distutils.util import convert_path 9 10 from setuptools import find_packages 11 from setuptools import setup 12 13 # Get all template files 14 templates = [] 15 for dirpath, dirnames, filenames in os.walk(convert_path('pwnlib/shellcraft/templates')): 16 for f in filenames: 17 templates.append(os.path.relpath(os.path.join(dirpath, f), 'pwnlib')) 18 19 # This makes pwntools-LICENSE.txt appear with the package folders 20 for scheme in INSTALL_SCHEMES.values(): 21 scheme['data'] = scheme['purelib'] 22 23 # Find all of the console scripts 24 console_scripts = [] 25 26 for filename in glob.glob('pwnlib/commandline/*'): 27 filename = os.path.basename(filename) 28 filename, ext = os.path.splitext(filename) 29 30 if ext != '.py' or '__init__' in filename: 31 continue 32 33 script = '%s=pwnlib.commandline.%s:main' % (filename, filename) 34 console_scripts.append(script) 35 36 install_requires = ['paramiko>=1.15.2', 37 'mako>=1.0.0', 38 'pyelftools>=0.2.4', 39 'capstone', 40 'ropgadget>=5.3', 41 'pyserial>=2.7', 42 'requests>=2.0', 43 'pip>=6.0.8', 44 'tox>=1.8.1', 45 'pygments>=2.0', 46 'pysocks', 47 'python-dateutil', 48 'pypandoc', 49 'packaging'] 50 51 # This is a hack until somebody ports psutil to OpenBSD 52 if platform.system() != 'OpenBSD': 53 install_requires.append('psutil>=2.1.3') 54 55 # Check that the user has installed the Python development headers 56 PythonH = os.path.join(get_python_inc(), 'Python.h') 57 if not os.path.exists(PythonH): 58 print >> sys.stderr, "You must install the Python development headers!" 59 print >> sys.stderr, "$ apt-get install python-dev" 60 sys.exit(-1) 61 62 # Convert README.md to reStructuredText for PyPI 63 long_description = '' 64 try: 65 import pypandoc 66 try: 67 pypandoc.get_pandoc_path() 68 except OSError: 69 pypandoc.download_pandoc() 70 long_description = pypandoc.convert_file('README.md', 'rst') 71 except ImportError: 72 pass 73 74 75 setup( 76 name = 'pwntools', 77 packages = find_packages(), 78 version = '3.0.3', 79 data_files = [('', 80 ['LICENSE-pwntools.txt', 81 ]), 82 ], 83 package_data = { 84 'pwnlib': [ 85 'data/crcsums.txt', 86 'data/useragents/useragents.txt', 87 'data/binutils/*', 88 'data/includes/*.h', 89 'data/includes/*/*.h', 90 ] + templates, 91 }, 92 entry_points = {'console_scripts': console_scripts}, 93 scripts = glob.glob("bin/*"), 94 description = "Pwntools CTF framework and exploit development library.", 95 long_description = long_description, 96 author = "Gallopsled et al.", 97 author_email = "#pwntools @ freenode.net", 98 url = 'https://pwntools.com', 99 download_url = "https://github.com/Gallopsled/pwntools/releases", 100 install_requires = install_requires, 101 license = "Mostly MIT, some GPL/BSD, see LICENSE-pwntools.txt", 102 keywords = 'pwntools exploit ctf capture the flag binary wargame overflow stack heap defcon', 103 classifiers = [ 104 'Development Status :: 5 - Production/Stable', 105 'Environment :: Console', 106 'Intended Audience :: Developers', 107 'Intended Audience :: Science/Research', 108 'Intended Audience :: System Administrators', 109 'License :: OSI Approved :: MIT License', 110 'Natural Language :: English', 111 'Operating System :: POSIX :: Linux', 112 'Programming Language :: Python :: 2.7', 113 'Topic :: Security', 114 'Topic :: Software Development :: Assemblers', 115 'Topic :: Software Development :: Debuggers', 116 'Topic :: Software Development :: Disassemblers', 117 'Topic :: Software Development :: Embedded Systems', 118 'Topic :: Software Development :: Libraries :: Python Modules', 119 'Topic :: System :: System Shells', 120 'Topic :: Utilities', 121 ] 122 ) 123 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -77,8 +77,7 @@ packages = find_packages(), version = '3.0.3', data_files = [('', - ['LICENSE-pwntools.txt', - ]), + glob.glob('*.md') + glob.glob('*.txt')), ], package_data = { 'pwnlib': [
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -77,8 +77,7 @@\n packages = find_packages(),\n version = '3.0.3',\n data_files = [('',\n- ['LICENSE-pwntools.txt',\n- ]),\n+ glob.glob('*.md') + glob.glob('*.txt')),\n ],\n package_data = {\n 'pwnlib': [\n", "issue": "3.0.3 Release Broken\nIt appears that the archive uploaded to PyPI does not include README.md, which is referred to by setup.py.\n\n@Idolf can you update the release to include the README?\n\n", "before_files": [{"content": "#!/usr/bin/env python2\nimport glob\nimport os\nimport platform\nimport sys\nfrom distutils.command.install import INSTALL_SCHEMES\nfrom distutils.sysconfig import get_python_inc\nfrom distutils.util import convert_path\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n# Get all template files\ntemplates = []\nfor dirpath, dirnames, filenames in os.walk(convert_path('pwnlib/shellcraft/templates')):\n for f in filenames:\n templates.append(os.path.relpath(os.path.join(dirpath, f), 'pwnlib'))\n\n# This makes pwntools-LICENSE.txt appear with the package folders\nfor scheme in INSTALL_SCHEMES.values():\n scheme['data'] = scheme['purelib']\n\n# Find all of the console scripts\nconsole_scripts = []\n\nfor filename in glob.glob('pwnlib/commandline/*'):\n filename = os.path.basename(filename)\n filename, ext = os.path.splitext(filename)\n\n if ext != '.py' or '__init__' in filename:\n continue\n\n script = '%s=pwnlib.commandline.%s:main' % (filename, filename)\n console_scripts.append(script)\n\ninstall_requires = ['paramiko>=1.15.2',\n 'mako>=1.0.0',\n 'pyelftools>=0.2.4',\n 'capstone',\n 'ropgadget>=5.3',\n 'pyserial>=2.7',\n 'requests>=2.0',\n 'pip>=6.0.8',\n 'tox>=1.8.1',\n 'pygments>=2.0',\n 'pysocks',\n 'python-dateutil',\n 'pypandoc',\n 'packaging']\n\n# This is a hack until somebody ports psutil to OpenBSD\nif platform.system() != 'OpenBSD':\n install_requires.append('psutil>=2.1.3')\n\n# Check that the user has installed the Python development headers\nPythonH = os.path.join(get_python_inc(), 'Python.h')\nif not os.path.exists(PythonH):\n print >> sys.stderr, \"You must install the Python development headers!\"\n print >> sys.stderr, \"$ apt-get install python-dev\"\n sys.exit(-1)\n\n# Convert README.md to reStructuredText for PyPI\nlong_description = ''\ntry:\n import pypandoc\n try:\n pypandoc.get_pandoc_path()\n except OSError:\n pypandoc.download_pandoc()\n long_description = pypandoc.convert_file('README.md', 'rst')\nexcept ImportError:\n pass\n\n\nsetup(\n name = 'pwntools',\n packages = find_packages(),\n version = '3.0.3',\n data_files = [('',\n ['LICENSE-pwntools.txt',\n ]),\n ],\n package_data = {\n 'pwnlib': [\n 'data/crcsums.txt',\n 'data/useragents/useragents.txt',\n 'data/binutils/*',\n 'data/includes/*.h',\n 'data/includes/*/*.h',\n ] + templates,\n },\n entry_points = {'console_scripts': console_scripts},\n scripts = glob.glob(\"bin/*\"),\n description = \"Pwntools CTF framework and exploit development library.\",\n long_description = long_description,\n author = \"Gallopsled et al.\",\n author_email = \"#pwntools @ freenode.net\",\n url = 'https://pwntools.com',\n download_url = \"https://github.com/Gallopsled/pwntools/releases\",\n install_requires = install_requires,\n license = \"Mostly MIT, some GPL/BSD, see LICENSE-pwntools.txt\",\n keywords = 'pwntools exploit ctf capture the flag binary wargame overflow stack heap defcon',\n classifiers = [\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Security',\n 'Topic :: Software Development :: Assemblers',\n 'Topic :: Software Development :: Debuggers',\n 'Topic :: Software Development :: Disassemblers',\n 'Topic :: Software Development :: Embedded Systems',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: System :: System Shells',\n 'Topic :: Utilities',\n ]\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python2\nimport glob\nimport os\nimport platform\nimport sys\nfrom distutils.command.install import INSTALL_SCHEMES\nfrom distutils.sysconfig import get_python_inc\nfrom distutils.util import convert_path\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n# Get all template files\ntemplates = []\nfor dirpath, dirnames, filenames in os.walk(convert_path('pwnlib/shellcraft/templates')):\n for f in filenames:\n templates.append(os.path.relpath(os.path.join(dirpath, f), 'pwnlib'))\n\n# This makes pwntools-LICENSE.txt appear with the package folders\nfor scheme in INSTALL_SCHEMES.values():\n scheme['data'] = scheme['purelib']\n\n# Find all of the console scripts\nconsole_scripts = []\n\nfor filename in glob.glob('pwnlib/commandline/*'):\n filename = os.path.basename(filename)\n filename, ext = os.path.splitext(filename)\n\n if ext != '.py' or '__init__' in filename:\n continue\n\n script = '%s=pwnlib.commandline.%s:main' % (filename, filename)\n console_scripts.append(script)\n\ninstall_requires = ['paramiko>=1.15.2',\n 'mako>=1.0.0',\n 'pyelftools>=0.2.4',\n 'capstone',\n 'ropgadget>=5.3',\n 'pyserial>=2.7',\n 'requests>=2.0',\n 'pip>=6.0.8',\n 'tox>=1.8.1',\n 'pygments>=2.0',\n 'pysocks',\n 'python-dateutil',\n 'pypandoc',\n 'packaging']\n\n# This is a hack until somebody ports psutil to OpenBSD\nif platform.system() != 'OpenBSD':\n install_requires.append('psutil>=2.1.3')\n\n# Check that the user has installed the Python development headers\nPythonH = os.path.join(get_python_inc(), 'Python.h')\nif not os.path.exists(PythonH):\n print >> sys.stderr, \"You must install the Python development headers!\"\n print >> sys.stderr, \"$ apt-get install python-dev\"\n sys.exit(-1)\n\n# Convert README.md to reStructuredText for PyPI\nlong_description = ''\ntry:\n import pypandoc\n try:\n pypandoc.get_pandoc_path()\n except OSError:\n pypandoc.download_pandoc()\n long_description = pypandoc.convert_file('README.md', 'rst')\nexcept ImportError:\n pass\n\n\nsetup(\n name = 'pwntools',\n packages = find_packages(),\n version = '3.0.3',\n data_files = [('',\n glob.glob('*.md') + glob.glob('*.txt')),\n ],\n package_data = {\n 'pwnlib': [\n 'data/crcsums.txt',\n 'data/useragents/useragents.txt',\n 'data/binutils/*',\n 'data/includes/*.h',\n 'data/includes/*/*.h',\n ] + templates,\n },\n entry_points = {'console_scripts': console_scripts},\n scripts = glob.glob(\"bin/*\"),\n description = \"Pwntools CTF framework and exploit development library.\",\n long_description = long_description,\n author = \"Gallopsled et al.\",\n author_email = \"#pwntools @ freenode.net\",\n url = 'https://pwntools.com',\n download_url = \"https://github.com/Gallopsled/pwntools/releases\",\n install_requires = install_requires,\n license = \"Mostly MIT, some GPL/BSD, see LICENSE-pwntools.txt\",\n keywords = 'pwntools exploit ctf capture the flag binary wargame overflow stack heap defcon',\n classifiers = [\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Security',\n 'Topic :: Software Development :: Assemblers',\n 'Topic :: Software Development :: Debuggers',\n 'Topic :: Software Development :: Disassemblers',\n 'Topic :: Software Development :: Embedded Systems',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: System :: System Shells',\n 'Topic :: Utilities',\n ]\n)\n", "path": "setup.py"}]}
1,556
100
gh_patches_debug_37468
rasdani/github-patches
git_diff
DataDog__dd-trace-py-1338
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- logging in `ddtrace.context.Context` causes deadlock ### Summary `Context.close_span` logs out any unfinished child spans when the parent span closes. if log injection is enabled, the patched logger [tries to get the current span](https://github.com/DataDog/dd-trace-py/blob/682f9d9fdf8a5ad4285cc6adc31e8177599f5d0a/ddtrace/contrib/logging/patch.py#L38) which [tries to grab the same lock](https://github.com/DataDog/dd-trace-py/blob/682f9d9fdf8a5ad4285cc6adc31e8177599f5d0a/ddtrace/context.py#L92-L100) that `Context.close_span` is already holding. https://github.com/DataDog/dd-trace-py/blob/682f9d9fdf8a5ad4285cc6adc31e8177599f5d0a/ddtrace/context.py#L125-L147 ### Which version of dd-trace-py are you using? `0.36.0` ### How can we reproduce your problem? run `DATADOG_TRACE_DEBUG=true DD_LOGS_INJECTION=true ddtrace-run python script.py` ```py # script.py import ddtrace def main(): with ddtrace.tracer.trace("main") as span: unfinished_span = ddtrace.tracer.trace("unfinished_span") # unfinished_span.finish() # <-- if this runs, deadlock will not occur if __name__ == "__main__": main() ``` ### What is the result that you get? script deadlocks and hangs ### What is the result that you expected? unfinished spans don't deadlock code ### Notes possibly similar to https://github.com/DataDog/dd-trace-py/issues/1003 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ddtrace/context.py` Content: ``` 1 import logging 2 import threading 3 4 from .constants import HOSTNAME_KEY, SAMPLING_PRIORITY_KEY, ORIGIN_KEY 5 from .internal.logger import get_logger 6 from .internal import hostname 7 from .settings import config 8 from .utils.formats import asbool, get_env 9 10 log = get_logger(__name__) 11 12 13 class Context(object): 14 """ 15 Context is used to keep track of a hierarchy of spans for the current 16 execution flow. During each logical execution, the same ``Context`` is 17 used to represent a single logical trace, even if the trace is built 18 asynchronously. 19 20 A single code execution may use multiple ``Context`` if part of the execution 21 must not be related to the current tracing. As example, a delayed job may 22 compose a standalone trace instead of being related to the same trace that 23 generates the job itself. On the other hand, if it's part of the same 24 ``Context``, it will be related to the original trace. 25 26 This data structure is thread-safe. 27 """ 28 _partial_flush_enabled = asbool(get_env('tracer', 'partial_flush_enabled', default=False)) 29 _partial_flush_min_spans = int(get_env('tracer', 'partial_flush_min_spans', default=500)) 30 31 def __init__(self, trace_id=None, span_id=None, sampling_priority=None, _dd_origin=None): 32 """ 33 Initialize a new thread-safe ``Context``. 34 35 :param int trace_id: trace_id of parent span 36 :param int span_id: span_id of parent span 37 """ 38 self._trace = [] 39 self._finished_spans = 0 40 self._current_span = None 41 self._lock = threading.Lock() 42 43 self._parent_trace_id = trace_id 44 self._parent_span_id = span_id 45 self._sampling_priority = sampling_priority 46 self._dd_origin = _dd_origin 47 48 @property 49 def trace_id(self): 50 """Return current context trace_id.""" 51 with self._lock: 52 return self._parent_trace_id 53 54 @property 55 def span_id(self): 56 """Return current context span_id.""" 57 with self._lock: 58 return self._parent_span_id 59 60 @property 61 def sampling_priority(self): 62 """Return current context sampling priority.""" 63 with self._lock: 64 return self._sampling_priority 65 66 @sampling_priority.setter 67 def sampling_priority(self, value): 68 """Set sampling priority.""" 69 with self._lock: 70 self._sampling_priority = value 71 72 def clone(self): 73 """ 74 Partially clones the current context. 75 It copies everything EXCEPT the registered and finished spans. 76 """ 77 with self._lock: 78 new_ctx = Context( 79 trace_id=self._parent_trace_id, 80 span_id=self._parent_span_id, 81 sampling_priority=self._sampling_priority, 82 ) 83 new_ctx._current_span = self._current_span 84 return new_ctx 85 86 def get_current_root_span(self): 87 """ 88 Return the root span of the context or None if it does not exist. 89 """ 90 return self._trace[0] if len(self._trace) > 0 else None 91 92 def get_current_span(self): 93 """ 94 Return the last active span that corresponds to the last inserted 95 item in the trace list. This cannot be considered as the current active 96 span in asynchronous environments, because some spans can be closed 97 earlier while child spans still need to finish their traced execution. 98 """ 99 with self._lock: 100 return self._current_span 101 102 def _set_current_span(self, span): 103 """ 104 Set current span internally. 105 106 Non-safe if not used with a lock. For internal Context usage only. 107 """ 108 self._current_span = span 109 if span: 110 self._parent_trace_id = span.trace_id 111 self._parent_span_id = span.span_id 112 else: 113 self._parent_span_id = None 114 115 def add_span(self, span): 116 """ 117 Add a span to the context trace list, keeping it as the last active span. 118 """ 119 with self._lock: 120 self._set_current_span(span) 121 122 self._trace.append(span) 123 span._context = self 124 125 def close_span(self, span): 126 """ 127 Mark a span as a finished, increasing the internal counter to prevent 128 cycles inside _trace list. 129 """ 130 with self._lock: 131 self._finished_spans += 1 132 self._set_current_span(span._parent) 133 134 # notify if the trace is not closed properly; this check is executed only 135 # if the debug logging is enabled and when the root span is closed 136 # for an unfinished trace. This logging is meant to be used for debugging 137 # reasons, and it doesn't mean that the trace is wrongly generated. 138 # In asynchronous environments, it's legit to close the root span before 139 # some children. On the other hand, asynchronous web frameworks still expect 140 # to close the root span after all the children. 141 if span.tracer and span.tracer.log.isEnabledFor(logging.DEBUG) and span._parent is None: 142 unfinished_spans = [x for x in self._trace if not x.finished] 143 if unfinished_spans: 144 log.debug('Root span "%s" closed, but the trace has %d unfinished spans:', 145 span.name, len(unfinished_spans)) 146 for wrong_span in unfinished_spans: 147 log.debug('\n%s', wrong_span.pprint()) 148 149 def _is_sampled(self): 150 return any(span.sampled for span in self._trace) 151 152 def get(self): 153 """ 154 Returns a tuple containing the trace list generated in the current context and 155 if the context is sampled or not. It returns (None, None) if the ``Context`` is 156 not finished. If a trace is returned, the ``Context`` will be reset so that it 157 can be re-used immediately. 158 159 This operation is thread-safe. 160 """ 161 with self._lock: 162 # All spans are finished? 163 if self._finished_spans == len(self._trace): 164 # get the trace 165 trace = self._trace 166 sampled = self._is_sampled() 167 sampling_priority = self._sampling_priority 168 # attach the sampling priority to the context root span 169 if sampled and sampling_priority is not None and trace: 170 trace[0].set_metric(SAMPLING_PRIORITY_KEY, sampling_priority) 171 origin = self._dd_origin 172 # attach the origin to the root span tag 173 if sampled and origin is not None and trace: 174 trace[0].set_tag(ORIGIN_KEY, origin) 175 176 # Set hostname tag if they requested it 177 if config.report_hostname: 178 # DEV: `get_hostname()` value is cached 179 trace[0].set_tag(HOSTNAME_KEY, hostname.get_hostname()) 180 181 # clean the current state 182 self._trace = [] 183 self._finished_spans = 0 184 self._parent_trace_id = None 185 self._parent_span_id = None 186 self._sampling_priority = None 187 return trace, sampled 188 189 elif self._partial_flush_enabled: 190 finished_spans = [t for t in self._trace if t.finished] 191 if len(finished_spans) >= self._partial_flush_min_spans: 192 # partial flush when enabled and we have more than the minimal required spans 193 trace = self._trace 194 sampled = self._is_sampled() 195 sampling_priority = self._sampling_priority 196 # attach the sampling priority to the context root span 197 if sampled and sampling_priority is not None and trace: 198 trace[0].set_metric(SAMPLING_PRIORITY_KEY, sampling_priority) 199 origin = self._dd_origin 200 # attach the origin to the root span tag 201 if sampled and origin is not None and trace: 202 trace[0].set_tag(ORIGIN_KEY, origin) 203 204 # Set hostname tag if they requested it 205 if config.report_hostname: 206 # DEV: `get_hostname()` value is cached 207 trace[0].set_tag(HOSTNAME_KEY, hostname.get_hostname()) 208 209 self._finished_spans = 0 210 211 # Any open spans will remain as `self._trace` 212 # Any finished spans will get returned to be flushed 213 self._trace = [t for t in self._trace if not t.finished] 214 215 return finished_spans, sampled 216 return None, None 217 ``` Path: `ddtrace/constants.py` Content: ``` 1 FILTERS_KEY = 'FILTERS' 2 SAMPLE_RATE_METRIC_KEY = '_sample_rate' 3 SAMPLING_PRIORITY_KEY = '_sampling_priority_v1' 4 ANALYTICS_SAMPLE_RATE_KEY = '_dd1.sr.eausr' 5 SAMPLING_AGENT_DECISION = '_dd.agent_psr' 6 SAMPLING_RULE_DECISION = '_dd.rule_psr' 7 SAMPLING_LIMIT_DECISION = '_dd.limit_psr' 8 ORIGIN_KEY = '_dd.origin' 9 HOSTNAME_KEY = '_dd.hostname' 10 ENV_KEY = 'env' 11 VERSION_KEY = 'version' 12 SERVICE_KEY = 'service.name' 13 SERVICE_VERSION_KEY = 'service.version' 14 SPAN_MEASURED_KEY = '_dd.measured' 15 16 NUMERIC_TAGS = (ANALYTICS_SAMPLE_RATE_KEY, ) 17 18 MANUAL_DROP_KEY = 'manual.drop' 19 MANUAL_KEEP_KEY = 'manual.keep' 20 ``` Path: `ddtrace/contrib/logging/patch.py` Content: ``` 1 import logging 2 3 import ddtrace 4 5 from ...utils.wrappers import unwrap as _u 6 from ...vendor.wrapt import wrap_function_wrapper as _w 7 8 RECORD_ATTR_TRACE_ID = "dd.trace_id" 9 RECORD_ATTR_SPAN_ID = "dd.span_id" 10 RECORD_ATTR_ENV = "dd.env" 11 RECORD_ATTR_VERSION = "dd.version" 12 RECORD_ATTR_SERVICE = "dd.service" 13 RECORD_ATTR_VALUE_ZERO = 0 14 RECORD_ATTR_VALUE_EMPTY = "" 15 16 ddtrace.config._add("logging", dict(tracer=None,)) # by default, override here for custom tracer 17 18 19 def _get_current_span(tracer=None): 20 """Helper to get the currently active span""" 21 if not tracer: 22 tracer = ddtrace.tracer 23 24 if not tracer.enabled: 25 return None 26 27 return tracer.current_span() 28 29 30 def _w_makeRecord(func, instance, args, kwargs): 31 # Get the LogRecord instance for this log 32 record = func(*args, **kwargs) 33 34 setattr(record, RECORD_ATTR_VERSION, ddtrace.config.version or "") 35 setattr(record, RECORD_ATTR_ENV, ddtrace.config.env or "") 36 setattr(record, RECORD_ATTR_SERVICE, ddtrace.config.service or "") 37 38 span = _get_current_span(tracer=ddtrace.config.logging.tracer) 39 if span: 40 setattr(record, RECORD_ATTR_TRACE_ID, span.trace_id) 41 setattr(record, RECORD_ATTR_SPAN_ID, span.span_id) 42 else: 43 setattr(record, RECORD_ATTR_TRACE_ID, RECORD_ATTR_VALUE_ZERO) 44 setattr(record, RECORD_ATTR_SPAN_ID, RECORD_ATTR_VALUE_ZERO) 45 46 return record 47 48 49 def patch(): 50 """ 51 Patch ``logging`` module in the Python Standard Library for injection of 52 tracer information by wrapping the base factory method ``Logger.makeRecord`` 53 """ 54 if getattr(logging, "_datadog_patch", False): 55 return 56 setattr(logging, "_datadog_patch", True) 57 58 _w(logging.Logger, "makeRecord", _w_makeRecord) 59 60 61 def unpatch(): 62 if getattr(logging, "_datadog_patch", False): 63 setattr(logging, "_datadog_patch", False) 64 65 _u(logging.Logger, "makeRecord") 66 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ddtrace/constants.py b/ddtrace/constants.py --- a/ddtrace/constants.py +++ b/ddtrace/constants.py @@ -17,3 +17,5 @@ MANUAL_DROP_KEY = 'manual.drop' MANUAL_KEEP_KEY = 'manual.keep' + +LOG_SPAN_KEY = '__datadog_log_span' diff --git a/ddtrace/context.py b/ddtrace/context.py --- a/ddtrace/context.py +++ b/ddtrace/context.py @@ -1,7 +1,7 @@ import logging import threading -from .constants import HOSTNAME_KEY, SAMPLING_PRIORITY_KEY, ORIGIN_KEY +from .constants import HOSTNAME_KEY, SAMPLING_PRIORITY_KEY, ORIGIN_KEY, LOG_SPAN_KEY from .internal.logger import get_logger from .internal import hostname from .settings import config @@ -139,12 +139,13 @@ # some children. On the other hand, asynchronous web frameworks still expect # to close the root span after all the children. if span.tracer and span.tracer.log.isEnabledFor(logging.DEBUG) and span._parent is None: + extra = {LOG_SPAN_KEY: span} unfinished_spans = [x for x in self._trace if not x.finished] if unfinished_spans: log.debug('Root span "%s" closed, but the trace has %d unfinished spans:', - span.name, len(unfinished_spans)) + span.name, len(unfinished_spans), extra=extra) for wrong_span in unfinished_spans: - log.debug('\n%s', wrong_span.pprint()) + log.debug('\n%s', wrong_span.pprint(), extra=extra) def _is_sampled(self): return any(span.sampled for span in self._trace) diff --git a/ddtrace/contrib/logging/patch.py b/ddtrace/contrib/logging/patch.py --- a/ddtrace/contrib/logging/patch.py +++ b/ddtrace/contrib/logging/patch.py @@ -35,7 +35,14 @@ setattr(record, RECORD_ATTR_ENV, ddtrace.config.env or "") setattr(record, RECORD_ATTR_SERVICE, ddtrace.config.service or "") - span = _get_current_span(tracer=ddtrace.config.logging.tracer) + # logs from internal logger may explicitly pass the current span to + # avoid deadlocks in getting the current span while already in locked code. + span_from_log = getattr(record, ddtrace.constants.LOG_SPAN_KEY, None) + if isinstance(span_from_log, ddtrace.Span): + span = span_from_log + else: + span = _get_current_span(tracer=ddtrace.config.logging.tracer) + if span: setattr(record, RECORD_ATTR_TRACE_ID, span.trace_id) setattr(record, RECORD_ATTR_SPAN_ID, span.span_id)
{"golden_diff": "diff --git a/ddtrace/constants.py b/ddtrace/constants.py\n--- a/ddtrace/constants.py\n+++ b/ddtrace/constants.py\n@@ -17,3 +17,5 @@\n \n MANUAL_DROP_KEY = 'manual.drop'\n MANUAL_KEEP_KEY = 'manual.keep'\n+\n+LOG_SPAN_KEY = '__datadog_log_span'\ndiff --git a/ddtrace/context.py b/ddtrace/context.py\n--- a/ddtrace/context.py\n+++ b/ddtrace/context.py\n@@ -1,7 +1,7 @@\n import logging\n import threading\n \n-from .constants import HOSTNAME_KEY, SAMPLING_PRIORITY_KEY, ORIGIN_KEY\n+from .constants import HOSTNAME_KEY, SAMPLING_PRIORITY_KEY, ORIGIN_KEY, LOG_SPAN_KEY\n from .internal.logger import get_logger\n from .internal import hostname\n from .settings import config\n@@ -139,12 +139,13 @@\n # some children. On the other hand, asynchronous web frameworks still expect\n # to close the root span after all the children.\n if span.tracer and span.tracer.log.isEnabledFor(logging.DEBUG) and span._parent is None:\n+ extra = {LOG_SPAN_KEY: span}\n unfinished_spans = [x for x in self._trace if not x.finished]\n if unfinished_spans:\n log.debug('Root span \"%s\" closed, but the trace has %d unfinished spans:',\n- span.name, len(unfinished_spans))\n+ span.name, len(unfinished_spans), extra=extra)\n for wrong_span in unfinished_spans:\n- log.debug('\\n%s', wrong_span.pprint())\n+ log.debug('\\n%s', wrong_span.pprint(), extra=extra)\n \n def _is_sampled(self):\n return any(span.sampled for span in self._trace)\ndiff --git a/ddtrace/contrib/logging/patch.py b/ddtrace/contrib/logging/patch.py\n--- a/ddtrace/contrib/logging/patch.py\n+++ b/ddtrace/contrib/logging/patch.py\n@@ -35,7 +35,14 @@\n setattr(record, RECORD_ATTR_ENV, ddtrace.config.env or \"\")\n setattr(record, RECORD_ATTR_SERVICE, ddtrace.config.service or \"\")\n \n- span = _get_current_span(tracer=ddtrace.config.logging.tracer)\n+ # logs from internal logger may explicitly pass the current span to\n+ # avoid deadlocks in getting the current span while already in locked code.\n+ span_from_log = getattr(record, ddtrace.constants.LOG_SPAN_KEY, None)\n+ if isinstance(span_from_log, ddtrace.Span):\n+ span = span_from_log\n+ else:\n+ span = _get_current_span(tracer=ddtrace.config.logging.tracer)\n+\n if span:\n setattr(record, RECORD_ATTR_TRACE_ID, span.trace_id)\n setattr(record, RECORD_ATTR_SPAN_ID, span.span_id)\n", "issue": "logging in `ddtrace.context.Context` causes deadlock\n### Summary\r\n`Context.close_span` logs out any unfinished child spans when the parent span closes. if log injection is enabled, the patched logger [tries to get the current span](https://github.com/DataDog/dd-trace-py/blob/682f9d9fdf8a5ad4285cc6adc31e8177599f5d0a/ddtrace/contrib/logging/patch.py#L38) which [tries to grab the same lock](https://github.com/DataDog/dd-trace-py/blob/682f9d9fdf8a5ad4285cc6adc31e8177599f5d0a/ddtrace/context.py#L92-L100) that `Context.close_span` is already holding.\r\n\r\nhttps://github.com/DataDog/dd-trace-py/blob/682f9d9fdf8a5ad4285cc6adc31e8177599f5d0a/ddtrace/context.py#L125-L147\r\n\r\n### Which version of dd-trace-py are you using?\r\n`0.36.0`\r\n\r\n### How can we reproduce your problem?\r\nrun `DATADOG_TRACE_DEBUG=true DD_LOGS_INJECTION=true ddtrace-run python script.py`\r\n```py\r\n# script.py\r\nimport ddtrace\r\n\r\ndef main():\r\n with ddtrace.tracer.trace(\"main\") as span:\r\n unfinished_span = ddtrace.tracer.trace(\"unfinished_span\")\r\n # unfinished_span.finish() # <-- if this runs, deadlock will not occur\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n```\r\n\r\n### What is the result that you get?\r\nscript deadlocks and hangs\r\n\r\n### What is the result that you expected?\r\nunfinished spans don't deadlock code\r\n\r\n### Notes\r\npossibly similar to https://github.com/DataDog/dd-trace-py/issues/1003\n", "before_files": [{"content": "import logging\nimport threading\n\nfrom .constants import HOSTNAME_KEY, SAMPLING_PRIORITY_KEY, ORIGIN_KEY\nfrom .internal.logger import get_logger\nfrom .internal import hostname\nfrom .settings import config\nfrom .utils.formats import asbool, get_env\n\nlog = get_logger(__name__)\n\n\nclass Context(object):\n \"\"\"\n Context is used to keep track of a hierarchy of spans for the current\n execution flow. During each logical execution, the same ``Context`` is\n used to represent a single logical trace, even if the trace is built\n asynchronously.\n\n A single code execution may use multiple ``Context`` if part of the execution\n must not be related to the current tracing. As example, a delayed job may\n compose a standalone trace instead of being related to the same trace that\n generates the job itself. On the other hand, if it's part of the same\n ``Context``, it will be related to the original trace.\n\n This data structure is thread-safe.\n \"\"\"\n _partial_flush_enabled = asbool(get_env('tracer', 'partial_flush_enabled', default=False))\n _partial_flush_min_spans = int(get_env('tracer', 'partial_flush_min_spans', default=500))\n\n def __init__(self, trace_id=None, span_id=None, sampling_priority=None, _dd_origin=None):\n \"\"\"\n Initialize a new thread-safe ``Context``.\n\n :param int trace_id: trace_id of parent span\n :param int span_id: span_id of parent span\n \"\"\"\n self._trace = []\n self._finished_spans = 0\n self._current_span = None\n self._lock = threading.Lock()\n\n self._parent_trace_id = trace_id\n self._parent_span_id = span_id\n self._sampling_priority = sampling_priority\n self._dd_origin = _dd_origin\n\n @property\n def trace_id(self):\n \"\"\"Return current context trace_id.\"\"\"\n with self._lock:\n return self._parent_trace_id\n\n @property\n def span_id(self):\n \"\"\"Return current context span_id.\"\"\"\n with self._lock:\n return self._parent_span_id\n\n @property\n def sampling_priority(self):\n \"\"\"Return current context sampling priority.\"\"\"\n with self._lock:\n return self._sampling_priority\n\n @sampling_priority.setter\n def sampling_priority(self, value):\n \"\"\"Set sampling priority.\"\"\"\n with self._lock:\n self._sampling_priority = value\n\n def clone(self):\n \"\"\"\n Partially clones the current context.\n It copies everything EXCEPT the registered and finished spans.\n \"\"\"\n with self._lock:\n new_ctx = Context(\n trace_id=self._parent_trace_id,\n span_id=self._parent_span_id,\n sampling_priority=self._sampling_priority,\n )\n new_ctx._current_span = self._current_span\n return new_ctx\n\n def get_current_root_span(self):\n \"\"\"\n Return the root span of the context or None if it does not exist.\n \"\"\"\n return self._trace[0] if len(self._trace) > 0 else None\n\n def get_current_span(self):\n \"\"\"\n Return the last active span that corresponds to the last inserted\n item in the trace list. This cannot be considered as the current active\n span in asynchronous environments, because some spans can be closed\n earlier while child spans still need to finish their traced execution.\n \"\"\"\n with self._lock:\n return self._current_span\n\n def _set_current_span(self, span):\n \"\"\"\n Set current span internally.\n\n Non-safe if not used with a lock. For internal Context usage only.\n \"\"\"\n self._current_span = span\n if span:\n self._parent_trace_id = span.trace_id\n self._parent_span_id = span.span_id\n else:\n self._parent_span_id = None\n\n def add_span(self, span):\n \"\"\"\n Add a span to the context trace list, keeping it as the last active span.\n \"\"\"\n with self._lock:\n self._set_current_span(span)\n\n self._trace.append(span)\n span._context = self\n\n def close_span(self, span):\n \"\"\"\n Mark a span as a finished, increasing the internal counter to prevent\n cycles inside _trace list.\n \"\"\"\n with self._lock:\n self._finished_spans += 1\n self._set_current_span(span._parent)\n\n # notify if the trace is not closed properly; this check is executed only\n # if the debug logging is enabled and when the root span is closed\n # for an unfinished trace. This logging is meant to be used for debugging\n # reasons, and it doesn't mean that the trace is wrongly generated.\n # In asynchronous environments, it's legit to close the root span before\n # some children. On the other hand, asynchronous web frameworks still expect\n # to close the root span after all the children.\n if span.tracer and span.tracer.log.isEnabledFor(logging.DEBUG) and span._parent is None:\n unfinished_spans = [x for x in self._trace if not x.finished]\n if unfinished_spans:\n log.debug('Root span \"%s\" closed, but the trace has %d unfinished spans:',\n span.name, len(unfinished_spans))\n for wrong_span in unfinished_spans:\n log.debug('\\n%s', wrong_span.pprint())\n\n def _is_sampled(self):\n return any(span.sampled for span in self._trace)\n\n def get(self):\n \"\"\"\n Returns a tuple containing the trace list generated in the current context and\n if the context is sampled or not. It returns (None, None) if the ``Context`` is\n not finished. If a trace is returned, the ``Context`` will be reset so that it\n can be re-used immediately.\n\n This operation is thread-safe.\n \"\"\"\n with self._lock:\n # All spans are finished?\n if self._finished_spans == len(self._trace):\n # get the trace\n trace = self._trace\n sampled = self._is_sampled()\n sampling_priority = self._sampling_priority\n # attach the sampling priority to the context root span\n if sampled and sampling_priority is not None and trace:\n trace[0].set_metric(SAMPLING_PRIORITY_KEY, sampling_priority)\n origin = self._dd_origin\n # attach the origin to the root span tag\n if sampled and origin is not None and trace:\n trace[0].set_tag(ORIGIN_KEY, origin)\n\n # Set hostname tag if they requested it\n if config.report_hostname:\n # DEV: `get_hostname()` value is cached\n trace[0].set_tag(HOSTNAME_KEY, hostname.get_hostname())\n\n # clean the current state\n self._trace = []\n self._finished_spans = 0\n self._parent_trace_id = None\n self._parent_span_id = None\n self._sampling_priority = None\n return trace, sampled\n\n elif self._partial_flush_enabled:\n finished_spans = [t for t in self._trace if t.finished]\n if len(finished_spans) >= self._partial_flush_min_spans:\n # partial flush when enabled and we have more than the minimal required spans\n trace = self._trace\n sampled = self._is_sampled()\n sampling_priority = self._sampling_priority\n # attach the sampling priority to the context root span\n if sampled and sampling_priority is not None and trace:\n trace[0].set_metric(SAMPLING_PRIORITY_KEY, sampling_priority)\n origin = self._dd_origin\n # attach the origin to the root span tag\n if sampled and origin is not None and trace:\n trace[0].set_tag(ORIGIN_KEY, origin)\n\n # Set hostname tag if they requested it\n if config.report_hostname:\n # DEV: `get_hostname()` value is cached\n trace[0].set_tag(HOSTNAME_KEY, hostname.get_hostname())\n\n self._finished_spans = 0\n\n # Any open spans will remain as `self._trace`\n # Any finished spans will get returned to be flushed\n self._trace = [t for t in self._trace if not t.finished]\n\n return finished_spans, sampled\n return None, None\n", "path": "ddtrace/context.py"}, {"content": "FILTERS_KEY = 'FILTERS'\nSAMPLE_RATE_METRIC_KEY = '_sample_rate'\nSAMPLING_PRIORITY_KEY = '_sampling_priority_v1'\nANALYTICS_SAMPLE_RATE_KEY = '_dd1.sr.eausr'\nSAMPLING_AGENT_DECISION = '_dd.agent_psr'\nSAMPLING_RULE_DECISION = '_dd.rule_psr'\nSAMPLING_LIMIT_DECISION = '_dd.limit_psr'\nORIGIN_KEY = '_dd.origin'\nHOSTNAME_KEY = '_dd.hostname'\nENV_KEY = 'env'\nVERSION_KEY = 'version'\nSERVICE_KEY = 'service.name'\nSERVICE_VERSION_KEY = 'service.version'\nSPAN_MEASURED_KEY = '_dd.measured'\n\nNUMERIC_TAGS = (ANALYTICS_SAMPLE_RATE_KEY, )\n\nMANUAL_DROP_KEY = 'manual.drop'\nMANUAL_KEEP_KEY = 'manual.keep'\n", "path": "ddtrace/constants.py"}, {"content": "import logging\n\nimport ddtrace\n\nfrom ...utils.wrappers import unwrap as _u\nfrom ...vendor.wrapt import wrap_function_wrapper as _w\n\nRECORD_ATTR_TRACE_ID = \"dd.trace_id\"\nRECORD_ATTR_SPAN_ID = \"dd.span_id\"\nRECORD_ATTR_ENV = \"dd.env\"\nRECORD_ATTR_VERSION = \"dd.version\"\nRECORD_ATTR_SERVICE = \"dd.service\"\nRECORD_ATTR_VALUE_ZERO = 0\nRECORD_ATTR_VALUE_EMPTY = \"\"\n\nddtrace.config._add(\"logging\", dict(tracer=None,)) # by default, override here for custom tracer\n\n\ndef _get_current_span(tracer=None):\n \"\"\"Helper to get the currently active span\"\"\"\n if not tracer:\n tracer = ddtrace.tracer\n\n if not tracer.enabled:\n return None\n\n return tracer.current_span()\n\n\ndef _w_makeRecord(func, instance, args, kwargs):\n # Get the LogRecord instance for this log\n record = func(*args, **kwargs)\n\n setattr(record, RECORD_ATTR_VERSION, ddtrace.config.version or \"\")\n setattr(record, RECORD_ATTR_ENV, ddtrace.config.env or \"\")\n setattr(record, RECORD_ATTR_SERVICE, ddtrace.config.service or \"\")\n\n span = _get_current_span(tracer=ddtrace.config.logging.tracer)\n if span:\n setattr(record, RECORD_ATTR_TRACE_ID, span.trace_id)\n setattr(record, RECORD_ATTR_SPAN_ID, span.span_id)\n else:\n setattr(record, RECORD_ATTR_TRACE_ID, RECORD_ATTR_VALUE_ZERO)\n setattr(record, RECORD_ATTR_SPAN_ID, RECORD_ATTR_VALUE_ZERO)\n\n return record\n\n\ndef patch():\n \"\"\"\n Patch ``logging`` module in the Python Standard Library for injection of\n tracer information by wrapping the base factory method ``Logger.makeRecord``\n \"\"\"\n if getattr(logging, \"_datadog_patch\", False):\n return\n setattr(logging, \"_datadog_patch\", True)\n\n _w(logging.Logger, \"makeRecord\", _w_makeRecord)\n\n\ndef unpatch():\n if getattr(logging, \"_datadog_patch\", False):\n setattr(logging, \"_datadog_patch\", False)\n\n _u(logging.Logger, \"makeRecord\")\n", "path": "ddtrace/contrib/logging/patch.py"}], "after_files": [{"content": "import logging\nimport threading\n\nfrom .constants import HOSTNAME_KEY, SAMPLING_PRIORITY_KEY, ORIGIN_KEY, LOG_SPAN_KEY\nfrom .internal.logger import get_logger\nfrom .internal import hostname\nfrom .settings import config\nfrom .utils.formats import asbool, get_env\n\nlog = get_logger(__name__)\n\n\nclass Context(object):\n \"\"\"\n Context is used to keep track of a hierarchy of spans for the current\n execution flow. During each logical execution, the same ``Context`` is\n used to represent a single logical trace, even if the trace is built\n asynchronously.\n\n A single code execution may use multiple ``Context`` if part of the execution\n must not be related to the current tracing. As example, a delayed job may\n compose a standalone trace instead of being related to the same trace that\n generates the job itself. On the other hand, if it's part of the same\n ``Context``, it will be related to the original trace.\n\n This data structure is thread-safe.\n \"\"\"\n _partial_flush_enabled = asbool(get_env('tracer', 'partial_flush_enabled', default=False))\n _partial_flush_min_spans = int(get_env('tracer', 'partial_flush_min_spans', default=500))\n\n def __init__(self, trace_id=None, span_id=None, sampling_priority=None, _dd_origin=None):\n \"\"\"\n Initialize a new thread-safe ``Context``.\n\n :param int trace_id: trace_id of parent span\n :param int span_id: span_id of parent span\n \"\"\"\n self._trace = []\n self._finished_spans = 0\n self._current_span = None\n self._lock = threading.Lock()\n\n self._parent_trace_id = trace_id\n self._parent_span_id = span_id\n self._sampling_priority = sampling_priority\n self._dd_origin = _dd_origin\n\n @property\n def trace_id(self):\n \"\"\"Return current context trace_id.\"\"\"\n with self._lock:\n return self._parent_trace_id\n\n @property\n def span_id(self):\n \"\"\"Return current context span_id.\"\"\"\n with self._lock:\n return self._parent_span_id\n\n @property\n def sampling_priority(self):\n \"\"\"Return current context sampling priority.\"\"\"\n with self._lock:\n return self._sampling_priority\n\n @sampling_priority.setter\n def sampling_priority(self, value):\n \"\"\"Set sampling priority.\"\"\"\n with self._lock:\n self._sampling_priority = value\n\n def clone(self):\n \"\"\"\n Partially clones the current context.\n It copies everything EXCEPT the registered and finished spans.\n \"\"\"\n with self._lock:\n new_ctx = Context(\n trace_id=self._parent_trace_id,\n span_id=self._parent_span_id,\n sampling_priority=self._sampling_priority,\n )\n new_ctx._current_span = self._current_span\n return new_ctx\n\n def get_current_root_span(self):\n \"\"\"\n Return the root span of the context or None if it does not exist.\n \"\"\"\n return self._trace[0] if len(self._trace) > 0 else None\n\n def get_current_span(self):\n \"\"\"\n Return the last active span that corresponds to the last inserted\n item in the trace list. This cannot be considered as the current active\n span in asynchronous environments, because some spans can be closed\n earlier while child spans still need to finish their traced execution.\n \"\"\"\n with self._lock:\n return self._current_span\n\n def _set_current_span(self, span):\n \"\"\"\n Set current span internally.\n\n Non-safe if not used with a lock. For internal Context usage only.\n \"\"\"\n self._current_span = span\n if span:\n self._parent_trace_id = span.trace_id\n self._parent_span_id = span.span_id\n else:\n self._parent_span_id = None\n\n def add_span(self, span):\n \"\"\"\n Add a span to the context trace list, keeping it as the last active span.\n \"\"\"\n with self._lock:\n self._set_current_span(span)\n\n self._trace.append(span)\n span._context = self\n\n def close_span(self, span):\n \"\"\"\n Mark a span as a finished, increasing the internal counter to prevent\n cycles inside _trace list.\n \"\"\"\n with self._lock:\n self._finished_spans += 1\n self._set_current_span(span._parent)\n\n # notify if the trace is not closed properly; this check is executed only\n # if the debug logging is enabled and when the root span is closed\n # for an unfinished trace. This logging is meant to be used for debugging\n # reasons, and it doesn't mean that the trace is wrongly generated.\n # In asynchronous environments, it's legit to close the root span before\n # some children. On the other hand, asynchronous web frameworks still expect\n # to close the root span after all the children.\n if span.tracer and span.tracer.log.isEnabledFor(logging.DEBUG) and span._parent is None:\n extra = {LOG_SPAN_KEY: span}\n unfinished_spans = [x for x in self._trace if not x.finished]\n if unfinished_spans:\n log.debug('Root span \"%s\" closed, but the trace has %d unfinished spans:',\n span.name, len(unfinished_spans), extra=extra)\n for wrong_span in unfinished_spans:\n log.debug('\\n%s', wrong_span.pprint(), extra=extra)\n\n def _is_sampled(self):\n return any(span.sampled for span in self._trace)\n\n def get(self):\n \"\"\"\n Returns a tuple containing the trace list generated in the current context and\n if the context is sampled or not. It returns (None, None) if the ``Context`` is\n not finished. If a trace is returned, the ``Context`` will be reset so that it\n can be re-used immediately.\n\n This operation is thread-safe.\n \"\"\"\n with self._lock:\n # All spans are finished?\n if self._finished_spans == len(self._trace):\n # get the trace\n trace = self._trace\n sampled = self._is_sampled()\n sampling_priority = self._sampling_priority\n # attach the sampling priority to the context root span\n if sampled and sampling_priority is not None and trace:\n trace[0].set_metric(SAMPLING_PRIORITY_KEY, sampling_priority)\n origin = self._dd_origin\n # attach the origin to the root span tag\n if sampled and origin is not None and trace:\n trace[0].set_tag(ORIGIN_KEY, origin)\n\n # Set hostname tag if they requested it\n if config.report_hostname:\n # DEV: `get_hostname()` value is cached\n trace[0].set_tag(HOSTNAME_KEY, hostname.get_hostname())\n\n # clean the current state\n self._trace = []\n self._finished_spans = 0\n self._parent_trace_id = None\n self._parent_span_id = None\n self._sampling_priority = None\n return trace, sampled\n\n elif self._partial_flush_enabled:\n finished_spans = [t for t in self._trace if t.finished]\n if len(finished_spans) >= self._partial_flush_min_spans:\n # partial flush when enabled and we have more than the minimal required spans\n trace = self._trace\n sampled = self._is_sampled()\n sampling_priority = self._sampling_priority\n # attach the sampling priority to the context root span\n if sampled and sampling_priority is not None and trace:\n trace[0].set_metric(SAMPLING_PRIORITY_KEY, sampling_priority)\n origin = self._dd_origin\n # attach the origin to the root span tag\n if sampled and origin is not None and trace:\n trace[0].set_tag(ORIGIN_KEY, origin)\n\n # Set hostname tag if they requested it\n if config.report_hostname:\n # DEV: `get_hostname()` value is cached\n trace[0].set_tag(HOSTNAME_KEY, hostname.get_hostname())\n\n self._finished_spans = 0\n\n # Any open spans will remain as `self._trace`\n # Any finished spans will get returned to be flushed\n self._trace = [t for t in self._trace if not t.finished]\n\n return finished_spans, sampled\n return None, None\n", "path": "ddtrace/context.py"}, {"content": "FILTERS_KEY = 'FILTERS'\nSAMPLE_RATE_METRIC_KEY = '_sample_rate'\nSAMPLING_PRIORITY_KEY = '_sampling_priority_v1'\nANALYTICS_SAMPLE_RATE_KEY = '_dd1.sr.eausr'\nSAMPLING_AGENT_DECISION = '_dd.agent_psr'\nSAMPLING_RULE_DECISION = '_dd.rule_psr'\nSAMPLING_LIMIT_DECISION = '_dd.limit_psr'\nORIGIN_KEY = '_dd.origin'\nHOSTNAME_KEY = '_dd.hostname'\nENV_KEY = 'env'\nVERSION_KEY = 'version'\nSERVICE_KEY = 'service.name'\nSERVICE_VERSION_KEY = 'service.version'\nSPAN_MEASURED_KEY = '_dd.measured'\n\nNUMERIC_TAGS = (ANALYTICS_SAMPLE_RATE_KEY, )\n\nMANUAL_DROP_KEY = 'manual.drop'\nMANUAL_KEEP_KEY = 'manual.keep'\n\nLOG_SPAN_KEY = '__datadog_log_span'\n", "path": "ddtrace/constants.py"}, {"content": "import logging\n\nimport ddtrace\n\nfrom ...utils.wrappers import unwrap as _u\nfrom ...vendor.wrapt import wrap_function_wrapper as _w\n\nRECORD_ATTR_TRACE_ID = \"dd.trace_id\"\nRECORD_ATTR_SPAN_ID = \"dd.span_id\"\nRECORD_ATTR_ENV = \"dd.env\"\nRECORD_ATTR_VERSION = \"dd.version\"\nRECORD_ATTR_SERVICE = \"dd.service\"\nRECORD_ATTR_VALUE_ZERO = 0\nRECORD_ATTR_VALUE_EMPTY = \"\"\n\nddtrace.config._add(\"logging\", dict(tracer=None,)) # by default, override here for custom tracer\n\n\ndef _get_current_span(tracer=None):\n \"\"\"Helper to get the currently active span\"\"\"\n if not tracer:\n tracer = ddtrace.tracer\n\n if not tracer.enabled:\n return None\n\n return tracer.current_span()\n\n\ndef _w_makeRecord(func, instance, args, kwargs):\n # Get the LogRecord instance for this log\n record = func(*args, **kwargs)\n\n setattr(record, RECORD_ATTR_VERSION, ddtrace.config.version or \"\")\n setattr(record, RECORD_ATTR_ENV, ddtrace.config.env or \"\")\n setattr(record, RECORD_ATTR_SERVICE, ddtrace.config.service or \"\")\n\n # logs from internal logger may explicitly pass the current span to\n # avoid deadlocks in getting the current span while already in locked code.\n span_from_log = getattr(record, ddtrace.constants.LOG_SPAN_KEY, None)\n if isinstance(span_from_log, ddtrace.Span):\n span = span_from_log\n else:\n span = _get_current_span(tracer=ddtrace.config.logging.tracer)\n\n if span:\n setattr(record, RECORD_ATTR_TRACE_ID, span.trace_id)\n setattr(record, RECORD_ATTR_SPAN_ID, span.span_id)\n else:\n setattr(record, RECORD_ATTR_TRACE_ID, RECORD_ATTR_VALUE_ZERO)\n setattr(record, RECORD_ATTR_SPAN_ID, RECORD_ATTR_VALUE_ZERO)\n\n return record\n\n\ndef patch():\n \"\"\"\n Patch ``logging`` module in the Python Standard Library for injection of\n tracer information by wrapping the base factory method ``Logger.makeRecord``\n \"\"\"\n if getattr(logging, \"_datadog_patch\", False):\n return\n setattr(logging, \"_datadog_patch\", True)\n\n _w(logging.Logger, \"makeRecord\", _w_makeRecord)\n\n\ndef unpatch():\n if getattr(logging, \"_datadog_patch\", False):\n setattr(logging, \"_datadog_patch\", False)\n\n _u(logging.Logger, \"makeRecord\")\n", "path": "ddtrace/contrib/logging/patch.py"}]}
3,844
615
gh_patches_debug_2598
rasdani/github-patches
git_diff
ivy-llc__ivy-13425
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- normal --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ivy/functional/frontends/torch/random_sampling.py` Content: ``` 1 import ivy 2 from ivy.func_wrapper import with_supported_dtypes 3 from ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back 4 5 try: 6 from torch import Generator 7 except ImportError: 8 from types import SimpleNamespace 9 10 Generator = SimpleNamespace 11 12 13 def seed() -> int: 14 """Returns a 64 bit number used to seed the RNG""" 15 return int(ivy.randint(-(2**63), 2**63 - 1)) 16 17 18 @to_ivy_arrays_and_back 19 def manual_seed(seed: int): 20 ivy.seed(seed_value=seed) 21 return Generator().manual_seed(seed) 22 23 24 @with_supported_dtypes( 25 { 26 "1.11.0 and below": ( 27 "float32", 28 "float64", 29 ) 30 }, 31 "torch", 32 ) 33 @to_ivy_arrays_and_back 34 def multinomial(input, num_samples, replacement=False, *, generator=None, out=None): 35 return ivy.multinomial( 36 num_samples + 1, # doesn't matter because `probs` is provided, but should be 37 # greater than the number of samples 38 num_samples, 39 probs=input, 40 replace=replacement, 41 out=out, 42 ) 43 44 45 @with_supported_dtypes( 46 { 47 "1.11.0 and below": ( 48 "float32", 49 "float64", 50 ) 51 }, 52 "torch", 53 ) 54 @to_ivy_arrays_and_back 55 def poisson(input, generator=None): 56 return ivy.poisson(input, shape=None) 57 58 59 @to_ivy_arrays_and_back 60 def rand( 61 size, 62 *, 63 generator=None, 64 out=None, 65 dtype=None, 66 layout=None, 67 device=None, 68 requires_grad=False, 69 pin_memory=False 70 ): 71 return ivy.random_uniform( 72 shape=size, 73 out=out, 74 dtype=dtype, 75 device=device, 76 ) 77 78 79 @to_ivy_arrays_and_back 80 def rand_like( 81 input, 82 *, 83 dtype=None, 84 layout=None, 85 device=None, 86 requires_grad=False, 87 memory_format=False 88 ): 89 shape = input.shape 90 if not dtype: 91 dtype = input.dtype 92 93 return ivy.random_uniform( 94 shape=shape, 95 dtype=dtype, 96 device=device, 97 ) 98 99 100 @to_ivy_arrays_and_back 101 def randn( 102 size, 103 *, 104 generator=None, 105 out=None, 106 dtype=None, 107 layout=None, 108 device=None, 109 requires_grad=False, 110 pin_memory=False 111 ): 112 return ivy.random_normal( 113 shape=size, 114 out=out, 115 dtype=dtype, 116 device=device, 117 ) 118 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ivy/functional/frontends/torch/random_sampling.py b/ivy/functional/frontends/torch/random_sampling.py --- a/ivy/functional/frontends/torch/random_sampling.py +++ b/ivy/functional/frontends/torch/random_sampling.py @@ -76,6 +76,20 @@ ) +@with_supported_dtypes( + { + "1.11.0 and below": ( + "float32", + "float64", + ) + }, + "torch", +) +@to_ivy_arrays_and_back +def normal(mean, std, *, generator=None, out=None): + return ivy.random_normal(mean=mean, std=std, out=out) + + @to_ivy_arrays_and_back def rand_like( input,
{"golden_diff": "diff --git a/ivy/functional/frontends/torch/random_sampling.py b/ivy/functional/frontends/torch/random_sampling.py\n--- a/ivy/functional/frontends/torch/random_sampling.py\n+++ b/ivy/functional/frontends/torch/random_sampling.py\n@@ -76,6 +76,20 @@\n )\n \n \n+@with_supported_dtypes(\n+ {\n+ \"1.11.0 and below\": (\n+ \"float32\",\n+ \"float64\",\n+ )\n+ },\n+ \"torch\",\n+)\n+@to_ivy_arrays_and_back\n+def normal(mean, std, *, generator=None, out=None):\n+ return ivy.random_normal(mean=mean, std=std, out=out)\n+ \n+\n @to_ivy_arrays_and_back\n def rand_like(\n input,\n", "issue": "normal\n\n", "before_files": [{"content": "import ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back\n\ntry:\n from torch import Generator\nexcept ImportError:\n from types import SimpleNamespace\n\n Generator = SimpleNamespace\n\n\ndef seed() -> int:\n \"\"\"Returns a 64 bit number used to seed the RNG\"\"\"\n return int(ivy.randint(-(2**63), 2**63 - 1))\n\n\n@to_ivy_arrays_and_back\ndef manual_seed(seed: int):\n ivy.seed(seed_value=seed)\n return Generator().manual_seed(seed)\n\n\n@with_supported_dtypes(\n {\n \"1.11.0 and below\": (\n \"float32\",\n \"float64\",\n )\n },\n \"torch\",\n)\n@to_ivy_arrays_and_back\ndef multinomial(input, num_samples, replacement=False, *, generator=None, out=None):\n return ivy.multinomial(\n num_samples + 1, # doesn't matter because `probs` is provided, but should be\n # greater than the number of samples\n num_samples,\n probs=input,\n replace=replacement,\n out=out,\n )\n\n\n@with_supported_dtypes(\n {\n \"1.11.0 and below\": (\n \"float32\",\n \"float64\",\n )\n },\n \"torch\",\n)\n@to_ivy_arrays_and_back\ndef poisson(input, generator=None):\n return ivy.poisson(input, shape=None)\n\n\n@to_ivy_arrays_and_back\ndef rand(\n size,\n *,\n generator=None,\n out=None,\n dtype=None,\n layout=None,\n device=None,\n requires_grad=False,\n pin_memory=False\n):\n return ivy.random_uniform(\n shape=size,\n out=out,\n dtype=dtype,\n device=device,\n )\n\n\n@to_ivy_arrays_and_back\ndef rand_like(\n input,\n *,\n dtype=None,\n layout=None,\n device=None,\n requires_grad=False,\n memory_format=False\n):\n shape = input.shape\n if not dtype:\n dtype = input.dtype\n\n return ivy.random_uniform(\n shape=shape,\n dtype=dtype,\n device=device,\n )\n\n\n@to_ivy_arrays_and_back\ndef randn(\n size,\n *,\n generator=None,\n out=None,\n dtype=None,\n layout=None,\n device=None,\n requires_grad=False,\n pin_memory=False\n):\n return ivy.random_normal(\n shape=size,\n out=out,\n dtype=dtype,\n device=device,\n )\n", "path": "ivy/functional/frontends/torch/random_sampling.py"}], "after_files": [{"content": "import ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back\n\ntry:\n from torch import Generator\nexcept ImportError:\n from types import SimpleNamespace\n\n Generator = SimpleNamespace\n\n\ndef seed() -> int:\n \"\"\"Returns a 64 bit number used to seed the RNG\"\"\"\n return int(ivy.randint(-(2**63), 2**63 - 1))\n\n\n@to_ivy_arrays_and_back\ndef manual_seed(seed: int):\n ivy.seed(seed_value=seed)\n return Generator().manual_seed(seed)\n\n\n@with_supported_dtypes(\n {\n \"1.11.0 and below\": (\n \"float32\",\n \"float64\",\n )\n },\n \"torch\",\n)\n@to_ivy_arrays_and_back\ndef multinomial(input, num_samples, replacement=False, *, generator=None, out=None):\n return ivy.multinomial(\n num_samples + 1, # doesn't matter because `probs` is provided, but should be\n # greater than the number of samples\n num_samples,\n probs=input,\n replace=replacement,\n out=out,\n )\n\n\n@with_supported_dtypes(\n {\n \"1.11.0 and below\": (\n \"float32\",\n \"float64\",\n )\n },\n \"torch\",\n)\n@to_ivy_arrays_and_back\ndef poisson(input, generator=None):\n return ivy.poisson(input, shape=None)\n\n\n@to_ivy_arrays_and_back\ndef rand(\n size,\n *,\n generator=None,\n out=None,\n dtype=None,\n layout=None,\n device=None,\n requires_grad=False,\n pin_memory=False\n):\n return ivy.random_uniform(\n shape=size,\n out=out,\n dtype=dtype,\n device=device,\n )\n\n\n@with_supported_dtypes(\n {\n \"1.11.0 and below\": (\n \"float32\",\n \"float64\",\n )\n },\n \"torch\",\n)\n@to_ivy_arrays_and_back\ndef normal(mean, std, *, generator=None, out=None):\n return ivy.random_normal(mean=mean, std=std, out=out)\n \n\n@to_ivy_arrays_and_back\ndef rand_like(\n input,\n *,\n dtype=None,\n layout=None,\n device=None,\n requires_grad=False,\n memory_format=False\n):\n shape = input.shape\n if not dtype:\n dtype = input.dtype\n\n return ivy.random_uniform(\n shape=shape,\n dtype=dtype,\n device=device,\n )\n\n\n@to_ivy_arrays_and_back\ndef randn(\n size,\n *,\n generator=None,\n out=None,\n dtype=None,\n layout=None,\n device=None,\n requires_grad=False,\n pin_memory=False\n):\n return ivy.random_normal(\n shape=size,\n out=out,\n dtype=dtype,\n device=device,\n )\n", "path": "ivy/functional/frontends/torch/random_sampling.py"}]}
1,083
181
gh_patches_debug_4790
rasdani/github-patches
git_diff
gratipay__gratipay.com-3040
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- show total ever given Suggested [via Twitter](https://twitter.com/tripflex/status/532597015210131456): > is there no way for me to see the total I have donated? I know I can see it weekly, but what about overall total? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `gratipay/utils/history.py` Content: ``` 1 def iter_payday_events(db, participant): 2 """Yields payday events for the given participant. 3 """ 4 username = participant.username 5 exchanges = db.all(""" 6 SELECT * 7 FROM exchanges 8 WHERE participant=%s 9 """, (username,), back_as=dict) 10 transfers = db.all(""" 11 SELECT * 12 FROM transfers 13 WHERE tipper=%(username)s OR tippee=%(username)s 14 """, locals(), back_as=dict) 15 16 if not (exchanges or transfers): 17 return 18 19 payday_dates = db.all(""" 20 SELECT ts_start::date 21 FROM paydays 22 ORDER BY ts_start ASC 23 """) 24 25 balance = participant.balance 26 prev_date = None 27 get_timestamp = lambda e: e['timestamp'] 28 events = sorted(exchanges+transfers, key=get_timestamp, reverse=True) 29 for event in events: 30 31 event['balance'] = balance 32 33 event_date = event['timestamp'].date() 34 if event_date != prev_date: 35 if prev_date: 36 yield dict(kind='day-close', balance=balance) 37 day_open = dict(kind='day-open', date=event_date, balance=balance) 38 if payday_dates: 39 while payday_dates and payday_dates[-1] > event_date: 40 payday_dates.pop() 41 payday_date = payday_dates[-1] if payday_dates else None 42 if event_date == payday_date: 43 day_open['payday_number'] = len(payday_dates) - 1 44 yield day_open 45 prev_date = event_date 46 47 if 'fee' in event: 48 if event['amount'] > 0: 49 kind = 'charge' 50 if event['status'] in (None, 'succeeded'): 51 balance -= event['amount'] 52 else: 53 kind = 'credit' 54 if event['status'] != 'failed': 55 balance -= event['amount'] - event['fee'] 56 else: 57 kind = 'transfer' 58 if event['tippee'] == username: 59 balance -= event['amount'] 60 else: 61 balance += event['amount'] 62 event['kind'] = kind 63 64 yield event 65 66 yield dict(kind='day-close', balance='0.00') 67 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/gratipay/utils/history.py b/gratipay/utils/history.py --- a/gratipay/utils/history.py +++ b/gratipay/utils/history.py @@ -16,6 +16,13 @@ if not (exchanges or transfers): return + if transfers: + yield dict( + kind='totals', + given=sum(t['amount'] for t in transfers if t['tipper'] == username), + received=sum(t['amount'] for t in transfers if t['tippee'] == username), + ) + payday_dates = db.all(""" SELECT ts_start::date FROM paydays
{"golden_diff": "diff --git a/gratipay/utils/history.py b/gratipay/utils/history.py\n--- a/gratipay/utils/history.py\n+++ b/gratipay/utils/history.py\n@@ -16,6 +16,13 @@\n if not (exchanges or transfers):\n return\n \n+ if transfers:\n+ yield dict(\n+ kind='totals',\n+ given=sum(t['amount'] for t in transfers if t['tipper'] == username),\n+ received=sum(t['amount'] for t in transfers if t['tippee'] == username),\n+ )\n+\n payday_dates = db.all(\"\"\"\n SELECT ts_start::date\n FROM paydays\n", "issue": "show total ever given\nSuggested [via Twitter](https://twitter.com/tripflex/status/532597015210131456):\n\n> is there no way for me to see the total I have donated? I know I can see it weekly, but what about overall total?\n\n", "before_files": [{"content": "def iter_payday_events(db, participant):\n \"\"\"Yields payday events for the given participant.\n \"\"\"\n username = participant.username\n exchanges = db.all(\"\"\"\n SELECT *\n FROM exchanges\n WHERE participant=%s\n \"\"\", (username,), back_as=dict)\n transfers = db.all(\"\"\"\n SELECT *\n FROM transfers\n WHERE tipper=%(username)s OR tippee=%(username)s\n \"\"\", locals(), back_as=dict)\n\n if not (exchanges or transfers):\n return\n\n payday_dates = db.all(\"\"\"\n SELECT ts_start::date\n FROM paydays\n ORDER BY ts_start ASC\n \"\"\")\n\n balance = participant.balance\n prev_date = None\n get_timestamp = lambda e: e['timestamp']\n events = sorted(exchanges+transfers, key=get_timestamp, reverse=True)\n for event in events:\n\n event['balance'] = balance\n\n event_date = event['timestamp'].date()\n if event_date != prev_date:\n if prev_date:\n yield dict(kind='day-close', balance=balance)\n day_open = dict(kind='day-open', date=event_date, balance=balance)\n if payday_dates:\n while payday_dates and payday_dates[-1] > event_date:\n payday_dates.pop()\n payday_date = payday_dates[-1] if payday_dates else None\n if event_date == payday_date:\n day_open['payday_number'] = len(payday_dates) - 1\n yield day_open\n prev_date = event_date\n\n if 'fee' in event:\n if event['amount'] > 0:\n kind = 'charge'\n if event['status'] in (None, 'succeeded'):\n balance -= event['amount']\n else:\n kind = 'credit'\n if event['status'] != 'failed':\n balance -= event['amount'] - event['fee']\n else:\n kind = 'transfer'\n if event['tippee'] == username:\n balance -= event['amount']\n else:\n balance += event['amount']\n event['kind'] = kind\n\n yield event\n\n yield dict(kind='day-close', balance='0.00')\n", "path": "gratipay/utils/history.py"}], "after_files": [{"content": "def iter_payday_events(db, participant):\n \"\"\"Yields payday events for the given participant.\n \"\"\"\n username = participant.username\n exchanges = db.all(\"\"\"\n SELECT *\n FROM exchanges\n WHERE participant=%s\n \"\"\", (username,), back_as=dict)\n transfers = db.all(\"\"\"\n SELECT *\n FROM transfers\n WHERE tipper=%(username)s OR tippee=%(username)s\n \"\"\", locals(), back_as=dict)\n\n if not (exchanges or transfers):\n return\n\n if transfers:\n yield dict(\n kind='totals',\n given=sum(t['amount'] for t in transfers if t['tipper'] == username),\n received=sum(t['amount'] for t in transfers if t['tippee'] == username),\n )\n\n payday_dates = db.all(\"\"\"\n SELECT ts_start::date\n FROM paydays\n ORDER BY ts_start ASC\n \"\"\")\n\n balance = participant.balance\n prev_date = None\n get_timestamp = lambda e: e['timestamp']\n events = sorted(exchanges+transfers, key=get_timestamp, reverse=True)\n for event in events:\n\n event['balance'] = balance\n\n event_date = event['timestamp'].date()\n if event_date != prev_date:\n if prev_date:\n yield dict(kind='day-close', balance=balance)\n day_open = dict(kind='day-open', date=event_date, balance=balance)\n if payday_dates:\n while payday_dates and payday_dates[-1] > event_date:\n payday_dates.pop()\n payday_date = payday_dates[-1] if payday_dates else None\n if event_date == payday_date:\n day_open['payday_number'] = len(payday_dates) - 1\n yield day_open\n prev_date = event_date\n\n if 'fee' in event:\n if event['amount'] > 0:\n kind = 'charge'\n if event['status'] in (None, 'succeeded'):\n balance -= event['amount']\n else:\n kind = 'credit'\n if event['status'] != 'failed':\n balance -= event['amount'] - event['fee']\n else:\n kind = 'transfer'\n if event['tippee'] == username:\n balance -= event['amount']\n else:\n balance += event['amount']\n event['kind'] = kind\n\n yield event\n\n yield dict(kind='day-close', balance='0.00')\n", "path": "gratipay/utils/history.py"}]}
922
142
gh_patches_debug_8048
rasdani/github-patches
git_diff
mosaicml__composer-756
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- BlurPool has incorrect docstrings BlurPool docstrings imply that parameters have default values, but I don't see those in the function signatures: https://github.com/mosaicml/composer/blob/dev/composer/algorithms/blurpool/blurpool.py#L96 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `composer/algorithms/blurpool/blurpool.py` Content: ``` 1 # Copyright 2021 MosaicML. All Rights Reserved. 2 3 from __future__ import annotations 4 5 import functools 6 import logging 7 from typing import Optional 8 9 import numpy as np 10 import torch 11 12 from composer.algorithms.blurpool.blurpool_layers import BlurConv2d, BlurMaxPool2d 13 from composer.core import Algorithm, Event, State 14 from composer.core.types import Optimizers 15 from composer.loggers import Logger 16 from composer.utils import module_surgery 17 18 log = logging.getLogger(__name__) 19 20 21 def apply_blurpool(model: torch.nn.Module, 22 replace_convs: bool = True, 23 replace_maxpools: bool = True, 24 blur_first: bool = True, 25 optimizers: Optional[Optimizers] = None) -> torch.nn.Module: 26 """Add anti-aliasing filters to the strided :class:`torch.nn.Conv2d` and/or :class:`torch.nn.MaxPool2d` modules 27 within `model`. 28 29 These filters increase invariance to small spatial shifts in the input 30 (`Zhang 2019 <http://proceedings.mlr.press/v97/zhang19a.html>`_). 31 32 Args: 33 model (torch.nn.Module): the model to modify in-place 34 replace_convs (bool, optional): replace strided :class:`torch.nn.Conv2d` modules with 35 :class:`.BlurConv2d` modules. Default: ``True``. 36 replace_maxpools (bool, optional): replace eligible :class:`torch.nn.MaxPool2d` modules 37 with :class:`.BlurMaxPool2d` modules. Default: ``True``. 38 blur_first (bool, optional): for ``replace_convs``, blur input before the associated 39 convolution. When set to ``False``, the convolution is applied with 40 a stride of 1 before the blurring, resulting in significant 41 overhead (though more closely matching 42 `the paper <http://proceedings.mlr.press/v97/zhang19a.html>`_). 43 See :class:`.BlurConv2d` for further discussion. Default: ``True``. 44 optimizers (Optimizers, optional): Existing optimizers bound to 45 ``model.parameters()``. All optimizers that have already been 46 constructed with ``model.parameters()`` must be specified here so 47 they will optimize the correct parameters. 48 49 If the optimizer(s) are constructed *after* calling this function, 50 then it is safe to omit this parameter. These optimizers will see 51 the correct model parameters. 52 53 Returns: 54 The modified model 55 56 Example: 57 .. testcode:: 58 59 import composer.functional as cf 60 from torchvision import models 61 model = models.resnet50() 62 cf.apply_blurpool(model) 63 """ 64 transforms = {} 65 if replace_maxpools: 66 transforms[torch.nn.MaxPool2d] = BlurMaxPool2d.from_maxpool2d 67 if replace_convs: 68 transforms[torch.nn.Conv2d] = functools.partial( 69 _maybe_replace_strided_conv2d, 70 blur_first=blur_first, 71 ) 72 module_surgery.replace_module_classes(model, optimizers=optimizers, policies=transforms) 73 _log_surgery_result(model) 74 75 return model 76 77 78 class BlurPool(Algorithm): 79 """`BlurPool <http://proceedings.mlr.press/v97/zhang19a.html>`_ adds anti-aliasing filters to convolutional layers 80 to increase accuracy and invariance to small shifts in the input. 81 82 Runs on :attr:`~composer.core.event.Event.INIT`. 83 84 Args: 85 replace_convs (bool): replace strided :class:`torch.nn.Conv2d` modules with 86 :class:`.BlurConv2d` modules. Default: ``True``. 87 replace_maxpools (bool): replace eligible :class:`torch.nn.MaxPool2d` modules 88 with :class:`.BlurMaxPool2d` modules. Default: ``True``. 89 blur_first (bool): when ``replace_convs`` is ``True``, blur input before the 90 associated convolution. When set to ``False``, the convolution is 91 applied with a stride of 1 before the blurring, resulting in 92 significant overhead (though more closely matching the paper). 93 See :class:`.BlurConv2d` for further discussion. Default: ``True``. 94 """ 95 96 def __init__(self, replace_convs: bool, replace_maxpools: bool, blur_first: bool) -> None: 97 self.replace_convs = replace_convs 98 self.replace_maxpools = replace_maxpools 99 self.blur_first = blur_first 100 101 if self.replace_maxpools is False and \ 102 self.replace_convs is False: 103 log.warning('Both replace_maxpool and replace_convs set to false ' 104 'BlurPool will not be modifying the model.') 105 106 def match(self, event: Event, state: State) -> bool: 107 """Runs on :attr:`~composer.core.event.Event.INIT`. 108 109 Args: 110 event (Event): The current event. 111 state (State): The current state. 112 Returns: 113 bool: True if this algorithm should run now. 114 """ 115 return event == Event.INIT 116 117 def apply(self, event: Event, state: State, logger: Logger) -> Optional[int]: 118 """Adds anti-aliasing filters to the maxpools and/or convolutions. 119 120 Args: 121 event (Event): the current event 122 state (State): the current trainer state 123 logger (Logger): the training logger 124 """ 125 assert state.model is not None 126 127 apply_blurpool(state.model, 128 optimizers=state.optimizers, 129 replace_convs=self.replace_convs, 130 replace_maxpools=self.replace_maxpools, 131 blur_first=self.blur_first) 132 self._log_results(event, state, logger) 133 134 def _log_results(self, event: Event, state: State, logger: Logger) -> None: 135 """Logs the result of BlurPool application, including the number of layers that have been replaced.""" 136 assert state.model is not None 137 138 num_blurpool_layers = module_surgery.count_module_instances(state.model, BlurMaxPool2d) 139 num_blurconv_layers = module_surgery.count_module_instances(state.model, BlurConv2d) 140 141 # python logger 142 log.info(f'Applied BlurPool to model {state.model.__class__.__name__} ' 143 f'with replace_maxpools={self.replace_maxpools}, ' 144 f'replace_convs={self.replace_convs}. ' 145 f'Model now has {num_blurpool_layers} BlurMaxPool2d ' 146 f'and {num_blurconv_layers} BlurConv2D layers.') 147 148 logger.data_fit({ 149 'blurpool/num_blurpool_layers': num_blurpool_layers, 150 'blurpool/num_blurconv_layers': num_blurconv_layers, 151 }) 152 153 154 def _log_surgery_result(model: torch.nn.Module): 155 num_blurpool_layers = module_surgery.count_module_instances(model, BlurMaxPool2d) 156 num_blurconv_layers = module_surgery.count_module_instances(model, BlurConv2d) 157 log.info(f'Applied BlurPool to model {model.__class__.__name__}. ' 158 f'Model now has {num_blurpool_layers} BlurMaxPool2d ' 159 f'and {num_blurconv_layers} BlurConv2D layers.') 160 161 162 def _maybe_replace_strided_conv2d(module: torch.nn.Conv2d, module_index: int, blur_first: bool): 163 if (np.max(module.stride) > 1 and module.in_channels >= 16): 164 return BlurConv2d.from_conv2d(module, module_index, blur_first=blur_first) 165 return None 166 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/composer/algorithms/blurpool/blurpool.py b/composer/algorithms/blurpool/blurpool.py --- a/composer/algorithms/blurpool/blurpool.py +++ b/composer/algorithms/blurpool/blurpool.py @@ -93,7 +93,7 @@ See :class:`.BlurConv2d` for further discussion. Default: ``True``. """ - def __init__(self, replace_convs: bool, replace_maxpools: bool, blur_first: bool) -> None: + def __init__(self, replace_convs: bool = True, replace_maxpools: bool = True, blur_first: bool = True) -> None: self.replace_convs = replace_convs self.replace_maxpools = replace_maxpools self.blur_first = blur_first
{"golden_diff": "diff --git a/composer/algorithms/blurpool/blurpool.py b/composer/algorithms/blurpool/blurpool.py\n--- a/composer/algorithms/blurpool/blurpool.py\n+++ b/composer/algorithms/blurpool/blurpool.py\n@@ -93,7 +93,7 @@\n See :class:`.BlurConv2d` for further discussion. Default: ``True``.\n \"\"\"\n \n- def __init__(self, replace_convs: bool, replace_maxpools: bool, blur_first: bool) -> None:\n+ def __init__(self, replace_convs: bool = True, replace_maxpools: bool = True, blur_first: bool = True) -> None:\n self.replace_convs = replace_convs\n self.replace_maxpools = replace_maxpools\n self.blur_first = blur_first\n", "issue": "BlurPool has incorrect docstrings\nBlurPool docstrings imply that parameters have default values, but I don't see those in the function signatures: https://github.com/mosaicml/composer/blob/dev/composer/algorithms/blurpool/blurpool.py#L96\r\n\n", "before_files": [{"content": "# Copyright 2021 MosaicML. All Rights Reserved.\n\nfrom __future__ import annotations\n\nimport functools\nimport logging\nfrom typing import Optional\n\nimport numpy as np\nimport torch\n\nfrom composer.algorithms.blurpool.blurpool_layers import BlurConv2d, BlurMaxPool2d\nfrom composer.core import Algorithm, Event, State\nfrom composer.core.types import Optimizers\nfrom composer.loggers import Logger\nfrom composer.utils import module_surgery\n\nlog = logging.getLogger(__name__)\n\n\ndef apply_blurpool(model: torch.nn.Module,\n replace_convs: bool = True,\n replace_maxpools: bool = True,\n blur_first: bool = True,\n optimizers: Optional[Optimizers] = None) -> torch.nn.Module:\n \"\"\"Add anti-aliasing filters to the strided :class:`torch.nn.Conv2d` and/or :class:`torch.nn.MaxPool2d` modules\n within `model`.\n\n These filters increase invariance to small spatial shifts in the input\n (`Zhang 2019 <http://proceedings.mlr.press/v97/zhang19a.html>`_).\n\n Args:\n model (torch.nn.Module): the model to modify in-place\n replace_convs (bool, optional): replace strided :class:`torch.nn.Conv2d` modules with\n :class:`.BlurConv2d` modules. Default: ``True``.\n replace_maxpools (bool, optional): replace eligible :class:`torch.nn.MaxPool2d` modules\n with :class:`.BlurMaxPool2d` modules. Default: ``True``.\n blur_first (bool, optional): for ``replace_convs``, blur input before the associated\n convolution. When set to ``False``, the convolution is applied with\n a stride of 1 before the blurring, resulting in significant\n overhead (though more closely matching\n `the paper <http://proceedings.mlr.press/v97/zhang19a.html>`_).\n See :class:`.BlurConv2d` for further discussion. Default: ``True``.\n optimizers (Optimizers, optional): Existing optimizers bound to\n ``model.parameters()``. All optimizers that have already been\n constructed with ``model.parameters()`` must be specified here so\n they will optimize the correct parameters.\n\n If the optimizer(s) are constructed *after* calling this function,\n then it is safe to omit this parameter. These optimizers will see\n the correct model parameters.\n\n Returns:\n The modified model\n\n Example:\n .. testcode::\n\n import composer.functional as cf\n from torchvision import models\n model = models.resnet50()\n cf.apply_blurpool(model)\n \"\"\"\n transforms = {}\n if replace_maxpools:\n transforms[torch.nn.MaxPool2d] = BlurMaxPool2d.from_maxpool2d\n if replace_convs:\n transforms[torch.nn.Conv2d] = functools.partial(\n _maybe_replace_strided_conv2d,\n blur_first=blur_first,\n )\n module_surgery.replace_module_classes(model, optimizers=optimizers, policies=transforms)\n _log_surgery_result(model)\n\n return model\n\n\nclass BlurPool(Algorithm):\n \"\"\"`BlurPool <http://proceedings.mlr.press/v97/zhang19a.html>`_ adds anti-aliasing filters to convolutional layers\n to increase accuracy and invariance to small shifts in the input.\n\n Runs on :attr:`~composer.core.event.Event.INIT`.\n\n Args:\n replace_convs (bool): replace strided :class:`torch.nn.Conv2d` modules with\n :class:`.BlurConv2d` modules. Default: ``True``.\n replace_maxpools (bool): replace eligible :class:`torch.nn.MaxPool2d` modules\n with :class:`.BlurMaxPool2d` modules. Default: ``True``.\n blur_first (bool): when ``replace_convs`` is ``True``, blur input before the\n associated convolution. When set to ``False``, the convolution is\n applied with a stride of 1 before the blurring, resulting in\n significant overhead (though more closely matching the paper).\n See :class:`.BlurConv2d` for further discussion. Default: ``True``.\n \"\"\"\n\n def __init__(self, replace_convs: bool, replace_maxpools: bool, blur_first: bool) -> None:\n self.replace_convs = replace_convs\n self.replace_maxpools = replace_maxpools\n self.blur_first = blur_first\n\n if self.replace_maxpools is False and \\\n self.replace_convs is False:\n log.warning('Both replace_maxpool and replace_convs set to false '\n 'BlurPool will not be modifying the model.')\n\n def match(self, event: Event, state: State) -> bool:\n \"\"\"Runs on :attr:`~composer.core.event.Event.INIT`.\n\n Args:\n event (Event): The current event.\n state (State): The current state.\n Returns:\n bool: True if this algorithm should run now.\n \"\"\"\n return event == Event.INIT\n\n def apply(self, event: Event, state: State, logger: Logger) -> Optional[int]:\n \"\"\"Adds anti-aliasing filters to the maxpools and/or convolutions.\n\n Args:\n event (Event): the current event\n state (State): the current trainer state\n logger (Logger): the training logger\n \"\"\"\n assert state.model is not None\n\n apply_blurpool(state.model,\n optimizers=state.optimizers,\n replace_convs=self.replace_convs,\n replace_maxpools=self.replace_maxpools,\n blur_first=self.blur_first)\n self._log_results(event, state, logger)\n\n def _log_results(self, event: Event, state: State, logger: Logger) -> None:\n \"\"\"Logs the result of BlurPool application, including the number of layers that have been replaced.\"\"\"\n assert state.model is not None\n\n num_blurpool_layers = module_surgery.count_module_instances(state.model, BlurMaxPool2d)\n num_blurconv_layers = module_surgery.count_module_instances(state.model, BlurConv2d)\n\n # python logger\n log.info(f'Applied BlurPool to model {state.model.__class__.__name__} '\n f'with replace_maxpools={self.replace_maxpools}, '\n f'replace_convs={self.replace_convs}. '\n f'Model now has {num_blurpool_layers} BlurMaxPool2d '\n f'and {num_blurconv_layers} BlurConv2D layers.')\n\n logger.data_fit({\n 'blurpool/num_blurpool_layers': num_blurpool_layers,\n 'blurpool/num_blurconv_layers': num_blurconv_layers,\n })\n\n\ndef _log_surgery_result(model: torch.nn.Module):\n num_blurpool_layers = module_surgery.count_module_instances(model, BlurMaxPool2d)\n num_blurconv_layers = module_surgery.count_module_instances(model, BlurConv2d)\n log.info(f'Applied BlurPool to model {model.__class__.__name__}. '\n f'Model now has {num_blurpool_layers} BlurMaxPool2d '\n f'and {num_blurconv_layers} BlurConv2D layers.')\n\n\ndef _maybe_replace_strided_conv2d(module: torch.nn.Conv2d, module_index: int, blur_first: bool):\n if (np.max(module.stride) > 1 and module.in_channels >= 16):\n return BlurConv2d.from_conv2d(module, module_index, blur_first=blur_first)\n return None\n", "path": "composer/algorithms/blurpool/blurpool.py"}], "after_files": [{"content": "# Copyright 2021 MosaicML. All Rights Reserved.\n\nfrom __future__ import annotations\n\nimport functools\nimport logging\nfrom typing import Optional\n\nimport numpy as np\nimport torch\n\nfrom composer.algorithms.blurpool.blurpool_layers import BlurConv2d, BlurMaxPool2d\nfrom composer.core import Algorithm, Event, State\nfrom composer.core.types import Optimizers\nfrom composer.loggers import Logger\nfrom composer.utils import module_surgery\n\nlog = logging.getLogger(__name__)\n\n\ndef apply_blurpool(model: torch.nn.Module,\n replace_convs: bool = True,\n replace_maxpools: bool = True,\n blur_first: bool = True,\n optimizers: Optional[Optimizers] = None) -> torch.nn.Module:\n \"\"\"Add anti-aliasing filters to the strided :class:`torch.nn.Conv2d` and/or :class:`torch.nn.MaxPool2d` modules\n within `model`.\n\n These filters increase invariance to small spatial shifts in the input\n (`Zhang 2019 <http://proceedings.mlr.press/v97/zhang19a.html>`_).\n\n Args:\n model (torch.nn.Module): the model to modify in-place\n replace_convs (bool, optional): replace strided :class:`torch.nn.Conv2d` modules with\n :class:`.BlurConv2d` modules. Default: ``True``.\n replace_maxpools (bool, optional): replace eligible :class:`torch.nn.MaxPool2d` modules\n with :class:`.BlurMaxPool2d` modules. Default: ``True``.\n blur_first (bool, optional): for ``replace_convs``, blur input before the associated\n convolution. When set to ``False``, the convolution is applied with\n a stride of 1 before the blurring, resulting in significant\n overhead (though more closely matching\n `the paper <http://proceedings.mlr.press/v97/zhang19a.html>`_).\n See :class:`.BlurConv2d` for further discussion. Default: ``True``.\n optimizers (Optimizers, optional): Existing optimizers bound to\n ``model.parameters()``. All optimizers that have already been\n constructed with ``model.parameters()`` must be specified here so\n they will optimize the correct parameters.\n\n If the optimizer(s) are constructed *after* calling this function,\n then it is safe to omit this parameter. These optimizers will see\n the correct model parameters.\n\n Returns:\n The modified model\n\n Example:\n .. testcode::\n\n import composer.functional as cf\n from torchvision import models\n model = models.resnet50()\n cf.apply_blurpool(model)\n \"\"\"\n transforms = {}\n if replace_maxpools:\n transforms[torch.nn.MaxPool2d] = BlurMaxPool2d.from_maxpool2d\n if replace_convs:\n transforms[torch.nn.Conv2d] = functools.partial(\n _maybe_replace_strided_conv2d,\n blur_first=blur_first,\n )\n module_surgery.replace_module_classes(model, optimizers=optimizers, policies=transforms)\n _log_surgery_result(model)\n\n return model\n\n\nclass BlurPool(Algorithm):\n \"\"\"`BlurPool <http://proceedings.mlr.press/v97/zhang19a.html>`_ adds anti-aliasing filters to convolutional layers\n to increase accuracy and invariance to small shifts in the input.\n\n Runs on :attr:`~composer.core.event.Event.INIT`.\n\n Args:\n replace_convs (bool): replace strided :class:`torch.nn.Conv2d` modules with\n :class:`.BlurConv2d` modules. Default: ``True``.\n replace_maxpools (bool): replace eligible :class:`torch.nn.MaxPool2d` modules\n with :class:`.BlurMaxPool2d` modules. Default: ``True``.\n blur_first (bool): when ``replace_convs`` is ``True``, blur input before the\n associated convolution. When set to ``False``, the convolution is\n applied with a stride of 1 before the blurring, resulting in\n significant overhead (though more closely matching the paper).\n See :class:`.BlurConv2d` for further discussion. Default: ``True``.\n \"\"\"\n\n def __init__(self, replace_convs: bool = True, replace_maxpools: bool = True, blur_first: bool = True) -> None:\n self.replace_convs = replace_convs\n self.replace_maxpools = replace_maxpools\n self.blur_first = blur_first\n\n if self.replace_maxpools is False and \\\n self.replace_convs is False:\n log.warning('Both replace_maxpool and replace_convs set to false '\n 'BlurPool will not be modifying the model.')\n\n def match(self, event: Event, state: State) -> bool:\n \"\"\"Runs on :attr:`~composer.core.event.Event.INIT`.\n\n Args:\n event (Event): The current event.\n state (State): The current state.\n Returns:\n bool: True if this algorithm should run now.\n \"\"\"\n return event == Event.INIT\n\n def apply(self, event: Event, state: State, logger: Logger) -> Optional[int]:\n \"\"\"Adds anti-aliasing filters to the maxpools and/or convolutions.\n\n Args:\n event (Event): the current event\n state (State): the current trainer state\n logger (Logger): the training logger\n \"\"\"\n assert state.model is not None\n\n apply_blurpool(state.model,\n optimizers=state.optimizers,\n replace_convs=self.replace_convs,\n replace_maxpools=self.replace_maxpools,\n blur_first=self.blur_first)\n self._log_results(event, state, logger)\n\n def _log_results(self, event: Event, state: State, logger: Logger) -> None:\n \"\"\"Logs the result of BlurPool application, including the number of layers that have been replaced.\"\"\"\n assert state.model is not None\n\n num_blurpool_layers = module_surgery.count_module_instances(state.model, BlurMaxPool2d)\n num_blurconv_layers = module_surgery.count_module_instances(state.model, BlurConv2d)\n\n # python logger\n log.info(f'Applied BlurPool to model {state.model.__class__.__name__} '\n f'with replace_maxpools={self.replace_maxpools}, '\n f'replace_convs={self.replace_convs}. '\n f'Model now has {num_blurpool_layers} BlurMaxPool2d '\n f'and {num_blurconv_layers} BlurConv2D layers.')\n\n logger.data_fit({\n 'blurpool/num_blurpool_layers': num_blurpool_layers,\n 'blurpool/num_blurconv_layers': num_blurconv_layers,\n })\n\n\ndef _log_surgery_result(model: torch.nn.Module):\n num_blurpool_layers = module_surgery.count_module_instances(model, BlurMaxPool2d)\n num_blurconv_layers = module_surgery.count_module_instances(model, BlurConv2d)\n log.info(f'Applied BlurPool to model {model.__class__.__name__}. '\n f'Model now has {num_blurpool_layers} BlurMaxPool2d '\n f'and {num_blurconv_layers} BlurConv2D layers.')\n\n\ndef _maybe_replace_strided_conv2d(module: torch.nn.Conv2d, module_index: int, blur_first: bool):\n if (np.max(module.stride) > 1 and module.in_channels >= 16):\n return BlurConv2d.from_conv2d(module, module_index, blur_first=blur_first)\n return None\n", "path": "composer/algorithms/blurpool/blurpool.py"}]}
2,392
186
gh_patches_debug_5928
rasdani/github-patches
git_diff
DataDog__dd-trace-py-616
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Unable to install with opentracing extras I was following the [OpenTracing setup instructions](https://docs.datadoghq.com/tracing/advanced_usage/?tab=python#opentracing) but got a warning about missing extras: ```sh (blah-YneZd-6L) sam@sam-Q325UAR ~/t/blah> pip list Package Version ---------- ------- pip 18.0 setuptools 40.4.1 wheel 0.31.1 (blah-YneZd-6L) sam@sam-Q325UAR ~/t/blah> python --version Python 2.7.14 (blah-YneZd-6L) sam@sam-Q325UAR ~/t/blah> pip --version pip 18.0 from /home/sam/.local/share/virtualenvs/blah-YneZd-6L/lib/python2.7/site-packages/pip (python 2.7) (blah-YneZd-6L) sam@sam-Q325UAR ~/t/blah> pip install 'ddtrace[opentracing] == 0.14.0' Collecting ddtrace[opentracing]==0.14.0 ddtrace 0.14.0 does not provide the extra 'opentracing' Collecting msgpack-python (from ddtrace[opentracing]==0.14.0) Collecting wrapt (from ddtrace[opentracing]==0.14.0) Installing collected packages: msgpack-python, wrapt, ddtrace Successfully installed ddtrace-0.14.0 msgpack-python-0.5.6 wrapt-1.10.11 ``` > `ddtrace 0.14.0 does not provide the extra 'opentracing'` Happens on Python 3.6 as well. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 import os 2 import sys 3 import re 4 5 from setuptools import setup, find_packages 6 from setuptools.command.test import test as TestCommand 7 8 9 def get_version(package): 10 """ 11 Return package version as listed in `__version__` in `__init__.py`. 12 This method prevents to import packages at setup-time. 13 """ 14 init_py = open(os.path.join(package, '__init__.py')).read() 15 return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1) 16 17 18 class Tox(TestCommand): 19 20 user_options = [('tox-args=', 'a', "Arguments to pass to tox")] 21 22 def initialize_options(self): 23 TestCommand.initialize_options(self) 24 self.tox_args = None 25 26 def finalize_options(self): 27 TestCommand.finalize_options(self) 28 self.test_args = [] 29 self.test_suite = True 30 31 def run_tests(self): 32 # import here, cause outside the eggs aren't loaded 33 import tox 34 import shlex 35 args = self.tox_args 36 if args: 37 args = shlex.split(self.tox_args) 38 errno = tox.cmdline(args=args) 39 sys.exit(errno) 40 41 42 version = get_version('ddtrace') 43 # Append a suffix to the version for dev builds 44 if os.environ.get('VERSION_SUFFIX'): 45 version = '{v}+{s}'.format( 46 v=version, 47 s=os.environ.get('VERSION_SUFFIX'), 48 ) 49 50 setup( 51 name='ddtrace', 52 version=version, 53 description='Datadog tracing code', 54 url='https://github.com/DataDog/dd-trace-py', 55 author='Datadog, Inc.', 56 author_email='[email protected]', 57 license='BSD', 58 packages=find_packages(exclude=['tests*']), 59 install_requires=[ 60 "wrapt", 61 "msgpack-python", 62 ], 63 extra_requires={ 64 # users can include opentracing by having: 65 # install_requires=["ddtrace[opentracing]", ...] 66 "opentracing": ["opentracing"], 67 }, 68 # plugin tox 69 tests_require=['tox', 'flake8'], 70 cmdclass={'test': Tox}, 71 entry_points={ 72 'console_scripts': [ 73 'ddtrace-run = ddtrace.commands.ddtrace_run:main' 74 ] 75 }, 76 classifiers=[ 77 'Programming Language :: Python', 78 'Programming Language :: Python :: 2.7', 79 'Programming Language :: Python :: 3.4', 80 'Programming Language :: Python :: 3.5', 81 'Programming Language :: Python :: 3.6', 82 ], 83 ) 84 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -60,10 +60,10 @@ "wrapt", "msgpack-python", ], - extra_requires={ + extras_require={ # users can include opentracing by having: # install_requires=["ddtrace[opentracing]", ...] - "opentracing": ["opentracing"], + "opentracing": ["opentracing>=2.0.0"], }, # plugin tox tests_require=['tox', 'flake8'],
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -60,10 +60,10 @@\n \"wrapt\",\n \"msgpack-python\",\n ],\n- extra_requires={\n+ extras_require={\n # users can include opentracing by having:\n # install_requires=[\"ddtrace[opentracing]\", ...]\n- \"opentracing\": [\"opentracing\"],\n+ \"opentracing\": [\"opentracing>=2.0.0\"],\n },\n # plugin tox\n tests_require=['tox', 'flake8'],\n", "issue": "Unable to install with opentracing extras\nI was following the [OpenTracing setup instructions](https://docs.datadoghq.com/tracing/advanced_usage/?tab=python#opentracing) but got a warning about missing extras:\r\n\r\n```sh\r\n(blah-YneZd-6L) sam@sam-Q325UAR ~/t/blah> pip list\r\nPackage Version\r\n---------- -------\r\npip 18.0 \r\nsetuptools 40.4.1 \r\nwheel 0.31.1 \r\n\r\n(blah-YneZd-6L) sam@sam-Q325UAR ~/t/blah> python --version\r\nPython 2.7.14\r\n\r\n(blah-YneZd-6L) sam@sam-Q325UAR ~/t/blah> pip --version\r\npip 18.0 from /home/sam/.local/share/virtualenvs/blah-YneZd-6L/lib/python2.7/site-packages/pip (python 2.7)\r\n\r\n\r\n(blah-YneZd-6L) sam@sam-Q325UAR ~/t/blah> pip install 'ddtrace[opentracing] == 0.14.0'\r\nCollecting ddtrace[opentracing]==0.14.0\r\n ddtrace 0.14.0 does not provide the extra 'opentracing'\r\nCollecting msgpack-python (from ddtrace[opentracing]==0.14.0)\r\nCollecting wrapt (from ddtrace[opentracing]==0.14.0)\r\nInstalling collected packages: msgpack-python, wrapt, ddtrace\r\nSuccessfully installed ddtrace-0.14.0 msgpack-python-0.5.6 wrapt-1.10.11\r\n```\r\n\r\n> `ddtrace 0.14.0 does not provide the extra 'opentracing'`\r\n\r\nHappens on Python 3.6 as well.\n", "before_files": [{"content": "import os\nimport sys\nimport re\n\nfrom setuptools import setup, find_packages\nfrom setuptools.command.test import test as TestCommand\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `__init__.py`.\n This method prevents to import packages at setup-time.\n \"\"\"\n init_py = open(os.path.join(package, '__init__.py')).read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\nclass Tox(TestCommand):\n\n user_options = [('tox-args=', 'a', \"Arguments to pass to tox\")]\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.tox_args = None\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n def run_tests(self):\n # import here, cause outside the eggs aren't loaded\n import tox\n import shlex\n args = self.tox_args\n if args:\n args = shlex.split(self.tox_args)\n errno = tox.cmdline(args=args)\n sys.exit(errno)\n\n\nversion = get_version('ddtrace')\n# Append a suffix to the version for dev builds\nif os.environ.get('VERSION_SUFFIX'):\n version = '{v}+{s}'.format(\n v=version,\n s=os.environ.get('VERSION_SUFFIX'),\n )\n\nsetup(\n name='ddtrace',\n version=version,\n description='Datadog tracing code',\n url='https://github.com/DataDog/dd-trace-py',\n author='Datadog, Inc.',\n author_email='[email protected]',\n license='BSD',\n packages=find_packages(exclude=['tests*']),\n install_requires=[\n \"wrapt\",\n \"msgpack-python\",\n ],\n extra_requires={\n # users can include opentracing by having:\n # install_requires=[\"ddtrace[opentracing]\", ...]\n \"opentracing\": [\"opentracing\"],\n },\n # plugin tox\n tests_require=['tox', 'flake8'],\n cmdclass={'test': Tox},\n entry_points={\n 'console_scripts': [\n 'ddtrace-run = ddtrace.commands.ddtrace_run:main'\n ]\n },\n classifiers=[\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "import os\nimport sys\nimport re\n\nfrom setuptools import setup, find_packages\nfrom setuptools.command.test import test as TestCommand\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `__init__.py`.\n This method prevents to import packages at setup-time.\n \"\"\"\n init_py = open(os.path.join(package, '__init__.py')).read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\nclass Tox(TestCommand):\n\n user_options = [('tox-args=', 'a', \"Arguments to pass to tox\")]\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.tox_args = None\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n def run_tests(self):\n # import here, cause outside the eggs aren't loaded\n import tox\n import shlex\n args = self.tox_args\n if args:\n args = shlex.split(self.tox_args)\n errno = tox.cmdline(args=args)\n sys.exit(errno)\n\n\nversion = get_version('ddtrace')\n# Append a suffix to the version for dev builds\nif os.environ.get('VERSION_SUFFIX'):\n version = '{v}+{s}'.format(\n v=version,\n s=os.environ.get('VERSION_SUFFIX'),\n )\n\nsetup(\n name='ddtrace',\n version=version,\n description='Datadog tracing code',\n url='https://github.com/DataDog/dd-trace-py',\n author='Datadog, Inc.',\n author_email='[email protected]',\n license='BSD',\n packages=find_packages(exclude=['tests*']),\n install_requires=[\n \"wrapt\",\n \"msgpack-python\",\n ],\n extras_require={\n # users can include opentracing by having:\n # install_requires=[\"ddtrace[opentracing]\", ...]\n \"opentracing\": [\"opentracing>=2.0.0\"],\n },\n # plugin tox\n tests_require=['tox', 'flake8'],\n cmdclass={'test': Tox},\n entry_points={\n 'console_scripts': [\n 'ddtrace-run = ddtrace.commands.ddtrace_run:main'\n ]\n },\n classifiers=[\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n)\n", "path": "setup.py"}]}
1,395
129
gh_patches_debug_30674
rasdani/github-patches
git_diff
mirumee__ariadne-181
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Example not working I followed the example from the documentation ```python from ariadne import QueryType, gql, make_executable_schema from ariadne.asgi import GraphQL type_defs = gql(""" type Query { hello: String! } """) # Create type instance for Query type defined in our schema... query = QueryType() # ...and assign our resolver function to its "hello" field. @query.field("hello") def resolve_hello(_, info): request = info.context["request"] user_agent = request.headers.get("user-agent", "guest") return "Hello, %s!" % user_agent schema = make_executable_schema(type_defs, query) app = GraphQL(schema, debug=True) ``` I started the app with `uvicorn test:app`, which starts just fine. But when I browse to http://localhost:8000/ it errors: ```ERROR: Exception in ASGI application Traceback (most recent call last): File "/home/jonas/.local/share/virtualenvs/api-ariadne-WWJK7Q5Z/lib/python3.7/site-packages/uvicorn/protocols/http/httptools_impl.py", line 372, in run_asgi result = await asgi(self.receive, self.send) File "/home/jonas/.local/share/virtualenvs/api-ariadne-WWJK7Q5Z/lib/python3.7/site-packages/ariadne/asgi.py", line 71, in handle_http await response(receive, send) TypeError: __call__() missing 1 required positional argument: 'send' INFO: ('127.0.0.1', 42908) - "GET / HTTP/1.1" 500 ``` I'm using ariadne 0.3.0 and tried with uvicorn version 0.7.1, 0.7.0 and 0.6.1 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #! /usr/bin/env python 2 import os 3 from setuptools import setup 4 5 CLASSIFIERS = [ 6 "Development Status :: 4 - Beta", 7 "Intended Audience :: Developers", 8 "License :: OSI Approved :: BSD License", 9 "Operating System :: OS Independent", 10 "Programming Language :: Python", 11 "Programming Language :: Python :: 3.6", 12 "Programming Language :: Python :: 3.7", 13 "Topic :: Software Development :: Libraries :: Python Modules", 14 ] 15 16 README_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "README.md") 17 with open(README_PATH, "r") as f: 18 README = f.read() 19 20 setup( 21 name="ariadne", 22 author="Mirumee Software", 23 author_email="[email protected]", 24 description="Ariadne is a Python library for implementing GraphQL servers.", 25 long_description=README, 26 long_description_content_type="text/markdown", 27 license="BSD", 28 version="0.4.0", 29 url="https://github.com/mirumee/ariadne", 30 packages=["ariadne"], 31 package_data={"ariadne": ["py.typed"]}, 32 install_requires=[ 33 "graphql-core-next>=1.0.3", 34 "starlette>=0.12.0b3", 35 "typing_extensions>=3.6.0", 36 ], 37 classifiers=CLASSIFIERS, 38 platforms=["any"], 39 zip_safe=False, 40 ) 41 ``` Path: `ariadne/asgi.py` Content: ``` 1 import asyncio 2 from typing import Any, AsyncGenerator, Dict, List, Optional, Tuple, cast 3 4 from graphql import GraphQLError, GraphQLSchema 5 from starlette.requests import Request 6 from starlette.responses import HTMLResponse, JSONResponse, Response 7 from starlette.types import Receive, Scope, Send 8 from starlette.websockets import WebSocket, WebSocketState, WebSocketDisconnect 9 10 from .constants import DATA_TYPE_JSON, PLAYGROUND_HTML 11 from .exceptions import HttpBadRequestError, HttpError 12 from .format_error import format_error 13 from .graphql import graphql, subscribe 14 from .logger import log_error 15 from .types import ContextValue, ErrorFormatter, RootValue 16 17 GQL_CONNECTION_INIT = "connection_init" # Client -> Server 18 GQL_CONNECTION_ACK = "connection_ack" # Server -> Client 19 GQL_CONNECTION_ERROR = "connection_error" # Server -> Client 20 21 # NOTE: The keep alive message type does not follow the standard due to connection optimizations 22 GQL_CONNECTION_KEEP_ALIVE = "ka" # Server -> Client 23 24 GQL_CONNECTION_TERMINATE = "connection_terminate" # Client -> Server 25 GQL_START = "start" # Client -> Server 26 GQL_DATA = "data" # Server -> Client 27 GQL_ERROR = "error" # Server -> Client 28 GQL_COMPLETE = "complete" # Server -> Client 29 GQL_STOP = "stop" # Client -> Server 30 31 32 class GraphQL: 33 def __init__( 34 self, 35 schema: GraphQLSchema, 36 *, 37 context_value: Optional[ContextValue] = None, 38 root_value: Optional[RootValue] = None, 39 debug: bool = False, 40 logger: Optional[str] = None, 41 error_formatter: ErrorFormatter = format_error, 42 keepalive: float = None, 43 ): 44 self.context_value = context_value 45 self.root_value = root_value 46 self.debug = debug 47 self.logger = logger 48 self.error_formatter = error_formatter 49 self.keepalive = keepalive 50 self.schema = schema 51 52 async def __call__(self, scope: Scope, receive: Receive, send: Send): 53 if scope["type"] == "http": 54 await self.handle_http(scope=scope, receive=receive, send=send) 55 elif scope["type"] == "websocket": 56 await self.handle_websocket(scope=scope, receive=receive, send=send) 57 else: 58 raise ValueError("Unknown scope type: %r" % (scope["type"],)) 59 60 async def get_context_for_request(self, request: Any) -> Any: 61 if callable(self.context_value): 62 return self.context_value(request) 63 return self.context_value or {"request": request} 64 65 async def handle_http(self, scope: Scope, receive: Receive, send: Send): 66 request = Request(scope=scope, receive=receive) 67 if request.method == "GET": 68 response = await self.render_playground(request) 69 elif request.method == "POST": 70 response = await self.graphql_http_server(request) 71 else: 72 response = Response(status_code=405) 73 await response(scope, receive, send) 74 75 async def handle_websocket(self, scope: Scope, receive: Receive, send: Send): 76 websocket = WebSocket(scope=scope, receive=receive, send=send) 77 await self.websocket_server(websocket) 78 79 async def extract_data_from_request( 80 self, request: Request 81 ) -> Tuple[str, Optional[dict], Optional[str]]: 82 if request.headers.get("Content-Type") != DATA_TYPE_JSON: 83 raise HttpBadRequestError( 84 "Posted content must be of type {}".format(DATA_TYPE_JSON) 85 ) 86 try: 87 return await request.json() 88 except ValueError: 89 raise HttpBadRequestError("Request body is not a valid JSON") 90 91 async def render_playground( # pylint: disable=unused-argument 92 self, request: Request 93 ) -> HTMLResponse: 94 return HTMLResponse(PLAYGROUND_HTML) 95 96 async def graphql_http_server(self, request: Request) -> Response: 97 try: 98 data = await self.extract_data_from_request(request) 99 except HttpError as error: 100 return Response(error.message or error.status, status_code=400) 101 102 context_value = await self.get_context_for_request(request) 103 success, response = await graphql( 104 self.schema, 105 data, 106 context_value=context_value, 107 root_value=self.root_value, 108 debug=self.debug, 109 logger=self.logger, 110 error_formatter=self.error_formatter, 111 ) 112 status_code = 200 if success else 400 113 return JSONResponse(response, status_code=status_code) 114 115 async def websocket_server(self, websocket: WebSocket) -> None: 116 subscriptions: Dict[str, AsyncGenerator] = {} 117 await websocket.accept("graphql-ws") 118 try: 119 while websocket.application_state != WebSocketState.DISCONNECTED: 120 message = await websocket.receive_json() 121 await self.handle_websocket_message(message, websocket, subscriptions) 122 except WebSocketDisconnect: 123 pass 124 finally: 125 for operation_id in subscriptions: 126 await subscriptions[operation_id].aclose() 127 128 async def handle_websocket_message( # pylint: disable=too-complex 129 self, 130 message: dict, 131 websocket: WebSocket, 132 subscriptions: Dict[str, AsyncGenerator], 133 ): 134 operation_id = cast(str, message.get("id")) 135 message_type = cast(str, message.get("type")) 136 137 if message_type == GQL_CONNECTION_INIT: 138 await websocket.send_json({"type": GQL_CONNECTION_ACK}) 139 asyncio.ensure_future(self.keep_websocket_alive(websocket)) 140 elif message_type == GQL_CONNECTION_TERMINATE: 141 await websocket.close() 142 elif message_type == GQL_START: 143 await self.start_websocket_subscription( 144 message.get("payload"), operation_id, websocket, subscriptions 145 ) 146 elif message_type == GQL_STOP: 147 if operation_id in subscriptions: 148 await subscriptions[operation_id].aclose() 149 del subscriptions[operation_id] 150 151 async def keep_websocket_alive(self, websocket: WebSocket): 152 if not self.keepalive: 153 return 154 while websocket.application_state != WebSocketState.DISCONNECTED: 155 try: 156 await websocket.send_json({"type": GQL_CONNECTION_KEEP_ALIVE}) 157 except WebSocketDisconnect: 158 return 159 await asyncio.sleep(self.keepalive) 160 161 async def start_websocket_subscription( 162 self, 163 data: Any, 164 operation_id: str, 165 websocket: WebSocket, 166 subscriptions: Dict[str, AsyncGenerator], 167 ): 168 context_value = await self.get_context_for_request(websocket) 169 success, results = await subscribe( 170 self.schema, 171 data, 172 context_value=context_value, 173 root_value=self.root_value, 174 debug=self.debug, 175 logger=self.logger, 176 error_formatter=self.error_formatter, 177 ) 178 if not success: 179 results = cast(List[dict], results) 180 await websocket.send_json( 181 {"type": GQL_ERROR, "id": operation_id, "payload": results[0]} 182 ) 183 else: 184 results = cast(AsyncGenerator, results) 185 subscriptions[operation_id] = results 186 asyncio.ensure_future( 187 self.observe_async_results(results, operation_id, websocket) 188 ) 189 190 async def observe_async_results( # pylint: disable=too-complex 191 self, results: AsyncGenerator, operation_id: str, websocket: WebSocket 192 ) -> None: 193 try: 194 async for result in results: 195 payload = {} 196 if result.data: 197 payload["data"] = result.data 198 if result.errors: 199 for error in result.errors: 200 log_error(error, self.logger) 201 payload["errors"] = [ 202 self.error_formatter(error, self.debug) 203 for error in result.errors 204 ] 205 await websocket.send_json( 206 {"type": GQL_DATA, "id": operation_id, "payload": payload} 207 ) 208 except Exception as error: # pylint: disable=broad-except 209 if not isinstance(error, GraphQLError): 210 error = GraphQLError(str(error), original_error=error) 211 log_error(error, self.logger) 212 payload = {"errors": [self.error_formatter(error, self.debug)]} 213 await websocket.send_json( 214 {"type": GQL_DATA, "id": operation_id, "payload": payload} 215 ) 216 217 if websocket.application_state != WebSocketState.DISCONNECTED: 218 await websocket.send_json({"type": GQL_COMPLETE, "id": operation_id}) 219 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ariadne/asgi.py b/ariadne/asgi.py --- a/ariadne/asgi.py +++ b/ariadne/asgi.py @@ -3,7 +3,7 @@ from graphql import GraphQLError, GraphQLSchema from starlette.requests import Request -from starlette.responses import HTMLResponse, JSONResponse, Response +from starlette.responses import HTMLResponse, JSONResponse, PlainTextResponse, Response from starlette.types import Receive, Scope, Send from starlette.websockets import WebSocket, WebSocketState, WebSocketDisconnect @@ -90,14 +90,14 @@ async def render_playground( # pylint: disable=unused-argument self, request: Request - ) -> HTMLResponse: + ) -> Response: return HTMLResponse(PLAYGROUND_HTML) async def graphql_http_server(self, request: Request) -> Response: try: data = await self.extract_data_from_request(request) except HttpError as error: - return Response(error.message or error.status, status_code=400) + return PlainTextResponse(error.message or error.status, status_code=400) context_value = await self.get_context_for_request(request) success, response = await graphql( diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -31,7 +31,7 @@ package_data={"ariadne": ["py.typed"]}, install_requires=[ "graphql-core-next>=1.0.3", - "starlette>=0.12.0b3", + "starlette<0.13", "typing_extensions>=3.6.0", ], classifiers=CLASSIFIERS,
{"golden_diff": "diff --git a/ariadne/asgi.py b/ariadne/asgi.py\n--- a/ariadne/asgi.py\n+++ b/ariadne/asgi.py\n@@ -3,7 +3,7 @@\n \n from graphql import GraphQLError, GraphQLSchema\n from starlette.requests import Request\n-from starlette.responses import HTMLResponse, JSONResponse, Response\n+from starlette.responses import HTMLResponse, JSONResponse, PlainTextResponse, Response\n from starlette.types import Receive, Scope, Send\n from starlette.websockets import WebSocket, WebSocketState, WebSocketDisconnect\n \n@@ -90,14 +90,14 @@\n \n async def render_playground( # pylint: disable=unused-argument\n self, request: Request\n- ) -> HTMLResponse:\n+ ) -> Response:\n return HTMLResponse(PLAYGROUND_HTML)\n \n async def graphql_http_server(self, request: Request) -> Response:\n try:\n data = await self.extract_data_from_request(request)\n except HttpError as error:\n- return Response(error.message or error.status, status_code=400)\n+ return PlainTextResponse(error.message or error.status, status_code=400)\n \n context_value = await self.get_context_for_request(request)\n success, response = await graphql(\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -31,7 +31,7 @@\n package_data={\"ariadne\": [\"py.typed\"]},\n install_requires=[\n \"graphql-core-next>=1.0.3\",\n- \"starlette>=0.12.0b3\",\n+ \"starlette<0.13\",\n \"typing_extensions>=3.6.0\",\n ],\n classifiers=CLASSIFIERS,\n", "issue": "Example not working\nI followed the example from the documentation\r\n```python\r\nfrom ariadne import QueryType, gql, make_executable_schema\r\nfrom ariadne.asgi import GraphQL\r\n\r\ntype_defs = gql(\"\"\"\r\n type Query {\r\n hello: String!\r\n }\r\n\"\"\")\r\n\r\n# Create type instance for Query type defined in our schema...\r\nquery = QueryType()\r\n\r\n# ...and assign our resolver function to its \"hello\" field.\r\[email protected](\"hello\")\r\ndef resolve_hello(_, info):\r\n request = info.context[\"request\"]\r\n user_agent = request.headers.get(\"user-agent\", \"guest\")\r\n return \"Hello, %s!\" % user_agent\r\n\r\nschema = make_executable_schema(type_defs, query)\r\napp = GraphQL(schema, debug=True)\r\n```\r\nI started the app with `uvicorn test:app`, which starts just fine.\r\nBut when I browse to http://localhost:8000/ it errors:\r\n```ERROR: Exception in ASGI application\r\nTraceback (most recent call last):\r\n File \"/home/jonas/.local/share/virtualenvs/api-ariadne-WWJK7Q5Z/lib/python3.7/site-packages/uvicorn/protocols/http/httptools_impl.py\", line 372, in run_asgi\r\n result = await asgi(self.receive, self.send)\r\n File \"/home/jonas/.local/share/virtualenvs/api-ariadne-WWJK7Q5Z/lib/python3.7/site-packages/ariadne/asgi.py\", line 71, in handle_http\r\n await response(receive, send)\r\nTypeError: __call__() missing 1 required positional argument: 'send'\r\nINFO: ('127.0.0.1', 42908) - \"GET / HTTP/1.1\" 500\r\n``` \r\nI'm using ariadne 0.3.0 and tried with uvicorn version 0.7.1, 0.7.0 and 0.6.1\n", "before_files": [{"content": "#! /usr/bin/env python\nimport os\nfrom setuptools import setup\n\nCLASSIFIERS = [\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n]\n\nREADME_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"README.md\")\nwith open(README_PATH, \"r\") as f:\n README = f.read()\n\nsetup(\n name=\"ariadne\",\n author=\"Mirumee Software\",\n author_email=\"[email protected]\",\n description=\"Ariadne is a Python library for implementing GraphQL servers.\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n license=\"BSD\",\n version=\"0.4.0\",\n url=\"https://github.com/mirumee/ariadne\",\n packages=[\"ariadne\"],\n package_data={\"ariadne\": [\"py.typed\"]},\n install_requires=[\n \"graphql-core-next>=1.0.3\",\n \"starlette>=0.12.0b3\",\n \"typing_extensions>=3.6.0\",\n ],\n classifiers=CLASSIFIERS,\n platforms=[\"any\"],\n zip_safe=False,\n)\n", "path": "setup.py"}, {"content": "import asyncio\nfrom typing import Any, AsyncGenerator, Dict, List, Optional, Tuple, cast\n\nfrom graphql import GraphQLError, GraphQLSchema\nfrom starlette.requests import Request\nfrom starlette.responses import HTMLResponse, JSONResponse, Response\nfrom starlette.types import Receive, Scope, Send\nfrom starlette.websockets import WebSocket, WebSocketState, WebSocketDisconnect\n\nfrom .constants import DATA_TYPE_JSON, PLAYGROUND_HTML\nfrom .exceptions import HttpBadRequestError, HttpError\nfrom .format_error import format_error\nfrom .graphql import graphql, subscribe\nfrom .logger import log_error\nfrom .types import ContextValue, ErrorFormatter, RootValue\n\nGQL_CONNECTION_INIT = \"connection_init\" # Client -> Server\nGQL_CONNECTION_ACK = \"connection_ack\" # Server -> Client\nGQL_CONNECTION_ERROR = \"connection_error\" # Server -> Client\n\n# NOTE: The keep alive message type does not follow the standard due to connection optimizations\nGQL_CONNECTION_KEEP_ALIVE = \"ka\" # Server -> Client\n\nGQL_CONNECTION_TERMINATE = \"connection_terminate\" # Client -> Server\nGQL_START = \"start\" # Client -> Server\nGQL_DATA = \"data\" # Server -> Client\nGQL_ERROR = \"error\" # Server -> Client\nGQL_COMPLETE = \"complete\" # Server -> Client\nGQL_STOP = \"stop\" # Client -> Server\n\n\nclass GraphQL:\n def __init__(\n self,\n schema: GraphQLSchema,\n *,\n context_value: Optional[ContextValue] = None,\n root_value: Optional[RootValue] = None,\n debug: bool = False,\n logger: Optional[str] = None,\n error_formatter: ErrorFormatter = format_error,\n keepalive: float = None,\n ):\n self.context_value = context_value\n self.root_value = root_value\n self.debug = debug\n self.logger = logger\n self.error_formatter = error_formatter\n self.keepalive = keepalive\n self.schema = schema\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send):\n if scope[\"type\"] == \"http\":\n await self.handle_http(scope=scope, receive=receive, send=send)\n elif scope[\"type\"] == \"websocket\":\n await self.handle_websocket(scope=scope, receive=receive, send=send)\n else:\n raise ValueError(\"Unknown scope type: %r\" % (scope[\"type\"],))\n\n async def get_context_for_request(self, request: Any) -> Any:\n if callable(self.context_value):\n return self.context_value(request)\n return self.context_value or {\"request\": request}\n\n async def handle_http(self, scope: Scope, receive: Receive, send: Send):\n request = Request(scope=scope, receive=receive)\n if request.method == \"GET\":\n response = await self.render_playground(request)\n elif request.method == \"POST\":\n response = await self.graphql_http_server(request)\n else:\n response = Response(status_code=405)\n await response(scope, receive, send)\n\n async def handle_websocket(self, scope: Scope, receive: Receive, send: Send):\n websocket = WebSocket(scope=scope, receive=receive, send=send)\n await self.websocket_server(websocket)\n\n async def extract_data_from_request(\n self, request: Request\n ) -> Tuple[str, Optional[dict], Optional[str]]:\n if request.headers.get(\"Content-Type\") != DATA_TYPE_JSON:\n raise HttpBadRequestError(\n \"Posted content must be of type {}\".format(DATA_TYPE_JSON)\n )\n try:\n return await request.json()\n except ValueError:\n raise HttpBadRequestError(\"Request body is not a valid JSON\")\n\n async def render_playground( # pylint: disable=unused-argument\n self, request: Request\n ) -> HTMLResponse:\n return HTMLResponse(PLAYGROUND_HTML)\n\n async def graphql_http_server(self, request: Request) -> Response:\n try:\n data = await self.extract_data_from_request(request)\n except HttpError as error:\n return Response(error.message or error.status, status_code=400)\n\n context_value = await self.get_context_for_request(request)\n success, response = await graphql(\n self.schema,\n data,\n context_value=context_value,\n root_value=self.root_value,\n debug=self.debug,\n logger=self.logger,\n error_formatter=self.error_formatter,\n )\n status_code = 200 if success else 400\n return JSONResponse(response, status_code=status_code)\n\n async def websocket_server(self, websocket: WebSocket) -> None:\n subscriptions: Dict[str, AsyncGenerator] = {}\n await websocket.accept(\"graphql-ws\")\n try:\n while websocket.application_state != WebSocketState.DISCONNECTED:\n message = await websocket.receive_json()\n await self.handle_websocket_message(message, websocket, subscriptions)\n except WebSocketDisconnect:\n pass\n finally:\n for operation_id in subscriptions:\n await subscriptions[operation_id].aclose()\n\n async def handle_websocket_message( # pylint: disable=too-complex\n self,\n message: dict,\n websocket: WebSocket,\n subscriptions: Dict[str, AsyncGenerator],\n ):\n operation_id = cast(str, message.get(\"id\"))\n message_type = cast(str, message.get(\"type\"))\n\n if message_type == GQL_CONNECTION_INIT:\n await websocket.send_json({\"type\": GQL_CONNECTION_ACK})\n asyncio.ensure_future(self.keep_websocket_alive(websocket))\n elif message_type == GQL_CONNECTION_TERMINATE:\n await websocket.close()\n elif message_type == GQL_START:\n await self.start_websocket_subscription(\n message.get(\"payload\"), operation_id, websocket, subscriptions\n )\n elif message_type == GQL_STOP:\n if operation_id in subscriptions:\n await subscriptions[operation_id].aclose()\n del subscriptions[operation_id]\n\n async def keep_websocket_alive(self, websocket: WebSocket):\n if not self.keepalive:\n return\n while websocket.application_state != WebSocketState.DISCONNECTED:\n try:\n await websocket.send_json({\"type\": GQL_CONNECTION_KEEP_ALIVE})\n except WebSocketDisconnect:\n return\n await asyncio.sleep(self.keepalive)\n\n async def start_websocket_subscription(\n self,\n data: Any,\n operation_id: str,\n websocket: WebSocket,\n subscriptions: Dict[str, AsyncGenerator],\n ):\n context_value = await self.get_context_for_request(websocket)\n success, results = await subscribe(\n self.schema,\n data,\n context_value=context_value,\n root_value=self.root_value,\n debug=self.debug,\n logger=self.logger,\n error_formatter=self.error_formatter,\n )\n if not success:\n results = cast(List[dict], results)\n await websocket.send_json(\n {\"type\": GQL_ERROR, \"id\": operation_id, \"payload\": results[0]}\n )\n else:\n results = cast(AsyncGenerator, results)\n subscriptions[operation_id] = results\n asyncio.ensure_future(\n self.observe_async_results(results, operation_id, websocket)\n )\n\n async def observe_async_results( # pylint: disable=too-complex\n self, results: AsyncGenerator, operation_id: str, websocket: WebSocket\n ) -> None:\n try:\n async for result in results:\n payload = {}\n if result.data:\n payload[\"data\"] = result.data\n if result.errors:\n for error in result.errors:\n log_error(error, self.logger)\n payload[\"errors\"] = [\n self.error_formatter(error, self.debug)\n for error in result.errors\n ]\n await websocket.send_json(\n {\"type\": GQL_DATA, \"id\": operation_id, \"payload\": payload}\n )\n except Exception as error: # pylint: disable=broad-except\n if not isinstance(error, GraphQLError):\n error = GraphQLError(str(error), original_error=error)\n log_error(error, self.logger)\n payload = {\"errors\": [self.error_formatter(error, self.debug)]}\n await websocket.send_json(\n {\"type\": GQL_DATA, \"id\": operation_id, \"payload\": payload}\n )\n\n if websocket.application_state != WebSocketState.DISCONNECTED:\n await websocket.send_json({\"type\": GQL_COMPLETE, \"id\": operation_id})\n", "path": "ariadne/asgi.py"}], "after_files": [{"content": "#! /usr/bin/env python\nimport os\nfrom setuptools import setup\n\nCLASSIFIERS = [\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n]\n\nREADME_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"README.md\")\nwith open(README_PATH, \"r\") as f:\n README = f.read()\n\nsetup(\n name=\"ariadne\",\n author=\"Mirumee Software\",\n author_email=\"[email protected]\",\n description=\"Ariadne is a Python library for implementing GraphQL servers.\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n license=\"BSD\",\n version=\"0.4.0\",\n url=\"https://github.com/mirumee/ariadne\",\n packages=[\"ariadne\"],\n package_data={\"ariadne\": [\"py.typed\"]},\n install_requires=[\n \"graphql-core-next>=1.0.3\",\n \"starlette<0.13\",\n \"typing_extensions>=3.6.0\",\n ],\n classifiers=CLASSIFIERS,\n platforms=[\"any\"],\n zip_safe=False,\n)\n", "path": "setup.py"}, {"content": "import asyncio\nfrom typing import Any, AsyncGenerator, Dict, List, Optional, Tuple, cast\n\nfrom graphql import GraphQLError, GraphQLSchema\nfrom starlette.requests import Request\nfrom starlette.responses import HTMLResponse, JSONResponse, PlainTextResponse, Response\nfrom starlette.types import Receive, Scope, Send\nfrom starlette.websockets import WebSocket, WebSocketState, WebSocketDisconnect\n\nfrom .constants import DATA_TYPE_JSON, PLAYGROUND_HTML\nfrom .exceptions import HttpBadRequestError, HttpError\nfrom .format_error import format_error\nfrom .graphql import graphql, subscribe\nfrom .logger import log_error\nfrom .types import ContextValue, ErrorFormatter, RootValue\n\nGQL_CONNECTION_INIT = \"connection_init\" # Client -> Server\nGQL_CONNECTION_ACK = \"connection_ack\" # Server -> Client\nGQL_CONNECTION_ERROR = \"connection_error\" # Server -> Client\n\n# NOTE: The keep alive message type does not follow the standard due to connection optimizations\nGQL_CONNECTION_KEEP_ALIVE = \"ka\" # Server -> Client\n\nGQL_CONNECTION_TERMINATE = \"connection_terminate\" # Client -> Server\nGQL_START = \"start\" # Client -> Server\nGQL_DATA = \"data\" # Server -> Client\nGQL_ERROR = \"error\" # Server -> Client\nGQL_COMPLETE = \"complete\" # Server -> Client\nGQL_STOP = \"stop\" # Client -> Server\n\n\nclass GraphQL:\n def __init__(\n self,\n schema: GraphQLSchema,\n *,\n context_value: Optional[ContextValue] = None,\n root_value: Optional[RootValue] = None,\n debug: bool = False,\n logger: Optional[str] = None,\n error_formatter: ErrorFormatter = format_error,\n keepalive: float = None,\n ):\n self.context_value = context_value\n self.root_value = root_value\n self.debug = debug\n self.logger = logger\n self.error_formatter = error_formatter\n self.keepalive = keepalive\n self.schema = schema\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send):\n if scope[\"type\"] == \"http\":\n await self.handle_http(scope=scope, receive=receive, send=send)\n elif scope[\"type\"] == \"websocket\":\n await self.handle_websocket(scope=scope, receive=receive, send=send)\n else:\n raise ValueError(\"Unknown scope type: %r\" % (scope[\"type\"],))\n\n async def get_context_for_request(self, request: Any) -> Any:\n if callable(self.context_value):\n return self.context_value(request)\n return self.context_value or {\"request\": request}\n\n async def handle_http(self, scope: Scope, receive: Receive, send: Send):\n request = Request(scope=scope, receive=receive)\n if request.method == \"GET\":\n response = await self.render_playground(request)\n elif request.method == \"POST\":\n response = await self.graphql_http_server(request)\n else:\n response = Response(status_code=405)\n await response(scope, receive, send)\n\n async def handle_websocket(self, scope: Scope, receive: Receive, send: Send):\n websocket = WebSocket(scope=scope, receive=receive, send=send)\n await self.websocket_server(websocket)\n\n async def extract_data_from_request(\n self, request: Request\n ) -> Tuple[str, Optional[dict], Optional[str]]:\n if request.headers.get(\"Content-Type\") != DATA_TYPE_JSON:\n raise HttpBadRequestError(\n \"Posted content must be of type {}\".format(DATA_TYPE_JSON)\n )\n try:\n return await request.json()\n except ValueError:\n raise HttpBadRequestError(\"Request body is not a valid JSON\")\n\n async def render_playground( # pylint: disable=unused-argument\n self, request: Request\n ) -> Response:\n return HTMLResponse(PLAYGROUND_HTML)\n\n async def graphql_http_server(self, request: Request) -> Response:\n try:\n data = await self.extract_data_from_request(request)\n except HttpError as error:\n return PlainTextResponse(error.message or error.status, status_code=400)\n\n context_value = await self.get_context_for_request(request)\n success, response = await graphql(\n self.schema,\n data,\n context_value=context_value,\n root_value=self.root_value,\n debug=self.debug,\n logger=self.logger,\n error_formatter=self.error_formatter,\n )\n status_code = 200 if success else 400\n return JSONResponse(response, status_code=status_code)\n\n async def websocket_server(self, websocket: WebSocket) -> None:\n subscriptions: Dict[str, AsyncGenerator] = {}\n await websocket.accept(\"graphql-ws\")\n try:\n while websocket.application_state != WebSocketState.DISCONNECTED:\n message = await websocket.receive_json()\n await self.handle_websocket_message(message, websocket, subscriptions)\n except WebSocketDisconnect:\n pass\n finally:\n for operation_id in subscriptions:\n await subscriptions[operation_id].aclose()\n\n async def handle_websocket_message( # pylint: disable=too-complex\n self,\n message: dict,\n websocket: WebSocket,\n subscriptions: Dict[str, AsyncGenerator],\n ):\n operation_id = cast(str, message.get(\"id\"))\n message_type = cast(str, message.get(\"type\"))\n\n if message_type == GQL_CONNECTION_INIT:\n await websocket.send_json({\"type\": GQL_CONNECTION_ACK})\n asyncio.ensure_future(self.keep_websocket_alive(websocket))\n elif message_type == GQL_CONNECTION_TERMINATE:\n await websocket.close()\n elif message_type == GQL_START:\n await self.start_websocket_subscription(\n message.get(\"payload\"), operation_id, websocket, subscriptions\n )\n elif message_type == GQL_STOP:\n if operation_id in subscriptions:\n await subscriptions[operation_id].aclose()\n del subscriptions[operation_id]\n\n async def keep_websocket_alive(self, websocket: WebSocket):\n if not self.keepalive:\n return\n while websocket.application_state != WebSocketState.DISCONNECTED:\n try:\n await websocket.send_json({\"type\": GQL_CONNECTION_KEEP_ALIVE})\n except WebSocketDisconnect:\n return\n await asyncio.sleep(self.keepalive)\n\n async def start_websocket_subscription(\n self,\n data: Any,\n operation_id: str,\n websocket: WebSocket,\n subscriptions: Dict[str, AsyncGenerator],\n ):\n context_value = await self.get_context_for_request(websocket)\n success, results = await subscribe(\n self.schema,\n data,\n context_value=context_value,\n root_value=self.root_value,\n debug=self.debug,\n logger=self.logger,\n error_formatter=self.error_formatter,\n )\n if not success:\n results = cast(List[dict], results)\n await websocket.send_json(\n {\"type\": GQL_ERROR, \"id\": operation_id, \"payload\": results[0]}\n )\n else:\n results = cast(AsyncGenerator, results)\n subscriptions[operation_id] = results\n asyncio.ensure_future(\n self.observe_async_results(results, operation_id, websocket)\n )\n\n async def observe_async_results( # pylint: disable=too-complex\n self, results: AsyncGenerator, operation_id: str, websocket: WebSocket\n ) -> None:\n try:\n async for result in results:\n payload = {}\n if result.data:\n payload[\"data\"] = result.data\n if result.errors:\n for error in result.errors:\n log_error(error, self.logger)\n payload[\"errors\"] = [\n self.error_formatter(error, self.debug)\n for error in result.errors\n ]\n await websocket.send_json(\n {\"type\": GQL_DATA, \"id\": operation_id, \"payload\": payload}\n )\n except Exception as error: # pylint: disable=broad-except\n if not isinstance(error, GraphQLError):\n error = GraphQLError(str(error), original_error=error)\n log_error(error, self.logger)\n payload = {\"errors\": [self.error_formatter(error, self.debug)]}\n await websocket.send_json(\n {\"type\": GQL_DATA, \"id\": operation_id, \"payload\": payload}\n )\n\n if websocket.application_state != WebSocketState.DISCONNECTED:\n await websocket.send_json({\"type\": GQL_COMPLETE, \"id\": operation_id})\n", "path": "ariadne/asgi.py"}]}
3,410
387
gh_patches_debug_33594
rasdani/github-patches
git_diff
saleor__saleor-5530
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ProductCreate weight mutation ![p1](https://user-images.githubusercontent.com/36409853/79555939-f2032000-80d2-11ea-88cc-5e1809e51ec5.PNG) ![p2](https://user-images.githubusercontent.com/36409853/79556174-4b6b4f00-80d3-11ea-96d7-f06ee76bf1ee.PNG) productCreate mutation I followed the "amount unit" as said in the comment, but returns a "'StringValue(value='10.00 kg')' value must be a float." Sorry just a beginner --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `saleor/graphql/core/scalars.py` Content: ``` 1 import decimal 2 3 import graphene 4 from measurement.measures import Weight 5 6 from ...core.weight import convert_weight, get_default_weight_unit 7 8 9 class Decimal(graphene.Float): 10 """Custom Decimal implementation. 11 12 Returns Decimal as a float in the API, 13 parses float to the Decimal on the way back. 14 """ 15 16 @staticmethod 17 def parse_literal(node): 18 try: 19 return decimal.Decimal(node.value) 20 except decimal.DecimalException: 21 return None 22 23 @staticmethod 24 def parse_value(value): 25 try: 26 # Converting the float to str before parsing it to Decimal is 27 # necessary to keep the decimal places as typed 28 value = str(value) 29 return decimal.Decimal(value) 30 except decimal.DecimalException: 31 return None 32 33 34 class WeightScalar(graphene.Scalar): 35 @staticmethod 36 def parse_value(value): 37 # Expects value to be a string "amount unit" separated by a single 38 # space. 39 try: 40 value = decimal.Decimal(value) 41 except decimal.DecimalException: 42 return None 43 default_unit = get_default_weight_unit() 44 return Weight(**{default_unit: value}) 45 46 @staticmethod 47 def serialize(weight): 48 if isinstance(weight, Weight): 49 default_unit = get_default_weight_unit() 50 if weight.unit != default_unit: 51 weight = convert_weight(weight, default_unit) 52 return str(weight) 53 return None 54 55 @staticmethod 56 def parse_literal(node): 57 return node 58 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/saleor/graphql/core/scalars.py b/saleor/graphql/core/scalars.py --- a/saleor/graphql/core/scalars.py +++ b/saleor/graphql/core/scalars.py @@ -1,6 +1,8 @@ import decimal import graphene +from graphql.language import ast +from graphql.error import GraphQLError from measurement.measures import Weight from ...core.weight import convert_weight, get_default_weight_unit @@ -34,14 +36,14 @@ class WeightScalar(graphene.Scalar): @staticmethod def parse_value(value): - # Expects value to be a string "amount unit" separated by a single - # space. - try: - value = decimal.Decimal(value) - except decimal.DecimalException: - return None - default_unit = get_default_weight_unit() - return Weight(**{default_unit: value}) + weight = None + if isinstance(value, dict): + weight = Weight(**{value["unit"]: value["value"]}) + else: + weight = WeightScalar.parse_decimal(value) + if not weight: + raise GraphQLError(f"Unsupported value: {value}") + return weight @staticmethod def serialize(weight): @@ -54,4 +56,35 @@ @staticmethod def parse_literal(node): - return node + weight = None + if isinstance(node, ast.ObjectValue): + weight = WeightScalar.parse_literal_object(node) + else: + weight = WeightScalar.parse_decimal(node.value) + if not weight: + raise GraphQLError(f"Unsupported value: {node.value}") + return weight + + @staticmethod + def parse_decimal(value): + try: + value = decimal.Decimal(value) + except decimal.DecimalException: + return None + default_unit = get_default_weight_unit() + return Weight(**{default_unit: value}) + + @staticmethod + def parse_literal_object(node): + value = 0 + unit = get_default_weight_unit() + + for field in node.fields: + if field.name.value == "value": + try: + value = decimal.Decimal(field.value.value) + except decimal.DecimalException: + raise GraphQLError(f"Unsupported value: {field.value.value}") + if field.name.value == "unit": + unit = field.value.value + return Weight(**{unit: value})
{"golden_diff": "diff --git a/saleor/graphql/core/scalars.py b/saleor/graphql/core/scalars.py\n--- a/saleor/graphql/core/scalars.py\n+++ b/saleor/graphql/core/scalars.py\n@@ -1,6 +1,8 @@\n import decimal\n \n import graphene\n+from graphql.language import ast\n+from graphql.error import GraphQLError\n from measurement.measures import Weight\n \n from ...core.weight import convert_weight, get_default_weight_unit\n@@ -34,14 +36,14 @@\n class WeightScalar(graphene.Scalar):\n @staticmethod\n def parse_value(value):\n- # Expects value to be a string \"amount unit\" separated by a single\n- # space.\n- try:\n- value = decimal.Decimal(value)\n- except decimal.DecimalException:\n- return None\n- default_unit = get_default_weight_unit()\n- return Weight(**{default_unit: value})\n+ weight = None\n+ if isinstance(value, dict):\n+ weight = Weight(**{value[\"unit\"]: value[\"value\"]})\n+ else:\n+ weight = WeightScalar.parse_decimal(value)\n+ if not weight:\n+ raise GraphQLError(f\"Unsupported value: {value}\")\n+ return weight\n \n @staticmethod\n def serialize(weight):\n@@ -54,4 +56,35 @@\n \n @staticmethod\n def parse_literal(node):\n- return node\n+ weight = None\n+ if isinstance(node, ast.ObjectValue):\n+ weight = WeightScalar.parse_literal_object(node)\n+ else:\n+ weight = WeightScalar.parse_decimal(node.value)\n+ if not weight:\n+ raise GraphQLError(f\"Unsupported value: {node.value}\")\n+ return weight\n+\n+ @staticmethod\n+ def parse_decimal(value):\n+ try:\n+ value = decimal.Decimal(value)\n+ except decimal.DecimalException:\n+ return None\n+ default_unit = get_default_weight_unit()\n+ return Weight(**{default_unit: value})\n+\n+ @staticmethod\n+ def parse_literal_object(node):\n+ value = 0\n+ unit = get_default_weight_unit()\n+\n+ for field in node.fields:\n+ if field.name.value == \"value\":\n+ try:\n+ value = decimal.Decimal(field.value.value)\n+ except decimal.DecimalException:\n+ raise GraphQLError(f\"Unsupported value: {field.value.value}\")\n+ if field.name.value == \"unit\":\n+ unit = field.value.value\n+ return Weight(**{unit: value})\n", "issue": "ProductCreate weight mutation\n\r\n![p1](https://user-images.githubusercontent.com/36409853/79555939-f2032000-80d2-11ea-88cc-5e1809e51ec5.PNG)\r\n![p2](https://user-images.githubusercontent.com/36409853/79556174-4b6b4f00-80d3-11ea-96d7-f06ee76bf1ee.PNG)\r\n\r\n\r\nproductCreate mutation\r\n\r\nI followed the \"amount unit\" as said in the comment, but returns a \"'StringValue(value='10.00 kg')' value must be a float.\"\r\nSorry just a beginner\n", "before_files": [{"content": "import decimal\n\nimport graphene\nfrom measurement.measures import Weight\n\nfrom ...core.weight import convert_weight, get_default_weight_unit\n\n\nclass Decimal(graphene.Float):\n \"\"\"Custom Decimal implementation.\n\n Returns Decimal as a float in the API,\n parses float to the Decimal on the way back.\n \"\"\"\n\n @staticmethod\n def parse_literal(node):\n try:\n return decimal.Decimal(node.value)\n except decimal.DecimalException:\n return None\n\n @staticmethod\n def parse_value(value):\n try:\n # Converting the float to str before parsing it to Decimal is\n # necessary to keep the decimal places as typed\n value = str(value)\n return decimal.Decimal(value)\n except decimal.DecimalException:\n return None\n\n\nclass WeightScalar(graphene.Scalar):\n @staticmethod\n def parse_value(value):\n # Expects value to be a string \"amount unit\" separated by a single\n # space.\n try:\n value = decimal.Decimal(value)\n except decimal.DecimalException:\n return None\n default_unit = get_default_weight_unit()\n return Weight(**{default_unit: value})\n\n @staticmethod\n def serialize(weight):\n if isinstance(weight, Weight):\n default_unit = get_default_weight_unit()\n if weight.unit != default_unit:\n weight = convert_weight(weight, default_unit)\n return str(weight)\n return None\n\n @staticmethod\n def parse_literal(node):\n return node\n", "path": "saleor/graphql/core/scalars.py"}], "after_files": [{"content": "import decimal\n\nimport graphene\nfrom graphql.language import ast\nfrom graphql.error import GraphQLError\nfrom measurement.measures import Weight\n\nfrom ...core.weight import convert_weight, get_default_weight_unit\n\n\nclass Decimal(graphene.Float):\n \"\"\"Custom Decimal implementation.\n\n Returns Decimal as a float in the API,\n parses float to the Decimal on the way back.\n \"\"\"\n\n @staticmethod\n def parse_literal(node):\n try:\n return decimal.Decimal(node.value)\n except decimal.DecimalException:\n return None\n\n @staticmethod\n def parse_value(value):\n try:\n # Converting the float to str before parsing it to Decimal is\n # necessary to keep the decimal places as typed\n value = str(value)\n return decimal.Decimal(value)\n except decimal.DecimalException:\n return None\n\n\nclass WeightScalar(graphene.Scalar):\n @staticmethod\n def parse_value(value):\n weight = None\n if isinstance(value, dict):\n weight = Weight(**{value[\"unit\"]: value[\"value\"]})\n else:\n weight = WeightScalar.parse_decimal(value)\n if not weight:\n raise GraphQLError(f\"Unsupported value: {value}\")\n return weight\n\n @staticmethod\n def serialize(weight):\n if isinstance(weight, Weight):\n default_unit = get_default_weight_unit()\n if weight.unit != default_unit:\n weight = convert_weight(weight, default_unit)\n return str(weight)\n return None\n\n @staticmethod\n def parse_literal(node):\n weight = None\n if isinstance(node, ast.ObjectValue):\n weight = WeightScalar.parse_literal_object(node)\n else:\n weight = WeightScalar.parse_decimal(node.value)\n if not weight:\n raise GraphQLError(f\"Unsupported value: {node.value}\")\n return weight\n\n @staticmethod\n def parse_decimal(value):\n try:\n value = decimal.Decimal(value)\n except decimal.DecimalException:\n return None\n default_unit = get_default_weight_unit()\n return Weight(**{default_unit: value})\n\n @staticmethod\n def parse_literal_object(node):\n value = 0\n unit = get_default_weight_unit()\n\n for field in node.fields:\n if field.name.value == \"value\":\n try:\n value = decimal.Decimal(field.value.value)\n except decimal.DecimalException:\n raise GraphQLError(f\"Unsupported value: {field.value.value}\")\n if field.name.value == \"unit\":\n unit = field.value.value\n return Weight(**{unit: value})\n", "path": "saleor/graphql/core/scalars.py"}]}
850
544
gh_patches_debug_23896
rasdani/github-patches
git_diff
rdmorganiser__rdmo-524
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Sorting causes problems with import ### Description / Beschreibung When using different `uri_prefix`es for, e.g. a domain import, the sorting by `uri` destroys the order in the file, and parent Attributes are imported *after* their children (with a different `uri_prefix` earlier in the alphabet). This is the problematic line: https://github.com/rdmorganiser/rdmo/blob/master/rdmo/core/xml.py#L52 ### Expected behaviour / Erwartetes Verhalten I am not sure if we could get rid of the sorting, we could also sort by `path` (which would give the field some meaning in the xml again). Ideas? @triole @MyPyDavid --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `rdmo/core/xml.py` Content: ``` 1 import logging 2 import re 3 4 import defusedxml.ElementTree as ET 5 6 log = logging.getLogger(__name__) 7 8 9 def read_xml_file(file_name): 10 try: 11 return ET.parse(file_name).getroot() 12 except Exception as e: 13 log.error('Xml parsing error: ' + str(e)) 14 15 16 def parse_xml_string(string): 17 try: 18 return ET.fromstring(string) 19 except Exception as e: 20 log.error('Xml parsing error: ' + str(e)) 21 22 23 def flat_xml_to_elements(treenode): 24 elements = [] 25 ns_map = get_ns_map(treenode) 26 uri_attrib = get_ns_tag('dc:uri', ns_map) 27 28 for node in treenode: 29 30 element = { 31 'uri': get_uri(node, ns_map), 32 'type': node.tag 33 } 34 35 for subnode in node: 36 tag = strip_ns(subnode.tag, ns_map) 37 38 if uri_attrib in subnode.attrib: 39 # this node has an uri! 40 element[tag] = subnode.attrib[uri_attrib] 41 elif 'lang' in subnode.attrib: 42 # this node has the lang attribute! 43 element['%s_%s' % (tag, subnode.attrib['lang'])] = subnode.text 44 elif list(subnode): 45 # this node is a list! 46 element[tag] = [subsubnode.attrib[uri_attrib] for subsubnode in subnode] 47 else: 48 element[tag] = subnode.text 49 50 elements.append(element) 51 52 elements = sort_elements_by_key(elements, 'uri') 53 return elements 54 55 56 def get_ns_tag(tag, ns_map): 57 tag_split = tag.split(':') 58 try: 59 return '{%s}%s' % (ns_map[tag_split[0]], tag_split[1]) 60 except KeyError: 61 return None 62 63 64 def get_ns_map(treenode): 65 ns_map = {} 66 treestring = ET.tostring(treenode, encoding='utf8', method='xml') 67 68 for match in re.finditer(r'(xmlns:)(.*?)(=")(.*?)(")', str(treestring)): 69 if match: 70 ns_map[match.group(2)] = match.group(4) 71 72 return ns_map 73 74 75 def get_uri(treenode, ns_map): 76 if treenode is not None: 77 ns_tag = get_ns_tag('dc:uri', ns_map) 78 if ns_tag is not None: 79 return treenode.attrib.get(ns_tag) 80 81 82 def strip_ns(tag, ns_map): 83 for ns in ns_map.values(): 84 if tag.startswith('{%s}' % ns): 85 return tag.replace('{%s}' % ns, '') 86 return tag 87 88 89 def filter_elements_by_type(elements, element_type): 90 for element in elements: 91 if element['type'] == element_type: 92 yield element 93 94 95 def sort_elements_by_key(dictlist, key, reverse=False): 96 return sorted(dictlist, key=lambda k: k[key], reverse=reverse) 97 ``` Path: `rdmo/core/constants.py` Content: ``` 1 from django.utils.translation import gettext_lazy as _ 2 3 VALUE_TYPE_TEXT = 'text' 4 VALUE_TYPE_URL = 'url' 5 VALUE_TYPE_INTEGER = 'integer' 6 VALUE_TYPE_FLOAT = 'float' 7 VALUE_TYPE_BOOLEAN = 'boolean' 8 VALUE_TYPE_DATETIME = 'datetime' 9 VALUE_TYPE_OPTIONS = 'option' 10 VALUE_TYPE_EMAIL = 'email' 11 VALUE_TYPE_PHONE = 'phone' 12 VALUE_TYPE_FILE = 'file' 13 VALUE_TYPE_CHOICES = ( 14 (VALUE_TYPE_TEXT, _('Text')), 15 (VALUE_TYPE_URL, _('URL')), 16 (VALUE_TYPE_INTEGER, _('Integer')), 17 (VALUE_TYPE_FLOAT, _('Float')), 18 (VALUE_TYPE_BOOLEAN, _('Boolean')), 19 (VALUE_TYPE_DATETIME, _('Datetime')), 20 (VALUE_TYPE_EMAIL, _('E-mail')), 21 (VALUE_TYPE_PHONE, _('Phone')), 22 (VALUE_TYPE_OPTIONS, _('Option')), 23 (VALUE_TYPE_FILE, _('File')) 24 ) 25 26 PERMISSIONS = { 27 'condition': ( 28 'conditions.add_condition', 'conditions.change_condition', 'conditions.delete_condition' 29 ), 30 'attribute': ( 31 'domain.add_attribute', 'domain.change_attribute', 'domain.delete_attribute' 32 ), 33 'optionset': ( 34 'options.add_optionset', 'options.change_optionset', 'options.delete_optionset' 35 ), 36 'option': ( 37 'options.add_option', 'options.change_option', 'options.delete_option' 38 ), 39 'catalog': ( 40 'questions.add_catalog', 'questions.change_catalog', 'questions.delete_catalog' 41 ), 42 'section': ( 43 'questions.add_section', 'questions.change_section', 'questions.delete_section' 44 ), 45 'questionset': ( 46 'questions.add_questionset', 'questions.change_questionset', 'questions.delete_questionset' 47 ), 48 'question': ( 49 'questions.add_question', 'questions.change_question', 'questions.delete_question' 50 ), 51 'task': ( 52 'tasks.add_task', 'tasks.change_task', 'tasks.delete_task' 53 ), 54 'view': ( 55 'views.add_view', 'views.change_view', 'views.delete_view' 56 ) 57 } 58 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/rdmo/core/constants.py b/rdmo/core/constants.py --- a/rdmo/core/constants.py +++ b/rdmo/core/constants.py @@ -55,3 +55,12 @@ 'views.add_view', 'views.change_view', 'views.delete_view' ) } + +IMPORT_SORT_ORDER = ( + 'domain', + 'conditions', + 'options', + 'questions', + 'tasks', + 'views' +) diff --git a/rdmo/core/xml.py b/rdmo/core/xml.py --- a/rdmo/core/xml.py +++ b/rdmo/core/xml.py @@ -3,6 +3,9 @@ import defusedxml.ElementTree as ET +from .constants import IMPORT_SORT_ORDER + + log = logging.getLogger(__name__) @@ -49,7 +52,7 @@ elements.append(element) - elements = sort_elements_by_key(elements, 'uri') + elements = sorted(elements, key=sort_elements) return elements @@ -92,5 +95,13 @@ yield element -def sort_elements_by_key(dictlist, key, reverse=False): - return sorted(dictlist, key=lambda k: k[key], reverse=reverse) +def sort_elements(element): + # remove the uri_prefix from the uri to create the key to be sorted by + sort_key = element['uri'].replace(element['uri_prefix'], '') + + # remove the app name from the sort_key and replace it by its import order + for i, item in enumerate(IMPORT_SORT_ORDER): + if sort_key.startswith(item): + sort_key = sort_key.replace(item, str(i)) + + return sort_key
{"golden_diff": "diff --git a/rdmo/core/constants.py b/rdmo/core/constants.py\n--- a/rdmo/core/constants.py\n+++ b/rdmo/core/constants.py\n@@ -55,3 +55,12 @@\n 'views.add_view', 'views.change_view', 'views.delete_view'\n )\n }\n+\n+IMPORT_SORT_ORDER = (\n+ 'domain',\n+ 'conditions',\n+ 'options',\n+ 'questions',\n+ 'tasks',\n+ 'views'\n+)\ndiff --git a/rdmo/core/xml.py b/rdmo/core/xml.py\n--- a/rdmo/core/xml.py\n+++ b/rdmo/core/xml.py\n@@ -3,6 +3,9 @@\n \n import defusedxml.ElementTree as ET\n \n+from .constants import IMPORT_SORT_ORDER\n+\n+\n log = logging.getLogger(__name__)\n \n \n@@ -49,7 +52,7 @@\n \n elements.append(element)\n \n- elements = sort_elements_by_key(elements, 'uri')\n+ elements = sorted(elements, key=sort_elements)\n return elements\n \n \n@@ -92,5 +95,13 @@\n yield element\n \n \n-def sort_elements_by_key(dictlist, key, reverse=False):\n- return sorted(dictlist, key=lambda k: k[key], reverse=reverse)\n+def sort_elements(element):\n+ # remove the uri_prefix from the uri to create the key to be sorted by\n+ sort_key = element['uri'].replace(element['uri_prefix'], '')\n+\n+ # remove the app name from the sort_key and replace it by its import order\n+ for i, item in enumerate(IMPORT_SORT_ORDER):\n+ if sort_key.startswith(item):\n+ sort_key = sort_key.replace(item, str(i))\n+\n+ return sort_key\n", "issue": "Sorting causes problems with import\n### Description / Beschreibung\r\n\r\nWhen using different `uri_prefix`es for, e.g. a domain import, the sorting by `uri` destroys the order in the file, and parent Attributes are imported *after* their children (with a different `uri_prefix` earlier in the alphabet). This is the problematic line:\r\n\r\nhttps://github.com/rdmorganiser/rdmo/blob/master/rdmo/core/xml.py#L52\r\n\r\n### Expected behaviour / Erwartetes Verhalten\r\n\r\nI am not sure if we could get rid of the sorting, we could also sort by `path` (which would give the field some meaning in the xml again). Ideas? @triole @MyPyDavid \r\n\n", "before_files": [{"content": "import logging\nimport re\n\nimport defusedxml.ElementTree as ET\n\nlog = logging.getLogger(__name__)\n\n\ndef read_xml_file(file_name):\n try:\n return ET.parse(file_name).getroot()\n except Exception as e:\n log.error('Xml parsing error: ' + str(e))\n\n\ndef parse_xml_string(string):\n try:\n return ET.fromstring(string)\n except Exception as e:\n log.error('Xml parsing error: ' + str(e))\n\n\ndef flat_xml_to_elements(treenode):\n elements = []\n ns_map = get_ns_map(treenode)\n uri_attrib = get_ns_tag('dc:uri', ns_map)\n\n for node in treenode:\n\n element = {\n 'uri': get_uri(node, ns_map),\n 'type': node.tag\n }\n\n for subnode in node:\n tag = strip_ns(subnode.tag, ns_map)\n\n if uri_attrib in subnode.attrib:\n # this node has an uri!\n element[tag] = subnode.attrib[uri_attrib]\n elif 'lang' in subnode.attrib:\n # this node has the lang attribute!\n element['%s_%s' % (tag, subnode.attrib['lang'])] = subnode.text\n elif list(subnode):\n # this node is a list!\n element[tag] = [subsubnode.attrib[uri_attrib] for subsubnode in subnode]\n else:\n element[tag] = subnode.text\n\n elements.append(element)\n\n elements = sort_elements_by_key(elements, 'uri')\n return elements\n\n\ndef get_ns_tag(tag, ns_map):\n tag_split = tag.split(':')\n try:\n return '{%s}%s' % (ns_map[tag_split[0]], tag_split[1])\n except KeyError:\n return None\n\n\ndef get_ns_map(treenode):\n ns_map = {}\n treestring = ET.tostring(treenode, encoding='utf8', method='xml')\n\n for match in re.finditer(r'(xmlns:)(.*?)(=\")(.*?)(\")', str(treestring)):\n if match:\n ns_map[match.group(2)] = match.group(4)\n\n return ns_map\n\n\ndef get_uri(treenode, ns_map):\n if treenode is not None:\n ns_tag = get_ns_tag('dc:uri', ns_map)\n if ns_tag is not None:\n return treenode.attrib.get(ns_tag)\n\n\ndef strip_ns(tag, ns_map):\n for ns in ns_map.values():\n if tag.startswith('{%s}' % ns):\n return tag.replace('{%s}' % ns, '')\n return tag\n\n\ndef filter_elements_by_type(elements, element_type):\n for element in elements:\n if element['type'] == element_type:\n yield element\n\n\ndef sort_elements_by_key(dictlist, key, reverse=False):\n return sorted(dictlist, key=lambda k: k[key], reverse=reverse)\n", "path": "rdmo/core/xml.py"}, {"content": "from django.utils.translation import gettext_lazy as _\n\nVALUE_TYPE_TEXT = 'text'\nVALUE_TYPE_URL = 'url'\nVALUE_TYPE_INTEGER = 'integer'\nVALUE_TYPE_FLOAT = 'float'\nVALUE_TYPE_BOOLEAN = 'boolean'\nVALUE_TYPE_DATETIME = 'datetime'\nVALUE_TYPE_OPTIONS = 'option'\nVALUE_TYPE_EMAIL = 'email'\nVALUE_TYPE_PHONE = 'phone'\nVALUE_TYPE_FILE = 'file'\nVALUE_TYPE_CHOICES = (\n (VALUE_TYPE_TEXT, _('Text')),\n (VALUE_TYPE_URL, _('URL')),\n (VALUE_TYPE_INTEGER, _('Integer')),\n (VALUE_TYPE_FLOAT, _('Float')),\n (VALUE_TYPE_BOOLEAN, _('Boolean')),\n (VALUE_TYPE_DATETIME, _('Datetime')),\n (VALUE_TYPE_EMAIL, _('E-mail')),\n (VALUE_TYPE_PHONE, _('Phone')),\n (VALUE_TYPE_OPTIONS, _('Option')),\n (VALUE_TYPE_FILE, _('File'))\n)\n\nPERMISSIONS = {\n 'condition': (\n 'conditions.add_condition', 'conditions.change_condition', 'conditions.delete_condition'\n ),\n 'attribute': (\n 'domain.add_attribute', 'domain.change_attribute', 'domain.delete_attribute'\n ),\n 'optionset': (\n 'options.add_optionset', 'options.change_optionset', 'options.delete_optionset'\n ),\n 'option': (\n 'options.add_option', 'options.change_option', 'options.delete_option'\n ),\n 'catalog': (\n 'questions.add_catalog', 'questions.change_catalog', 'questions.delete_catalog'\n ),\n 'section': (\n 'questions.add_section', 'questions.change_section', 'questions.delete_section'\n ),\n 'questionset': (\n 'questions.add_questionset', 'questions.change_questionset', 'questions.delete_questionset'\n ),\n 'question': (\n 'questions.add_question', 'questions.change_question', 'questions.delete_question'\n ),\n 'task': (\n 'tasks.add_task', 'tasks.change_task', 'tasks.delete_task'\n ),\n 'view': (\n 'views.add_view', 'views.change_view', 'views.delete_view'\n )\n}\n", "path": "rdmo/core/constants.py"}], "after_files": [{"content": "import logging\nimport re\n\nimport defusedxml.ElementTree as ET\n\nfrom .constants import IMPORT_SORT_ORDER\n\n\nlog = logging.getLogger(__name__)\n\n\ndef read_xml_file(file_name):\n try:\n return ET.parse(file_name).getroot()\n except Exception as e:\n log.error('Xml parsing error: ' + str(e))\n\n\ndef parse_xml_string(string):\n try:\n return ET.fromstring(string)\n except Exception as e:\n log.error('Xml parsing error: ' + str(e))\n\n\ndef flat_xml_to_elements(treenode):\n elements = []\n ns_map = get_ns_map(treenode)\n uri_attrib = get_ns_tag('dc:uri', ns_map)\n\n for node in treenode:\n\n element = {\n 'uri': get_uri(node, ns_map),\n 'type': node.tag\n }\n\n for subnode in node:\n tag = strip_ns(subnode.tag, ns_map)\n\n if uri_attrib in subnode.attrib:\n # this node has an uri!\n element[tag] = subnode.attrib[uri_attrib]\n elif 'lang' in subnode.attrib:\n # this node has the lang attribute!\n element['%s_%s' % (tag, subnode.attrib['lang'])] = subnode.text\n elif list(subnode):\n # this node is a list!\n element[tag] = [subsubnode.attrib[uri_attrib] for subsubnode in subnode]\n else:\n element[tag] = subnode.text\n\n elements.append(element)\n\n elements = sorted(elements, key=sort_elements)\n return elements\n\n\ndef get_ns_tag(tag, ns_map):\n tag_split = tag.split(':')\n try:\n return '{%s}%s' % (ns_map[tag_split[0]], tag_split[1])\n except KeyError:\n return None\n\n\ndef get_ns_map(treenode):\n ns_map = {}\n treestring = ET.tostring(treenode, encoding='utf8', method='xml')\n\n for match in re.finditer(r'(xmlns:)(.*?)(=\")(.*?)(\")', str(treestring)):\n if match:\n ns_map[match.group(2)] = match.group(4)\n\n return ns_map\n\n\ndef get_uri(treenode, ns_map):\n if treenode is not None:\n ns_tag = get_ns_tag('dc:uri', ns_map)\n if ns_tag is not None:\n return treenode.attrib.get(ns_tag)\n\n\ndef strip_ns(tag, ns_map):\n for ns in ns_map.values():\n if tag.startswith('{%s}' % ns):\n return tag.replace('{%s}' % ns, '')\n return tag\n\n\ndef filter_elements_by_type(elements, element_type):\n for element in elements:\n if element['type'] == element_type:\n yield element\n\n\ndef sort_elements(element):\n # remove the uri_prefix from the uri to create the key to be sorted by\n sort_key = element['uri'].replace(element['uri_prefix'], '')\n\n # remove the app name from the sort_key and replace it by its import order\n for i, item in enumerate(IMPORT_SORT_ORDER):\n if sort_key.startswith(item):\n sort_key = sort_key.replace(item, str(i))\n\n return sort_key\n", "path": "rdmo/core/xml.py"}, {"content": "from django.utils.translation import gettext_lazy as _\n\nVALUE_TYPE_TEXT = 'text'\nVALUE_TYPE_URL = 'url'\nVALUE_TYPE_INTEGER = 'integer'\nVALUE_TYPE_FLOAT = 'float'\nVALUE_TYPE_BOOLEAN = 'boolean'\nVALUE_TYPE_DATETIME = 'datetime'\nVALUE_TYPE_OPTIONS = 'option'\nVALUE_TYPE_EMAIL = 'email'\nVALUE_TYPE_PHONE = 'phone'\nVALUE_TYPE_FILE = 'file'\nVALUE_TYPE_CHOICES = (\n (VALUE_TYPE_TEXT, _('Text')),\n (VALUE_TYPE_URL, _('URL')),\n (VALUE_TYPE_INTEGER, _('Integer')),\n (VALUE_TYPE_FLOAT, _('Float')),\n (VALUE_TYPE_BOOLEAN, _('Boolean')),\n (VALUE_TYPE_DATETIME, _('Datetime')),\n (VALUE_TYPE_EMAIL, _('E-mail')),\n (VALUE_TYPE_PHONE, _('Phone')),\n (VALUE_TYPE_OPTIONS, _('Option')),\n (VALUE_TYPE_FILE, _('File'))\n)\n\nPERMISSIONS = {\n 'condition': (\n 'conditions.add_condition', 'conditions.change_condition', 'conditions.delete_condition'\n ),\n 'attribute': (\n 'domain.add_attribute', 'domain.change_attribute', 'domain.delete_attribute'\n ),\n 'optionset': (\n 'options.add_optionset', 'options.change_optionset', 'options.delete_optionset'\n ),\n 'option': (\n 'options.add_option', 'options.change_option', 'options.delete_option'\n ),\n 'catalog': (\n 'questions.add_catalog', 'questions.change_catalog', 'questions.delete_catalog'\n ),\n 'section': (\n 'questions.add_section', 'questions.change_section', 'questions.delete_section'\n ),\n 'questionset': (\n 'questions.add_questionset', 'questions.change_questionset', 'questions.delete_questionset'\n ),\n 'question': (\n 'questions.add_question', 'questions.change_question', 'questions.delete_question'\n ),\n 'task': (\n 'tasks.add_task', 'tasks.change_task', 'tasks.delete_task'\n ),\n 'view': (\n 'views.add_view', 'views.change_view', 'views.delete_view'\n )\n}\n\nIMPORT_SORT_ORDER = (\n 'domain',\n 'conditions',\n 'options',\n 'questions',\n 'tasks',\n 'views'\n)\n", "path": "rdmo/core/constants.py"}]}
1,784
382
gh_patches_debug_18183
rasdani/github-patches
git_diff
keras-team__autokeras-627
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- pip install autokeras fails on torch ==1.1.0 ### Bug Description When executing `pip install autokeras`, I get the following message: `Could not find a version that satisfies the requirement torch==1.0.1.post2 (from autokeras) (from versions: 0.1.2, 0.1.2.post1) No matching distribution found for torch==1.0.1.post2 (from autokeras)` ### Reproducing Steps Steps to reproduce the behavior: * Step 1: set up anaconda environment * Step 2: install pytorch via their website's recommended command: `conda install pytorch-cpu torchvision-cpu -c pytorch` * Step 3: try to install autokeras via `pip install autokeras` * Step 4: get the following output: ``` Collecting autokeras Downloading https://files.pythonhosted.org/packages/c2/32/de74bf6afd09925980340355a05aa6a19e7378ed91dac09e76a487bd136d/autokeras-0.4.0.tar.gz (67kB) 100% |████████████████████████████████| 71kB 1.3MB/s Collecting scipy==1.2.0 (from autokeras) Downloading https://files.pythonhosted.org/packages/c4/0f/2bdeab43db2b4a75863863bf7eddda8920b031b0a70494fd2665c73c9aec/scipy-1.2.0-cp36-cp36m-win_amd64.whl (31.9MB) 100% |████████████████████████████████| 31.9MB 508kB/s Requirement already satisfied: tensorflow==1.13.1 in c:\[...]\lib\site-packages (from autokeras) (1.13.1) Collecting torch==1.0.1.post2 (from autokeras) Could not find a version that satisfies the requirement torch==1.0.1.post2 (from autokeras) (from versions: 0.1.2, 0.1.2.post1) No matching distribution found for torch==1.0.1.post2 (from autokeras) ``` ### Expected Behavior Autokeras is installed without error. ### Setup Details Include the details about the versions of: - OS type and version: Windows 10 Version 10.0.17763 Build 17763 - Python: 3.6.8 (anaconda) - autokeras: 0.4.0 - scikit-learn: 0.20.3 - numpy:1.16.2 - keras: 2.2.4 - scipy:1.2.1 - tensorflow:1.13.1 - pytorch:1.1.0 ### Additional context <!--- Add any other context about the problem here. --> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 from distutils.core import setup 2 from setuptools import find_packages 3 4 setup( 5 name='autokeras', 6 packages=find_packages(exclude=('tests',)), 7 install_requires=['scipy==1.2.0', 8 'tensorflow==1.13.1', 9 'torch==1.0.1.post2', 10 'torchvision==0.2.1', 11 'numpy==1.16.1', 12 'scikit-learn==0.20.2', 13 'scikit-image==0.14.2', 14 'tqdm==4.31.0', 15 'imageio==2.5.0', 16 'requests==2.21.0' 17 ], 18 version='0.4.0', 19 description='AutoML for deep learning', 20 author='DATA Lab at Texas A&M University', 21 author_email='[email protected]', 22 url='http://autokeras.com', 23 download_url='https://github.com/keras-team/autokeras/archive/0.3.7.tar.gz', 24 keywords=['AutoML', 'keras'], 25 classifiers=[] 26 ) 27 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -4,16 +4,16 @@ setup( name='autokeras', packages=find_packages(exclude=('tests',)), - install_requires=['scipy==1.2.0', - 'tensorflow==1.13.1', - 'torch==1.0.1.post2', - 'torchvision==0.2.1', - 'numpy==1.16.1', - 'scikit-learn==0.20.2', - 'scikit-image==0.14.2', - 'tqdm==4.31.0', - 'imageio==2.5.0', - 'requests==2.21.0' + install_requires=['scipy>=1.2.0', + 'tensorflow>=1.13.1', + 'torch>=1.0.1.post2', + 'torchvision>=0.2.1', + 'numpy>=1.16.1', + 'scikit-learn>=0.20.2', + 'scikit-image>=0.14.2', + 'tqdm>=4.31.0', + 'imageio>=2.5.0', + 'requests>=2.21.0' ], version='0.4.0', description='AutoML for deep learning',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -4,16 +4,16 @@\n setup(\n name='autokeras',\n packages=find_packages(exclude=('tests',)),\n- install_requires=['scipy==1.2.0',\n- 'tensorflow==1.13.1',\n- 'torch==1.0.1.post2',\n- 'torchvision==0.2.1',\n- 'numpy==1.16.1',\n- 'scikit-learn==0.20.2',\n- 'scikit-image==0.14.2',\n- 'tqdm==4.31.0',\n- 'imageio==2.5.0',\n- 'requests==2.21.0'\n+ install_requires=['scipy>=1.2.0',\n+ 'tensorflow>=1.13.1',\n+ 'torch>=1.0.1.post2',\n+ 'torchvision>=0.2.1',\n+ 'numpy>=1.16.1',\n+ 'scikit-learn>=0.20.2',\n+ 'scikit-image>=0.14.2',\n+ 'tqdm>=4.31.0',\n+ 'imageio>=2.5.0',\n+ 'requests>=2.21.0'\n ],\n version='0.4.0',\n description='AutoML for deep learning',\n", "issue": "pip install autokeras fails on torch ==1.1.0\n### Bug Description\r\nWhen executing `pip install autokeras`, I get the following message:\r\n`Could not find a version that satisfies the requirement torch==1.0.1.post2 (from autokeras) (from versions: 0.1.2, 0.1.2.post1)\r\nNo matching distribution found for torch==1.0.1.post2 (from autokeras)`\r\n\r\n### Reproducing Steps\r\nSteps to reproduce the behavior:\r\n * Step 1: set up anaconda environment\r\n * Step 2: install pytorch via their website's recommended command: `conda install pytorch-cpu torchvision-cpu -c pytorch`\r\n * Step 3: try to install autokeras via `pip install autokeras`\r\n * Step 4: get the following output:\r\n\r\n```\r\nCollecting autokeras\r\n Downloading https://files.pythonhosted.org/packages/c2/32/de74bf6afd09925980340355a05aa6a19e7378ed91dac09e76a487bd136d/autokeras-0.4.0.tar.gz (67kB)\r\n 100% |\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 71kB 1.3MB/s\r\nCollecting scipy==1.2.0 (from autokeras)\r\n Downloading https://files.pythonhosted.org/packages/c4/0f/2bdeab43db2b4a75863863bf7eddda8920b031b0a70494fd2665c73c9aec/scipy-1.2.0-cp36-cp36m-win_amd64.whl (31.9MB)\r\n 100% |\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 31.9MB 508kB/s\r\nRequirement already satisfied: tensorflow==1.13.1 in c:\\[...]\\lib\\site-packages (from autokeras) (1.13.1)\r\nCollecting torch==1.0.1.post2 (from autokeras)\r\n Could not find a version that satisfies the requirement torch==1.0.1.post2 (from autokeras) (from versions: 0.1.2, 0.1.2.post1)\r\nNo matching distribution found for torch==1.0.1.post2 (from autokeras)\r\n```\r\n\r\n### Expected Behavior\r\nAutokeras is installed without error.\r\n\r\n### Setup Details\r\nInclude the details about the versions of:\r\n - OS type and version: Windows 10 Version\t10.0.17763 Build 17763\r\n - Python: 3.6.8 (anaconda)\r\n - autokeras: 0.4.0\r\n - scikit-learn: 0.20.3\r\n - numpy:1.16.2\r\n - keras: 2.2.4\r\n - scipy:1.2.1\r\n - tensorflow:1.13.1\r\n - pytorch:1.1.0\r\n\r\n### Additional context\r\n<!---\r\nAdd any other context about the problem here.\r\n-->\r\n\n", "before_files": [{"content": "from distutils.core import setup\nfrom setuptools import find_packages\n\nsetup(\n name='autokeras',\n packages=find_packages(exclude=('tests',)),\n install_requires=['scipy==1.2.0',\n 'tensorflow==1.13.1',\n 'torch==1.0.1.post2',\n 'torchvision==0.2.1',\n 'numpy==1.16.1',\n 'scikit-learn==0.20.2',\n 'scikit-image==0.14.2',\n 'tqdm==4.31.0',\n 'imageio==2.5.0',\n 'requests==2.21.0'\n ],\n version='0.4.0',\n description='AutoML for deep learning',\n author='DATA Lab at Texas A&M University',\n author_email='[email protected]',\n url='http://autokeras.com',\n download_url='https://github.com/keras-team/autokeras/archive/0.3.7.tar.gz',\n keywords=['AutoML', 'keras'],\n classifiers=[]\n)\n", "path": "setup.py"}], "after_files": [{"content": "from distutils.core import setup\nfrom setuptools import find_packages\n\nsetup(\n name='autokeras',\n packages=find_packages(exclude=('tests',)),\n install_requires=['scipy>=1.2.0',\n 'tensorflow>=1.13.1',\n 'torch>=1.0.1.post2',\n 'torchvision>=0.2.1',\n 'numpy>=1.16.1',\n 'scikit-learn>=0.20.2',\n 'scikit-image>=0.14.2',\n 'tqdm>=4.31.0',\n 'imageio>=2.5.0',\n 'requests>=2.21.0'\n ],\n version='0.4.0',\n description='AutoML for deep learning',\n author='DATA Lab at Texas A&M University',\n author_email='[email protected]',\n url='http://autokeras.com',\n download_url='https://github.com/keras-team/autokeras/archive/0.3.7.tar.gz',\n keywords=['AutoML', 'keras'],\n classifiers=[]\n)\n", "path": "setup.py"}]}
1,274
331
gh_patches_debug_7877
rasdani/github-patches
git_diff
pyqtgraph__pyqtgraph-679
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- OverflowError in Point.py It seems like [this old (2013) bug](https://bugs.launchpad.net/pyqtgraph/+bug/1234528) still hasn't been fixed. I've just bumped into the same error. I will test solution suggested by author. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pyqtgraph/Point.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 """ 3 Point.py - Extension of QPointF which adds a few missing methods. 4 Copyright 2010 Luke Campagnola 5 Distributed under MIT/X11 license. See license.txt for more infomation. 6 """ 7 8 from .Qt import QtCore 9 import numpy as np 10 11 def clip(x, mn, mx): 12 if x > mx: 13 return mx 14 if x < mn: 15 return mn 16 return x 17 18 class Point(QtCore.QPointF): 19 """Extension of QPointF which adds a few missing methods.""" 20 21 def __init__(self, *args): 22 if len(args) == 1: 23 if isinstance(args[0], QtCore.QSizeF): 24 QtCore.QPointF.__init__(self, float(args[0].width()), float(args[0].height())) 25 return 26 elif isinstance(args[0], float) or isinstance(args[0], int): 27 QtCore.QPointF.__init__(self, float(args[0]), float(args[0])) 28 return 29 elif hasattr(args[0], '__getitem__'): 30 QtCore.QPointF.__init__(self, float(args[0][0]), float(args[0][1])) 31 return 32 elif len(args) == 2: 33 QtCore.QPointF.__init__(self, args[0], args[1]) 34 return 35 QtCore.QPointF.__init__(self, *args) 36 37 def __len__(self): 38 return 2 39 40 def __reduce__(self): 41 return (Point, (self.x(), self.y())) 42 43 def __getitem__(self, i): 44 if i == 0: 45 return self.x() 46 elif i == 1: 47 return self.y() 48 else: 49 raise IndexError("Point has no index %s" % str(i)) 50 51 def __setitem__(self, i, x): 52 if i == 0: 53 return self.setX(x) 54 elif i == 1: 55 return self.setY(x) 56 else: 57 raise IndexError("Point has no index %s" % str(i)) 58 59 def __radd__(self, a): 60 return self._math_('__radd__', a) 61 62 def __add__(self, a): 63 return self._math_('__add__', a) 64 65 def __rsub__(self, a): 66 return self._math_('__rsub__', a) 67 68 def __sub__(self, a): 69 return self._math_('__sub__', a) 70 71 def __rmul__(self, a): 72 return self._math_('__rmul__', a) 73 74 def __mul__(self, a): 75 return self._math_('__mul__', a) 76 77 def __rdiv__(self, a): 78 return self._math_('__rdiv__', a) 79 80 def __div__(self, a): 81 return self._math_('__div__', a) 82 83 def __truediv__(self, a): 84 return self._math_('__truediv__', a) 85 86 def __rtruediv__(self, a): 87 return self._math_('__rtruediv__', a) 88 89 def __rpow__(self, a): 90 return self._math_('__rpow__', a) 91 92 def __pow__(self, a): 93 return self._math_('__pow__', a) 94 95 def _math_(self, op, x): 96 #print "point math:", op 97 #try: 98 #fn = getattr(QtCore.QPointF, op) 99 #pt = fn(self, x) 100 #print fn, pt, self, x 101 #return Point(pt) 102 #except AttributeError: 103 x = Point(x) 104 return Point(getattr(self[0], op)(x[0]), getattr(self[1], op)(x[1])) 105 106 def length(self): 107 """Returns the vector length of this Point.""" 108 return (self[0]**2 + self[1]**2) ** 0.5 109 110 def norm(self): 111 """Returns a vector in the same direction with unit length.""" 112 return self / self.length() 113 114 def angle(self, a): 115 """Returns the angle in degrees between this vector and the vector a.""" 116 n1 = self.length() 117 n2 = a.length() 118 if n1 == 0. or n2 == 0.: 119 return None 120 ## Probably this should be done with arctan2 instead.. 121 ang = np.arccos(clip(self.dot(a) / (n1 * n2), -1.0, 1.0)) ### in radians 122 c = self.cross(a) 123 if c > 0: 124 ang *= -1. 125 return ang * 180. / np.pi 126 127 def dot(self, a): 128 """Returns the dot product of a and this Point.""" 129 a = Point(a) 130 return self[0]*a[0] + self[1]*a[1] 131 132 def cross(self, a): 133 a = Point(a) 134 return self[0]*a[1] - self[1]*a[0] 135 136 def proj(self, b): 137 """Return the projection of this vector onto the vector b""" 138 b1 = b / b.length() 139 return self.dot(b1) * b1 140 141 def __repr__(self): 142 return "Point(%f, %f)" % (self[0], self[1]) 143 144 145 def min(self): 146 return min(self[0], self[1]) 147 148 def max(self): 149 return max(self[0], self[1]) 150 151 def copy(self): 152 return Point(self) 153 154 def toQPoint(self): 155 return QtCore.QPoint(*self) 156 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pyqtgraph/Point.py b/pyqtgraph/Point.py --- a/pyqtgraph/Point.py +++ b/pyqtgraph/Point.py @@ -105,7 +105,13 @@ def length(self): """Returns the vector length of this Point.""" - return (self[0]**2 + self[1]**2) ** 0.5 + try: + return (self[0]**2 + self[1]**2) ** 0.5 + except OverflowError: + try: + return self[1] / np.sin(np.arctan2(self[1], self[0])) + except OverflowError: + return np.inf def norm(self): """Returns a vector in the same direction with unit length."""
{"golden_diff": "diff --git a/pyqtgraph/Point.py b/pyqtgraph/Point.py\n--- a/pyqtgraph/Point.py\n+++ b/pyqtgraph/Point.py\n@@ -105,7 +105,13 @@\n \n def length(self):\n \"\"\"Returns the vector length of this Point.\"\"\"\n- return (self[0]**2 + self[1]**2) ** 0.5\n+ try:\n+ return (self[0]**2 + self[1]**2) ** 0.5\n+ except OverflowError:\n+ try:\n+ return self[1] / np.sin(np.arctan2(self[1], self[0]))\n+ except OverflowError:\n+ return np.inf\n \n def norm(self):\n \"\"\"Returns a vector in the same direction with unit length.\"\"\"\n", "issue": "OverflowError in Point.py\nIt seems like [this old (2013) bug](https://bugs.launchpad.net/pyqtgraph/+bug/1234528) still hasn't been fixed. I've just bumped into the same error.\r\n\r\nI will test solution suggested by author.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nPoint.py - Extension of QPointF which adds a few missing methods.\nCopyright 2010 Luke Campagnola\nDistributed under MIT/X11 license. See license.txt for more infomation.\n\"\"\"\n\nfrom .Qt import QtCore\nimport numpy as np\n\ndef clip(x, mn, mx):\n if x > mx:\n return mx\n if x < mn:\n return mn\n return x\n\nclass Point(QtCore.QPointF):\n \"\"\"Extension of QPointF which adds a few missing methods.\"\"\"\n \n def __init__(self, *args):\n if len(args) == 1:\n if isinstance(args[0], QtCore.QSizeF):\n QtCore.QPointF.__init__(self, float(args[0].width()), float(args[0].height()))\n return\n elif isinstance(args[0], float) or isinstance(args[0], int):\n QtCore.QPointF.__init__(self, float(args[0]), float(args[0]))\n return\n elif hasattr(args[0], '__getitem__'):\n QtCore.QPointF.__init__(self, float(args[0][0]), float(args[0][1]))\n return\n elif len(args) == 2:\n QtCore.QPointF.__init__(self, args[0], args[1])\n return\n QtCore.QPointF.__init__(self, *args)\n \n def __len__(self):\n return 2\n \n def __reduce__(self):\n return (Point, (self.x(), self.y()))\n \n def __getitem__(self, i):\n if i == 0:\n return self.x()\n elif i == 1:\n return self.y()\n else:\n raise IndexError(\"Point has no index %s\" % str(i))\n \n def __setitem__(self, i, x):\n if i == 0:\n return self.setX(x)\n elif i == 1:\n return self.setY(x)\n else:\n raise IndexError(\"Point has no index %s\" % str(i))\n \n def __radd__(self, a):\n return self._math_('__radd__', a)\n \n def __add__(self, a):\n return self._math_('__add__', a)\n \n def __rsub__(self, a):\n return self._math_('__rsub__', a)\n \n def __sub__(self, a):\n return self._math_('__sub__', a)\n \n def __rmul__(self, a):\n return self._math_('__rmul__', a)\n \n def __mul__(self, a):\n return self._math_('__mul__', a)\n \n def __rdiv__(self, a):\n return self._math_('__rdiv__', a)\n \n def __div__(self, a):\n return self._math_('__div__', a)\n \n def __truediv__(self, a):\n return self._math_('__truediv__', a)\n \n def __rtruediv__(self, a):\n return self._math_('__rtruediv__', a)\n \n def __rpow__(self, a):\n return self._math_('__rpow__', a)\n \n def __pow__(self, a):\n return self._math_('__pow__', a)\n \n def _math_(self, op, x):\n #print \"point math:\", op\n #try:\n #fn = getattr(QtCore.QPointF, op)\n #pt = fn(self, x)\n #print fn, pt, self, x\n #return Point(pt)\n #except AttributeError:\n x = Point(x)\n return Point(getattr(self[0], op)(x[0]), getattr(self[1], op)(x[1]))\n \n def length(self):\n \"\"\"Returns the vector length of this Point.\"\"\"\n return (self[0]**2 + self[1]**2) ** 0.5\n \n def norm(self):\n \"\"\"Returns a vector in the same direction with unit length.\"\"\"\n return self / self.length()\n \n def angle(self, a):\n \"\"\"Returns the angle in degrees between this vector and the vector a.\"\"\"\n n1 = self.length()\n n2 = a.length()\n if n1 == 0. or n2 == 0.:\n return None\n ## Probably this should be done with arctan2 instead..\n ang = np.arccos(clip(self.dot(a) / (n1 * n2), -1.0, 1.0)) ### in radians\n c = self.cross(a)\n if c > 0:\n ang *= -1.\n return ang * 180. / np.pi\n \n def dot(self, a):\n \"\"\"Returns the dot product of a and this Point.\"\"\"\n a = Point(a)\n return self[0]*a[0] + self[1]*a[1]\n \n def cross(self, a):\n a = Point(a)\n return self[0]*a[1] - self[1]*a[0]\n \n def proj(self, b):\n \"\"\"Return the projection of this vector onto the vector b\"\"\"\n b1 = b / b.length()\n return self.dot(b1) * b1\n \n def __repr__(self):\n return \"Point(%f, %f)\" % (self[0], self[1])\n \n \n def min(self):\n return min(self[0], self[1])\n \n def max(self):\n return max(self[0], self[1])\n \n def copy(self):\n return Point(self)\n \n def toQPoint(self):\n return QtCore.QPoint(*self)\n", "path": "pyqtgraph/Point.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nPoint.py - Extension of QPointF which adds a few missing methods.\nCopyright 2010 Luke Campagnola\nDistributed under MIT/X11 license. See license.txt for more infomation.\n\"\"\"\n\nfrom .Qt import QtCore\nimport numpy as np\n\ndef clip(x, mn, mx):\n if x > mx:\n return mx\n if x < mn:\n return mn\n return x\n\nclass Point(QtCore.QPointF):\n \"\"\"Extension of QPointF which adds a few missing methods.\"\"\"\n \n def __init__(self, *args):\n if len(args) == 1:\n if isinstance(args[0], QtCore.QSizeF):\n QtCore.QPointF.__init__(self, float(args[0].width()), float(args[0].height()))\n return\n elif isinstance(args[0], float) or isinstance(args[0], int):\n QtCore.QPointF.__init__(self, float(args[0]), float(args[0]))\n return\n elif hasattr(args[0], '__getitem__'):\n QtCore.QPointF.__init__(self, float(args[0][0]), float(args[0][1]))\n return\n elif len(args) == 2:\n QtCore.QPointF.__init__(self, args[0], args[1])\n return\n QtCore.QPointF.__init__(self, *args)\n \n def __len__(self):\n return 2\n \n def __reduce__(self):\n return (Point, (self.x(), self.y()))\n \n def __getitem__(self, i):\n if i == 0:\n return self.x()\n elif i == 1:\n return self.y()\n else:\n raise IndexError(\"Point has no index %s\" % str(i))\n \n def __setitem__(self, i, x):\n if i == 0:\n return self.setX(x)\n elif i == 1:\n return self.setY(x)\n else:\n raise IndexError(\"Point has no index %s\" % str(i))\n \n def __radd__(self, a):\n return self._math_('__radd__', a)\n \n def __add__(self, a):\n return self._math_('__add__', a)\n \n def __rsub__(self, a):\n return self._math_('__rsub__', a)\n \n def __sub__(self, a):\n return self._math_('__sub__', a)\n \n def __rmul__(self, a):\n return self._math_('__rmul__', a)\n \n def __mul__(self, a):\n return self._math_('__mul__', a)\n \n def __rdiv__(self, a):\n return self._math_('__rdiv__', a)\n \n def __div__(self, a):\n return self._math_('__div__', a)\n \n def __truediv__(self, a):\n return self._math_('__truediv__', a)\n \n def __rtruediv__(self, a):\n return self._math_('__rtruediv__', a)\n \n def __rpow__(self, a):\n return self._math_('__rpow__', a)\n \n def __pow__(self, a):\n return self._math_('__pow__', a)\n \n def _math_(self, op, x):\n #print \"point math:\", op\n #try:\n #fn = getattr(QtCore.QPointF, op)\n #pt = fn(self, x)\n #print fn, pt, self, x\n #return Point(pt)\n #except AttributeError:\n x = Point(x)\n return Point(getattr(self[0], op)(x[0]), getattr(self[1], op)(x[1]))\n \n def length(self):\n \"\"\"Returns the vector length of this Point.\"\"\"\n try:\n return (self[0]**2 + self[1]**2) ** 0.5\n except OverflowError:\n try:\n return self[1] / np.sin(np.arctan2(self[1], self[0]))\n except OverflowError:\n return np.inf\n \n def norm(self):\n \"\"\"Returns a vector in the same direction with unit length.\"\"\"\n return self / self.length()\n \n def angle(self, a):\n \"\"\"Returns the angle in degrees between this vector and the vector a.\"\"\"\n n1 = self.length()\n n2 = a.length()\n if n1 == 0. or n2 == 0.:\n return None\n ## Probably this should be done with arctan2 instead..\n ang = np.arccos(clip(self.dot(a) / (n1 * n2), -1.0, 1.0)) ### in radians\n c = self.cross(a)\n if c > 0:\n ang *= -1.\n return ang * 180. / np.pi\n \n def dot(self, a):\n \"\"\"Returns the dot product of a and this Point.\"\"\"\n a = Point(a)\n return self[0]*a[0] + self[1]*a[1]\n \n def cross(self, a):\n a = Point(a)\n return self[0]*a[1] - self[1]*a[0]\n \n def proj(self, b):\n \"\"\"Return the projection of this vector onto the vector b\"\"\"\n b1 = b / b.length()\n return self.dot(b1) * b1\n \n def __repr__(self):\n return \"Point(%f, %f)\" % (self[0], self[1])\n \n \n def min(self):\n return min(self[0], self[1])\n \n def max(self):\n return max(self[0], self[1])\n \n def copy(self):\n return Point(self)\n \n def toQPoint(self):\n return QtCore.QPoint(*self)\n", "path": "pyqtgraph/Point.py"}]}
1,900
178
gh_patches_debug_39980
rasdani/github-patches
git_diff
microsoft__ptvsd-1161
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- sys.stdin not None and missing encoding attribute when running with pythonw.exe ## Environment data - PTVSD version: 4.2 - OS and version: windows 10 - Python version (& distribution if applicable, e.g. Anaconda): CPython 3.7 using **pythonw.exe** - Using VS Code or Visual Studio: VS ## Actual behavior None has no attribute encoding exception ## Expected behavior Either sys.stdin.encoding works, or sys.stdin is None (it is None when running without debugging) ## Steps to reproduce: 1. Debug this code using pythonw.exe (no console) ``` import sys with open('issue4866.txt', 'wt') as f: f.write('hello\n') f.write(str(type(sys.stdin)) + '\n') if sys.stdin is not None: f.write(str(sys.stdin.encoding) + '\n') f.write('bye\n') ``` From https://github.com/Microsoft/PTVS/issues/4866 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_io.py` Content: ``` 1 from _pydevd_bundle import pydevd_constants 2 3 IS_PY3K = pydevd_constants.IS_PY3K 4 5 class IORedirector: 6 ''' 7 This class works to wrap a stream (stdout/stderr) with an additional redirect. 8 ''' 9 10 def __init__(self, original, new_redirect, wrap_buffer=False): 11 ''' 12 :param stream original: 13 The stream to be wrapped (usually stdout/stderr). 14 15 :param stream new_redirect: 16 Usually IOBuf (below). 17 18 :param bool wrap_buffer: 19 Whether to create a buffer attribute (needed to mimick python 3 s 20 tdout/stderr which has a buffer to write binary data). 21 ''' 22 self._redirect_to = (original, new_redirect) 23 if wrap_buffer and hasattr(original, 'buffer'): 24 self.buffer = IORedirector(original.buffer, new_redirect.buffer, False) 25 26 def write(self, s): 27 # Note that writing to the original stream may fail for some reasons 28 # (such as trying to write something that's not a string or having it closed). 29 for r in self._redirect_to: 30 r.write(s) 31 32 def isatty(self): 33 return self._redirect_to[0].isatty() 34 35 def flush(self): 36 for r in self._redirect_to: 37 r.flush() 38 39 def __getattr__(self, name): 40 for r in self._redirect_to: 41 if hasattr(r, name): 42 return getattr(r, name) 43 raise AttributeError(name) 44 45 class IOBuf: 46 '''This class works as a replacement for stdio and stderr. 47 It is a buffer and when its contents are requested, it will erase what 48 it has so far so that the next return will not return the same contents again. 49 ''' 50 def __init__(self): 51 self.buflist = [] 52 import os 53 self.encoding = os.environ.get('PYTHONIOENCODING', 'utf-8') 54 55 def getvalue(self): 56 b = self.buflist 57 self.buflist = [] # clear it 58 return ''.join(b) # bytes on py2, str on py3. 59 60 def write(self, s): 61 if not IS_PY3K: 62 if isinstance(s, unicode): 63 # can't use 'errors' as kwargs in py 2.6 64 s = s.encode(self.encoding, 'replace') 65 else: 66 if isinstance(s, bytes): 67 s = s.decode(self.encoding, errors='replace') 68 self.buflist.append(s) 69 70 def isatty(self): 71 return False 72 73 def flush(self): 74 pass 75 76 def empty(self): 77 return len(self.buflist) == 0 78 79 class _RedirectionsHolder: 80 _stack_stdout = [] 81 _stack_stderr = [] 82 83 84 def start_redirect(keep_original_redirection=False, std='stdout'): 85 ''' 86 @param std: 'stdout', 'stderr', or 'both' 87 ''' 88 import sys 89 buf = IOBuf() 90 91 if std == 'both': 92 config_stds = ['stdout', 'stderr'] 93 else: 94 config_stds = [std] 95 96 for std in config_stds: 97 original = getattr(sys, std) 98 stack = getattr(_RedirectionsHolder, '_stack_%s' % std) 99 stack.append(original) 100 101 if keep_original_redirection: 102 setattr(sys, std, IORedirector(getattr(sys, std), buf)) 103 else: 104 setattr(sys, std, buf) 105 return buf 106 107 108 def end_redirect(std='stdout'): 109 import sys 110 if std == 'both': 111 config_stds = ['stdout', 'stderr'] 112 else: 113 config_stds = [std] 114 for std in config_stds: 115 stack = getattr(_RedirectionsHolder, '_stack_%s' % std) 116 setattr(sys, std, stack.pop()) 117 118 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_io.py b/src/ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_io.py --- a/src/ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_io.py +++ b/src/ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_io.py @@ -2,6 +2,7 @@ IS_PY3K = pydevd_constants.IS_PY3K + class IORedirector: ''' This class works to wrap a stream (stdout/stderr) with an additional redirect. @@ -10,7 +11,7 @@ def __init__(self, original, new_redirect, wrap_buffer=False): ''' :param stream original: - The stream to be wrapped (usually stdout/stderr). + The stream to be wrapped (usually stdout/stderr, but could be None). :param stream new_redirect: Usually IOBuf (below). @@ -27,14 +28,19 @@ # Note that writing to the original stream may fail for some reasons # (such as trying to write something that's not a string or having it closed). for r in self._redirect_to: - r.write(s) + if hasattr(r, 'write'): + r.write(s) def isatty(self): - return self._redirect_to[0].isatty() + for r in self._redirect_to: + if hasattr(r, 'isatty'): + return r.isatty() + return False def flush(self): for r in self._redirect_to: - r.flush() + if hasattr(r, 'flush'): + r.flush() def __getattr__(self, name): for r in self._redirect_to: @@ -42,11 +48,13 @@ return getattr(r, name) raise AttributeError(name) + class IOBuf: '''This class works as a replacement for stdio and stderr. It is a buffer and when its contents are requested, it will erase what it has so far so that the next return will not return the same contents again. ''' + def __init__(self): self.buflist = [] import os @@ -56,7 +64,7 @@ b = self.buflist self.buflist = [] # clear it return ''.join(b) # bytes on py2, str on py3. - + def write(self, s): if not IS_PY3K: if isinstance(s, unicode): @@ -76,6 +84,7 @@ def empty(self): return len(self.buflist) == 0 + class _RedirectionsHolder: _stack_stdout = [] _stack_stderr = []
{"golden_diff": "diff --git a/src/ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_io.py b/src/ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_io.py\n--- a/src/ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_io.py\n+++ b/src/ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_io.py\n@@ -2,6 +2,7 @@\n \n IS_PY3K = pydevd_constants.IS_PY3K\n \n+\n class IORedirector:\n '''\n This class works to wrap a stream (stdout/stderr) with an additional redirect.\n@@ -10,7 +11,7 @@\n def __init__(self, original, new_redirect, wrap_buffer=False):\n '''\n :param stream original:\n- The stream to be wrapped (usually stdout/stderr).\n+ The stream to be wrapped (usually stdout/stderr, but could be None).\n \n :param stream new_redirect:\n Usually IOBuf (below).\n@@ -27,14 +28,19 @@\n # Note that writing to the original stream may fail for some reasons\n # (such as trying to write something that's not a string or having it closed).\n for r in self._redirect_to:\n- r.write(s)\n+ if hasattr(r, 'write'):\n+ r.write(s)\n \n def isatty(self):\n- return self._redirect_to[0].isatty()\n+ for r in self._redirect_to:\n+ if hasattr(r, 'isatty'):\n+ return r.isatty()\n+ return False\n \n def flush(self):\n for r in self._redirect_to:\n- r.flush()\n+ if hasattr(r, 'flush'):\n+ r.flush()\n \n def __getattr__(self, name):\n for r in self._redirect_to:\n@@ -42,11 +48,13 @@\n return getattr(r, name)\n raise AttributeError(name)\n \n+\n class IOBuf:\n '''This class works as a replacement for stdio and stderr.\n It is a buffer and when its contents are requested, it will erase what\n it has so far so that the next return will not return the same contents again.\n '''\n+\n def __init__(self):\n self.buflist = []\n import os\n@@ -56,7 +64,7 @@\n b = self.buflist\n self.buflist = [] # clear it\n return ''.join(b) # bytes on py2, str on py3.\n- \n+\n def write(self, s):\n if not IS_PY3K:\n if isinstance(s, unicode):\n@@ -76,6 +84,7 @@\n def empty(self):\n return len(self.buflist) == 0\n \n+\n class _RedirectionsHolder:\n _stack_stdout = []\n _stack_stderr = []\n", "issue": "sys.stdin not None and missing encoding attribute when running with pythonw.exe\n## Environment data\r\n\r\n- PTVSD version: 4.2\r\n- OS and version: windows 10\r\n- Python version (& distribution if applicable, e.g. Anaconda): CPython 3.7 using **pythonw.exe**\r\n- Using VS Code or Visual Studio: VS\r\n\r\n## Actual behavior\r\n\r\nNone has no attribute encoding exception\r\n\r\n## Expected behavior\r\n\r\nEither sys.stdin.encoding works, or sys.stdin is None (it is None when running without debugging)\r\n\r\n\r\n## Steps to reproduce:\r\n1. Debug this code using pythonw.exe (no console)\r\n```\r\nimport sys\r\n\r\nwith open('issue4866.txt', 'wt') as f:\r\n f.write('hello\\n')\r\n f.write(str(type(sys.stdin)) + '\\n')\r\n if sys.stdin is not None:\r\n f.write(str(sys.stdin.encoding) + '\\n')\r\n f.write('bye\\n')\r\n```\r\n\r\nFrom https://github.com/Microsoft/PTVS/issues/4866\n", "before_files": [{"content": "from _pydevd_bundle import pydevd_constants\n\nIS_PY3K = pydevd_constants.IS_PY3K\n\nclass IORedirector:\n '''\n This class works to wrap a stream (stdout/stderr) with an additional redirect.\n '''\n\n def __init__(self, original, new_redirect, wrap_buffer=False):\n '''\n :param stream original:\n The stream to be wrapped (usually stdout/stderr).\n\n :param stream new_redirect:\n Usually IOBuf (below).\n\n :param bool wrap_buffer:\n Whether to create a buffer attribute (needed to mimick python 3 s\n tdout/stderr which has a buffer to write binary data).\n '''\n self._redirect_to = (original, new_redirect)\n if wrap_buffer and hasattr(original, 'buffer'):\n self.buffer = IORedirector(original.buffer, new_redirect.buffer, False)\n\n def write(self, s):\n # Note that writing to the original stream may fail for some reasons\n # (such as trying to write something that's not a string or having it closed).\n for r in self._redirect_to:\n r.write(s)\n\n def isatty(self):\n return self._redirect_to[0].isatty()\n\n def flush(self):\n for r in self._redirect_to:\n r.flush()\n\n def __getattr__(self, name):\n for r in self._redirect_to:\n if hasattr(r, name):\n return getattr(r, name)\n raise AttributeError(name)\n\nclass IOBuf:\n '''This class works as a replacement for stdio and stderr.\n It is a buffer and when its contents are requested, it will erase what\n it has so far so that the next return will not return the same contents again.\n '''\n def __init__(self):\n self.buflist = []\n import os\n self.encoding = os.environ.get('PYTHONIOENCODING', 'utf-8')\n\n def getvalue(self):\n b = self.buflist\n self.buflist = [] # clear it\n return ''.join(b) # bytes on py2, str on py3.\n \n def write(self, s):\n if not IS_PY3K:\n if isinstance(s, unicode):\n # can't use 'errors' as kwargs in py 2.6\n s = s.encode(self.encoding, 'replace')\n else:\n if isinstance(s, bytes):\n s = s.decode(self.encoding, errors='replace')\n self.buflist.append(s)\n\n def isatty(self):\n return False\n\n def flush(self):\n pass\n\n def empty(self):\n return len(self.buflist) == 0\n\nclass _RedirectionsHolder:\n _stack_stdout = []\n _stack_stderr = []\n\n\ndef start_redirect(keep_original_redirection=False, std='stdout'):\n '''\n @param std: 'stdout', 'stderr', or 'both'\n '''\n import sys\n buf = IOBuf()\n\n if std == 'both':\n config_stds = ['stdout', 'stderr']\n else:\n config_stds = [std]\n\n for std in config_stds:\n original = getattr(sys, std)\n stack = getattr(_RedirectionsHolder, '_stack_%s' % std)\n stack.append(original)\n\n if keep_original_redirection:\n setattr(sys, std, IORedirector(getattr(sys, std), buf))\n else:\n setattr(sys, std, buf)\n return buf\n\n\ndef end_redirect(std='stdout'):\n import sys\n if std == 'both':\n config_stds = ['stdout', 'stderr']\n else:\n config_stds = [std]\n for std in config_stds:\n stack = getattr(_RedirectionsHolder, '_stack_%s' % std)\n setattr(sys, std, stack.pop())\n\n", "path": "src/ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_io.py"}], "after_files": [{"content": "from _pydevd_bundle import pydevd_constants\n\nIS_PY3K = pydevd_constants.IS_PY3K\n\n\nclass IORedirector:\n '''\n This class works to wrap a stream (stdout/stderr) with an additional redirect.\n '''\n\n def __init__(self, original, new_redirect, wrap_buffer=False):\n '''\n :param stream original:\n The stream to be wrapped (usually stdout/stderr, but could be None).\n\n :param stream new_redirect:\n Usually IOBuf (below).\n\n :param bool wrap_buffer:\n Whether to create a buffer attribute (needed to mimick python 3 s\n tdout/stderr which has a buffer to write binary data).\n '''\n self._redirect_to = (original, new_redirect)\n if wrap_buffer and hasattr(original, 'buffer'):\n self.buffer = IORedirector(original.buffer, new_redirect.buffer, False)\n\n def write(self, s):\n # Note that writing to the original stream may fail for some reasons\n # (such as trying to write something that's not a string or having it closed).\n for r in self._redirect_to:\n if hasattr(r, 'write'):\n r.write(s)\n\n def isatty(self):\n for r in self._redirect_to:\n if hasattr(r, 'isatty'):\n return r.isatty()\n return False\n\n def flush(self):\n for r in self._redirect_to:\n if hasattr(r, 'flush'):\n r.flush()\n\n def __getattr__(self, name):\n for r in self._redirect_to:\n if hasattr(r, name):\n return getattr(r, name)\n raise AttributeError(name)\n\n\nclass IOBuf:\n '''This class works as a replacement for stdio and stderr.\n It is a buffer and when its contents are requested, it will erase what\n it has so far so that the next return will not return the same contents again.\n '''\n\n def __init__(self):\n self.buflist = []\n import os\n self.encoding = os.environ.get('PYTHONIOENCODING', 'utf-8')\n\n def getvalue(self):\n b = self.buflist\n self.buflist = [] # clear it\n return ''.join(b) # bytes on py2, str on py3.\n\n def write(self, s):\n if not IS_PY3K:\n if isinstance(s, unicode):\n # can't use 'errors' as kwargs in py 2.6\n s = s.encode(self.encoding, 'replace')\n else:\n if isinstance(s, bytes):\n s = s.decode(self.encoding, errors='replace')\n self.buflist.append(s)\n\n def isatty(self):\n return False\n\n def flush(self):\n pass\n\n def empty(self):\n return len(self.buflist) == 0\n\n\nclass _RedirectionsHolder:\n _stack_stdout = []\n _stack_stderr = []\n\n\ndef start_redirect(keep_original_redirection=False, std='stdout'):\n '''\n @param std: 'stdout', 'stderr', or 'both'\n '''\n import sys\n buf = IOBuf()\n\n if std == 'both':\n config_stds = ['stdout', 'stderr']\n else:\n config_stds = [std]\n\n for std in config_stds:\n original = getattr(sys, std)\n stack = getattr(_RedirectionsHolder, '_stack_%s' % std)\n stack.append(original)\n\n if keep_original_redirection:\n setattr(sys, std, IORedirector(getattr(sys, std), buf))\n else:\n setattr(sys, std, buf)\n return buf\n\n\ndef end_redirect(std='stdout'):\n import sys\n if std == 'both':\n config_stds = ['stdout', 'stderr']\n else:\n config_stds = [std]\n for std in config_stds:\n stack = getattr(_RedirectionsHolder, '_stack_%s' % std)\n setattr(sys, std, stack.pop())\n\n", "path": "src/ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_io.py"}]}
1,575
646
gh_patches_debug_4883
rasdani/github-patches
git_diff
pre-commit__pre-commit-2996
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Use of --dev deprecated for npm I'm seeing this warning sometimes (output seems to be hidden unless the install fails): ``` npm WARN install Usage of the `--dev` option is deprecated. Use `--include=dev` instead. ``` Which seems to be because of this: https://github.com/pre-commit/pre-commit/blob/fe436f1eb09dfdd67032b4f9f8dfa543fb99cf06/pre_commit/languages/node.py#L104 The problem with this command was that it installed dependencies recursively, rendering them useless (AFAICT, not a node expert). The developers decided it was only a footgun in https://github.com/npm/npm/issues/5554#issuecomment-56121953 and deprecated in https://github.com/npm/npm/issues/6200. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pre_commit/languages/node.py` Content: ``` 1 from __future__ import annotations 2 3 import contextlib 4 import functools 5 import os 6 import sys 7 from typing import Generator 8 from typing import Sequence 9 10 import pre_commit.constants as C 11 from pre_commit import lang_base 12 from pre_commit.envcontext import envcontext 13 from pre_commit.envcontext import PatchesT 14 from pre_commit.envcontext import UNSET 15 from pre_commit.envcontext import Var 16 from pre_commit.languages.python import bin_dir 17 from pre_commit.prefix import Prefix 18 from pre_commit.util import cmd_output 19 from pre_commit.util import cmd_output_b 20 from pre_commit.util import rmtree 21 22 ENVIRONMENT_DIR = 'node_env' 23 run_hook = lang_base.basic_run_hook 24 25 26 @functools.lru_cache(maxsize=1) 27 def get_default_version() -> str: 28 # nodeenv does not yet support `-n system` on windows 29 if sys.platform == 'win32': 30 return C.DEFAULT 31 # if node is already installed, we can save a bunch of setup time by 32 # using the installed version 33 elif all(lang_base.exe_exists(exe) for exe in ('node', 'npm')): 34 return 'system' 35 else: 36 return C.DEFAULT 37 38 39 def get_env_patch(venv: str) -> PatchesT: 40 if sys.platform == 'cygwin': # pragma: no cover 41 _, win_venv, _ = cmd_output('cygpath', '-w', venv) 42 install_prefix = fr'{win_venv.strip()}\bin' 43 lib_dir = 'lib' 44 elif sys.platform == 'win32': # pragma: no cover 45 install_prefix = bin_dir(venv) 46 lib_dir = 'Scripts' 47 else: # pragma: win32 no cover 48 install_prefix = venv 49 lib_dir = 'lib' 50 return ( 51 ('NODE_VIRTUAL_ENV', venv), 52 ('NPM_CONFIG_PREFIX', install_prefix), 53 ('npm_config_prefix', install_prefix), 54 ('NPM_CONFIG_USERCONFIG', UNSET), 55 ('npm_config_userconfig', UNSET), 56 ('NODE_PATH', os.path.join(venv, lib_dir, 'node_modules')), 57 ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))), 58 ) 59 60 61 @contextlib.contextmanager 62 def in_env(prefix: Prefix, version: str) -> Generator[None, None, None]: 63 envdir = lang_base.environment_dir(prefix, ENVIRONMENT_DIR, version) 64 with envcontext(get_env_patch(envdir)): 65 yield 66 67 68 def health_check(prefix: Prefix, version: str) -> str | None: 69 with in_env(prefix, version): 70 retcode, _, _ = cmd_output_b('node', '--version', check=False) 71 if retcode != 0: # pragma: win32 no cover 72 return f'`node --version` returned {retcode}' 73 else: 74 return None 75 76 77 def install_environment( 78 prefix: Prefix, version: str, additional_dependencies: Sequence[str], 79 ) -> None: 80 assert prefix.exists('package.json') 81 envdir = lang_base.environment_dir(prefix, ENVIRONMENT_DIR, version) 82 83 # https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx?f=255&MSPPError=-2147217396#maxpath 84 if sys.platform == 'win32': # pragma: no cover 85 envdir = fr'\\?\{os.path.normpath(envdir)}' 86 cmd = [sys.executable, '-mnodeenv', '--prebuilt', '--clean-src', envdir] 87 if version != C.DEFAULT: 88 cmd.extend(['-n', version]) 89 cmd_output_b(*cmd) 90 91 with in_env(prefix, version): 92 # https://npm.community/t/npm-install-g-git-vs-git-clone-cd-npm-install-g/5449 93 # install as if we installed from git 94 95 local_install_cmd = ( 96 'npm', 'install', '--dev', '--prod', 97 '--ignore-prepublish', '--no-progress', '--no-save', 98 ) 99 lang_base.setup_cmd(prefix, local_install_cmd) 100 101 _, pkg, _ = cmd_output('npm', 'pack', cwd=prefix.prefix_dir) 102 pkg = prefix.path(pkg.strip()) 103 104 install = ('npm', 'install', '-g', pkg, *additional_dependencies) 105 lang_base.setup_cmd(prefix, install) 106 107 # clean these up after installation 108 if prefix.exists('node_modules'): # pragma: win32 no cover 109 rmtree(prefix.path('node_modules')) 110 os.remove(pkg) 111 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pre_commit/languages/node.py b/pre_commit/languages/node.py --- a/pre_commit/languages/node.py +++ b/pre_commit/languages/node.py @@ -93,7 +93,7 @@ # install as if we installed from git local_install_cmd = ( - 'npm', 'install', '--dev', '--prod', + 'npm', 'install', '--include=dev', '--include=prod', '--ignore-prepublish', '--no-progress', '--no-save', ) lang_base.setup_cmd(prefix, local_install_cmd)
{"golden_diff": "diff --git a/pre_commit/languages/node.py b/pre_commit/languages/node.py\n--- a/pre_commit/languages/node.py\n+++ b/pre_commit/languages/node.py\n@@ -93,7 +93,7 @@\n # install as if we installed from git\n \n local_install_cmd = (\n- 'npm', 'install', '--dev', '--prod',\n+ 'npm', 'install', '--include=dev', '--include=prod',\n '--ignore-prepublish', '--no-progress', '--no-save',\n )\n lang_base.setup_cmd(prefix, local_install_cmd)\n", "issue": "Use of --dev deprecated for npm\nI'm seeing this warning sometimes (output seems to be hidden unless the install fails):\r\n\r\n```\r\nnpm WARN install Usage of the `--dev` option is deprecated. Use `--include=dev` instead.\r\n```\r\n\r\nWhich seems to be because of this:\r\n\r\nhttps://github.com/pre-commit/pre-commit/blob/fe436f1eb09dfdd67032b4f9f8dfa543fb99cf06/pre_commit/languages/node.py#L104\r\n\r\nThe problem with this command was that it installed dependencies recursively, rendering them useless (AFAICT, not a node expert). The developers decided it was only a footgun in https://github.com/npm/npm/issues/5554#issuecomment-56121953 and deprecated in https://github.com/npm/npm/issues/6200.\n", "before_files": [{"content": "from __future__ import annotations\n\nimport contextlib\nimport functools\nimport os\nimport sys\nfrom typing import Generator\nfrom typing import Sequence\n\nimport pre_commit.constants as C\nfrom pre_commit import lang_base\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import UNSET\nfrom pre_commit.envcontext import Var\nfrom pre_commit.languages.python import bin_dir\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cmd_output_b\nfrom pre_commit.util import rmtree\n\nENVIRONMENT_DIR = 'node_env'\nrun_hook = lang_base.basic_run_hook\n\n\[email protected]_cache(maxsize=1)\ndef get_default_version() -> str:\n # nodeenv does not yet support `-n system` on windows\n if sys.platform == 'win32':\n return C.DEFAULT\n # if node is already installed, we can save a bunch of setup time by\n # using the installed version\n elif all(lang_base.exe_exists(exe) for exe in ('node', 'npm')):\n return 'system'\n else:\n return C.DEFAULT\n\n\ndef get_env_patch(venv: str) -> PatchesT:\n if sys.platform == 'cygwin': # pragma: no cover\n _, win_venv, _ = cmd_output('cygpath', '-w', venv)\n install_prefix = fr'{win_venv.strip()}\\bin'\n lib_dir = 'lib'\n elif sys.platform == 'win32': # pragma: no cover\n install_prefix = bin_dir(venv)\n lib_dir = 'Scripts'\n else: # pragma: win32 no cover\n install_prefix = venv\n lib_dir = 'lib'\n return (\n ('NODE_VIRTUAL_ENV', venv),\n ('NPM_CONFIG_PREFIX', install_prefix),\n ('npm_config_prefix', install_prefix),\n ('NPM_CONFIG_USERCONFIG', UNSET),\n ('npm_config_userconfig', UNSET),\n ('NODE_PATH', os.path.join(venv, lib_dir, 'node_modules')),\n ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),\n )\n\n\[email protected]\ndef in_env(prefix: Prefix, version: str) -> Generator[None, None, None]:\n envdir = lang_base.environment_dir(prefix, ENVIRONMENT_DIR, version)\n with envcontext(get_env_patch(envdir)):\n yield\n\n\ndef health_check(prefix: Prefix, version: str) -> str | None:\n with in_env(prefix, version):\n retcode, _, _ = cmd_output_b('node', '--version', check=False)\n if retcode != 0: # pragma: win32 no cover\n return f'`node --version` returned {retcode}'\n else:\n return None\n\n\ndef install_environment(\n prefix: Prefix, version: str, additional_dependencies: Sequence[str],\n) -> None:\n assert prefix.exists('package.json')\n envdir = lang_base.environment_dir(prefix, ENVIRONMENT_DIR, version)\n\n # https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx?f=255&MSPPError=-2147217396#maxpath\n if sys.platform == 'win32': # pragma: no cover\n envdir = fr'\\\\?\\{os.path.normpath(envdir)}'\n cmd = [sys.executable, '-mnodeenv', '--prebuilt', '--clean-src', envdir]\n if version != C.DEFAULT:\n cmd.extend(['-n', version])\n cmd_output_b(*cmd)\n\n with in_env(prefix, version):\n # https://npm.community/t/npm-install-g-git-vs-git-clone-cd-npm-install-g/5449\n # install as if we installed from git\n\n local_install_cmd = (\n 'npm', 'install', '--dev', '--prod',\n '--ignore-prepublish', '--no-progress', '--no-save',\n )\n lang_base.setup_cmd(prefix, local_install_cmd)\n\n _, pkg, _ = cmd_output('npm', 'pack', cwd=prefix.prefix_dir)\n pkg = prefix.path(pkg.strip())\n\n install = ('npm', 'install', '-g', pkg, *additional_dependencies)\n lang_base.setup_cmd(prefix, install)\n\n # clean these up after installation\n if prefix.exists('node_modules'): # pragma: win32 no cover\n rmtree(prefix.path('node_modules'))\n os.remove(pkg)\n", "path": "pre_commit/languages/node.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport contextlib\nimport functools\nimport os\nimport sys\nfrom typing import Generator\nfrom typing import Sequence\n\nimport pre_commit.constants as C\nfrom pre_commit import lang_base\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import UNSET\nfrom pre_commit.envcontext import Var\nfrom pre_commit.languages.python import bin_dir\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cmd_output_b\nfrom pre_commit.util import rmtree\n\nENVIRONMENT_DIR = 'node_env'\nrun_hook = lang_base.basic_run_hook\n\n\[email protected]_cache(maxsize=1)\ndef get_default_version() -> str:\n # nodeenv does not yet support `-n system` on windows\n if sys.platform == 'win32':\n return C.DEFAULT\n # if node is already installed, we can save a bunch of setup time by\n # using the installed version\n elif all(lang_base.exe_exists(exe) for exe in ('node', 'npm')):\n return 'system'\n else:\n return C.DEFAULT\n\n\ndef get_env_patch(venv: str) -> PatchesT:\n if sys.platform == 'cygwin': # pragma: no cover\n _, win_venv, _ = cmd_output('cygpath', '-w', venv)\n install_prefix = fr'{win_venv.strip()}\\bin'\n lib_dir = 'lib'\n elif sys.platform == 'win32': # pragma: no cover\n install_prefix = bin_dir(venv)\n lib_dir = 'Scripts'\n else: # pragma: win32 no cover\n install_prefix = venv\n lib_dir = 'lib'\n return (\n ('NODE_VIRTUAL_ENV', venv),\n ('NPM_CONFIG_PREFIX', install_prefix),\n ('npm_config_prefix', install_prefix),\n ('NPM_CONFIG_USERCONFIG', UNSET),\n ('npm_config_userconfig', UNSET),\n ('NODE_PATH', os.path.join(venv, lib_dir, 'node_modules')),\n ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),\n )\n\n\[email protected]\ndef in_env(prefix: Prefix, version: str) -> Generator[None, None, None]:\n envdir = lang_base.environment_dir(prefix, ENVIRONMENT_DIR, version)\n with envcontext(get_env_patch(envdir)):\n yield\n\n\ndef health_check(prefix: Prefix, version: str) -> str | None:\n with in_env(prefix, version):\n retcode, _, _ = cmd_output_b('node', '--version', check=False)\n if retcode != 0: # pragma: win32 no cover\n return f'`node --version` returned {retcode}'\n else:\n return None\n\n\ndef install_environment(\n prefix: Prefix, version: str, additional_dependencies: Sequence[str],\n) -> None:\n assert prefix.exists('package.json')\n envdir = lang_base.environment_dir(prefix, ENVIRONMENT_DIR, version)\n\n # https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx?f=255&MSPPError=-2147217396#maxpath\n if sys.platform == 'win32': # pragma: no cover\n envdir = fr'\\\\?\\{os.path.normpath(envdir)}'\n cmd = [sys.executable, '-mnodeenv', '--prebuilt', '--clean-src', envdir]\n if version != C.DEFAULT:\n cmd.extend(['-n', version])\n cmd_output_b(*cmd)\n\n with in_env(prefix, version):\n # https://npm.community/t/npm-install-g-git-vs-git-clone-cd-npm-install-g/5449\n # install as if we installed from git\n\n local_install_cmd = (\n 'npm', 'install', '--include=dev', '--include=prod',\n '--ignore-prepublish', '--no-progress', '--no-save',\n )\n lang_base.setup_cmd(prefix, local_install_cmd)\n\n _, pkg, _ = cmd_output('npm', 'pack', cwd=prefix.prefix_dir)\n pkg = prefix.path(pkg.strip())\n\n install = ('npm', 'install', '-g', pkg, *additional_dependencies)\n lang_base.setup_cmd(prefix, install)\n\n # clean these up after installation\n if prefix.exists('node_modules'): # pragma: win32 no cover\n rmtree(prefix.path('node_modules'))\n os.remove(pkg)\n", "path": "pre_commit/languages/node.py"}]}
1,673
123
gh_patches_debug_7242
rasdani/github-patches
git_diff
dmlc__dgl-1183
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- build_graph in MovieLens class ## ❓ Questions and Help Dear Quan (Andy) Gan, i have a question about the method build_graph in the MovieLens class. I see that the dataset is processed and you consider all the User and Movie features in generating the node features of the graph. In particular, you assign id of the node with this scheme: from [ 0 : Length(Users) ] -> Features of User (encoded) from [ Length(Users) : Length(Users) + Length(Movies) ] -> Features of Movies (encoded) The problem comes out when you consider the Title of the movies because in the code there is the following (line 158): for i, tw in enumerate(tqdm.tqdm(title_words)): g.ndata['title'][i, [vocab_invmap[w] for w in tw]] = 1 which would mean that the encoded title would be assigned to the nodes related to the Users [0 : Len(Movies)] Maybe i didn't understand the logic behind this, is this correct ? or am i missing something? Thank you, Best regards Francesco --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `examples/pytorch/recommendation/rec/datasets/movielens.py` Content: ``` 1 import pandas as pd 2 import dgl 3 import os 4 import torch 5 import numpy as np 6 import scipy.sparse as sp 7 import time 8 from functools import partial 9 from .. import randomwalk 10 import stanfordnlp 11 import re 12 import tqdm 13 import string 14 15 class MovieLens(object): 16 def __init__(self, directory): 17 ''' 18 directory: path to movielens directory which should have the three 19 files: 20 users.dat 21 movies.dat 22 ratings.dat 23 ''' 24 self.directory = directory 25 26 users = [] 27 movies = [] 28 ratings = [] 29 30 # read users 31 with open(os.path.join(directory, 'users.dat')) as f: 32 for l in f: 33 id_, gender, age, occupation, zip_ = l.strip().split('::') 34 users.append({ 35 'id': int(id_), 36 'gender': gender, 37 'age': age, 38 'occupation': occupation, 39 'zip': zip_, 40 }) 41 self.users = pd.DataFrame(users).set_index('id').astype('category') 42 43 # read movies 44 with open(os.path.join(directory, 'movies.dat'), encoding='latin1') as f: 45 for l in f: 46 id_, title, genres = l.strip().split('::') 47 genres_set = set(genres.split('|')) 48 49 # extract year 50 assert re.match(r'.*\([0-9]{4}\)$', title) 51 year = title[-5:-1] 52 title = title[:-6].strip() 53 54 data = {'id': int(id_), 'title': title, 'year': year} 55 for g in genres_set: 56 data[g] = True 57 movies.append(data) 58 self.movies = ( 59 pd.DataFrame(movies) 60 .set_index('id') 61 .fillna(False) 62 .astype({'year': 'category'})) 63 self.genres = self.movies.columns[self.movies.dtypes == bool] 64 65 # read ratings 66 with open(os.path.join(directory, 'ratings.dat')) as f: 67 for l in f: 68 user_id, movie_id, rating, timestamp = [int(_) for _ in l.split('::')] 69 ratings.append({ 70 'user_id': user_id, 71 'movie_id': movie_id, 72 'rating': rating, 73 'timestamp': timestamp, 74 }) 75 ratings = pd.DataFrame(ratings) 76 movie_count = ratings['movie_id'].value_counts() 77 movie_count.name = 'movie_count' 78 ratings = ratings.join(movie_count, on='movie_id') 79 self.ratings = ratings 80 81 # drop users and movies which do not exist in ratings 82 self.users = self.users[self.users.index.isin(self.ratings['user_id'])] 83 self.movies = self.movies[self.movies.index.isin(self.ratings['movie_id'])] 84 85 self.data_split() 86 self.build_graph() 87 88 def split_user(self, df, filter_counts=False): 89 df_new = df.copy() 90 df_new['prob'] = 0 91 92 if filter_counts: 93 df_new_sub = (df_new['movie_count'] >= 10).nonzero()[0] 94 else: 95 df_new_sub = df_new['train'].nonzero()[0] 96 prob = np.linspace(0, 1, df_new_sub.shape[0], endpoint=False) 97 np.random.shuffle(prob) 98 df_new['prob'].iloc[df_new_sub] = prob 99 return df_new 100 101 def data_split(self): 102 self.ratings = self.ratings.groupby('user_id', group_keys=False).apply( 103 partial(self.split_user, filter_counts=True)) 104 self.ratings['train'] = self.ratings['prob'] <= 0.8 105 self.ratings['valid'] = (self.ratings['prob'] > 0.8) & (self.ratings['prob'] <= 0.9) 106 self.ratings['test'] = self.ratings['prob'] > 0.9 107 self.ratings.drop(['prob'], axis=1, inplace=True) 108 109 def build_graph(self): 110 user_ids = list(self.users.index) 111 movie_ids = list(self.movies.index) 112 user_ids_invmap = {id_: i for i, id_ in enumerate(user_ids)} 113 movie_ids_invmap = {id_: i for i, id_ in enumerate(movie_ids)} 114 self.user_ids = user_ids 115 self.movie_ids = movie_ids 116 self.user_ids_invmap = user_ids_invmap 117 self.movie_ids_invmap = movie_ids_invmap 118 119 g = dgl.DGLGraph() 120 g.add_nodes(len(user_ids) + len(movie_ids)) 121 122 # user features 123 for user_column in self.users.columns: 124 udata = torch.zeros(g.number_of_nodes(), dtype=torch.int64) 125 # 0 for padding 126 udata[:len(user_ids)] = \ 127 torch.LongTensor(self.users[user_column].cat.codes.values.astype('int64') + 1) 128 g.ndata[user_column] = udata 129 130 # movie genre 131 movie_genres = torch.from_numpy(self.movies[self.genres].values.astype('float32')) 132 g.ndata['genre'] = torch.zeros(g.number_of_nodes(), len(self.genres)) 133 g.ndata['genre'][len(user_ids):len(user_ids) + len(movie_ids)] = movie_genres 134 135 # movie year 136 g.ndata['year'] = torch.zeros(g.number_of_nodes(), dtype=torch.int64) 137 # 0 for padding 138 g.ndata['year'][len(user_ids):len(user_ids) + len(movie_ids)] = \ 139 torch.LongTensor(self.movies['year'].cat.codes.values.astype('int64') + 1) 140 141 # movie title 142 nlp = stanfordnlp.Pipeline(use_gpu=False, processors='tokenize,lemma') 143 vocab = set() 144 title_words = [] 145 for t in tqdm.tqdm(self.movies['title'].values): 146 doc = nlp(t) 147 words = set() 148 for s in doc.sentences: 149 words.update(w.lemma.lower() for w in s.words 150 if not re.fullmatch(r'['+string.punctuation+']+', w.lemma)) 151 vocab.update(words) 152 title_words.append(words) 153 vocab = list(vocab) 154 vocab_invmap = {w: i for i, w in enumerate(vocab)} 155 # bag-of-words 156 g.ndata['title'] = torch.zeros(g.number_of_nodes(), len(vocab)) 157 for i, tw in enumerate(tqdm.tqdm(title_words)): 158 g.ndata['title'][i, [vocab_invmap[w] for w in tw]] = 1 159 self.vocab = vocab 160 self.vocab_invmap = vocab_invmap 161 162 rating_user_vertices = [user_ids_invmap[id_] for id_ in self.ratings['user_id'].values] 163 rating_movie_vertices = [movie_ids_invmap[id_] + len(user_ids) 164 for id_ in self.ratings['movie_id'].values] 165 self.rating_user_vertices = rating_user_vertices 166 self.rating_movie_vertices = rating_movie_vertices 167 168 g.add_edges( 169 rating_user_vertices, 170 rating_movie_vertices, 171 data={'inv': torch.zeros(self.ratings.shape[0], dtype=torch.uint8)}) 172 g.add_edges( 173 rating_movie_vertices, 174 rating_user_vertices, 175 data={'inv': torch.ones(self.ratings.shape[0], dtype=torch.uint8)}) 176 self.g = g 177 178 def generate_mask(self): 179 while True: 180 ratings = self.ratings.groupby('user_id', group_keys=False).apply(self.split_user) 181 prior_prob = ratings['prob'].values 182 for i in range(5): 183 train_mask = (prior_prob >= 0.2 * i) & (prior_prob < 0.2 * (i + 1)) 184 prior_mask = ~train_mask 185 train_mask &= ratings['train'].values 186 prior_mask &= ratings['train'].values 187 yield prior_mask, train_mask 188 189 def refresh_mask(self): 190 if not hasattr(self, 'masks'): 191 self.masks = self.generate_mask() 192 prior_mask, train_mask = next(self.masks) 193 194 valid_tensor = torch.from_numpy(self.ratings['valid'].values.astype('uint8')) 195 test_tensor = torch.from_numpy(self.ratings['test'].values.astype('uint8')) 196 train_tensor = torch.from_numpy(train_mask.astype('uint8')) 197 prior_tensor = torch.from_numpy(prior_mask.astype('uint8')) 198 edge_data = { 199 'prior': prior_tensor, 200 'valid': valid_tensor, 201 'test': test_tensor, 202 'train': train_tensor, 203 } 204 205 self.g.edges[self.rating_user_vertices, self.rating_movie_vertices].data.update(edge_data) 206 self.g.edges[self.rating_movie_vertices, self.rating_user_vertices].data.update(edge_data) 207 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/examples/pytorch/recommendation/rec/datasets/movielens.py b/examples/pytorch/recommendation/rec/datasets/movielens.py --- a/examples/pytorch/recommendation/rec/datasets/movielens.py +++ b/examples/pytorch/recommendation/rec/datasets/movielens.py @@ -155,7 +155,7 @@ # bag-of-words g.ndata['title'] = torch.zeros(g.number_of_nodes(), len(vocab)) for i, tw in enumerate(tqdm.tqdm(title_words)): - g.ndata['title'][i, [vocab_invmap[w] for w in tw]] = 1 + g.ndata['title'][len(user_ids) + i, [vocab_invmap[w] for w in tw]] = 1 self.vocab = vocab self.vocab_invmap = vocab_invmap
{"golden_diff": "diff --git a/examples/pytorch/recommendation/rec/datasets/movielens.py b/examples/pytorch/recommendation/rec/datasets/movielens.py\n--- a/examples/pytorch/recommendation/rec/datasets/movielens.py\n+++ b/examples/pytorch/recommendation/rec/datasets/movielens.py\n@@ -155,7 +155,7 @@\n # bag-of-words\n g.ndata['title'] = torch.zeros(g.number_of_nodes(), len(vocab))\n for i, tw in enumerate(tqdm.tqdm(title_words)):\n- g.ndata['title'][i, [vocab_invmap[w] for w in tw]] = 1\n+ g.ndata['title'][len(user_ids) + i, [vocab_invmap[w] for w in tw]] = 1\n self.vocab = vocab\n self.vocab_invmap = vocab_invmap\n", "issue": "build_graph in MovieLens class \n## \u2753 Questions and Help\r\n\r\nDear Quan (Andy) Gan,\r\n\r\ni have a question about the method build_graph in the MovieLens class.\r\nI see that the dataset is processed and you consider all the User and Movie features in generating the node features of the graph. In particular, you assign id of the node with this scheme: \r\n\r\nfrom [ 0 : Length(Users) ] -> Features of User (encoded) \r\nfrom [ Length(Users) : Length(Users) + Length(Movies) ] -> Features of Movies (encoded) \r\n\r\nThe problem comes out when you consider the Title of the movies because in the code there is the following (line 158): \r\n\r\n for i, tw in enumerate(tqdm.tqdm(title_words)):\r\n g.ndata['title'][i, [vocab_invmap[w] for w in tw]] = 1\r\n\r\nwhich would mean that the encoded title would be assigned to the nodes related to the Users\r\n[0 : Len(Movies)]\r\nMaybe i didn't understand the logic behind this, is this correct ? or am i missing something? \r\n\r\nThank you,\r\nBest regards\r\nFrancesco\r\n\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "import pandas as pd\nimport dgl\nimport os\nimport torch\nimport numpy as np\nimport scipy.sparse as sp\nimport time\nfrom functools import partial\nfrom .. import randomwalk\nimport stanfordnlp\nimport re\nimport tqdm\nimport string\n\nclass MovieLens(object):\n def __init__(self, directory):\n '''\n directory: path to movielens directory which should have the three\n files:\n users.dat\n movies.dat\n ratings.dat\n '''\n self.directory = directory\n\n users = []\n movies = []\n ratings = []\n\n # read users\n with open(os.path.join(directory, 'users.dat')) as f:\n for l in f:\n id_, gender, age, occupation, zip_ = l.strip().split('::')\n users.append({\n 'id': int(id_),\n 'gender': gender,\n 'age': age,\n 'occupation': occupation,\n 'zip': zip_,\n })\n self.users = pd.DataFrame(users).set_index('id').astype('category')\n\n # read movies\n with open(os.path.join(directory, 'movies.dat'), encoding='latin1') as f:\n for l in f:\n id_, title, genres = l.strip().split('::')\n genres_set = set(genres.split('|'))\n\n # extract year\n assert re.match(r'.*\\([0-9]{4}\\)$', title)\n year = title[-5:-1]\n title = title[:-6].strip()\n\n data = {'id': int(id_), 'title': title, 'year': year}\n for g in genres_set:\n data[g] = True\n movies.append(data)\n self.movies = (\n pd.DataFrame(movies)\n .set_index('id')\n .fillna(False)\n .astype({'year': 'category'}))\n self.genres = self.movies.columns[self.movies.dtypes == bool]\n\n # read ratings\n with open(os.path.join(directory, 'ratings.dat')) as f:\n for l in f:\n user_id, movie_id, rating, timestamp = [int(_) for _ in l.split('::')]\n ratings.append({\n 'user_id': user_id,\n 'movie_id': movie_id,\n 'rating': rating,\n 'timestamp': timestamp,\n })\n ratings = pd.DataFrame(ratings)\n movie_count = ratings['movie_id'].value_counts()\n movie_count.name = 'movie_count'\n ratings = ratings.join(movie_count, on='movie_id')\n self.ratings = ratings\n\n # drop users and movies which do not exist in ratings\n self.users = self.users[self.users.index.isin(self.ratings['user_id'])]\n self.movies = self.movies[self.movies.index.isin(self.ratings['movie_id'])]\n\n self.data_split()\n self.build_graph()\n\n def split_user(self, df, filter_counts=False):\n df_new = df.copy()\n df_new['prob'] = 0\n\n if filter_counts:\n df_new_sub = (df_new['movie_count'] >= 10).nonzero()[0]\n else:\n df_new_sub = df_new['train'].nonzero()[0]\n prob = np.linspace(0, 1, df_new_sub.shape[0], endpoint=False)\n np.random.shuffle(prob)\n df_new['prob'].iloc[df_new_sub] = prob\n return df_new\n\n def data_split(self):\n self.ratings = self.ratings.groupby('user_id', group_keys=False).apply(\n partial(self.split_user, filter_counts=True))\n self.ratings['train'] = self.ratings['prob'] <= 0.8\n self.ratings['valid'] = (self.ratings['prob'] > 0.8) & (self.ratings['prob'] <= 0.9)\n self.ratings['test'] = self.ratings['prob'] > 0.9\n self.ratings.drop(['prob'], axis=1, inplace=True)\n\n def build_graph(self):\n user_ids = list(self.users.index)\n movie_ids = list(self.movies.index)\n user_ids_invmap = {id_: i for i, id_ in enumerate(user_ids)}\n movie_ids_invmap = {id_: i for i, id_ in enumerate(movie_ids)}\n self.user_ids = user_ids\n self.movie_ids = movie_ids\n self.user_ids_invmap = user_ids_invmap\n self.movie_ids_invmap = movie_ids_invmap\n\n g = dgl.DGLGraph()\n g.add_nodes(len(user_ids) + len(movie_ids))\n\n # user features\n for user_column in self.users.columns:\n udata = torch.zeros(g.number_of_nodes(), dtype=torch.int64)\n # 0 for padding\n udata[:len(user_ids)] = \\\n torch.LongTensor(self.users[user_column].cat.codes.values.astype('int64') + 1)\n g.ndata[user_column] = udata\n\n # movie genre\n movie_genres = torch.from_numpy(self.movies[self.genres].values.astype('float32'))\n g.ndata['genre'] = torch.zeros(g.number_of_nodes(), len(self.genres))\n g.ndata['genre'][len(user_ids):len(user_ids) + len(movie_ids)] = movie_genres\n\n # movie year\n g.ndata['year'] = torch.zeros(g.number_of_nodes(), dtype=torch.int64)\n # 0 for padding\n g.ndata['year'][len(user_ids):len(user_ids) + len(movie_ids)] = \\\n torch.LongTensor(self.movies['year'].cat.codes.values.astype('int64') + 1)\n\n # movie title\n nlp = stanfordnlp.Pipeline(use_gpu=False, processors='tokenize,lemma')\n vocab = set()\n title_words = []\n for t in tqdm.tqdm(self.movies['title'].values):\n doc = nlp(t)\n words = set()\n for s in doc.sentences:\n words.update(w.lemma.lower() for w in s.words\n if not re.fullmatch(r'['+string.punctuation+']+', w.lemma))\n vocab.update(words)\n title_words.append(words)\n vocab = list(vocab)\n vocab_invmap = {w: i for i, w in enumerate(vocab)}\n # bag-of-words\n g.ndata['title'] = torch.zeros(g.number_of_nodes(), len(vocab))\n for i, tw in enumerate(tqdm.tqdm(title_words)):\n g.ndata['title'][i, [vocab_invmap[w] for w in tw]] = 1\n self.vocab = vocab\n self.vocab_invmap = vocab_invmap\n\n rating_user_vertices = [user_ids_invmap[id_] for id_ in self.ratings['user_id'].values]\n rating_movie_vertices = [movie_ids_invmap[id_] + len(user_ids)\n for id_ in self.ratings['movie_id'].values]\n self.rating_user_vertices = rating_user_vertices\n self.rating_movie_vertices = rating_movie_vertices\n\n g.add_edges(\n rating_user_vertices,\n rating_movie_vertices,\n data={'inv': torch.zeros(self.ratings.shape[0], dtype=torch.uint8)})\n g.add_edges(\n rating_movie_vertices,\n rating_user_vertices,\n data={'inv': torch.ones(self.ratings.shape[0], dtype=torch.uint8)})\n self.g = g\n\n def generate_mask(self):\n while True:\n ratings = self.ratings.groupby('user_id', group_keys=False).apply(self.split_user)\n prior_prob = ratings['prob'].values\n for i in range(5):\n train_mask = (prior_prob >= 0.2 * i) & (prior_prob < 0.2 * (i + 1))\n prior_mask = ~train_mask\n train_mask &= ratings['train'].values\n prior_mask &= ratings['train'].values\n yield prior_mask, train_mask\n\n def refresh_mask(self):\n if not hasattr(self, 'masks'):\n self.masks = self.generate_mask()\n prior_mask, train_mask = next(self.masks)\n\n valid_tensor = torch.from_numpy(self.ratings['valid'].values.astype('uint8'))\n test_tensor = torch.from_numpy(self.ratings['test'].values.astype('uint8'))\n train_tensor = torch.from_numpy(train_mask.astype('uint8'))\n prior_tensor = torch.from_numpy(prior_mask.astype('uint8'))\n edge_data = {\n 'prior': prior_tensor,\n 'valid': valid_tensor,\n 'test': test_tensor,\n 'train': train_tensor,\n }\n\n self.g.edges[self.rating_user_vertices, self.rating_movie_vertices].data.update(edge_data)\n self.g.edges[self.rating_movie_vertices, self.rating_user_vertices].data.update(edge_data)\n", "path": "examples/pytorch/recommendation/rec/datasets/movielens.py"}], "after_files": [{"content": "import pandas as pd\nimport dgl\nimport os\nimport torch\nimport numpy as np\nimport scipy.sparse as sp\nimport time\nfrom functools import partial\nfrom .. import randomwalk\nimport stanfordnlp\nimport re\nimport tqdm\nimport string\n\nclass MovieLens(object):\n def __init__(self, directory):\n '''\n directory: path to movielens directory which should have the three\n files:\n users.dat\n movies.dat\n ratings.dat\n '''\n self.directory = directory\n\n users = []\n movies = []\n ratings = []\n\n # read users\n with open(os.path.join(directory, 'users.dat')) as f:\n for l in f:\n id_, gender, age, occupation, zip_ = l.strip().split('::')\n users.append({\n 'id': int(id_),\n 'gender': gender,\n 'age': age,\n 'occupation': occupation,\n 'zip': zip_,\n })\n self.users = pd.DataFrame(users).set_index('id').astype('category')\n\n # read movies\n with open(os.path.join(directory, 'movies.dat'), encoding='latin1') as f:\n for l in f:\n id_, title, genres = l.strip().split('::')\n genres_set = set(genres.split('|'))\n\n # extract year\n assert re.match(r'.*\\([0-9]{4}\\)$', title)\n year = title[-5:-1]\n title = title[:-6].strip()\n\n data = {'id': int(id_), 'title': title, 'year': year}\n for g in genres_set:\n data[g] = True\n movies.append(data)\n self.movies = (\n pd.DataFrame(movies)\n .set_index('id')\n .fillna(False)\n .astype({'year': 'category'}))\n self.genres = self.movies.columns[self.movies.dtypes == bool]\n\n # read ratings\n with open(os.path.join(directory, 'ratings.dat')) as f:\n for l in f:\n user_id, movie_id, rating, timestamp = [int(_) for _ in l.split('::')]\n ratings.append({\n 'user_id': user_id,\n 'movie_id': movie_id,\n 'rating': rating,\n 'timestamp': timestamp,\n })\n ratings = pd.DataFrame(ratings)\n movie_count = ratings['movie_id'].value_counts()\n movie_count.name = 'movie_count'\n ratings = ratings.join(movie_count, on='movie_id')\n self.ratings = ratings\n\n # drop users and movies which do not exist in ratings\n self.users = self.users[self.users.index.isin(self.ratings['user_id'])]\n self.movies = self.movies[self.movies.index.isin(self.ratings['movie_id'])]\n\n self.data_split()\n self.build_graph()\n\n def split_user(self, df, filter_counts=False):\n df_new = df.copy()\n df_new['prob'] = 0\n\n if filter_counts:\n df_new_sub = (df_new['movie_count'] >= 10).nonzero()[0]\n else:\n df_new_sub = df_new['train'].nonzero()[0]\n prob = np.linspace(0, 1, df_new_sub.shape[0], endpoint=False)\n np.random.shuffle(prob)\n df_new['prob'].iloc[df_new_sub] = prob\n return df_new\n\n def data_split(self):\n self.ratings = self.ratings.groupby('user_id', group_keys=False).apply(\n partial(self.split_user, filter_counts=True))\n self.ratings['train'] = self.ratings['prob'] <= 0.8\n self.ratings['valid'] = (self.ratings['prob'] > 0.8) & (self.ratings['prob'] <= 0.9)\n self.ratings['test'] = self.ratings['prob'] > 0.9\n self.ratings.drop(['prob'], axis=1, inplace=True)\n\n def build_graph(self):\n user_ids = list(self.users.index)\n movie_ids = list(self.movies.index)\n user_ids_invmap = {id_: i for i, id_ in enumerate(user_ids)}\n movie_ids_invmap = {id_: i for i, id_ in enumerate(movie_ids)}\n self.user_ids = user_ids\n self.movie_ids = movie_ids\n self.user_ids_invmap = user_ids_invmap\n self.movie_ids_invmap = movie_ids_invmap\n\n g = dgl.DGLGraph()\n g.add_nodes(len(user_ids) + len(movie_ids))\n\n # user features\n for user_column in self.users.columns:\n udata = torch.zeros(g.number_of_nodes(), dtype=torch.int64)\n # 0 for padding\n udata[:len(user_ids)] = \\\n torch.LongTensor(self.users[user_column].cat.codes.values.astype('int64') + 1)\n g.ndata[user_column] = udata\n\n # movie genre\n movie_genres = torch.from_numpy(self.movies[self.genres].values.astype('float32'))\n g.ndata['genre'] = torch.zeros(g.number_of_nodes(), len(self.genres))\n g.ndata['genre'][len(user_ids):len(user_ids) + len(movie_ids)] = movie_genres\n\n # movie year\n g.ndata['year'] = torch.zeros(g.number_of_nodes(), dtype=torch.int64)\n # 0 for padding\n g.ndata['year'][len(user_ids):len(user_ids) + len(movie_ids)] = \\\n torch.LongTensor(self.movies['year'].cat.codes.values.astype('int64') + 1)\n\n # movie title\n nlp = stanfordnlp.Pipeline(use_gpu=False, processors='tokenize,lemma')\n vocab = set()\n title_words = []\n for t in tqdm.tqdm(self.movies['title'].values):\n doc = nlp(t)\n words = set()\n for s in doc.sentences:\n words.update(w.lemma.lower() for w in s.words\n if not re.fullmatch(r'['+string.punctuation+']+', w.lemma))\n vocab.update(words)\n title_words.append(words)\n vocab = list(vocab)\n vocab_invmap = {w: i for i, w in enumerate(vocab)}\n # bag-of-words\n g.ndata['title'] = torch.zeros(g.number_of_nodes(), len(vocab))\n for i, tw in enumerate(tqdm.tqdm(title_words)):\n g.ndata['title'][len(user_ids) + i, [vocab_invmap[w] for w in tw]] = 1\n self.vocab = vocab\n self.vocab_invmap = vocab_invmap\n\n rating_user_vertices = [user_ids_invmap[id_] for id_ in self.ratings['user_id'].values]\n rating_movie_vertices = [movie_ids_invmap[id_] + len(user_ids)\n for id_ in self.ratings['movie_id'].values]\n self.rating_user_vertices = rating_user_vertices\n self.rating_movie_vertices = rating_movie_vertices\n\n g.add_edges(\n rating_user_vertices,\n rating_movie_vertices,\n data={'inv': torch.zeros(self.ratings.shape[0], dtype=torch.uint8)})\n g.add_edges(\n rating_movie_vertices,\n rating_user_vertices,\n data={'inv': torch.ones(self.ratings.shape[0], dtype=torch.uint8)})\n self.g = g\n\n def generate_mask(self):\n while True:\n ratings = self.ratings.groupby('user_id', group_keys=False).apply(self.split_user)\n prior_prob = ratings['prob'].values\n for i in range(5):\n train_mask = (prior_prob >= 0.2 * i) & (prior_prob < 0.2 * (i + 1))\n prior_mask = ~train_mask\n train_mask &= ratings['train'].values\n prior_mask &= ratings['train'].values\n yield prior_mask, train_mask\n\n def refresh_mask(self):\n if not hasattr(self, 'masks'):\n self.masks = self.generate_mask()\n prior_mask, train_mask = next(self.masks)\n\n valid_tensor = torch.from_numpy(self.ratings['valid'].values.astype('uint8'))\n test_tensor = torch.from_numpy(self.ratings['test'].values.astype('uint8'))\n train_tensor = torch.from_numpy(train_mask.astype('uint8'))\n prior_tensor = torch.from_numpy(prior_mask.astype('uint8'))\n edge_data = {\n 'prior': prior_tensor,\n 'valid': valid_tensor,\n 'test': test_tensor,\n 'train': train_tensor,\n }\n\n self.g.edges[self.rating_user_vertices, self.rating_movie_vertices].data.update(edge_data)\n self.g.edges[self.rating_movie_vertices, self.rating_user_vertices].data.update(edge_data)\n", "path": "examples/pytorch/recommendation/rec/datasets/movielens.py"}]}
2,914
195
gh_patches_debug_42163
rasdani/github-patches
git_diff
cupy__cupy-2290
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `TestNpz.test_dump` test failure https://jenkins.preferred.jp/job/chainer/job/cupy_pr/161/TEST=cupy-py3,label=mn1-p100/console --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cupy/io/npz.py` Content: ``` 1 import numpy 2 3 import cupy 4 5 6 class NpzFile(object): 7 8 def __init__(self, npz_file): 9 self.npz_file = npz_file 10 11 def __enter__(self): 12 self.npz_file.__enter__() 13 return self 14 15 def __exit__(self, typ, val, traceback): 16 self.npz_file.__exit__(typ, val, traceback) 17 18 def __getitem__(self, key): 19 arr = self.npz_file[key] 20 return cupy.array(arr) 21 22 def close(self): 23 self.npz_file.close() 24 25 26 def load(file, mmap_mode=None): 27 """Loads arrays or pickled objects from ``.npy``, ``.npz`` or pickled file. 28 29 This function just calls ``numpy.load`` and then sends the arrays to the 30 current device. NPZ file is converted to NpzFile object, which defers the 31 transfer to the time of accessing the items. 32 33 Args: 34 file (file-like object or string): The file to read. 35 mmap_mode (None, 'r+', 'r', 'w+', 'c'): If not ``None``, memory-map the 36 file to construct an intermediate :class:`numpy.ndarray` object and 37 transfer it to the current device. 38 39 Returns: 40 CuPy array or NpzFile object depending on the type of the file. NpzFile 41 object is a dictionary-like object with the context manager protocol 42 (which enables us to use *with* statement on it). 43 44 .. seealso:: :func:`numpy.load` 45 46 """ 47 obj = numpy.load(file, mmap_mode) 48 if isinstance(obj, numpy.ndarray): 49 return cupy.array(obj) 50 elif isinstance(obj, numpy.lib.npyio.NpzFile): 51 return NpzFile(obj) 52 else: 53 return obj 54 55 56 def save(file, arr): 57 """Saves an array to a binary file in ``.npy`` format. 58 59 Args: 60 file (file or str): File or filename to save. 61 arr (array_like): Array to save. It should be able to feed to 62 :func:`cupy.asnumpy`. 63 64 .. seealso:: :func:`numpy.save` 65 66 """ 67 numpy.save(file, cupy.asnumpy(arr)) 68 69 70 def savez(file, *args, **kwds): 71 """Saves one or more arrays into a file in uncompressed ``.npz`` format. 72 73 Arguments without keys are treated as arguments with automatic keys named 74 ``arr_0``, ``arr_1``, etc. corresponding to the positions in the argument 75 list. The keys of arguments are used as keys in the ``.npz`` file, which 76 are used for accessing NpzFile object when the file is read by 77 :func:`cupy.load` function. 78 79 Args: 80 file (file or str): File or filename to save. 81 *args: Arrays with implicit keys. 82 **kwds: Arrays with explicit keys. 83 84 .. seealso:: :func:`numpy.savez` 85 86 """ 87 args = map(cupy.asnumpy, args) 88 for key in kwds: 89 kwds[key] = cupy.asnumpy(kwds[key]) 90 numpy.savez(file, *args, **kwds) 91 92 93 def savez_compressed(file, *args, **kwds): 94 """Saves one or more arrays into a file in compressed ``.npz`` format. 95 96 It is equivalent to :func:`cupy.savez` function except the output file is 97 compressed. 98 99 .. seealso:: 100 :func:`cupy.savez` for more detail, 101 :func:`numpy.savez_compressed` 102 103 """ 104 args = map(cupy.asnumpy, args) 105 for key in kwds: 106 kwds[key] = cupy.asnumpy(kwds[key]) 107 numpy.savez_compressed(file, *args, **kwds) 108 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cupy/io/npz.py b/cupy/io/npz.py --- a/cupy/io/npz.py +++ b/cupy/io/npz.py @@ -1,8 +1,13 @@ +import warnings + import numpy import cupy +_support_allow_pickle = (numpy.lib.NumpyVersion(numpy.__version__) >= '1.10.0') + + class NpzFile(object): def __init__(self, npz_file): @@ -23,7 +28,7 @@ self.npz_file.close() -def load(file, mmap_mode=None): +def load(file, mmap_mode=None, allow_pickle=None): """Loads arrays or pickled objects from ``.npy``, ``.npz`` or pickled file. This function just calls ``numpy.load`` and then sends the arrays to the @@ -35,6 +40,16 @@ mmap_mode (None, 'r+', 'r', 'w+', 'c'): If not ``None``, memory-map the file to construct an intermediate :class:`numpy.ndarray` object and transfer it to the current device. + allow_pickle (bool): Allow loading pickled object arrays stored in npy + files. Reasons for disallowing pickles include security, as + loading pickled data can execute arbitrary code. If pickles are + disallowed, loading object arrays will fail. + Please be aware that CuPy does not support arrays with dtype of + `object`. + The default is False. + This option is available only for NumPy 1.10 or later. + In NumPy 1.9, this option cannot be specified (loading pickled + objects is always allowed). Returns: CuPy array or NpzFile object depending on the type of the file. NpzFile @@ -44,7 +59,14 @@ .. seealso:: :func:`numpy.load` """ - obj = numpy.load(file, mmap_mode) + if _support_allow_pickle: + allow_pickle = False if allow_pickle is None else allow_pickle + obj = numpy.load(file, mmap_mode, allow_pickle) + else: + if allow_pickle is not None: + warnings.warn('allow_pickle option is not supported in NumPy 1.9') + obj = numpy.load(file, mmap_mode) + if isinstance(obj, numpy.ndarray): return cupy.array(obj) elif isinstance(obj, numpy.lib.npyio.NpzFile): @@ -53,18 +75,35 @@ return obj -def save(file, arr): +def save(file, arr, allow_pickle=None): """Saves an array to a binary file in ``.npy`` format. Args: file (file or str): File or filename to save. arr (array_like): Array to save. It should be able to feed to :func:`cupy.asnumpy`. + allow_pickle (bool): Allow saving object arrays using Python pickles. + Reasons for disallowing pickles include security (loading pickled + data can execute arbitrary code) and portability (pickled objects + may not be loadable on different Python installations, for example + if the stored objects require libraries that are not available, + and not all pickled data is compatible between Python 2 and Python + 3). + The default is True. + This option is available only for NumPy 1.10 or later. + In NumPy 1.9, this option cannot be specified (saving objects + using pickles is always allowed). .. seealso:: :func:`numpy.save` """ - numpy.save(file, cupy.asnumpy(arr)) + if _support_allow_pickle: + allow_pickle = True if allow_pickle is None else allow_pickle + numpy.save(file, cupy.asnumpy(arr), allow_pickle) + else: + if allow_pickle is not None: + warnings.warn('allow_pickle option is not supported in NumPy 1.9') + numpy.save(file, cupy.asnumpy(arr)) def savez(file, *args, **kwds):
{"golden_diff": "diff --git a/cupy/io/npz.py b/cupy/io/npz.py\n--- a/cupy/io/npz.py\n+++ b/cupy/io/npz.py\n@@ -1,8 +1,13 @@\n+import warnings\n+\n import numpy\n \n import cupy\n \n \n+_support_allow_pickle = (numpy.lib.NumpyVersion(numpy.__version__) >= '1.10.0')\n+\n+\n class NpzFile(object):\n \n def __init__(self, npz_file):\n@@ -23,7 +28,7 @@\n self.npz_file.close()\n \n \n-def load(file, mmap_mode=None):\n+def load(file, mmap_mode=None, allow_pickle=None):\n \"\"\"Loads arrays or pickled objects from ``.npy``, ``.npz`` or pickled file.\n \n This function just calls ``numpy.load`` and then sends the arrays to the\n@@ -35,6 +40,16 @@\n mmap_mode (None, 'r+', 'r', 'w+', 'c'): If not ``None``, memory-map the\n file to construct an intermediate :class:`numpy.ndarray` object and\n transfer it to the current device.\n+ allow_pickle (bool): Allow loading pickled object arrays stored in npy\n+ files. Reasons for disallowing pickles include security, as\n+ loading pickled data can execute arbitrary code. If pickles are\n+ disallowed, loading object arrays will fail.\n+ Please be aware that CuPy does not support arrays with dtype of\n+ `object`.\n+ The default is False.\n+ This option is available only for NumPy 1.10 or later.\n+ In NumPy 1.9, this option cannot be specified (loading pickled\n+ objects is always allowed).\n \n Returns:\n CuPy array or NpzFile object depending on the type of the file. NpzFile\n@@ -44,7 +59,14 @@\n .. seealso:: :func:`numpy.load`\n \n \"\"\"\n- obj = numpy.load(file, mmap_mode)\n+ if _support_allow_pickle:\n+ allow_pickle = False if allow_pickle is None else allow_pickle\n+ obj = numpy.load(file, mmap_mode, allow_pickle)\n+ else:\n+ if allow_pickle is not None:\n+ warnings.warn('allow_pickle option is not supported in NumPy 1.9')\n+ obj = numpy.load(file, mmap_mode)\n+\n if isinstance(obj, numpy.ndarray):\n return cupy.array(obj)\n elif isinstance(obj, numpy.lib.npyio.NpzFile):\n@@ -53,18 +75,35 @@\n return obj\n \n \n-def save(file, arr):\n+def save(file, arr, allow_pickle=None):\n \"\"\"Saves an array to a binary file in ``.npy`` format.\n \n Args:\n file (file or str): File or filename to save.\n arr (array_like): Array to save. It should be able to feed to\n :func:`cupy.asnumpy`.\n+ allow_pickle (bool): Allow saving object arrays using Python pickles.\n+ Reasons for disallowing pickles include security (loading pickled\n+ data can execute arbitrary code) and portability (pickled objects\n+ may not be loadable on different Python installations, for example\n+ if the stored objects require libraries that are not available,\n+ and not all pickled data is compatible between Python 2 and Python\n+ 3).\n+ The default is True.\n+ This option is available only for NumPy 1.10 or later.\n+ In NumPy 1.9, this option cannot be specified (saving objects\n+ using pickles is always allowed).\n \n .. seealso:: :func:`numpy.save`\n \n \"\"\"\n- numpy.save(file, cupy.asnumpy(arr))\n+ if _support_allow_pickle:\n+ allow_pickle = True if allow_pickle is None else allow_pickle\n+ numpy.save(file, cupy.asnumpy(arr), allow_pickle)\n+ else:\n+ if allow_pickle is not None:\n+ warnings.warn('allow_pickle option is not supported in NumPy 1.9')\n+ numpy.save(file, cupy.asnumpy(arr))\n \n \n def savez(file, *args, **kwds):\n", "issue": "`TestNpz.test_dump` test failure\nhttps://jenkins.preferred.jp/job/chainer/job/cupy_pr/161/TEST=cupy-py3,label=mn1-p100/console\r\n\n", "before_files": [{"content": "import numpy\n\nimport cupy\n\n\nclass NpzFile(object):\n\n def __init__(self, npz_file):\n self.npz_file = npz_file\n\n def __enter__(self):\n self.npz_file.__enter__()\n return self\n\n def __exit__(self, typ, val, traceback):\n self.npz_file.__exit__(typ, val, traceback)\n\n def __getitem__(self, key):\n arr = self.npz_file[key]\n return cupy.array(arr)\n\n def close(self):\n self.npz_file.close()\n\n\ndef load(file, mmap_mode=None):\n \"\"\"Loads arrays or pickled objects from ``.npy``, ``.npz`` or pickled file.\n\n This function just calls ``numpy.load`` and then sends the arrays to the\n current device. NPZ file is converted to NpzFile object, which defers the\n transfer to the time of accessing the items.\n\n Args:\n file (file-like object or string): The file to read.\n mmap_mode (None, 'r+', 'r', 'w+', 'c'): If not ``None``, memory-map the\n file to construct an intermediate :class:`numpy.ndarray` object and\n transfer it to the current device.\n\n Returns:\n CuPy array or NpzFile object depending on the type of the file. NpzFile\n object is a dictionary-like object with the context manager protocol\n (which enables us to use *with* statement on it).\n\n .. seealso:: :func:`numpy.load`\n\n \"\"\"\n obj = numpy.load(file, mmap_mode)\n if isinstance(obj, numpy.ndarray):\n return cupy.array(obj)\n elif isinstance(obj, numpy.lib.npyio.NpzFile):\n return NpzFile(obj)\n else:\n return obj\n\n\ndef save(file, arr):\n \"\"\"Saves an array to a binary file in ``.npy`` format.\n\n Args:\n file (file or str): File or filename to save.\n arr (array_like): Array to save. It should be able to feed to\n :func:`cupy.asnumpy`.\n\n .. seealso:: :func:`numpy.save`\n\n \"\"\"\n numpy.save(file, cupy.asnumpy(arr))\n\n\ndef savez(file, *args, **kwds):\n \"\"\"Saves one or more arrays into a file in uncompressed ``.npz`` format.\n\n Arguments without keys are treated as arguments with automatic keys named\n ``arr_0``, ``arr_1``, etc. corresponding to the positions in the argument\n list. The keys of arguments are used as keys in the ``.npz`` file, which\n are used for accessing NpzFile object when the file is read by\n :func:`cupy.load` function.\n\n Args:\n file (file or str): File or filename to save.\n *args: Arrays with implicit keys.\n **kwds: Arrays with explicit keys.\n\n .. seealso:: :func:`numpy.savez`\n\n \"\"\"\n args = map(cupy.asnumpy, args)\n for key in kwds:\n kwds[key] = cupy.asnumpy(kwds[key])\n numpy.savez(file, *args, **kwds)\n\n\ndef savez_compressed(file, *args, **kwds):\n \"\"\"Saves one or more arrays into a file in compressed ``.npz`` format.\n\n It is equivalent to :func:`cupy.savez` function except the output file is\n compressed.\n\n .. seealso::\n :func:`cupy.savez` for more detail,\n :func:`numpy.savez_compressed`\n\n \"\"\"\n args = map(cupy.asnumpy, args)\n for key in kwds:\n kwds[key] = cupy.asnumpy(kwds[key])\n numpy.savez_compressed(file, *args, **kwds)\n", "path": "cupy/io/npz.py"}], "after_files": [{"content": "import warnings\n\nimport numpy\n\nimport cupy\n\n\n_support_allow_pickle = (numpy.lib.NumpyVersion(numpy.__version__) >= '1.10.0')\n\n\nclass NpzFile(object):\n\n def __init__(self, npz_file):\n self.npz_file = npz_file\n\n def __enter__(self):\n self.npz_file.__enter__()\n return self\n\n def __exit__(self, typ, val, traceback):\n self.npz_file.__exit__(typ, val, traceback)\n\n def __getitem__(self, key):\n arr = self.npz_file[key]\n return cupy.array(arr)\n\n def close(self):\n self.npz_file.close()\n\n\ndef load(file, mmap_mode=None, allow_pickle=None):\n \"\"\"Loads arrays or pickled objects from ``.npy``, ``.npz`` or pickled file.\n\n This function just calls ``numpy.load`` and then sends the arrays to the\n current device. NPZ file is converted to NpzFile object, which defers the\n transfer to the time of accessing the items.\n\n Args:\n file (file-like object or string): The file to read.\n mmap_mode (None, 'r+', 'r', 'w+', 'c'): If not ``None``, memory-map the\n file to construct an intermediate :class:`numpy.ndarray` object and\n transfer it to the current device.\n allow_pickle (bool): Allow loading pickled object arrays stored in npy\n files. Reasons for disallowing pickles include security, as\n loading pickled data can execute arbitrary code. If pickles are\n disallowed, loading object arrays will fail.\n Please be aware that CuPy does not support arrays with dtype of\n `object`.\n The default is False.\n This option is available only for NumPy 1.10 or later.\n In NumPy 1.9, this option cannot be specified (loading pickled\n objects is always allowed).\n\n Returns:\n CuPy array or NpzFile object depending on the type of the file. NpzFile\n object is a dictionary-like object with the context manager protocol\n (which enables us to use *with* statement on it).\n\n .. seealso:: :func:`numpy.load`\n\n \"\"\"\n if _support_allow_pickle:\n allow_pickle = False if allow_pickle is None else allow_pickle\n obj = numpy.load(file, mmap_mode, allow_pickle)\n else:\n if allow_pickle is not None:\n warnings.warn('allow_pickle option is not supported in NumPy 1.9')\n obj = numpy.load(file, mmap_mode)\n\n if isinstance(obj, numpy.ndarray):\n return cupy.array(obj)\n elif isinstance(obj, numpy.lib.npyio.NpzFile):\n return NpzFile(obj)\n else:\n return obj\n\n\ndef save(file, arr, allow_pickle=None):\n \"\"\"Saves an array to a binary file in ``.npy`` format.\n\n Args:\n file (file or str): File or filename to save.\n arr (array_like): Array to save. It should be able to feed to\n :func:`cupy.asnumpy`.\n allow_pickle (bool): Allow saving object arrays using Python pickles.\n Reasons for disallowing pickles include security (loading pickled\n data can execute arbitrary code) and portability (pickled objects\n may not be loadable on different Python installations, for example\n if the stored objects require libraries that are not available,\n and not all pickled data is compatible between Python 2 and Python\n 3).\n The default is True.\n This option is available only for NumPy 1.10 or later.\n In NumPy 1.9, this option cannot be specified (saving objects\n using pickles is always allowed).\n\n .. seealso:: :func:`numpy.save`\n\n \"\"\"\n if _support_allow_pickle:\n allow_pickle = True if allow_pickle is None else allow_pickle\n numpy.save(file, cupy.asnumpy(arr), allow_pickle)\n else:\n if allow_pickle is not None:\n warnings.warn('allow_pickle option is not supported in NumPy 1.9')\n numpy.save(file, cupy.asnumpy(arr))\n\n\ndef savez(file, *args, **kwds):\n \"\"\"Saves one or more arrays into a file in uncompressed ``.npz`` format.\n\n Arguments without keys are treated as arguments with automatic keys named\n ``arr_0``, ``arr_1``, etc. corresponding to the positions in the argument\n list. The keys of arguments are used as keys in the ``.npz`` file, which\n are used for accessing NpzFile object when the file is read by\n :func:`cupy.load` function.\n\n Args:\n file (file or str): File or filename to save.\n *args: Arrays with implicit keys.\n **kwds: Arrays with explicit keys.\n\n .. seealso:: :func:`numpy.savez`\n\n \"\"\"\n args = map(cupy.asnumpy, args)\n for key in kwds:\n kwds[key] = cupy.asnumpy(kwds[key])\n numpy.savez(file, *args, **kwds)\n\n\ndef savez_compressed(file, *args, **kwds):\n \"\"\"Saves one or more arrays into a file in compressed ``.npz`` format.\n\n It is equivalent to :func:`cupy.savez` function except the output file is\n compressed.\n\n .. seealso::\n :func:`cupy.savez` for more detail,\n :func:`numpy.savez_compressed`\n\n \"\"\"\n args = map(cupy.asnumpy, args)\n for key in kwds:\n kwds[key] = cupy.asnumpy(kwds[key])\n numpy.savez_compressed(file, *args, **kwds)\n", "path": "cupy/io/npz.py"}]}
1,368
931
gh_patches_debug_15544
rasdani/github-patches
git_diff
qtile__qtile-4610
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `ImapWidget` may call `keyring.get_password()` with `username=None`, violating API and potentially crashing it ### Issue description The problematic code is: https://github.com/qtile/qtile/blob/9ccaf6f1c01a9ffbd7beacdd8f405884bd81e1c0/libqtile/widget/imapwidget.py#L78 At this point, `self.user` may be `None`. However, according to the API definition at: https://github.com/jaraco/keyring/blob/0cebfebbf516a47e4e45911ba6b4d4dd2699845c/keyring/core.py#L54 `keyring.get_password()` expects two `str` argument, i.e. `None` is not acceptable. If `keyrings-alt` backend is installed, then it explicitly crashes on `None` username: ```pytb libqtile/widget/imapwidget.py:78: in __init__ password = keyring.get_password("imapwidget", self.user) .tox/py310-x11/lib/python3.10/site-packages/keyring/core.py:56: in get_password return get_keyring().get_password(service_name, username) .tox/py310-x11/lib/python3.10/site-packages/keyrings/alt/file_base.py:92: in get_password assoc = self._generate_assoc(service, username) .tox/py310-x11/lib/python3.10/site-packages/keyrings/alt/file_base.py:133: in _generate_assoc return (escape_for_ini(service) + r'\0' + escape_for_ini(username)).encode() .tox/py310-x11/lib/python3.10/site-packages/keyrings/alt/escape.py:29: in escape return "".join(_escape_char(c) for c in value.encode('utf-8')) E AttributeError: 'NoneType' object has no attribute 'encode' ``` To reproduce: ``` tox -e py310-x11 # you can cancel the initial test run, after dependencies are installed . .tox/py310-x11/bin/activate pip install imapclient keyring keyrings-alt pytest --backend=x11 ``` ### Version 0.23.1.dev83+g9ccaf6f1 ### Backend X11 (default) ### Config _No response_ ### Logs _No response_ ### Required - [X] I have searched past issues to see if this bug has already been reported, and it hasn't been. - [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `libqtile/widget/imapwidget.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # Copyright (c) 2015 David R. Andersen 3 # 4 # Permission is hereby granted, free of charge, to any person obtaining a copy 5 # of this software and associated documentation files (the "Software"), to deal 6 # in the Software without restriction, including without limitation the rights 7 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 8 # copies of the Software, and to permit persons to whom the Software is 9 # furnished to do so, subject to the following conditions: 10 # 11 # The above copyright notice and this permission notice shall be included in 12 # all copies or substantial portions of the Software. 13 # 14 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 17 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 19 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 20 # SOFTWARE. 21 22 import imaplib 23 import re 24 25 import keyring 26 27 from libqtile.log_utils import logger 28 from libqtile.widget import base 29 30 31 class ImapWidget(base.ThreadPoolText): 32 """Email IMAP widget 33 34 This widget will scan one of your imap email boxes and report the number of 35 unseen messages present. I've configured it to only work with imap with 36 ssl. Your password is obtained from the Gnome Keyring. 37 38 Writing your password to the keyring initially is as simple as (changing 39 out <userid> and <password> for your userid and password): 40 41 1) create the file ~/.local/share/python_keyring/keyringrc.cfg with the 42 following contents:: 43 44 [backend] 45 default-keyring=keyring.backends.Gnome.Keyring 46 keyring-path=/home/<userid>/.local/share/keyring/ 47 48 49 2) Execute the following python shell script once:: 50 51 #!/usr/bin/env python3 52 import keyring 53 user = <userid> 54 password = <password> 55 keyring.set_password('imapwidget', user, password) 56 57 mbox names must include the path to the mbox (except for the default 58 INBOX). So, for example if your mailroot is ``~/Maildir``, and you want to 59 look at the mailbox at HomeMail/fred, the mbox setting would be: 60 ``mbox="~/Maildir/HomeMail/fred"``. Note the nested sets of quotes! Labels 61 can be whatever you choose, of course. 62 63 Widget requirements: keyring_. 64 65 .. _keyring: https://pypi.org/project/keyring/ 66 """ 67 68 defaults = [ 69 ("mbox", '"INBOX"', "mailbox to fetch"), 70 ("label", "INBOX", "label for display"), 71 ("user", None, "email username"), 72 ("server", None, "email server name"), 73 ] 74 75 def __init__(self, **config): 76 base.ThreadPoolText.__init__(self, "", **config) 77 self.add_defaults(ImapWidget.defaults) 78 password = keyring.get_password("imapwidget", self.user) 79 if password is not None: 80 self.password = password 81 else: 82 logger.critical("Gnome Keyring Error") 83 84 def poll(self): 85 im = imaplib.IMAP4_SSL(self.server, 993) 86 if self.password == "Gnome Keyring Error": 87 text = "Gnome Keyring Error" 88 else: 89 im.login(self.user, self.password) 90 status, response = im.status(self.mbox, "(UNSEEN)") 91 text = response[0].decode() 92 text = self.label + ": " + re.sub(r"\).*$", "", re.sub(r"^.*N\s", "", text)) 93 im.logout() 94 return text 95 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/libqtile/widget/imapwidget.py b/libqtile/widget/imapwidget.py --- a/libqtile/widget/imapwidget.py +++ b/libqtile/widget/imapwidget.py @@ -24,6 +24,7 @@ import keyring +from libqtile.confreader import ConfigError from libqtile.log_utils import logger from libqtile.widget import base @@ -75,6 +76,8 @@ def __init__(self, **config): base.ThreadPoolText.__init__(self, "", **config) self.add_defaults(ImapWidget.defaults) + if self.user is None: + raise ConfigError("You must set the 'user' parameter for the IMAP widget.") password = keyring.get_password("imapwidget", self.user) if password is not None: self.password = password
{"golden_diff": "diff --git a/libqtile/widget/imapwidget.py b/libqtile/widget/imapwidget.py\n--- a/libqtile/widget/imapwidget.py\n+++ b/libqtile/widget/imapwidget.py\n@@ -24,6 +24,7 @@\n \n import keyring\n \n+from libqtile.confreader import ConfigError\n from libqtile.log_utils import logger\n from libqtile.widget import base\n \n@@ -75,6 +76,8 @@\n def __init__(self, **config):\n base.ThreadPoolText.__init__(self, \"\", **config)\n self.add_defaults(ImapWidget.defaults)\n+ if self.user is None:\n+ raise ConfigError(\"You must set the 'user' parameter for the IMAP widget.\")\n password = keyring.get_password(\"imapwidget\", self.user)\n if password is not None:\n self.password = password\n", "issue": "`ImapWidget` may call `keyring.get_password()` with `username=None`, violating API and potentially crashing it\n### Issue description\n\nThe problematic code is:\r\n\r\nhttps://github.com/qtile/qtile/blob/9ccaf6f1c01a9ffbd7beacdd8f405884bd81e1c0/libqtile/widget/imapwidget.py#L78\r\n\r\nAt this point, `self.user` may be `None`. However, according to the API definition at:\r\n\r\nhttps://github.com/jaraco/keyring/blob/0cebfebbf516a47e4e45911ba6b4d4dd2699845c/keyring/core.py#L54\r\n\r\n`keyring.get_password()` expects two `str` argument, i.e. `None` is not acceptable. If `keyrings-alt` backend is installed, then it explicitly crashes on `None` username:\r\n\r\n```pytb\r\nlibqtile/widget/imapwidget.py:78: in __init__\r\n password = keyring.get_password(\"imapwidget\", self.user)\r\n.tox/py310-x11/lib/python3.10/site-packages/keyring/core.py:56: in get_password\r\n return get_keyring().get_password(service_name, username)\r\n.tox/py310-x11/lib/python3.10/site-packages/keyrings/alt/file_base.py:92: in get_password\r\n assoc = self._generate_assoc(service, username)\r\n.tox/py310-x11/lib/python3.10/site-packages/keyrings/alt/file_base.py:133: in _generate_assoc\r\n return (escape_for_ini(service) + r'\\0' + escape_for_ini(username)).encode()\r\n.tox/py310-x11/lib/python3.10/site-packages/keyrings/alt/escape.py:29: in escape\r\n return \"\".join(_escape_char(c) for c in value.encode('utf-8'))\r\nE AttributeError: 'NoneType' object has no attribute 'encode'\r\n```\r\n\r\nTo reproduce:\r\n\r\n```\r\ntox -e py310-x11 # you can cancel the initial test run, after dependencies are installed\r\n. .tox/py310-x11/bin/activate\r\npip install imapclient keyring keyrings-alt\r\npytest --backend=x11\r\n```\n\n### Version\n\n0.23.1.dev83+g9ccaf6f1\n\n### Backend\n\nX11 (default)\n\n### Config\n\n_No response_\n\n### Logs\n\n_No response_\n\n### Required\n\n- [X] I have searched past issues to see if this bug has already been reported, and it hasn't been.\n- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2015 David R. Andersen\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport imaplib\nimport re\n\nimport keyring\n\nfrom libqtile.log_utils import logger\nfrom libqtile.widget import base\n\n\nclass ImapWidget(base.ThreadPoolText):\n \"\"\"Email IMAP widget\n\n This widget will scan one of your imap email boxes and report the number of\n unseen messages present. I've configured it to only work with imap with\n ssl. Your password is obtained from the Gnome Keyring.\n\n Writing your password to the keyring initially is as simple as (changing\n out <userid> and <password> for your userid and password):\n\n 1) create the file ~/.local/share/python_keyring/keyringrc.cfg with the\n following contents::\n\n [backend]\n default-keyring=keyring.backends.Gnome.Keyring\n keyring-path=/home/<userid>/.local/share/keyring/\n\n\n 2) Execute the following python shell script once::\n\n #!/usr/bin/env python3\n import keyring\n user = <userid>\n password = <password>\n keyring.set_password('imapwidget', user, password)\n\n mbox names must include the path to the mbox (except for the default\n INBOX). So, for example if your mailroot is ``~/Maildir``, and you want to\n look at the mailbox at HomeMail/fred, the mbox setting would be:\n ``mbox=\"~/Maildir/HomeMail/fred\"``. Note the nested sets of quotes! Labels\n can be whatever you choose, of course.\n\n Widget requirements: keyring_.\n\n .. _keyring: https://pypi.org/project/keyring/\n \"\"\"\n\n defaults = [\n (\"mbox\", '\"INBOX\"', \"mailbox to fetch\"),\n (\"label\", \"INBOX\", \"label for display\"),\n (\"user\", None, \"email username\"),\n (\"server\", None, \"email server name\"),\n ]\n\n def __init__(self, **config):\n base.ThreadPoolText.__init__(self, \"\", **config)\n self.add_defaults(ImapWidget.defaults)\n password = keyring.get_password(\"imapwidget\", self.user)\n if password is not None:\n self.password = password\n else:\n logger.critical(\"Gnome Keyring Error\")\n\n def poll(self):\n im = imaplib.IMAP4_SSL(self.server, 993)\n if self.password == \"Gnome Keyring Error\":\n text = \"Gnome Keyring Error\"\n else:\n im.login(self.user, self.password)\n status, response = im.status(self.mbox, \"(UNSEEN)\")\n text = response[0].decode()\n text = self.label + \": \" + re.sub(r\"\\).*$\", \"\", re.sub(r\"^.*N\\s\", \"\", text))\n im.logout()\n return text\n", "path": "libqtile/widget/imapwidget.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2015 David R. Andersen\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport imaplib\nimport re\n\nimport keyring\n\nfrom libqtile.confreader import ConfigError\nfrom libqtile.log_utils import logger\nfrom libqtile.widget import base\n\n\nclass ImapWidget(base.ThreadPoolText):\n \"\"\"Email IMAP widget\n\n This widget will scan one of your imap email boxes and report the number of\n unseen messages present. I've configured it to only work with imap with\n ssl. Your password is obtained from the Gnome Keyring.\n\n Writing your password to the keyring initially is as simple as (changing\n out <userid> and <password> for your userid and password):\n\n 1) create the file ~/.local/share/python_keyring/keyringrc.cfg with the\n following contents::\n\n [backend]\n default-keyring=keyring.backends.Gnome.Keyring\n keyring-path=/home/<userid>/.local/share/keyring/\n\n\n 2) Execute the following python shell script once::\n\n #!/usr/bin/env python3\n import keyring\n user = <userid>\n password = <password>\n keyring.set_password('imapwidget', user, password)\n\n mbox names must include the path to the mbox (except for the default\n INBOX). So, for example if your mailroot is ``~/Maildir``, and you want to\n look at the mailbox at HomeMail/fred, the mbox setting would be:\n ``mbox=\"~/Maildir/HomeMail/fred\"``. Note the nested sets of quotes! Labels\n can be whatever you choose, of course.\n\n Widget requirements: keyring_.\n\n .. _keyring: https://pypi.org/project/keyring/\n \"\"\"\n\n defaults = [\n (\"mbox\", '\"INBOX\"', \"mailbox to fetch\"),\n (\"label\", \"INBOX\", \"label for display\"),\n (\"user\", None, \"email username\"),\n (\"server\", None, \"email server name\"),\n ]\n\n def __init__(self, **config):\n base.ThreadPoolText.__init__(self, \"\", **config)\n self.add_defaults(ImapWidget.defaults)\n if self.user is None:\n raise ConfigError(\"You must set the 'user' parameter for the IMAP widget.\")\n password = keyring.get_password(\"imapwidget\", self.user)\n if password is not None:\n self.password = password\n else:\n logger.critical(\"Gnome Keyring Error\")\n\n def poll(self):\n im = imaplib.IMAP4_SSL(self.server, 993)\n if self.password == \"Gnome Keyring Error\":\n text = \"Gnome Keyring Error\"\n else:\n im.login(self.user, self.password)\n status, response = im.status(self.mbox, \"(UNSEEN)\")\n text = response[0].decode()\n text = self.label + \": \" + re.sub(r\"\\).*$\", \"\", re.sub(r\"^.*N\\s\", \"\", text))\n im.logout()\n return text\n", "path": "libqtile/widget/imapwidget.py"}]}
1,919
190
gh_patches_debug_14753
rasdani/github-patches
git_diff
ansible__ansible-39634
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- aws_s3 is automaticly decrypting ansible-vault encrypted files before put <!--- Verify first that your issue/request is not already reported on GitHub. Also test if the latest release, and devel branch are affected too. Always add information AFTER of these html comments. --> ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME aws_s3 ##### ANSIBLE VERSION <!--- Paste, BELOW THIS COMMENT, verbatim output from "ansible --version" between quotes below --> ``` 2.5.1 ``` ##### SUMMARY - I'm trying to upload an ansible-vault encrypted file with aws_s3. But aws_s3 decrypts the src: file before uploading it to S3. - aws_s3 in 2.4 didn't decrypt the src: parameter. - The documentation for aws_s3 doesn't mention that the src: parameter is autodecrypted. - The aws_s3 module doesn't accept the decrypt: argument. ##### STEPS TO REPRODUCE <!--- Paste example playbooks or commands between quotes below --> ```yaml - name: upload vault to s3 aws_s3: bucket: "the bucket" object: "file.txt" src: "file.txt" mode: put ``` 1. The file.txt is encrypted with ansible-vault. 2. The playbook that runs this task is invoked with --vault-password and is able to decrypt the file because other tasks need the file decrypted. ##### EXPECTED RESULTS Don't autodecrypt the src: argument or be able to specify decrypt: no. ##### ACTUAL RESULTS The src: argument to aws_s3 is automagicly decrypted without documentation or a way to disable the feature like other modules (ex. copy). aws_s3 is automaticly decrypting ansible-vault encrypted files before put <!--- Verify first that your issue/request is not already reported on GitHub. Also test if the latest release, and devel branch are affected too. Always add information AFTER of these html comments. --> ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME aws_s3 ##### ANSIBLE VERSION <!--- Paste, BELOW THIS COMMENT, verbatim output from "ansible --version" between quotes below --> ``` 2.5.1 ``` ##### SUMMARY - I'm trying to upload an ansible-vault encrypted file with aws_s3. But aws_s3 decrypts the src: file before uploading it to S3. - aws_s3 in 2.4 didn't decrypt the src: parameter. - The documentation for aws_s3 doesn't mention that the src: parameter is autodecrypted. - The aws_s3 module doesn't accept the decrypt: argument. ##### STEPS TO REPRODUCE <!--- Paste example playbooks or commands between quotes below --> ```yaml - name: upload vault to s3 aws_s3: bucket: "the bucket" object: "file.txt" src: "file.txt" mode: put ``` 1. The file.txt is encrypted with ansible-vault. 2. The playbook that runs this task is invoked with --vault-password and is able to decrypt the file because other tasks need the file decrypted. ##### EXPECTED RESULTS Don't autodecrypt the src: argument or be able to specify decrypt: no. ##### ACTUAL RESULTS The src: argument to aws_s3 is automagicly decrypted without documentation or a way to disable the feature like other modules (ex. copy). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lib/ansible/plugins/action/aws_s3.py` Content: ``` 1 # (c) 2012, Michael DeHaan <[email protected]> 2 # (c) 2018, Will Thames <[email protected]> 3 # 4 # This file is part of Ansible 5 # 6 # Ansible is free software: you can redistribute it and/or modify 7 # it under the terms of the GNU General Public License as published by 8 # the Free Software Foundation, either version 3 of the License, or 9 # (at your option) any later version. 10 # 11 # Ansible is distributed in the hope that it will be useful, 12 # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 # GNU General Public License for more details. 15 # 16 # You should have received a copy of the GNU General Public License 17 # along with Ansible. If not, see <http://www.gnu.org/licenses/>. 18 from __future__ import (absolute_import, division, print_function) 19 __metaclass__ = type 20 21 import os 22 23 from ansible.errors import AnsibleError, AnsibleAction, AnsibleActionFail, AnsibleFileNotFound 24 from ansible.module_utils._text import to_text 25 from ansible.plugins.action import ActionBase 26 27 28 class ActionModule(ActionBase): 29 30 TRANSFERS_FILES = True 31 32 def run(self, tmp=None, task_vars=None): 33 ''' handler for aws_s3 operations ''' 34 if task_vars is None: 35 task_vars = dict() 36 37 result = super(ActionModule, self).run(tmp, task_vars) 38 del tmp # tmp no longer has any effect 39 40 source = self._task.args.get('src', None) 41 42 try: 43 new_module_args = self._task.args.copy() 44 if source: 45 source = os.path.expanduser(source) 46 47 # For backward compatibility check if the file exists on the remote; it should take precedence 48 if not self._remote_file_exists(source): 49 try: 50 source = self._loader.get_real_file(self._find_needle('files', source)) 51 new_module_args['src'] = source 52 except AnsibleFileNotFound as e: 53 # module handles error message for nonexistent files 54 new_module_args['src'] = source 55 except AnsibleError as e: 56 raise AnsibleActionFail(to_text(e)) 57 58 # execute the aws_s3 module now, with the updated args 59 result.update(self._execute_module(module_args=new_module_args, task_vars=task_vars)) 60 except AnsibleAction as e: 61 result.update(e.result) 62 return result 63 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lib/ansible/plugins/action/aws_s3.py b/lib/ansible/plugins/action/aws_s3.py --- a/lib/ansible/plugins/action/aws_s3.py +++ b/lib/ansible/plugins/action/aws_s3.py @@ -47,7 +47,7 @@ # For backward compatibility check if the file exists on the remote; it should take precedence if not self._remote_file_exists(source): try: - source = self._loader.get_real_file(self._find_needle('files', source)) + source = self._loader.get_real_file(self._find_needle('files', source), decrypt=False) new_module_args['src'] = source except AnsibleFileNotFound as e: # module handles error message for nonexistent files
{"golden_diff": "diff --git a/lib/ansible/plugins/action/aws_s3.py b/lib/ansible/plugins/action/aws_s3.py\n--- a/lib/ansible/plugins/action/aws_s3.py\n+++ b/lib/ansible/plugins/action/aws_s3.py\n@@ -47,7 +47,7 @@\n # For backward compatibility check if the file exists on the remote; it should take precedence\n if not self._remote_file_exists(source):\n try:\n- source = self._loader.get_real_file(self._find_needle('files', source))\n+ source = self._loader.get_real_file(self._find_needle('files', source), decrypt=False)\n new_module_args['src'] = source\n except AnsibleFileNotFound as e:\n # module handles error message for nonexistent files\n", "issue": "aws_s3 is automaticly decrypting ansible-vault encrypted files before put\n<!---\r\nVerify first that your issue/request is not already reported on GitHub.\r\nAlso test if the latest release, and devel branch are affected too.\r\nAlways add information AFTER of these html comments. -->\r\n\r\n##### ISSUE TYPE\r\n - Bug Report\r\n\r\n##### COMPONENT NAME\r\naws_s3\r\n\r\n##### ANSIBLE VERSION\r\n<!--- Paste, BELOW THIS COMMENT, verbatim output from \"ansible --version\" between quotes below -->\r\n```\r\n2.5.1\r\n```\r\n\r\n##### SUMMARY\r\n- I'm trying to upload an ansible-vault encrypted file with aws_s3. But aws_s3 decrypts the src: file before uploading it to S3. \r\n- aws_s3 in 2.4 didn't decrypt the src: parameter.\r\n- The documentation for aws_s3 doesn't mention that the src: parameter is autodecrypted.\r\n- The aws_s3 module doesn't accept the decrypt: argument.\r\n\r\n##### STEPS TO REPRODUCE\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml\r\n- name: upload vault to s3\r\n aws_s3:\r\n bucket: \"the bucket\"\r\n object: \"file.txt\"\r\n src: \"file.txt\"\r\n mode: put\r\n```\r\n1. The file.txt is encrypted with ansible-vault. \r\n2. The playbook that runs this task is invoked with --vault-password and is able to decrypt the file because other tasks need the file decrypted.\r\n\r\n##### EXPECTED RESULTS\r\nDon't autodecrypt the src: argument or be able to specify decrypt: no.\r\n\r\n##### ACTUAL RESULTS\r\nThe src: argument to aws_s3 is automagicly decrypted without documentation or a way to disable the feature like other modules (ex. copy).\r\n\naws_s3 is automaticly decrypting ansible-vault encrypted files before put\n<!---\r\nVerify first that your issue/request is not already reported on GitHub.\r\nAlso test if the latest release, and devel branch are affected too.\r\nAlways add information AFTER of these html comments. -->\r\n\r\n##### ISSUE TYPE\r\n - Bug Report\r\n\r\n##### COMPONENT NAME\r\naws_s3\r\n\r\n##### ANSIBLE VERSION\r\n<!--- Paste, BELOW THIS COMMENT, verbatim output from \"ansible --version\" between quotes below -->\r\n```\r\n2.5.1\r\n```\r\n\r\n##### SUMMARY\r\n- I'm trying to upload an ansible-vault encrypted file with aws_s3. But aws_s3 decrypts the src: file before uploading it to S3. \r\n- aws_s3 in 2.4 didn't decrypt the src: parameter.\r\n- The documentation for aws_s3 doesn't mention that the src: parameter is autodecrypted.\r\n- The aws_s3 module doesn't accept the decrypt: argument.\r\n\r\n##### STEPS TO REPRODUCE\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml\r\n- name: upload vault to s3\r\n aws_s3:\r\n bucket: \"the bucket\"\r\n object: \"file.txt\"\r\n src: \"file.txt\"\r\n mode: put\r\n```\r\n1. The file.txt is encrypted with ansible-vault. \r\n2. The playbook that runs this task is invoked with --vault-password and is able to decrypt the file because other tasks need the file decrypted.\r\n\r\n##### EXPECTED RESULTS\r\nDon't autodecrypt the src: argument or be able to specify decrypt: no.\r\n\r\n##### ACTUAL RESULTS\r\nThe src: argument to aws_s3 is automagicly decrypted without documentation or a way to disable the feature like other modules (ex. copy).\r\n\n", "before_files": [{"content": "# (c) 2012, Michael DeHaan <[email protected]>\n# (c) 2018, Will Thames <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nimport os\n\nfrom ansible.errors import AnsibleError, AnsibleAction, AnsibleActionFail, AnsibleFileNotFound\nfrom ansible.module_utils._text import to_text\nfrom ansible.plugins.action import ActionBase\n\n\nclass ActionModule(ActionBase):\n\n TRANSFERS_FILES = True\n\n def run(self, tmp=None, task_vars=None):\n ''' handler for aws_s3 operations '''\n if task_vars is None:\n task_vars = dict()\n\n result = super(ActionModule, self).run(tmp, task_vars)\n del tmp # tmp no longer has any effect\n\n source = self._task.args.get('src', None)\n\n try:\n new_module_args = self._task.args.copy()\n if source:\n source = os.path.expanduser(source)\n\n # For backward compatibility check if the file exists on the remote; it should take precedence\n if not self._remote_file_exists(source):\n try:\n source = self._loader.get_real_file(self._find_needle('files', source))\n new_module_args['src'] = source\n except AnsibleFileNotFound as e:\n # module handles error message for nonexistent files\n new_module_args['src'] = source\n except AnsibleError as e:\n raise AnsibleActionFail(to_text(e))\n\n # execute the aws_s3 module now, with the updated args\n result.update(self._execute_module(module_args=new_module_args, task_vars=task_vars))\n except AnsibleAction as e:\n result.update(e.result)\n return result\n", "path": "lib/ansible/plugins/action/aws_s3.py"}], "after_files": [{"content": "# (c) 2012, Michael DeHaan <[email protected]>\n# (c) 2018, Will Thames <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nimport os\n\nfrom ansible.errors import AnsibleError, AnsibleAction, AnsibleActionFail, AnsibleFileNotFound\nfrom ansible.module_utils._text import to_text\nfrom ansible.plugins.action import ActionBase\n\n\nclass ActionModule(ActionBase):\n\n TRANSFERS_FILES = True\n\n def run(self, tmp=None, task_vars=None):\n ''' handler for aws_s3 operations '''\n if task_vars is None:\n task_vars = dict()\n\n result = super(ActionModule, self).run(tmp, task_vars)\n del tmp # tmp no longer has any effect\n\n source = self._task.args.get('src', None)\n\n try:\n new_module_args = self._task.args.copy()\n if source:\n source = os.path.expanduser(source)\n\n # For backward compatibility check if the file exists on the remote; it should take precedence\n if not self._remote_file_exists(source):\n try:\n source = self._loader.get_real_file(self._find_needle('files', source), decrypt=False)\n new_module_args['src'] = source\n except AnsibleFileNotFound as e:\n # module handles error message for nonexistent files\n new_module_args['src'] = source\n except AnsibleError as e:\n raise AnsibleActionFail(to_text(e))\n\n # execute the aws_s3 module now, with the updated args\n result.update(self._execute_module(module_args=new_module_args, task_vars=task_vars))\n except AnsibleAction as e:\n result.update(e.result)\n return result\n", "path": "lib/ansible/plugins/action/aws_s3.py"}]}
1,655
164
gh_patches_debug_7144
rasdani/github-patches
git_diff
adap__flower-1735
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- deprecated eval_fn still used in examples ### Describe the bug While running the `embedded_devices` example, an issue is faced due to the use of `eval_fn` keyword which was deprecated after Flower 1.0.0 and has now been changed to `evaluate_fn` ### Steps/Code to Reproduce Setup the server as mentioned in the `examples/embedded_devices` readme ### Expected Results The server should start without any error ### Actual Results The following error is encountered: ``` File "/embedded_devices/server.py", line 109, in main strategy = fl.server.strategy.FedAvg( TypeError: FedAvg.__init__() got an unexpected keyword argument 'eval_fn' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `examples/embedded_devices/server.py` Content: ``` 1 # Copyright 2020 Adap GmbH. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 # ============================================================================== 15 """Minimal example on how to start a simple Flower server.""" 16 17 18 import argparse 19 from collections import OrderedDict 20 from typing import Callable, Dict, Optional, Tuple 21 22 import flwr as fl 23 import numpy as np 24 import torch 25 import torchvision 26 27 import utils 28 29 # pylint: disable=no-member 30 DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") 31 # pylint: enable=no-member 32 33 parser = argparse.ArgumentParser(description="Flower") 34 parser.add_argument( 35 "--server_address", 36 type=str, 37 required=True, 38 help=f"gRPC server address", 39 ) 40 parser.add_argument( 41 "--rounds", 42 type=int, 43 default=1, 44 help="Number of rounds of federated learning (default: 1)", 45 ) 46 parser.add_argument( 47 "--sample_fraction", 48 type=float, 49 default=1.0, 50 help="Fraction of available clients used for fit/evaluate (default: 1.0)", 51 ) 52 parser.add_argument( 53 "--min_sample_size", 54 type=int, 55 default=2, 56 help="Minimum number of clients used for fit/evaluate (default: 2)", 57 ) 58 parser.add_argument( 59 "--min_num_clients", 60 type=int, 61 default=2, 62 help="Minimum number of available clients required for sampling (default: 2)", 63 ) 64 parser.add_argument( 65 "--log_host", 66 type=str, 67 help="Logserver address (no default)", 68 ) 69 parser.add_argument( 70 "--model", 71 type=str, 72 default="ResNet18", 73 choices=["Net", "ResNet18"], 74 help="model to train", 75 ) 76 parser.add_argument( 77 "--batch_size", 78 type=int, 79 default=32, 80 help="training batch size", 81 ) 82 parser.add_argument( 83 "--num_workers", 84 type=int, 85 default=4, 86 help="number of workers for dataset reading", 87 ) 88 parser.add_argument("--pin_memory", action="store_true") 89 args = parser.parse_args() 90 91 92 def main() -> None: 93 """Start server and train five rounds.""" 94 95 print(args) 96 97 assert ( 98 args.min_sample_size <= args.min_num_clients 99 ), f"Num_clients shouldn't be lower than min_sample_size" 100 101 # Configure logger 102 fl.common.logger.configure("server", host=args.log_host) 103 104 # Load evaluation data 105 _, testset = utils.load_cifar(download=True) 106 107 # Create client_manager, strategy, and server 108 client_manager = fl.server.SimpleClientManager() 109 strategy = fl.server.strategy.FedAvg( 110 fraction_fit=args.sample_fraction, 111 min_fit_clients=args.min_sample_size, 112 min_available_clients=args.min_num_clients, 113 eval_fn=get_eval_fn(testset), 114 on_fit_config_fn=fit_config, 115 ) 116 server = fl.server.Server(client_manager=client_manager, strategy=strategy) 117 118 # Run server 119 fl.server.start_server( 120 server_address=args.server_address, 121 server=server, 122 config=fl.server.ServerConfig(num_rounds=args.rounds), 123 ) 124 125 126 def fit_config(server_round: int) -> Dict[str, fl.common.Scalar]: 127 """Return a configuration with static batch size and (local) epochs.""" 128 config = { 129 "epoch_global": str(server_round), 130 "epochs": str(1), 131 "batch_size": str(args.batch_size), 132 "num_workers": str(args.num_workers), 133 "pin_memory": str(args.pin_memory), 134 } 135 return config 136 137 138 def set_weights(model: torch.nn.ModuleList, weights: fl.common.NDArrays) -> None: 139 """Set model weights from a list of NumPy ndarrays.""" 140 state_dict = OrderedDict( 141 { 142 k: torch.tensor(np.atleast_1d(v)) 143 for k, v in zip(model.state_dict().keys(), weights) 144 } 145 ) 146 model.load_state_dict(state_dict, strict=True) 147 148 149 def get_eval_fn( 150 testset: torchvision.datasets.CIFAR10, 151 ) -> Callable[[fl.common.NDArrays], Optional[Tuple[float, float]]]: 152 """Return an evaluation function for centralized evaluation.""" 153 154 def evaluate(weights: fl.common.NDArrays) -> Optional[Tuple[float, float]]: 155 """Use the entire CIFAR-10 test set for evaluation.""" 156 157 model = utils.load_model(args.model) 158 set_weights(model, weights) 159 model.to(DEVICE) 160 161 testloader = torch.utils.data.DataLoader(testset, batch_size=32, shuffle=False) 162 loss, accuracy = utils.test(model, testloader, device=DEVICE) 163 return loss, {"accuracy": accuracy} 164 165 return evaluate 166 167 168 if __name__ == "__main__": 169 main() 170 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/examples/embedded_devices/server.py b/examples/embedded_devices/server.py --- a/examples/embedded_devices/server.py +++ b/examples/embedded_devices/server.py @@ -110,7 +110,7 @@ fraction_fit=args.sample_fraction, min_fit_clients=args.min_sample_size, min_available_clients=args.min_num_clients, - eval_fn=get_eval_fn(testset), + evaluate_fn=get_eval_fn(testset), on_fit_config_fn=fit_config, ) server = fl.server.Server(client_manager=client_manager, strategy=strategy)
{"golden_diff": "diff --git a/examples/embedded_devices/server.py b/examples/embedded_devices/server.py\n--- a/examples/embedded_devices/server.py\n+++ b/examples/embedded_devices/server.py\n@@ -110,7 +110,7 @@\n fraction_fit=args.sample_fraction,\n min_fit_clients=args.min_sample_size,\n min_available_clients=args.min_num_clients,\n- eval_fn=get_eval_fn(testset),\n+ evaluate_fn=get_eval_fn(testset),\n on_fit_config_fn=fit_config,\n )\n server = fl.server.Server(client_manager=client_manager, strategy=strategy)\n", "issue": "deprecated eval_fn still used in examples\n### Describe the bug\n\nWhile running the `embedded_devices` example, an issue is faced due to the use of `eval_fn` keyword which was deprecated after Flower 1.0.0 and has now been changed to `evaluate_fn`\n\n### Steps/Code to Reproduce\n\nSetup the server as mentioned in the `examples/embedded_devices` readme\n\n### Expected Results\n\nThe server should start without any error\n\n### Actual Results\n\nThe following error is encountered:\r\n```\r\nFile \"/embedded_devices/server.py\", line 109, in main\r\n strategy = fl.server.strategy.FedAvg(\r\nTypeError: FedAvg.__init__() got an unexpected keyword argument 'eval_fn'\r\n```\n", "before_files": [{"content": "# Copyright 2020 Adap GmbH. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Minimal example on how to start a simple Flower server.\"\"\"\n\n\nimport argparse\nfrom collections import OrderedDict\nfrom typing import Callable, Dict, Optional, Tuple\n\nimport flwr as fl\nimport numpy as np\nimport torch\nimport torchvision\n\nimport utils\n\n# pylint: disable=no-member\nDEVICE = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n# pylint: enable=no-member\n\nparser = argparse.ArgumentParser(description=\"Flower\")\nparser.add_argument(\n \"--server_address\",\n type=str,\n required=True,\n help=f\"gRPC server address\",\n)\nparser.add_argument(\n \"--rounds\",\n type=int,\n default=1,\n help=\"Number of rounds of federated learning (default: 1)\",\n)\nparser.add_argument(\n \"--sample_fraction\",\n type=float,\n default=1.0,\n help=\"Fraction of available clients used for fit/evaluate (default: 1.0)\",\n)\nparser.add_argument(\n \"--min_sample_size\",\n type=int,\n default=2,\n help=\"Minimum number of clients used for fit/evaluate (default: 2)\",\n)\nparser.add_argument(\n \"--min_num_clients\",\n type=int,\n default=2,\n help=\"Minimum number of available clients required for sampling (default: 2)\",\n)\nparser.add_argument(\n \"--log_host\",\n type=str,\n help=\"Logserver address (no default)\",\n)\nparser.add_argument(\n \"--model\",\n type=str,\n default=\"ResNet18\",\n choices=[\"Net\", \"ResNet18\"],\n help=\"model to train\",\n)\nparser.add_argument(\n \"--batch_size\",\n type=int,\n default=32,\n help=\"training batch size\",\n)\nparser.add_argument(\n \"--num_workers\",\n type=int,\n default=4,\n help=\"number of workers for dataset reading\",\n)\nparser.add_argument(\"--pin_memory\", action=\"store_true\")\nargs = parser.parse_args()\n\n\ndef main() -> None:\n \"\"\"Start server and train five rounds.\"\"\"\n\n print(args)\n\n assert (\n args.min_sample_size <= args.min_num_clients\n ), f\"Num_clients shouldn't be lower than min_sample_size\"\n\n # Configure logger\n fl.common.logger.configure(\"server\", host=args.log_host)\n\n # Load evaluation data\n _, testset = utils.load_cifar(download=True)\n\n # Create client_manager, strategy, and server\n client_manager = fl.server.SimpleClientManager()\n strategy = fl.server.strategy.FedAvg(\n fraction_fit=args.sample_fraction,\n min_fit_clients=args.min_sample_size,\n min_available_clients=args.min_num_clients,\n eval_fn=get_eval_fn(testset),\n on_fit_config_fn=fit_config,\n )\n server = fl.server.Server(client_manager=client_manager, strategy=strategy)\n\n # Run server\n fl.server.start_server(\n server_address=args.server_address,\n server=server,\n config=fl.server.ServerConfig(num_rounds=args.rounds),\n )\n\n\ndef fit_config(server_round: int) -> Dict[str, fl.common.Scalar]:\n \"\"\"Return a configuration with static batch size and (local) epochs.\"\"\"\n config = {\n \"epoch_global\": str(server_round),\n \"epochs\": str(1),\n \"batch_size\": str(args.batch_size),\n \"num_workers\": str(args.num_workers),\n \"pin_memory\": str(args.pin_memory),\n }\n return config\n\n\ndef set_weights(model: torch.nn.ModuleList, weights: fl.common.NDArrays) -> None:\n \"\"\"Set model weights from a list of NumPy ndarrays.\"\"\"\n state_dict = OrderedDict(\n {\n k: torch.tensor(np.atleast_1d(v))\n for k, v in zip(model.state_dict().keys(), weights)\n }\n )\n model.load_state_dict(state_dict, strict=True)\n\n\ndef get_eval_fn(\n testset: torchvision.datasets.CIFAR10,\n) -> Callable[[fl.common.NDArrays], Optional[Tuple[float, float]]]:\n \"\"\"Return an evaluation function for centralized evaluation.\"\"\"\n\n def evaluate(weights: fl.common.NDArrays) -> Optional[Tuple[float, float]]:\n \"\"\"Use the entire CIFAR-10 test set for evaluation.\"\"\"\n\n model = utils.load_model(args.model)\n set_weights(model, weights)\n model.to(DEVICE)\n\n testloader = torch.utils.data.DataLoader(testset, batch_size=32, shuffle=False)\n loss, accuracy = utils.test(model, testloader, device=DEVICE)\n return loss, {\"accuracy\": accuracy}\n\n return evaluate\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "examples/embedded_devices/server.py"}], "after_files": [{"content": "# Copyright 2020 Adap GmbH. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Minimal example on how to start a simple Flower server.\"\"\"\n\n\nimport argparse\nfrom collections import OrderedDict\nfrom typing import Callable, Dict, Optional, Tuple\n\nimport flwr as fl\nimport numpy as np\nimport torch\nimport torchvision\n\nimport utils\n\n# pylint: disable=no-member\nDEVICE = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n# pylint: enable=no-member\n\nparser = argparse.ArgumentParser(description=\"Flower\")\nparser.add_argument(\n \"--server_address\",\n type=str,\n required=True,\n help=f\"gRPC server address\",\n)\nparser.add_argument(\n \"--rounds\",\n type=int,\n default=1,\n help=\"Number of rounds of federated learning (default: 1)\",\n)\nparser.add_argument(\n \"--sample_fraction\",\n type=float,\n default=1.0,\n help=\"Fraction of available clients used for fit/evaluate (default: 1.0)\",\n)\nparser.add_argument(\n \"--min_sample_size\",\n type=int,\n default=2,\n help=\"Minimum number of clients used for fit/evaluate (default: 2)\",\n)\nparser.add_argument(\n \"--min_num_clients\",\n type=int,\n default=2,\n help=\"Minimum number of available clients required for sampling (default: 2)\",\n)\nparser.add_argument(\n \"--log_host\",\n type=str,\n help=\"Logserver address (no default)\",\n)\nparser.add_argument(\n \"--model\",\n type=str,\n default=\"ResNet18\",\n choices=[\"Net\", \"ResNet18\"],\n help=\"model to train\",\n)\nparser.add_argument(\n \"--batch_size\",\n type=int,\n default=32,\n help=\"training batch size\",\n)\nparser.add_argument(\n \"--num_workers\",\n type=int,\n default=4,\n help=\"number of workers for dataset reading\",\n)\nparser.add_argument(\"--pin_memory\", action=\"store_true\")\nargs = parser.parse_args()\n\n\ndef main() -> None:\n \"\"\"Start server and train five rounds.\"\"\"\n\n print(args)\n\n assert (\n args.min_sample_size <= args.min_num_clients\n ), f\"Num_clients shouldn't be lower than min_sample_size\"\n\n # Configure logger\n fl.common.logger.configure(\"server\", host=args.log_host)\n\n # Load evaluation data\n _, testset = utils.load_cifar(download=True)\n\n # Create client_manager, strategy, and server\n client_manager = fl.server.SimpleClientManager()\n strategy = fl.server.strategy.FedAvg(\n fraction_fit=args.sample_fraction,\n min_fit_clients=args.min_sample_size,\n min_available_clients=args.min_num_clients,\n evaluate_fn=get_eval_fn(testset),\n on_fit_config_fn=fit_config,\n )\n server = fl.server.Server(client_manager=client_manager, strategy=strategy)\n\n # Run server\n fl.server.start_server(\n server_address=args.server_address,\n server=server,\n config=fl.server.ServerConfig(num_rounds=args.rounds),\n )\n\n\ndef fit_config(server_round: int) -> Dict[str, fl.common.Scalar]:\n \"\"\"Return a configuration with static batch size and (local) epochs.\"\"\"\n config = {\n \"epoch_global\": str(server_round),\n \"epochs\": str(1),\n \"batch_size\": str(args.batch_size),\n \"num_workers\": str(args.num_workers),\n \"pin_memory\": str(args.pin_memory),\n }\n return config\n\n\ndef set_weights(model: torch.nn.ModuleList, weights: fl.common.NDArrays) -> None:\n \"\"\"Set model weights from a list of NumPy ndarrays.\"\"\"\n state_dict = OrderedDict(\n {\n k: torch.tensor(np.atleast_1d(v))\n for k, v in zip(model.state_dict().keys(), weights)\n }\n )\n model.load_state_dict(state_dict, strict=True)\n\n\ndef get_eval_fn(\n testset: torchvision.datasets.CIFAR10,\n) -> Callable[[fl.common.NDArrays], Optional[Tuple[float, float]]]:\n \"\"\"Return an evaluation function for centralized evaluation.\"\"\"\n\n def evaluate(weights: fl.common.NDArrays) -> Optional[Tuple[float, float]]:\n \"\"\"Use the entire CIFAR-10 test set for evaluation.\"\"\"\n\n model = utils.load_model(args.model)\n set_weights(model, weights)\n model.to(DEVICE)\n\n testloader = torch.utils.data.DataLoader(testset, batch_size=32, shuffle=False)\n loss, accuracy = utils.test(model, testloader, device=DEVICE)\n return loss, {\"accuracy\": accuracy}\n\n return evaluate\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "examples/embedded_devices/server.py"}]}
1,933
122
gh_patches_debug_34994
rasdani/github-patches
git_diff
getredash__redash-725
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- User should be redirected to his original destination after login with Google OAuth If the user tried to open a page before being logged in, he should be redirected to this page after successful login. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `redash/google_oauth.py` Content: ``` 1 import logging 2 from flask.ext.login import login_user 3 import requests 4 from flask import redirect, url_for, Blueprint, flash 5 from flask_oauth import OAuth 6 from redash import models, settings 7 8 logger = logging.getLogger('google_oauth') 9 oauth = OAuth() 10 11 12 if not settings.GOOGLE_APPS_DOMAIN: 13 logger.warning("No Google Apps domain defined, all Google accounts allowed.") 14 15 google = oauth.remote_app('google', 16 base_url='https://www.google.com/accounts/', 17 authorize_url='https://accounts.google.com/o/oauth2/auth', 18 request_token_url=None, 19 request_token_params={ 20 'scope': 'https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/userinfo.profile', 21 'response_type': 'code' 22 }, 23 access_token_url='https://accounts.google.com/o/oauth2/token', 24 access_token_method='POST', 25 access_token_params={'grant_type': 'authorization_code'}, 26 consumer_key=settings.GOOGLE_CLIENT_ID, 27 consumer_secret=settings.GOOGLE_CLIENT_SECRET) 28 29 30 blueprint = Blueprint('google_oauth', __name__) 31 32 33 def get_user_profile(access_token): 34 headers = {'Authorization': 'OAuth {}'.format(access_token)} 35 response = requests.get('https://www.googleapis.com/oauth2/v1/userinfo', headers=headers) 36 37 if response.status_code == 401: 38 logger.warning("Failed getting user profile (response code 401).") 39 return None 40 41 return response.json() 42 43 44 def verify_profile(profile): 45 if not settings.GOOGLE_APPS_DOMAIN: 46 return True 47 48 domain = profile['email'].split('@')[-1] 49 return domain in settings.GOOGLE_APPS_DOMAIN 50 51 52 def create_and_login_user(name, email): 53 try: 54 user_object = models.User.get_by_email(email) 55 if user_object.name != name: 56 logger.debug("Updating user name (%r -> %r)", user_object.name, name) 57 user_object.name = name 58 user_object.save() 59 except models.User.DoesNotExist: 60 logger.debug("Creating user object (%r)", name) 61 user_object = models.User.create(name=name, email=email, groups=models.User.DEFAULT_GROUPS) 62 63 login_user(user_object, remember=True) 64 65 66 @blueprint.route('/oauth/google', endpoint="authorize") 67 def login(): 68 # TODO, suport next 69 callback=url_for('.callback', _external=True) 70 logger.debug("Callback url: %s", callback) 71 return google.authorize(callback=callback) 72 73 74 @blueprint.route('/oauth/google_callback', endpoint="callback") 75 @google.authorized_handler 76 def authorized(resp): 77 access_token = resp['access_token'] 78 79 if access_token is None: 80 logger.warning("Access token missing in call back request.") 81 flash("Validation error. Please retry.") 82 return redirect(url_for('login')) 83 84 profile = get_user_profile(access_token) 85 if profile is None: 86 flash("Validation error. Please retry.") 87 return redirect(url_for('login')) 88 89 if not verify_profile(profile): 90 logger.warning("User tried to login with unauthorized domain name: %s", profile['email']) 91 flash("Your Google Apps domain name isn't allowed.") 92 return redirect(url_for('login')) 93 94 create_and_login_user(profile['name'], profile['email']) 95 96 return redirect(url_for('index')) ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/redash/google_oauth.py b/redash/google_oauth.py --- a/redash/google_oauth.py +++ b/redash/google_oauth.py @@ -1,8 +1,8 @@ import logging from flask.ext.login import login_user import requests -from flask import redirect, url_for, Blueprint, flash -from flask_oauth import OAuth +from flask import redirect, url_for, Blueprint, flash, request +from flask_oauthlib.client import OAuth from redash import models, settings logger = logging.getLogger('google_oauth') @@ -18,11 +18,9 @@ request_token_url=None, request_token_params={ 'scope': 'https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/userinfo.profile', - 'response_type': 'code' }, access_token_url='https://accounts.google.com/o/oauth2/token', access_token_method='POST', - access_token_params={'grant_type': 'authorization_code'}, consumer_key=settings.GOOGLE_CLIENT_ID, consumer_secret=settings.GOOGLE_CLIENT_SECRET) @@ -65,10 +63,10 @@ @blueprint.route('/oauth/google', endpoint="authorize") def login(): - # TODO, suport next + next = request.args.get('next','/') callback=url_for('.callback', _external=True) logger.debug("Callback url: %s", callback) - return google.authorize(callback=callback) + return google.authorize(callback=callback, state=next) @blueprint.route('/oauth/google_callback', endpoint="callback") @@ -93,4 +91,6 @@ create_and_login_user(profile['name'], profile['email']) - return redirect(url_for('index')) \ No newline at end of file + next = request.args.get('state','/') + + return redirect(next)
{"golden_diff": "diff --git a/redash/google_oauth.py b/redash/google_oauth.py\n--- a/redash/google_oauth.py\n+++ b/redash/google_oauth.py\n@@ -1,8 +1,8 @@\n import logging\n from flask.ext.login import login_user\n import requests\n-from flask import redirect, url_for, Blueprint, flash\n-from flask_oauth import OAuth\n+from flask import redirect, url_for, Blueprint, flash, request\n+from flask_oauthlib.client import OAuth\n from redash import models, settings\n \n logger = logging.getLogger('google_oauth')\n@@ -18,11 +18,9 @@\n request_token_url=None,\n request_token_params={\n 'scope': 'https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/userinfo.profile',\n- 'response_type': 'code'\n },\n access_token_url='https://accounts.google.com/o/oauth2/token',\n access_token_method='POST',\n- access_token_params={'grant_type': 'authorization_code'},\n consumer_key=settings.GOOGLE_CLIENT_ID,\n consumer_secret=settings.GOOGLE_CLIENT_SECRET)\n \n@@ -65,10 +63,10 @@\n \n @blueprint.route('/oauth/google', endpoint=\"authorize\")\n def login():\n- # TODO, suport next\n+ next = request.args.get('next','/')\n callback=url_for('.callback', _external=True)\n logger.debug(\"Callback url: %s\", callback)\n- return google.authorize(callback=callback)\n+ return google.authorize(callback=callback, state=next)\n \n \n @blueprint.route('/oauth/google_callback', endpoint=\"callback\")\n@@ -93,4 +91,6 @@\n \n create_and_login_user(profile['name'], profile['email'])\n \n- return redirect(url_for('index'))\n\\ No newline at end of file\n+ next = request.args.get('state','/')\n+\n+ return redirect(next)\n", "issue": "User should be redirected to his original destination after login with Google OAuth\nIf the user tried to open a page before being logged in, he should be redirected to this page after successful login.\n\n", "before_files": [{"content": "import logging\nfrom flask.ext.login import login_user\nimport requests\nfrom flask import redirect, url_for, Blueprint, flash\nfrom flask_oauth import OAuth\nfrom redash import models, settings\n\nlogger = logging.getLogger('google_oauth')\noauth = OAuth()\n\n\nif not settings.GOOGLE_APPS_DOMAIN:\n logger.warning(\"No Google Apps domain defined, all Google accounts allowed.\")\n\ngoogle = oauth.remote_app('google',\n base_url='https://www.google.com/accounts/',\n authorize_url='https://accounts.google.com/o/oauth2/auth',\n request_token_url=None,\n request_token_params={\n 'scope': 'https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/userinfo.profile',\n 'response_type': 'code'\n },\n access_token_url='https://accounts.google.com/o/oauth2/token',\n access_token_method='POST',\n access_token_params={'grant_type': 'authorization_code'},\n consumer_key=settings.GOOGLE_CLIENT_ID,\n consumer_secret=settings.GOOGLE_CLIENT_SECRET)\n\n\nblueprint = Blueprint('google_oauth', __name__)\n\n\ndef get_user_profile(access_token):\n headers = {'Authorization': 'OAuth {}'.format(access_token)}\n response = requests.get('https://www.googleapis.com/oauth2/v1/userinfo', headers=headers)\n\n if response.status_code == 401:\n logger.warning(\"Failed getting user profile (response code 401).\")\n return None\n\n return response.json()\n\n\ndef verify_profile(profile):\n if not settings.GOOGLE_APPS_DOMAIN:\n return True\n\n domain = profile['email'].split('@')[-1]\n return domain in settings.GOOGLE_APPS_DOMAIN\n\n\ndef create_and_login_user(name, email):\n try:\n user_object = models.User.get_by_email(email)\n if user_object.name != name:\n logger.debug(\"Updating user name (%r -> %r)\", user_object.name, name)\n user_object.name = name\n user_object.save()\n except models.User.DoesNotExist:\n logger.debug(\"Creating user object (%r)\", name)\n user_object = models.User.create(name=name, email=email, groups=models.User.DEFAULT_GROUPS)\n\n login_user(user_object, remember=True)\n\n\[email protected]('/oauth/google', endpoint=\"authorize\")\ndef login():\n # TODO, suport next\n callback=url_for('.callback', _external=True)\n logger.debug(\"Callback url: %s\", callback)\n return google.authorize(callback=callback)\n\n\[email protected]('/oauth/google_callback', endpoint=\"callback\")\[email protected]_handler\ndef authorized(resp):\n access_token = resp['access_token']\n\n if access_token is None:\n logger.warning(\"Access token missing in call back request.\")\n flash(\"Validation error. Please retry.\")\n return redirect(url_for('login'))\n\n profile = get_user_profile(access_token)\n if profile is None:\n flash(\"Validation error. Please retry.\")\n return redirect(url_for('login'))\n\n if not verify_profile(profile):\n logger.warning(\"User tried to login with unauthorized domain name: %s\", profile['email'])\n flash(\"Your Google Apps domain name isn't allowed.\")\n return redirect(url_for('login'))\n\n create_and_login_user(profile['name'], profile['email'])\n\n return redirect(url_for('index'))", "path": "redash/google_oauth.py"}], "after_files": [{"content": "import logging\nfrom flask.ext.login import login_user\nimport requests\nfrom flask import redirect, url_for, Blueprint, flash, request\nfrom flask_oauthlib.client import OAuth\nfrom redash import models, settings\n\nlogger = logging.getLogger('google_oauth')\noauth = OAuth()\n\n\nif not settings.GOOGLE_APPS_DOMAIN:\n logger.warning(\"No Google Apps domain defined, all Google accounts allowed.\")\n\ngoogle = oauth.remote_app('google',\n base_url='https://www.google.com/accounts/',\n authorize_url='https://accounts.google.com/o/oauth2/auth',\n request_token_url=None,\n request_token_params={\n 'scope': 'https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/userinfo.profile',\n },\n access_token_url='https://accounts.google.com/o/oauth2/token',\n access_token_method='POST',\n consumer_key=settings.GOOGLE_CLIENT_ID,\n consumer_secret=settings.GOOGLE_CLIENT_SECRET)\n\n\nblueprint = Blueprint('google_oauth', __name__)\n\n\ndef get_user_profile(access_token):\n headers = {'Authorization': 'OAuth {}'.format(access_token)}\n response = requests.get('https://www.googleapis.com/oauth2/v1/userinfo', headers=headers)\n\n if response.status_code == 401:\n logger.warning(\"Failed getting user profile (response code 401).\")\n return None\n\n return response.json()\n\n\ndef verify_profile(profile):\n if not settings.GOOGLE_APPS_DOMAIN:\n return True\n\n domain = profile['email'].split('@')[-1]\n return domain in settings.GOOGLE_APPS_DOMAIN\n\n\ndef create_and_login_user(name, email):\n try:\n user_object = models.User.get_by_email(email)\n if user_object.name != name:\n logger.debug(\"Updating user name (%r -> %r)\", user_object.name, name)\n user_object.name = name\n user_object.save()\n except models.User.DoesNotExist:\n logger.debug(\"Creating user object (%r)\", name)\n user_object = models.User.create(name=name, email=email, groups=models.User.DEFAULT_GROUPS)\n\n login_user(user_object, remember=True)\n\n\[email protected]('/oauth/google', endpoint=\"authorize\")\ndef login():\n next = request.args.get('next','/')\n callback=url_for('.callback', _external=True)\n logger.debug(\"Callback url: %s\", callback)\n return google.authorize(callback=callback, state=next)\n\n\[email protected]('/oauth/google_callback', endpoint=\"callback\")\[email protected]_handler\ndef authorized(resp):\n access_token = resp['access_token']\n\n if access_token is None:\n logger.warning(\"Access token missing in call back request.\")\n flash(\"Validation error. Please retry.\")\n return redirect(url_for('login'))\n\n profile = get_user_profile(access_token)\n if profile is None:\n flash(\"Validation error. Please retry.\")\n return redirect(url_for('login'))\n\n if not verify_profile(profile):\n logger.warning(\"User tried to login with unauthorized domain name: %s\", profile['email'])\n flash(\"Your Google Apps domain name isn't allowed.\")\n return redirect(url_for('login'))\n\n create_and_login_user(profile['name'], profile['email'])\n\n next = request.args.get('state','/')\n\n return redirect(next)\n", "path": "redash/google_oauth.py"}]}
1,169
400
gh_patches_debug_551
rasdani/github-patches
git_diff
pypi__warehouse-5814
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Sorting searches by 'Date last updated' results in 503 **Describe the bug** When trying to search for anything on pypi.org, sorting by relevance or trending works fine, but sorting by date last updated returns a 503 error. **Expected behavior** Search results, sorted by date. **To Reproduce** Example URL: https://pypi.org/search/?q=test&o=-created Result: > Sorry, something went wrong > > PyPI is down for maintenance or is having an outage. > > This is affecting several of our services, including our web interface. > If you are trying to install a package, you should be able to pip install packages without problem. > > Check our status page, or > View Python Status on Twitter The status page, though, shows all green. **My Platform** - Win 10, Firefox 66.0.3 - Ubuntu 18.04, Chrome 74.0.3729.108 --- Edit: I know this functionality was working at least as recently as last Thursday, 2 May 2019. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `warehouse/packaging/search.py` Content: ``` 1 # Licensed under the Apache License, Version 2.0 (the "License"); 2 # you may not use this file except in compliance with the License. 3 # You may obtain a copy of the License at 4 # 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # 7 # Unless required by applicable law or agreed to in writing, software 8 # distributed under the License is distributed on an "AS IS" BASIS, 9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 # See the License for the specific language governing permissions and 11 # limitations under the License. 12 13 import packaging.version 14 15 from elasticsearch_dsl import Date, Document, Float, Keyword, Text, analyzer 16 17 from warehouse.search.utils import doc_type 18 19 EmailAnalyzer = analyzer( 20 "email", 21 tokenizer="uax_url_email", 22 filter=["standard", "lowercase", "stop", "snowball"], 23 ) 24 25 NameAnalyzer = analyzer( 26 "normalized_name", 27 tokenizer="lowercase", 28 filter=["standard", "lowercase", "word_delimiter"], 29 ) 30 31 32 @doc_type 33 class Project(Document): 34 35 name = Text() 36 normalized_name = Text(analyzer=NameAnalyzer) 37 version = Keyword(multi=True) 38 latest_version = Keyword() 39 summary = Text(analyzer="snowball") 40 description = Text(analyzer="snowball") 41 author = Text() 42 author_email = Text(analyzer=EmailAnalyzer) 43 maintainer = Text() 44 maintainer_email = Text(analyzer=EmailAnalyzer) 45 license = Text() 46 home_page = Keyword() 47 download_url = Keyword() 48 keywords = Text(analyzer="snowball") 49 platform = Keyword() 50 created = Date() 51 classifiers = Keyword(multi=True) 52 zscore = Float() 53 54 @classmethod 55 def from_db(cls, release): 56 obj = cls(meta={"id": release.normalized_name}) 57 obj["name"] = release.name 58 obj["normalized_name"] = release.normalized_name 59 obj["version"] = sorted( 60 release.all_versions, key=lambda r: packaging.version.parse(r), reverse=True 61 ) 62 obj["latest_version"] = release.latest_version 63 obj["summary"] = release.summary 64 obj["description"] = release.description 65 obj["author"] = release.author 66 obj["author_email"] = release.author_email 67 obj["maintainer"] = release.maintainer 68 obj["maintainer_email"] = release.maintainer_email 69 obj["home_page"] = release.home_page 70 obj["download_url"] = release.download_url 71 obj["keywords"] = release.keywords 72 obj["platform"] = release.platform 73 obj["created"] = release.created 74 obj["classifiers"] = release.classifiers 75 obj["zscore"] = release.zscore 76 77 return obj 78 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/warehouse/packaging/search.py b/warehouse/packaging/search.py --- a/warehouse/packaging/search.py +++ b/warehouse/packaging/search.py @@ -75,3 +75,8 @@ obj["zscore"] = release.zscore return obj + + class Index: + # make sure this class can match any index so it will always be used to + # deserialize data coming from elasticsearch. + name = "*"
{"golden_diff": "diff --git a/warehouse/packaging/search.py b/warehouse/packaging/search.py\n--- a/warehouse/packaging/search.py\n+++ b/warehouse/packaging/search.py\n@@ -75,3 +75,8 @@\n obj[\"zscore\"] = release.zscore\n \n return obj\n+\n+ class Index:\n+ # make sure this class can match any index so it will always be used to\n+ # deserialize data coming from elasticsearch.\n+ name = \"*\"\n", "issue": "Sorting searches by 'Date last updated' results in 503\n**Describe the bug**\r\n\r\nWhen trying to search for anything on pypi.org, sorting by relevance or trending works fine, but sorting by date last updated returns a 503 error.\r\n\r\n**Expected behavior**\r\n\r\nSearch results, sorted by date.\r\n\r\n**To Reproduce**\r\n\r\nExample URL: https://pypi.org/search/?q=test&o=-created\r\n\r\nResult:\r\n\r\n> Sorry, something went wrong\r\n> \r\n> PyPI is down for maintenance or is having an outage.\r\n> \r\n> This is affecting several of our services, including our web interface.\r\n> If you are trying to install a package, you should be able to pip install packages without problem.\r\n> \r\n> Check our status page, or\r\n> View Python Status on Twitter\r\n\r\nThe status page, though, shows all green.\r\n\r\n\r\n**My Platform**\r\n\r\n- Win 10, Firefox 66.0.3\r\n- Ubuntu 18.04, Chrome 74.0.3729.108\r\n\r\n---\r\n\r\nEdit: I know this functionality was working at least as recently as last Thursday, 2 May 2019.\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport packaging.version\n\nfrom elasticsearch_dsl import Date, Document, Float, Keyword, Text, analyzer\n\nfrom warehouse.search.utils import doc_type\n\nEmailAnalyzer = analyzer(\n \"email\",\n tokenizer=\"uax_url_email\",\n filter=[\"standard\", \"lowercase\", \"stop\", \"snowball\"],\n)\n\nNameAnalyzer = analyzer(\n \"normalized_name\",\n tokenizer=\"lowercase\",\n filter=[\"standard\", \"lowercase\", \"word_delimiter\"],\n)\n\n\n@doc_type\nclass Project(Document):\n\n name = Text()\n normalized_name = Text(analyzer=NameAnalyzer)\n version = Keyword(multi=True)\n latest_version = Keyword()\n summary = Text(analyzer=\"snowball\")\n description = Text(analyzer=\"snowball\")\n author = Text()\n author_email = Text(analyzer=EmailAnalyzer)\n maintainer = Text()\n maintainer_email = Text(analyzer=EmailAnalyzer)\n license = Text()\n home_page = Keyword()\n download_url = Keyword()\n keywords = Text(analyzer=\"snowball\")\n platform = Keyword()\n created = Date()\n classifiers = Keyword(multi=True)\n zscore = Float()\n\n @classmethod\n def from_db(cls, release):\n obj = cls(meta={\"id\": release.normalized_name})\n obj[\"name\"] = release.name\n obj[\"normalized_name\"] = release.normalized_name\n obj[\"version\"] = sorted(\n release.all_versions, key=lambda r: packaging.version.parse(r), reverse=True\n )\n obj[\"latest_version\"] = release.latest_version\n obj[\"summary\"] = release.summary\n obj[\"description\"] = release.description\n obj[\"author\"] = release.author\n obj[\"author_email\"] = release.author_email\n obj[\"maintainer\"] = release.maintainer\n obj[\"maintainer_email\"] = release.maintainer_email\n obj[\"home_page\"] = release.home_page\n obj[\"download_url\"] = release.download_url\n obj[\"keywords\"] = release.keywords\n obj[\"platform\"] = release.platform\n obj[\"created\"] = release.created\n obj[\"classifiers\"] = release.classifiers\n obj[\"zscore\"] = release.zscore\n\n return obj\n", "path": "warehouse/packaging/search.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport packaging.version\n\nfrom elasticsearch_dsl import Date, Document, Float, Keyword, Text, analyzer\n\nfrom warehouse.search.utils import doc_type\n\nEmailAnalyzer = analyzer(\n \"email\",\n tokenizer=\"uax_url_email\",\n filter=[\"standard\", \"lowercase\", \"stop\", \"snowball\"],\n)\n\nNameAnalyzer = analyzer(\n \"normalized_name\",\n tokenizer=\"lowercase\",\n filter=[\"standard\", \"lowercase\", \"word_delimiter\"],\n)\n\n\n@doc_type\nclass Project(Document):\n\n name = Text()\n normalized_name = Text(analyzer=NameAnalyzer)\n version = Keyword(multi=True)\n latest_version = Keyword()\n summary = Text(analyzer=\"snowball\")\n description = Text(analyzer=\"snowball\")\n author = Text()\n author_email = Text(analyzer=EmailAnalyzer)\n maintainer = Text()\n maintainer_email = Text(analyzer=EmailAnalyzer)\n license = Text()\n home_page = Keyword()\n download_url = Keyword()\n keywords = Text(analyzer=\"snowball\")\n platform = Keyword()\n created = Date()\n classifiers = Keyword(multi=True)\n zscore = Float()\n\n @classmethod\n def from_db(cls, release):\n obj = cls(meta={\"id\": release.normalized_name})\n obj[\"name\"] = release.name\n obj[\"normalized_name\"] = release.normalized_name\n obj[\"version\"] = sorted(\n release.all_versions, key=lambda r: packaging.version.parse(r), reverse=True\n )\n obj[\"latest_version\"] = release.latest_version\n obj[\"summary\"] = release.summary\n obj[\"description\"] = release.description\n obj[\"author\"] = release.author\n obj[\"author_email\"] = release.author_email\n obj[\"maintainer\"] = release.maintainer\n obj[\"maintainer_email\"] = release.maintainer_email\n obj[\"home_page\"] = release.home_page\n obj[\"download_url\"] = release.download_url\n obj[\"keywords\"] = release.keywords\n obj[\"platform\"] = release.platform\n obj[\"created\"] = release.created\n obj[\"classifiers\"] = release.classifiers\n obj[\"zscore\"] = release.zscore\n\n return obj\n\n class Index:\n # make sure this class can match any index so it will always be used to\n # deserialize data coming from elasticsearch.\n name = \"*\"\n", "path": "warehouse/packaging/search.py"}]}
1,245
108
gh_patches_debug_34460
rasdani/github-patches
git_diff
facebookresearch__ParlAI-3164
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add --version flag to parlai command **Is your feature request related to a problem? Please describe.** Make parlai "super command" conform the unix idiom of a --version flag. Usecase: A user switching between virtual environments or reading about the release of a new feature should be able to easily identify which version they have on their path. **Describe the solution you'd like** ```bash $ parlai --version parlai version X.X.X # Optionally any relevant install info (not probably not any) # Optionally copyright/license info (up to the maintainers whether you want this here or not) ``` **Describe alternatives you've considered** ```bash $ python -c "import parlai; print(parlai.__version__)" 0.9.2 ``` A little less natural --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `parlai/core/script.py` Content: ``` 1 #!/usr/bin/env python3 2 3 # Copyright (c) Facebook, Inc. and its affiliates. 4 # This source code is licensed under the MIT license found in the 5 # LICENSE file in the root directory of this source tree. 6 7 8 """ 9 The general ParlAI Script interface. 10 11 An abstract class to help standardize the call to ParlAI scripts, enabling them to be 12 completed easily. 13 14 Also contains helper classes for loading scripts, etc. 15 """ 16 17 import io 18 import argparse 19 from typing import List, Optional, Dict, Any 20 from parlai.core.opt import Opt 21 from parlai.core.params import ParlaiParser, CustomHelpFormatter 22 from abc import abstractmethod 23 import importlib 24 import pkgutil 25 import parlai.scripts 26 import parlai.utils.logging as logging 27 from parlai.core.loader import register_script, SCRIPT_REGISTRY # noqa: F401 28 29 30 def setup_script_registry(): 31 """ 32 Loads the scripts so that @register_script is hit for all. 33 """ 34 for module in pkgutil.iter_modules(parlai.scripts.__path__, 'parlai.scripts.'): 35 importlib.import_module(module.name) 36 37 38 class ParlaiScript(object): 39 """ 40 A ParlAI script is a standardized form of access. 41 """ 42 43 parser: ParlaiParser 44 45 @classmethod 46 @abstractmethod 47 def setup_args(cls) -> ParlaiParser: 48 """ 49 Create the parser with args. 50 """ 51 # we want to later deprecate this for add_cmdline_args 52 pass 53 54 def __init__(self, opt: Opt): 55 self.opt = opt 56 57 @abstractmethod 58 def run(self): 59 """ 60 The main method. 61 62 Must be implemented by the script writer. 63 """ 64 raise NotImplementedError() 65 66 @classmethod 67 def _run_kwargs(cls, kwargs: Dict[str, Any]): 68 """ 69 Construct and run the script using kwargs, pseudo-parsing them. 70 """ 71 parser = cls.setup_args() 72 opt = parser.parse_kwargs(**kwargs) 73 return cls._run_from_parser_and_opt(opt, parser) 74 75 @classmethod 76 def _run_args(cls, args: Optional[List[str]] = None): 77 """ 78 Construct and run the script using args, defaulting to getting from CLI. 79 """ 80 parser = cls.setup_args() 81 opt = parser.parse_args(args=args) 82 return cls._run_from_parser_and_opt(opt, parser) 83 84 @classmethod 85 def _run_from_parser_and_opt(cls, opt: Opt, parser: ParlaiParser): 86 script = cls(opt) 87 script.parser = parser 88 return script.run() 89 90 @classmethod 91 def main(cls, *args, **kwargs): 92 """ 93 Run the program, possibly with some given args. 94 95 You may provide command line args in the form of strings, or 96 options. For example: 97 98 >>> MyScript.main(['--task', 'convai2']) 99 >>> MyScript.main(task='convai2') 100 101 You may not combine both args and kwargs. 102 """ 103 assert not (bool(args) and bool(kwargs)) 104 if args: 105 return cls._run_args(args) 106 elif kwargs: 107 return cls._run_kwargs(kwargs) 108 else: 109 return cls._run_args(None) 110 111 @classmethod 112 def help(cls, **kwargs): 113 f = io.StringIO() 114 parser = cls.setup_args() 115 parser.prog = cls.__name__ 116 parser.add_extra_args(parser._kwargs_to_str_args(**kwargs)) 117 parser.print_help(f) 118 return f.getvalue() 119 120 121 class _SupercommandParser(ParlaiParser): 122 """ 123 Specialty ParlAI parser used for the supercommand. 124 125 Contains some special behavior. 126 """ 127 128 def __init__(self, *args, **kwargs): 129 from parlai.utils.strings import colorize 130 131 logo = "" 132 logo += colorize(' _', 'red') + "\n" 133 logo += colorize(' /', 'red') + colorize('"', 'brightblack') 134 logo += colorize(")", "yellow") + "\n" 135 logo += colorize(' //', 'red') + colorize(')', 'yellow') + '\n' 136 logo += colorize(' ==', 'green') 137 logo += colorize("/", 'blue') + colorize('/', 'red') + colorize("'", 'yellow') 138 logo += colorize("===", 'green') + " ParlAI\n" 139 logo += colorize(" /", 'blue') 140 kwargs['description'] = logo 141 return super().__init__(*args, **kwargs) 142 143 def add_extra_args(self, args): 144 sa = [a for a in self._actions if isinstance(a, argparse._SubParsersAction)] 145 assert len(sa) == 1 146 sa = sa[0] 147 for _, v in sa.choices.items(): 148 v.add_extra_args(args) 149 150 def add_subparsers(self, **kwargs): 151 return super().add_subparsers(**kwargs) 152 153 def _unsuppress_hidden(self): 154 """ 155 Restore the help messages of hidden commands. 156 """ 157 158 spa = [a for a in self._actions if isinstance(a, argparse._SubParsersAction)] 159 assert len(spa) == 1 160 spa = spa[0] 161 for choices_action in spa._choices_actions: 162 dest = choices_action.dest 163 if choices_action.help == argparse.SUPPRESS: 164 choices_action.help = spa.choices[dest].description 165 166 def print_helpall(self): 167 self._unsuppress_hidden() 168 self.print_help() 169 170 171 class _SubcommandParser(ParlaiParser): 172 """ 173 ParlaiParser which always sets add_parlai_args and add_model_args to False. 174 175 Used in the superscript to initialize just the args for that command. 176 """ 177 178 def __init__(self, **kwargs): 179 kwargs['add_parlai_args'] = False 180 kwargs['add_model_args'] = False 181 assert 'description' in kwargs, 'Must supply description' 182 return super().__init__(**kwargs) 183 184 def parse_known_args(self, args=None, namespace=None, nohelp=False): 185 if not nohelp: 186 self.add_extra_args(args) 187 return super().parse_known_args(args, namespace, nohelp) 188 189 190 def _SuperscriptHelpFormatter(**kwargs): 191 kwargs['width'] = 100 192 kwargs['max_help_position'] = 9999 193 194 return CustomHelpFormatter(**kwargs) 195 196 197 def superscript_main(args=None): 198 """ 199 Superscript is a loader for all the other scripts. 200 """ 201 setup_script_registry() 202 203 parser = _SupercommandParser( 204 False, False, formatter_class=_SuperscriptHelpFormatter 205 ) 206 parser.add_argument( 207 '--helpall', 208 action='helpall', 209 help='show all commands, including advanced ones.', 210 ) 211 parser.set_defaults(super_command=None) 212 subparsers = parser.add_subparsers( 213 parser_class=_SubcommandParser, title="Commands", metavar="COMMAND" 214 ) 215 hparser = subparsers.add_parser( 216 'help', 217 aliases=['h'], 218 help=argparse.SUPPRESS, 219 description="List the main commands", 220 ) 221 hparser.set_defaults(super_command='help') 222 hparser = subparsers.add_parser( 223 'helpall', 224 help=argparse.SUPPRESS, 225 description="List all commands, including advanced ones.", 226 ) 227 hparser.set_defaults(super_command='helpall') 228 229 # build the supercommand 230 for script_name, registration in SCRIPT_REGISTRY.items(): 231 logging.verbose(f"Discovered command {script_name}") 232 script_parser = registration.klass.setup_args() 233 if script_parser is None: 234 # user didn't bother defining command line args. let's just fill 235 # in for them 236 script_parser = ParlaiParser(False, False) 237 help_ = argparse.SUPPRESS if registration.hidden else script_parser.description 238 subparser = subparsers.add_parser( 239 script_name, 240 aliases=registration.aliases, 241 help=help_, 242 description=script_parser.description, 243 formatter_class=CustomHelpFormatter, 244 ) 245 subparser.set_defaults( 246 # carries the name of the full command so we know what to execute 247 super_command=script_name, 248 # used in ParlAI parser to find CLI options set by user 249 _subparser=subparser, 250 ) 251 subparser.set_defaults(**script_parser._defaults) 252 for action in script_parser._actions: 253 subparser._add_action(action) 254 for action_group in script_parser._action_groups: 255 subparser._action_groups.append(action_group) 256 257 try: 258 import argcomplete 259 260 argcomplete.autocomplete(parser) 261 except ModuleNotFoundError: 262 pass 263 264 opt = parser.parse_args(args) 265 cmd = opt.pop('super_command') 266 if cmd == 'helpall': 267 parser.print_helpall() 268 elif cmd == 'help' or cmd is None: 269 parser.print_help() 270 elif cmd is not None: 271 return SCRIPT_REGISTRY[cmd].klass._run_from_parser_and_opt(opt, parser) 272 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/parlai/core/script.py b/parlai/core/script.py --- a/parlai/core/script.py +++ b/parlai/core/script.py @@ -17,6 +17,7 @@ import io import argparse from typing import List, Optional, Dict, Any +import parlai from parlai.core.opt import Opt from parlai.core.params import ParlaiParser, CustomHelpFormatter from abc import abstractmethod @@ -206,7 +207,13 @@ parser.add_argument( '--helpall', action='helpall', - help='show all commands, including advanced ones.', + help='List all commands, including advanced ones.', + ) + parser.add_argument( + '--version', + action='version', + version=get_version_string(), + help='Prints version info and exit.', ) parser.set_defaults(super_command=None) subparsers = parser.add_subparsers( @@ -216,13 +223,13 @@ 'help', aliases=['h'], help=argparse.SUPPRESS, - description="List the main commands", + description='List the main commands.', ) hparser.set_defaults(super_command='help') hparser = subparsers.add_parser( 'helpall', help=argparse.SUPPRESS, - description="List all commands, including advanced ones.", + description='List all commands, including advanced ones.', ) hparser.set_defaults(super_command='helpall') @@ -265,7 +272,13 @@ cmd = opt.pop('super_command') if cmd == 'helpall': parser.print_helpall() + elif cmd == 'versioninfo': + exit(0) elif cmd == 'help' or cmd is None: parser.print_help() elif cmd is not None: return SCRIPT_REGISTRY[cmd].klass._run_from_parser_and_opt(opt, parser) + + +def get_version_string() -> str: + return f"ParlAI version {parlai.__version__}"
{"golden_diff": "diff --git a/parlai/core/script.py b/parlai/core/script.py\n--- a/parlai/core/script.py\n+++ b/parlai/core/script.py\n@@ -17,6 +17,7 @@\n import io\n import argparse\n from typing import List, Optional, Dict, Any\n+import parlai\n from parlai.core.opt import Opt\n from parlai.core.params import ParlaiParser, CustomHelpFormatter\n from abc import abstractmethod\n@@ -206,7 +207,13 @@\n parser.add_argument(\n '--helpall',\n action='helpall',\n- help='show all commands, including advanced ones.',\n+ help='List all commands, including advanced ones.',\n+ )\n+ parser.add_argument(\n+ '--version',\n+ action='version',\n+ version=get_version_string(),\n+ help='Prints version info and exit.',\n )\n parser.set_defaults(super_command=None)\n subparsers = parser.add_subparsers(\n@@ -216,13 +223,13 @@\n 'help',\n aliases=['h'],\n help=argparse.SUPPRESS,\n- description=\"List the main commands\",\n+ description='List the main commands.',\n )\n hparser.set_defaults(super_command='help')\n hparser = subparsers.add_parser(\n 'helpall',\n help=argparse.SUPPRESS,\n- description=\"List all commands, including advanced ones.\",\n+ description='List all commands, including advanced ones.',\n )\n hparser.set_defaults(super_command='helpall')\n \n@@ -265,7 +272,13 @@\n cmd = opt.pop('super_command')\n if cmd == 'helpall':\n parser.print_helpall()\n+ elif cmd == 'versioninfo':\n+ exit(0)\n elif cmd == 'help' or cmd is None:\n parser.print_help()\n elif cmd is not None:\n return SCRIPT_REGISTRY[cmd].klass._run_from_parser_and_opt(opt, parser)\n+\n+\n+def get_version_string() -> str:\n+ return f\"ParlAI version {parlai.__version__}\"\n", "issue": "Add --version flag to parlai command\n**Is your feature request related to a problem? Please describe.**\r\nMake parlai \"super command\" conform the unix idiom of a --version flag.\r\n\r\nUsecase: A user switching between virtual environments or reading about the release of a new feature should be able to easily identify which version they have on their path.\r\n\r\n**Describe the solution you'd like**\r\n```bash\r\n$ parlai --version\r\nparlai version X.X.X\r\n# Optionally any relevant install info (not probably not any)\r\n# Optionally copyright/license info (up to the maintainers whether you want this here or not)\r\n```\r\n\r\n**Describe alternatives you've considered**\r\n```bash\r\n$ python -c \"import parlai; print(parlai.__version__)\"\r\n0.9.2\r\n```\r\nA little less natural\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\n\"\"\"\nThe general ParlAI Script interface.\n\nAn abstract class to help standardize the call to ParlAI scripts, enabling them to be\ncompleted easily.\n\nAlso contains helper classes for loading scripts, etc.\n\"\"\"\n\nimport io\nimport argparse\nfrom typing import List, Optional, Dict, Any\nfrom parlai.core.opt import Opt\nfrom parlai.core.params import ParlaiParser, CustomHelpFormatter\nfrom abc import abstractmethod\nimport importlib\nimport pkgutil\nimport parlai.scripts\nimport parlai.utils.logging as logging\nfrom parlai.core.loader import register_script, SCRIPT_REGISTRY # noqa: F401\n\n\ndef setup_script_registry():\n \"\"\"\n Loads the scripts so that @register_script is hit for all.\n \"\"\"\n for module in pkgutil.iter_modules(parlai.scripts.__path__, 'parlai.scripts.'):\n importlib.import_module(module.name)\n\n\nclass ParlaiScript(object):\n \"\"\"\n A ParlAI script is a standardized form of access.\n \"\"\"\n\n parser: ParlaiParser\n\n @classmethod\n @abstractmethod\n def setup_args(cls) -> ParlaiParser:\n \"\"\"\n Create the parser with args.\n \"\"\"\n # we want to later deprecate this for add_cmdline_args\n pass\n\n def __init__(self, opt: Opt):\n self.opt = opt\n\n @abstractmethod\n def run(self):\n \"\"\"\n The main method.\n\n Must be implemented by the script writer.\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n def _run_kwargs(cls, kwargs: Dict[str, Any]):\n \"\"\"\n Construct and run the script using kwargs, pseudo-parsing them.\n \"\"\"\n parser = cls.setup_args()\n opt = parser.parse_kwargs(**kwargs)\n return cls._run_from_parser_and_opt(opt, parser)\n\n @classmethod\n def _run_args(cls, args: Optional[List[str]] = None):\n \"\"\"\n Construct and run the script using args, defaulting to getting from CLI.\n \"\"\"\n parser = cls.setup_args()\n opt = parser.parse_args(args=args)\n return cls._run_from_parser_and_opt(opt, parser)\n\n @classmethod\n def _run_from_parser_and_opt(cls, opt: Opt, parser: ParlaiParser):\n script = cls(opt)\n script.parser = parser\n return script.run()\n\n @classmethod\n def main(cls, *args, **kwargs):\n \"\"\"\n Run the program, possibly with some given args.\n\n You may provide command line args in the form of strings, or\n options. For example:\n\n >>> MyScript.main(['--task', 'convai2'])\n >>> MyScript.main(task='convai2')\n\n You may not combine both args and kwargs.\n \"\"\"\n assert not (bool(args) and bool(kwargs))\n if args:\n return cls._run_args(args)\n elif kwargs:\n return cls._run_kwargs(kwargs)\n else:\n return cls._run_args(None)\n\n @classmethod\n def help(cls, **kwargs):\n f = io.StringIO()\n parser = cls.setup_args()\n parser.prog = cls.__name__\n parser.add_extra_args(parser._kwargs_to_str_args(**kwargs))\n parser.print_help(f)\n return f.getvalue()\n\n\nclass _SupercommandParser(ParlaiParser):\n \"\"\"\n Specialty ParlAI parser used for the supercommand.\n\n Contains some special behavior.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n from parlai.utils.strings import colorize\n\n logo = \"\"\n logo += colorize(' _', 'red') + \"\\n\"\n logo += colorize(' /', 'red') + colorize('\"', 'brightblack')\n logo += colorize(\")\", \"yellow\") + \"\\n\"\n logo += colorize(' //', 'red') + colorize(')', 'yellow') + '\\n'\n logo += colorize(' ==', 'green')\n logo += colorize(\"/\", 'blue') + colorize('/', 'red') + colorize(\"'\", 'yellow')\n logo += colorize(\"===\", 'green') + \" ParlAI\\n\"\n logo += colorize(\" /\", 'blue')\n kwargs['description'] = logo\n return super().__init__(*args, **kwargs)\n\n def add_extra_args(self, args):\n sa = [a for a in self._actions if isinstance(a, argparse._SubParsersAction)]\n assert len(sa) == 1\n sa = sa[0]\n for _, v in sa.choices.items():\n v.add_extra_args(args)\n\n def add_subparsers(self, **kwargs):\n return super().add_subparsers(**kwargs)\n\n def _unsuppress_hidden(self):\n \"\"\"\n Restore the help messages of hidden commands.\n \"\"\"\n\n spa = [a for a in self._actions if isinstance(a, argparse._SubParsersAction)]\n assert len(spa) == 1\n spa = spa[0]\n for choices_action in spa._choices_actions:\n dest = choices_action.dest\n if choices_action.help == argparse.SUPPRESS:\n choices_action.help = spa.choices[dest].description\n\n def print_helpall(self):\n self._unsuppress_hidden()\n self.print_help()\n\n\nclass _SubcommandParser(ParlaiParser):\n \"\"\"\n ParlaiParser which always sets add_parlai_args and add_model_args to False.\n\n Used in the superscript to initialize just the args for that command.\n \"\"\"\n\n def __init__(self, **kwargs):\n kwargs['add_parlai_args'] = False\n kwargs['add_model_args'] = False\n assert 'description' in kwargs, 'Must supply description'\n return super().__init__(**kwargs)\n\n def parse_known_args(self, args=None, namespace=None, nohelp=False):\n if not nohelp:\n self.add_extra_args(args)\n return super().parse_known_args(args, namespace, nohelp)\n\n\ndef _SuperscriptHelpFormatter(**kwargs):\n kwargs['width'] = 100\n kwargs['max_help_position'] = 9999\n\n return CustomHelpFormatter(**kwargs)\n\n\ndef superscript_main(args=None):\n \"\"\"\n Superscript is a loader for all the other scripts.\n \"\"\"\n setup_script_registry()\n\n parser = _SupercommandParser(\n False, False, formatter_class=_SuperscriptHelpFormatter\n )\n parser.add_argument(\n '--helpall',\n action='helpall',\n help='show all commands, including advanced ones.',\n )\n parser.set_defaults(super_command=None)\n subparsers = parser.add_subparsers(\n parser_class=_SubcommandParser, title=\"Commands\", metavar=\"COMMAND\"\n )\n hparser = subparsers.add_parser(\n 'help',\n aliases=['h'],\n help=argparse.SUPPRESS,\n description=\"List the main commands\",\n )\n hparser.set_defaults(super_command='help')\n hparser = subparsers.add_parser(\n 'helpall',\n help=argparse.SUPPRESS,\n description=\"List all commands, including advanced ones.\",\n )\n hparser.set_defaults(super_command='helpall')\n\n # build the supercommand\n for script_name, registration in SCRIPT_REGISTRY.items():\n logging.verbose(f\"Discovered command {script_name}\")\n script_parser = registration.klass.setup_args()\n if script_parser is None:\n # user didn't bother defining command line args. let's just fill\n # in for them\n script_parser = ParlaiParser(False, False)\n help_ = argparse.SUPPRESS if registration.hidden else script_parser.description\n subparser = subparsers.add_parser(\n script_name,\n aliases=registration.aliases,\n help=help_,\n description=script_parser.description,\n formatter_class=CustomHelpFormatter,\n )\n subparser.set_defaults(\n # carries the name of the full command so we know what to execute\n super_command=script_name,\n # used in ParlAI parser to find CLI options set by user\n _subparser=subparser,\n )\n subparser.set_defaults(**script_parser._defaults)\n for action in script_parser._actions:\n subparser._add_action(action)\n for action_group in script_parser._action_groups:\n subparser._action_groups.append(action_group)\n\n try:\n import argcomplete\n\n argcomplete.autocomplete(parser)\n except ModuleNotFoundError:\n pass\n\n opt = parser.parse_args(args)\n cmd = opt.pop('super_command')\n if cmd == 'helpall':\n parser.print_helpall()\n elif cmd == 'help' or cmd is None:\n parser.print_help()\n elif cmd is not None:\n return SCRIPT_REGISTRY[cmd].klass._run_from_parser_and_opt(opt, parser)\n", "path": "parlai/core/script.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\n\"\"\"\nThe general ParlAI Script interface.\n\nAn abstract class to help standardize the call to ParlAI scripts, enabling them to be\ncompleted easily.\n\nAlso contains helper classes for loading scripts, etc.\n\"\"\"\n\nimport io\nimport argparse\nfrom typing import List, Optional, Dict, Any\nimport parlai\nfrom parlai.core.opt import Opt\nfrom parlai.core.params import ParlaiParser, CustomHelpFormatter\nfrom abc import abstractmethod\nimport importlib\nimport pkgutil\nimport parlai.scripts\nimport parlai.utils.logging as logging\nfrom parlai.core.loader import register_script, SCRIPT_REGISTRY # noqa: F401\n\n\ndef setup_script_registry():\n \"\"\"\n Loads the scripts so that @register_script is hit for all.\n \"\"\"\n for module in pkgutil.iter_modules(parlai.scripts.__path__, 'parlai.scripts.'):\n importlib.import_module(module.name)\n\n\nclass ParlaiScript(object):\n \"\"\"\n A ParlAI script is a standardized form of access.\n \"\"\"\n\n parser: ParlaiParser\n\n @classmethod\n @abstractmethod\n def setup_args(cls) -> ParlaiParser:\n \"\"\"\n Create the parser with args.\n \"\"\"\n # we want to later deprecate this for add_cmdline_args\n pass\n\n def __init__(self, opt: Opt):\n self.opt = opt\n\n @abstractmethod\n def run(self):\n \"\"\"\n The main method.\n\n Must be implemented by the script writer.\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n def _run_kwargs(cls, kwargs: Dict[str, Any]):\n \"\"\"\n Construct and run the script using kwargs, pseudo-parsing them.\n \"\"\"\n parser = cls.setup_args()\n opt = parser.parse_kwargs(**kwargs)\n return cls._run_from_parser_and_opt(opt, parser)\n\n @classmethod\n def _run_args(cls, args: Optional[List[str]] = None):\n \"\"\"\n Construct and run the script using args, defaulting to getting from CLI.\n \"\"\"\n parser = cls.setup_args()\n opt = parser.parse_args(args=args)\n return cls._run_from_parser_and_opt(opt, parser)\n\n @classmethod\n def _run_from_parser_and_opt(cls, opt: Opt, parser: ParlaiParser):\n script = cls(opt)\n script.parser = parser\n return script.run()\n\n @classmethod\n def main(cls, *args, **kwargs):\n \"\"\"\n Run the program, possibly with some given args.\n\n You may provide command line args in the form of strings, or\n options. For example:\n\n >>> MyScript.main(['--task', 'convai2'])\n >>> MyScript.main(task='convai2')\n\n You may not combine both args and kwargs.\n \"\"\"\n assert not (bool(args) and bool(kwargs))\n if args:\n return cls._run_args(args)\n elif kwargs:\n return cls._run_kwargs(kwargs)\n else:\n return cls._run_args(None)\n\n @classmethod\n def help(cls, **kwargs):\n f = io.StringIO()\n parser = cls.setup_args()\n parser.prog = cls.__name__\n parser.add_extra_args(parser._kwargs_to_str_args(**kwargs))\n parser.print_help(f)\n return f.getvalue()\n\n\nclass _SupercommandParser(ParlaiParser):\n \"\"\"\n Specialty ParlAI parser used for the supercommand.\n\n Contains some special behavior.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n from parlai.utils.strings import colorize\n\n logo = \"\"\n logo += colorize(' _', 'red') + \"\\n\"\n logo += colorize(' /', 'red') + colorize('\"', 'brightblack')\n logo += colorize(\")\", \"yellow\") + \"\\n\"\n logo += colorize(' //', 'red') + colorize(')', 'yellow') + '\\n'\n logo += colorize(' ==', 'green')\n logo += colorize(\"/\", 'blue') + colorize('/', 'red') + colorize(\"'\", 'yellow')\n logo += colorize(\"===\", 'green') + \" ParlAI\\n\"\n logo += colorize(\" /\", 'blue')\n kwargs['description'] = logo\n return super().__init__(*args, **kwargs)\n\n def add_extra_args(self, args):\n sa = [a for a in self._actions if isinstance(a, argparse._SubParsersAction)]\n assert len(sa) == 1\n sa = sa[0]\n for _, v in sa.choices.items():\n v.add_extra_args(args)\n\n def add_subparsers(self, **kwargs):\n return super().add_subparsers(**kwargs)\n\n def _unsuppress_hidden(self):\n \"\"\"\n Restore the help messages of hidden commands.\n \"\"\"\n\n spa = [a for a in self._actions if isinstance(a, argparse._SubParsersAction)]\n assert len(spa) == 1\n spa = spa[0]\n for choices_action in spa._choices_actions:\n dest = choices_action.dest\n if choices_action.help == argparse.SUPPRESS:\n choices_action.help = spa.choices[dest].description\n\n def print_helpall(self):\n self._unsuppress_hidden()\n self.print_help()\n\n\nclass _SubcommandParser(ParlaiParser):\n \"\"\"\n ParlaiParser which always sets add_parlai_args and add_model_args to False.\n\n Used in the superscript to initialize just the args for that command.\n \"\"\"\n\n def __init__(self, **kwargs):\n kwargs['add_parlai_args'] = False\n kwargs['add_model_args'] = False\n assert 'description' in kwargs, 'Must supply description'\n return super().__init__(**kwargs)\n\n def parse_known_args(self, args=None, namespace=None, nohelp=False):\n if not nohelp:\n self.add_extra_args(args)\n return super().parse_known_args(args, namespace, nohelp)\n\n\ndef _SuperscriptHelpFormatter(**kwargs):\n kwargs['width'] = 100\n kwargs['max_help_position'] = 9999\n\n return CustomHelpFormatter(**kwargs)\n\n\ndef superscript_main(args=None):\n \"\"\"\n Superscript is a loader for all the other scripts.\n \"\"\"\n setup_script_registry()\n\n parser = _SupercommandParser(\n False, False, formatter_class=_SuperscriptHelpFormatter\n )\n parser.add_argument(\n '--helpall',\n action='helpall',\n help='List all commands, including advanced ones.',\n )\n parser.add_argument(\n '--version',\n action='version',\n version=get_version_string(),\n help='Prints version info and exit.',\n )\n parser.set_defaults(super_command=None)\n subparsers = parser.add_subparsers(\n parser_class=_SubcommandParser, title=\"Commands\", metavar=\"COMMAND\"\n )\n hparser = subparsers.add_parser(\n 'help',\n aliases=['h'],\n help=argparse.SUPPRESS,\n description='List the main commands.',\n )\n hparser.set_defaults(super_command='help')\n hparser = subparsers.add_parser(\n 'helpall',\n help=argparse.SUPPRESS,\n description='List all commands, including advanced ones.',\n )\n hparser.set_defaults(super_command='helpall')\n\n # build the supercommand\n for script_name, registration in SCRIPT_REGISTRY.items():\n logging.verbose(f\"Discovered command {script_name}\")\n script_parser = registration.klass.setup_args()\n if script_parser is None:\n # user didn't bother defining command line args. let's just fill\n # in for them\n script_parser = ParlaiParser(False, False)\n help_ = argparse.SUPPRESS if registration.hidden else script_parser.description\n subparser = subparsers.add_parser(\n script_name,\n aliases=registration.aliases,\n help=help_,\n description=script_parser.description,\n formatter_class=CustomHelpFormatter,\n )\n subparser.set_defaults(\n # carries the name of the full command so we know what to execute\n super_command=script_name,\n # used in ParlAI parser to find CLI options set by user\n _subparser=subparser,\n )\n subparser.set_defaults(**script_parser._defaults)\n for action in script_parser._actions:\n subparser._add_action(action)\n for action_group in script_parser._action_groups:\n subparser._action_groups.append(action_group)\n\n try:\n import argcomplete\n\n argcomplete.autocomplete(parser)\n except ModuleNotFoundError:\n pass\n\n opt = parser.parse_args(args)\n cmd = opt.pop('super_command')\n if cmd == 'helpall':\n parser.print_helpall()\n elif cmd == 'versioninfo':\n exit(0)\n elif cmd == 'help' or cmd is None:\n parser.print_help()\n elif cmd is not None:\n return SCRIPT_REGISTRY[cmd].klass._run_from_parser_and_opt(opt, parser)\n\n\ndef get_version_string() -> str:\n return f\"ParlAI version {parlai.__version__}\"\n", "path": "parlai/core/script.py"}]}
3,069
462
gh_patches_debug_20796
rasdani/github-patches
git_diff
wagtail__wagtail-7703
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- csrf_token missing for admin dashboard workflow actions (resulting in 403) ### Issue Summary When a `POST` is made to the workflow URLs `admin/pages/workflow/action/` via the dashboard buttons (e.g. Approve) it does not contain the `csrfToken` resulting in a 403 each time. ### Steps to Reproduce 1. Start a new project with `wagtail start myproject` 2. Create a new page & publish 3. Edit that same page but this time put to moderator's approval 4. Go to the dashboard (admin home) 5. Click 'Approve' 6. Expected: Should work & approve the page's moderation, Actual: user is shown a 403 error * I have confirmed that this issue can be reproduced as described on a fresh Wagtail project: No - Wagtail bakerydemo without changes only. <img width="1873" alt="Screen Shot 2021-11-08 at 9 10 12 pm" src="https://user-images.githubusercontent.com/1396140/140733351-f955d1ed-283b-45b8-a85e-fc3ce20b6d49.png"> <img width="1873" alt="Screen Shot 2021-11-08 at 9 10 36 pm" src="https://user-images.githubusercontent.com/1396140/140733360-6657420b-6e14-4f28-9233-f81c95812305.png"> ### Technical details * Python version: 2.7.6 * Django version: 3.2.6 * Wagtail version: 2.15 * Browser version: Chrome 95 https://www.whatsmybrowser.org/b/VAX8V ### Likely root cause * Dashboard renders buttons with `data-workflow-action-url` to indicate the action URL to POST to. * `client/src/entrypoints/admin/workflow-action.js` has a function `ActivateWorkflowActionsForDashboard` which accepts a `csrfToken` and for non-modal workflow actions (e.g. approve but not approve with comment) it will `POST` to `buttonElement.dataset.workflowActionUrl` which in the approve case is something like `admin/pages/workflow/action/` * This function is initiated in the template `wagtail/admin/templates/wagtailadmin/home/workflow_pages_to_moderate.html` - see `document.addEventListener('DOMContentLoaded', ActivateWorkflowActionsForDashboard('{{ csrf_token|escapejs }}'));` * This template is used by `wagtail/admin/views/home.py` - see * ```class WorkflowPagesToModeratePanel(Component): name = 'workflow_pages_to_moderate' template_name = 'wagtailadmin/home/workflow_pages_to_moderate.html' order = 220``` * However, the `get_context_data` does not appear to return the context variable `csrf_token` at all ### Recommended fix * It is probably best to align the `csrf_token` with the Django tag usage instead of passing it down as a variable through the component's context. * However, it appears that a similar approach has been taken for wagtail/admin/templates/wagtailadmin/home/locked_pages.html (not sure if that is broken also) - but it seems like there is an intentional escapejs usage --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `wagtail/admin/views/home.py` Content: ``` 1 import itertools 2 3 from warnings import warn 4 5 from django.conf import settings 6 from django.contrib.auth import get_user_model 7 from django.contrib.auth.decorators import permission_required 8 from django.db import connection 9 from django.db.models import Max, Q 10 from django.forms import Media 11 from django.http import Http404, HttpResponse 12 from django.template.loader import render_to_string 13 from django.template.response import TemplateResponse 14 15 from wagtail.admin.navigation import get_site_for_user 16 from wagtail.admin.site_summary import SiteSummaryPanel 17 from wagtail.admin.ui.components import Component 18 from wagtail.core import hooks 19 from wagtail.core.models import ( 20 Page, PageRevision, TaskState, UserPagePermissionsProxy, WorkflowState) 21 from wagtail.utils.deprecation import RemovedInWagtail217Warning 22 23 24 User = get_user_model() 25 26 27 # Panels for the homepage 28 29 class UpgradeNotificationPanel(Component): 30 name = 'upgrade_notification' 31 template_name = 'wagtailadmin/home/upgrade_notification.html' 32 order = 100 33 34 def render_html(self, parent_context): 35 if parent_context['request'].user.is_superuser and getattr(settings, "WAGTAIL_ENABLE_UPDATE_CHECK", True): 36 return super().render_html(parent_context) 37 else: 38 return "" 39 40 41 class PagesForModerationPanel(Component): 42 name = 'pages_for_moderation' 43 template_name = 'wagtailadmin/home/pages_for_moderation.html' 44 order = 200 45 46 def get_context_data(self, parent_context): 47 request = parent_context['request'] 48 context = super().get_context_data(parent_context) 49 user_perms = UserPagePermissionsProxy(request.user) 50 context['page_revisions_for_moderation'] = ( 51 user_perms.revisions_for_moderation().select_related('page', 'user').order_by('-created_at') 52 ) 53 context['request'] = request 54 return context 55 56 57 class UserPagesInWorkflowModerationPanel(Component): 58 name = 'user_pages_in_workflow_moderation' 59 template_name = 'wagtailadmin/home/user_pages_in_workflow_moderation.html' 60 order = 210 61 62 def get_context_data(self, parent_context): 63 request = parent_context['request'] 64 context = super().get_context_data(parent_context) 65 if getattr(settings, 'WAGTAIL_WORKFLOW_ENABLED', True): 66 # Find in progress workflow states which are either requested by the user or on pages owned by the user 67 context['workflow_states'] = ( 68 WorkflowState.objects.active() 69 .filter(Q(page__owner=request.user) | Q(requested_by=request.user)) 70 .select_related( 71 'page', 'current_task_state', 'current_task_state__task', 'current_task_state__page_revision' 72 ) 73 .order_by('-current_task_state__started_at') 74 ) 75 else: 76 context['workflow_states'] = WorkflowState.objects.none() 77 context['request'] = request 78 return context 79 80 81 class WorkflowPagesToModeratePanel(Component): 82 name = 'workflow_pages_to_moderate' 83 template_name = 'wagtailadmin/home/workflow_pages_to_moderate.html' 84 order = 220 85 86 def get_context_data(self, parent_context): 87 request = parent_context['request'] 88 context = super().get_context_data(parent_context) 89 if getattr(settings, 'WAGTAIL_WORKFLOW_ENABLED', True): 90 states = ( 91 TaskState.objects.reviewable_by(request.user) 92 .select_related('page_revision', 'task', 'page_revision__page') 93 .order_by('-started_at') 94 ) 95 context['states'] = [ 96 (state, state.task.specific.get_actions(page=state.page_revision.page, user=request.user), state.workflow_state.all_tasks_with_status()) 97 for state in states 98 ] 99 else: 100 context['states'] = [] 101 context['request'] = request 102 return context 103 104 105 class LockedPagesPanel(Component): 106 name = 'locked_pages' 107 template_name = 'wagtailadmin/home/locked_pages.html' 108 order = 300 109 110 def get_context_data(self, parent_context): 111 request = parent_context['request'] 112 context = super().get_context_data(parent_context) 113 context.update({ 114 'locked_pages': Page.objects.filter( 115 locked=True, 116 locked_by=request.user, 117 ), 118 'can_remove_locks': UserPagePermissionsProxy(request.user).can_remove_locks(), 119 'request': request, 120 }) 121 return context 122 123 124 class RecentEditsPanel(Component): 125 name = 'recent_edits' 126 template_name = 'wagtailadmin/home/recent_edits.html' 127 order = 250 128 129 def get_context_data(self, parent_context): 130 request = parent_context['request'] 131 context = super().get_context_data(parent_context) 132 133 # Last n edited pages 134 edit_count = getattr(settings, 'WAGTAILADMIN_RECENT_EDITS_LIMIT', 5) 135 if connection.vendor == 'mysql': 136 # MySQL can't handle the subselect created by the ORM version - 137 # it fails with "This version of MySQL doesn't yet support 'LIMIT & IN/ALL/ANY/SOME subquery'" 138 last_edits = PageRevision.objects.raw( 139 """ 140 SELECT wp.* FROM 141 wagtailcore_pagerevision wp JOIN ( 142 SELECT max(created_at) AS max_created_at, page_id FROM 143 wagtailcore_pagerevision WHERE user_id = %s GROUP BY page_id ORDER BY max_created_at DESC LIMIT %s 144 ) AS max_rev ON max_rev.max_created_at = wp.created_at ORDER BY wp.created_at DESC 145 """, [ 146 User._meta.pk.get_db_prep_value(request.user.pk, connection), 147 edit_count 148 ] 149 ) 150 else: 151 last_edits_dates = (PageRevision.objects.filter(user=request.user) 152 .values('page_id').annotate(latest_date=Max('created_at')) 153 .order_by('-latest_date').values('latest_date')[:edit_count]) 154 last_edits = PageRevision.objects.filter(created_at__in=last_edits_dates).order_by('-created_at') 155 156 page_keys = [pr.page_id for pr in last_edits] 157 pages = Page.objects.specific().in_bulk(page_keys) 158 context['last_edits'] = [ 159 [revision, pages.get(revision.page_id)] for revision in last_edits 160 ] 161 context['request'] = request 162 return context 163 164 165 def home(request): 166 167 panels = [ 168 SiteSummaryPanel(request), 169 UpgradeNotificationPanel(), 170 WorkflowPagesToModeratePanel(), 171 PagesForModerationPanel(), 172 UserPagesInWorkflowModerationPanel(), 173 RecentEditsPanel(), 174 LockedPagesPanel(), 175 ] 176 177 for fn in hooks.get_hooks('construct_homepage_panels'): 178 fn(request, panels) 179 180 media = Media() 181 182 for panel in panels: 183 if hasattr(panel, 'render') and not hasattr(panel, 'render_html'): 184 # NOTE: when this deprecation warning is removed the 'fallback_render_method=True' in 185 # wagtailadmin/home.html should be removed too 186 message = ( 187 "Homepage panel %r should provide a render_html method. " 188 "See https://docs.wagtail.io/en/stable/releases/2.15.html#template-components-2-15" 189 % panel 190 ) 191 warn(message, category=RemovedInWagtail217Warning) 192 193 # RemovedInWagtail217Warning: this hasattr check can be removed when support for 194 # non-component-based panels ends 195 if hasattr(panel, 'media'): 196 media += panel.media 197 198 site_details = get_site_for_user(request.user) 199 200 return TemplateResponse(request, "wagtailadmin/home.html", { 201 'root_page': site_details['root_page'], 202 'root_site': site_details['root_site'], 203 'site_name': site_details['site_name'], 204 'panels': sorted(panels, key=lambda p: p.order), 205 'user': request.user, 206 'media': media, 207 }) 208 209 210 def error_test(request): 211 raise Exception("This is a test of the emergency broadcast system.") 212 213 214 @permission_required('wagtailadmin.access_admin', login_url='wagtailadmin_login') 215 def default(request): 216 """ 217 Called whenever a request comes in with the correct prefix (eg /admin/) but 218 doesn't actually correspond to a Wagtail view. 219 220 For authenticated users, it'll raise a 404 error. Anonymous users will be 221 redirected to the login page. 222 """ 223 raise Http404 224 225 226 _icons_html = None 227 228 229 def icons(): 230 global _icons_html 231 if _icons_html is None: 232 icon_hooks = hooks.get_hooks('register_icons') 233 all_icons = sorted(itertools.chain.from_iterable(hook([]) for hook in icon_hooks)) 234 _icons_html = render_to_string("wagtailadmin/shared/icons.html", {'icons': all_icons}) 235 return _icons_html 236 237 238 def sprite(request): 239 return HttpResponse(icons()) 240 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/wagtail/admin/views/home.py b/wagtail/admin/views/home.py --- a/wagtail/admin/views/home.py +++ b/wagtail/admin/views/home.py @@ -51,6 +51,7 @@ user_perms.revisions_for_moderation().select_related('page', 'user').order_by('-created_at') ) context['request'] = request + context['csrf_token'] = parent_context['csrf_token'] return context @@ -99,6 +100,7 @@ else: context['states'] = [] context['request'] = request + context['csrf_token'] = parent_context['csrf_token'] return context @@ -117,6 +119,7 @@ ), 'can_remove_locks': UserPagePermissionsProxy(request.user).can_remove_locks(), 'request': request, + 'csrf_token': parent_context['csrf_token'], }) return context
{"golden_diff": "diff --git a/wagtail/admin/views/home.py b/wagtail/admin/views/home.py\n--- a/wagtail/admin/views/home.py\n+++ b/wagtail/admin/views/home.py\n@@ -51,6 +51,7 @@\n user_perms.revisions_for_moderation().select_related('page', 'user').order_by('-created_at')\n )\n context['request'] = request\n+ context['csrf_token'] = parent_context['csrf_token']\n return context\n \n \n@@ -99,6 +100,7 @@\n else:\n context['states'] = []\n context['request'] = request\n+ context['csrf_token'] = parent_context['csrf_token']\n return context\n \n \n@@ -117,6 +119,7 @@\n ),\n 'can_remove_locks': UserPagePermissionsProxy(request.user).can_remove_locks(),\n 'request': request,\n+ 'csrf_token': parent_context['csrf_token'],\n })\n return context\n", "issue": "csrf_token missing for admin dashboard workflow actions (resulting in 403)\n### Issue Summary\r\n\r\nWhen a `POST` is made to the workflow URLs `admin/pages/workflow/action/` via the dashboard buttons (e.g. Approve) it does not contain the `csrfToken` resulting in a 403 each time.\r\n\r\n### Steps to Reproduce\r\n\r\n1. Start a new project with `wagtail start myproject`\r\n2. Create a new page & publish\r\n3. Edit that same page but this time put to moderator's approval\r\n4. Go to the dashboard (admin home)\r\n5. Click 'Approve'\r\n6. Expected: Should work & approve the page's moderation, Actual: user is shown a 403 error\r\n\r\n* I have confirmed that this issue can be reproduced as described on a fresh Wagtail project: No - Wagtail bakerydemo without changes only.\r\n\r\n\r\n<img width=\"1873\" alt=\"Screen Shot 2021-11-08 at 9 10 12 pm\" src=\"https://user-images.githubusercontent.com/1396140/140733351-f955d1ed-283b-45b8-a85e-fc3ce20b6d49.png\">\r\n\r\n\r\n<img width=\"1873\" alt=\"Screen Shot 2021-11-08 at 9 10 36 pm\" src=\"https://user-images.githubusercontent.com/1396140/140733360-6657420b-6e14-4f28-9233-f81c95812305.png\">\r\n\r\n\r\n### Technical details\r\n\r\n* Python version: 2.7.6\r\n* Django version: 3.2.6\r\n* Wagtail version: 2.15\r\n* Browser version: Chrome 95 https://www.whatsmybrowser.org/b/VAX8V\r\n\r\n### Likely root cause\r\n\r\n* Dashboard renders buttons with `data-workflow-action-url` to indicate the action URL to POST to.\r\n* `client/src/entrypoints/admin/workflow-action.js` has a function `ActivateWorkflowActionsForDashboard` which accepts a `csrfToken` and for non-modal workflow actions (e.g. approve but not approve with comment) it will `POST` to `buttonElement.dataset.workflowActionUrl` which in the approve case is something like `admin/pages/workflow/action/`\r\n* This function is initiated in the template `wagtail/admin/templates/wagtailadmin/home/workflow_pages_to_moderate.html` - see `document.addEventListener('DOMContentLoaded', ActivateWorkflowActionsForDashboard('{{ csrf_token|escapejs }}'));`\r\n* This template is used by `wagtail/admin/views/home.py` - see\r\n* ```class WorkflowPagesToModeratePanel(Component):\r\n name = 'workflow_pages_to_moderate'\r\n template_name = 'wagtailadmin/home/workflow_pages_to_moderate.html'\r\n order = 220```\r\n* However, the `get_context_data` does not appear to return the context variable `csrf_token` at all\r\n\r\n\r\n### Recommended fix\r\n\r\n* It is probably best to align the `csrf_token` with the Django tag usage instead of passing it down as a variable through the component's context.\r\n* However, it appears that a similar approach has been taken for wagtail/admin/templates/wagtailadmin/home/locked_pages.html (not sure if that is broken also) - but it seems like there is an intentional escapejs usage\r\n\n", "before_files": [{"content": "import itertools\n\nfrom warnings import warn\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.decorators import permission_required\nfrom django.db import connection\nfrom django.db.models import Max, Q\nfrom django.forms import Media\nfrom django.http import Http404, HttpResponse\nfrom django.template.loader import render_to_string\nfrom django.template.response import TemplateResponse\n\nfrom wagtail.admin.navigation import get_site_for_user\nfrom wagtail.admin.site_summary import SiteSummaryPanel\nfrom wagtail.admin.ui.components import Component\nfrom wagtail.core import hooks\nfrom wagtail.core.models import (\n Page, PageRevision, TaskState, UserPagePermissionsProxy, WorkflowState)\nfrom wagtail.utils.deprecation import RemovedInWagtail217Warning\n\n\nUser = get_user_model()\n\n\n# Panels for the homepage\n\nclass UpgradeNotificationPanel(Component):\n name = 'upgrade_notification'\n template_name = 'wagtailadmin/home/upgrade_notification.html'\n order = 100\n\n def render_html(self, parent_context):\n if parent_context['request'].user.is_superuser and getattr(settings, \"WAGTAIL_ENABLE_UPDATE_CHECK\", True):\n return super().render_html(parent_context)\n else:\n return \"\"\n\n\nclass PagesForModerationPanel(Component):\n name = 'pages_for_moderation'\n template_name = 'wagtailadmin/home/pages_for_moderation.html'\n order = 200\n\n def get_context_data(self, parent_context):\n request = parent_context['request']\n context = super().get_context_data(parent_context)\n user_perms = UserPagePermissionsProxy(request.user)\n context['page_revisions_for_moderation'] = (\n user_perms.revisions_for_moderation().select_related('page', 'user').order_by('-created_at')\n )\n context['request'] = request\n return context\n\n\nclass UserPagesInWorkflowModerationPanel(Component):\n name = 'user_pages_in_workflow_moderation'\n template_name = 'wagtailadmin/home/user_pages_in_workflow_moderation.html'\n order = 210\n\n def get_context_data(self, parent_context):\n request = parent_context['request']\n context = super().get_context_data(parent_context)\n if getattr(settings, 'WAGTAIL_WORKFLOW_ENABLED', True):\n # Find in progress workflow states which are either requested by the user or on pages owned by the user\n context['workflow_states'] = (\n WorkflowState.objects.active()\n .filter(Q(page__owner=request.user) | Q(requested_by=request.user))\n .select_related(\n 'page', 'current_task_state', 'current_task_state__task', 'current_task_state__page_revision'\n )\n .order_by('-current_task_state__started_at')\n )\n else:\n context['workflow_states'] = WorkflowState.objects.none()\n context['request'] = request\n return context\n\n\nclass WorkflowPagesToModeratePanel(Component):\n name = 'workflow_pages_to_moderate'\n template_name = 'wagtailadmin/home/workflow_pages_to_moderate.html'\n order = 220\n\n def get_context_data(self, parent_context):\n request = parent_context['request']\n context = super().get_context_data(parent_context)\n if getattr(settings, 'WAGTAIL_WORKFLOW_ENABLED', True):\n states = (\n TaskState.objects.reviewable_by(request.user)\n .select_related('page_revision', 'task', 'page_revision__page')\n .order_by('-started_at')\n )\n context['states'] = [\n (state, state.task.specific.get_actions(page=state.page_revision.page, user=request.user), state.workflow_state.all_tasks_with_status())\n for state in states\n ]\n else:\n context['states'] = []\n context['request'] = request\n return context\n\n\nclass LockedPagesPanel(Component):\n name = 'locked_pages'\n template_name = 'wagtailadmin/home/locked_pages.html'\n order = 300\n\n def get_context_data(self, parent_context):\n request = parent_context['request']\n context = super().get_context_data(parent_context)\n context.update({\n 'locked_pages': Page.objects.filter(\n locked=True,\n locked_by=request.user,\n ),\n 'can_remove_locks': UserPagePermissionsProxy(request.user).can_remove_locks(),\n 'request': request,\n })\n return context\n\n\nclass RecentEditsPanel(Component):\n name = 'recent_edits'\n template_name = 'wagtailadmin/home/recent_edits.html'\n order = 250\n\n def get_context_data(self, parent_context):\n request = parent_context['request']\n context = super().get_context_data(parent_context)\n\n # Last n edited pages\n edit_count = getattr(settings, 'WAGTAILADMIN_RECENT_EDITS_LIMIT', 5)\n if connection.vendor == 'mysql':\n # MySQL can't handle the subselect created by the ORM version -\n # it fails with \"This version of MySQL doesn't yet support 'LIMIT & IN/ALL/ANY/SOME subquery'\"\n last_edits = PageRevision.objects.raw(\n \"\"\"\n SELECT wp.* FROM\n wagtailcore_pagerevision wp JOIN (\n SELECT max(created_at) AS max_created_at, page_id FROM\n wagtailcore_pagerevision WHERE user_id = %s GROUP BY page_id ORDER BY max_created_at DESC LIMIT %s\n ) AS max_rev ON max_rev.max_created_at = wp.created_at ORDER BY wp.created_at DESC\n \"\"\", [\n User._meta.pk.get_db_prep_value(request.user.pk, connection),\n edit_count\n ]\n )\n else:\n last_edits_dates = (PageRevision.objects.filter(user=request.user)\n .values('page_id').annotate(latest_date=Max('created_at'))\n .order_by('-latest_date').values('latest_date')[:edit_count])\n last_edits = PageRevision.objects.filter(created_at__in=last_edits_dates).order_by('-created_at')\n\n page_keys = [pr.page_id for pr in last_edits]\n pages = Page.objects.specific().in_bulk(page_keys)\n context['last_edits'] = [\n [revision, pages.get(revision.page_id)] for revision in last_edits\n ]\n context['request'] = request\n return context\n\n\ndef home(request):\n\n panels = [\n SiteSummaryPanel(request),\n UpgradeNotificationPanel(),\n WorkflowPagesToModeratePanel(),\n PagesForModerationPanel(),\n UserPagesInWorkflowModerationPanel(),\n RecentEditsPanel(),\n LockedPagesPanel(),\n ]\n\n for fn in hooks.get_hooks('construct_homepage_panels'):\n fn(request, panels)\n\n media = Media()\n\n for panel in panels:\n if hasattr(panel, 'render') and not hasattr(panel, 'render_html'):\n # NOTE: when this deprecation warning is removed the 'fallback_render_method=True' in\n # wagtailadmin/home.html should be removed too\n message = (\n \"Homepage panel %r should provide a render_html method. \"\n \"See https://docs.wagtail.io/en/stable/releases/2.15.html#template-components-2-15\"\n % panel\n )\n warn(message, category=RemovedInWagtail217Warning)\n\n # RemovedInWagtail217Warning: this hasattr check can be removed when support for\n # non-component-based panels ends\n if hasattr(panel, 'media'):\n media += panel.media\n\n site_details = get_site_for_user(request.user)\n\n return TemplateResponse(request, \"wagtailadmin/home.html\", {\n 'root_page': site_details['root_page'],\n 'root_site': site_details['root_site'],\n 'site_name': site_details['site_name'],\n 'panels': sorted(panels, key=lambda p: p.order),\n 'user': request.user,\n 'media': media,\n })\n\n\ndef error_test(request):\n raise Exception(\"This is a test of the emergency broadcast system.\")\n\n\n@permission_required('wagtailadmin.access_admin', login_url='wagtailadmin_login')\ndef default(request):\n \"\"\"\n Called whenever a request comes in with the correct prefix (eg /admin/) but\n doesn't actually correspond to a Wagtail view.\n\n For authenticated users, it'll raise a 404 error. Anonymous users will be\n redirected to the login page.\n \"\"\"\n raise Http404\n\n\n_icons_html = None\n\n\ndef icons():\n global _icons_html\n if _icons_html is None:\n icon_hooks = hooks.get_hooks('register_icons')\n all_icons = sorted(itertools.chain.from_iterable(hook([]) for hook in icon_hooks))\n _icons_html = render_to_string(\"wagtailadmin/shared/icons.html\", {'icons': all_icons})\n return _icons_html\n\n\ndef sprite(request):\n return HttpResponse(icons())\n", "path": "wagtail/admin/views/home.py"}], "after_files": [{"content": "import itertools\n\nfrom warnings import warn\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.decorators import permission_required\nfrom django.db import connection\nfrom django.db.models import Max, Q\nfrom django.forms import Media\nfrom django.http import Http404, HttpResponse\nfrom django.template.loader import render_to_string\nfrom django.template.response import TemplateResponse\n\nfrom wagtail.admin.navigation import get_site_for_user\nfrom wagtail.admin.site_summary import SiteSummaryPanel\nfrom wagtail.admin.ui.components import Component\nfrom wagtail.core import hooks\nfrom wagtail.core.models import (\n Page, PageRevision, TaskState, UserPagePermissionsProxy, WorkflowState)\nfrom wagtail.utils.deprecation import RemovedInWagtail217Warning\n\n\nUser = get_user_model()\n\n\n# Panels for the homepage\n\nclass UpgradeNotificationPanel(Component):\n name = 'upgrade_notification'\n template_name = 'wagtailadmin/home/upgrade_notification.html'\n order = 100\n\n def render_html(self, parent_context):\n if parent_context['request'].user.is_superuser and getattr(settings, \"WAGTAIL_ENABLE_UPDATE_CHECK\", True):\n return super().render_html(parent_context)\n else:\n return \"\"\n\n\nclass PagesForModerationPanel(Component):\n name = 'pages_for_moderation'\n template_name = 'wagtailadmin/home/pages_for_moderation.html'\n order = 200\n\n def get_context_data(self, parent_context):\n request = parent_context['request']\n context = super().get_context_data(parent_context)\n user_perms = UserPagePermissionsProxy(request.user)\n context['page_revisions_for_moderation'] = (\n user_perms.revisions_for_moderation().select_related('page', 'user').order_by('-created_at')\n )\n context['request'] = request\n context['csrf_token'] = parent_context['csrf_token']\n return context\n\n\nclass UserPagesInWorkflowModerationPanel(Component):\n name = 'user_pages_in_workflow_moderation'\n template_name = 'wagtailadmin/home/user_pages_in_workflow_moderation.html'\n order = 210\n\n def get_context_data(self, parent_context):\n request = parent_context['request']\n context = super().get_context_data(parent_context)\n if getattr(settings, 'WAGTAIL_WORKFLOW_ENABLED', True):\n # Find in progress workflow states which are either requested by the user or on pages owned by the user\n context['workflow_states'] = (\n WorkflowState.objects.active()\n .filter(Q(page__owner=request.user) | Q(requested_by=request.user))\n .select_related(\n 'page', 'current_task_state', 'current_task_state__task', 'current_task_state__page_revision'\n )\n .order_by('-current_task_state__started_at')\n )\n else:\n context['workflow_states'] = WorkflowState.objects.none()\n context['request'] = request\n return context\n\n\nclass WorkflowPagesToModeratePanel(Component):\n name = 'workflow_pages_to_moderate'\n template_name = 'wagtailadmin/home/workflow_pages_to_moderate.html'\n order = 220\n\n def get_context_data(self, parent_context):\n request = parent_context['request']\n context = super().get_context_data(parent_context)\n if getattr(settings, 'WAGTAIL_WORKFLOW_ENABLED', True):\n states = (\n TaskState.objects.reviewable_by(request.user)\n .select_related('page_revision', 'task', 'page_revision__page')\n .order_by('-started_at')\n )\n context['states'] = [\n (state, state.task.specific.get_actions(page=state.page_revision.page, user=request.user), state.workflow_state.all_tasks_with_status())\n for state in states\n ]\n else:\n context['states'] = []\n context['request'] = request\n context['csrf_token'] = parent_context['csrf_token']\n return context\n\n\nclass LockedPagesPanel(Component):\n name = 'locked_pages'\n template_name = 'wagtailadmin/home/locked_pages.html'\n order = 300\n\n def get_context_data(self, parent_context):\n request = parent_context['request']\n context = super().get_context_data(parent_context)\n context.update({\n 'locked_pages': Page.objects.filter(\n locked=True,\n locked_by=request.user,\n ),\n 'can_remove_locks': UserPagePermissionsProxy(request.user).can_remove_locks(),\n 'request': request,\n 'csrf_token': parent_context['csrf_token'],\n })\n return context\n\n\nclass RecentEditsPanel(Component):\n name = 'recent_edits'\n template_name = 'wagtailadmin/home/recent_edits.html'\n order = 250\n\n def get_context_data(self, parent_context):\n request = parent_context['request']\n context = super().get_context_data(parent_context)\n\n # Last n edited pages\n edit_count = getattr(settings, 'WAGTAILADMIN_RECENT_EDITS_LIMIT', 5)\n if connection.vendor == 'mysql':\n # MySQL can't handle the subselect created by the ORM version -\n # it fails with \"This version of MySQL doesn't yet support 'LIMIT & IN/ALL/ANY/SOME subquery'\"\n last_edits = PageRevision.objects.raw(\n \"\"\"\n SELECT wp.* FROM\n wagtailcore_pagerevision wp JOIN (\n SELECT max(created_at) AS max_created_at, page_id FROM\n wagtailcore_pagerevision WHERE user_id = %s GROUP BY page_id ORDER BY max_created_at DESC LIMIT %s\n ) AS max_rev ON max_rev.max_created_at = wp.created_at ORDER BY wp.created_at DESC\n \"\"\", [\n User._meta.pk.get_db_prep_value(request.user.pk, connection),\n edit_count\n ]\n )\n else:\n last_edits_dates = (PageRevision.objects.filter(user=request.user)\n .values('page_id').annotate(latest_date=Max('created_at'))\n .order_by('-latest_date').values('latest_date')[:edit_count])\n last_edits = PageRevision.objects.filter(created_at__in=last_edits_dates).order_by('-created_at')\n\n page_keys = [pr.page_id for pr in last_edits]\n pages = Page.objects.specific().in_bulk(page_keys)\n context['last_edits'] = [\n [revision, pages.get(revision.page_id)] for revision in last_edits\n ]\n context['request'] = request\n return context\n\n\ndef home(request):\n\n panels = [\n SiteSummaryPanel(request),\n UpgradeNotificationPanel(),\n WorkflowPagesToModeratePanel(),\n PagesForModerationPanel(),\n UserPagesInWorkflowModerationPanel(),\n RecentEditsPanel(),\n LockedPagesPanel(),\n ]\n\n for fn in hooks.get_hooks('construct_homepage_panels'):\n fn(request, panels)\n\n media = Media()\n\n for panel in panels:\n if hasattr(panel, 'render') and not hasattr(panel, 'render_html'):\n # NOTE: when this deprecation warning is removed the 'fallback_render_method=True' in\n # wagtailadmin/home.html should be removed too\n message = (\n \"Homepage panel %r should provide a render_html method. \"\n \"See https://docs.wagtail.io/en/stable/releases/2.15.html#template-components-2-15\"\n % panel\n )\n warn(message, category=RemovedInWagtail217Warning)\n\n # RemovedInWagtail217Warning: this hasattr check can be removed when support for\n # non-component-based panels ends\n if hasattr(panel, 'media'):\n media += panel.media\n\n site_details = get_site_for_user(request.user)\n\n return TemplateResponse(request, \"wagtailadmin/home.html\", {\n 'root_page': site_details['root_page'],\n 'root_site': site_details['root_site'],\n 'site_name': site_details['site_name'],\n 'panels': sorted(panels, key=lambda p: p.order),\n 'user': request.user,\n 'media': media,\n })\n\n\ndef error_test(request):\n raise Exception(\"This is a test of the emergency broadcast system.\")\n\n\n@permission_required('wagtailadmin.access_admin', login_url='wagtailadmin_login')\ndef default(request):\n \"\"\"\n Called whenever a request comes in with the correct prefix (eg /admin/) but\n doesn't actually correspond to a Wagtail view.\n\n For authenticated users, it'll raise a 404 error. Anonymous users will be\n redirected to the login page.\n \"\"\"\n raise Http404\n\n\n_icons_html = None\n\n\ndef icons():\n global _icons_html\n if _icons_html is None:\n icon_hooks = hooks.get_hooks('register_icons')\n all_icons = sorted(itertools.chain.from_iterable(hook([]) for hook in icon_hooks))\n _icons_html = render_to_string(\"wagtailadmin/shared/icons.html\", {'icons': all_icons})\n return _icons_html\n\n\ndef sprite(request):\n return HttpResponse(icons())\n", "path": "wagtail/admin/views/home.py"}]}
3,569
214
gh_patches_debug_19601
rasdani/github-patches
git_diff
voxel51__fiftyone-200
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Documentation build takes a long time Reproducible with `SPHINXOPTS=-E time ./docs/generate_docs.bash` - notably, it takes multiple minutes to build this page: ``` reading sources... [ 18%] api/fiftyone.core.odm ``` This is at least part of the reason why documentation builds on GitHub actions are taking so long. Possibly related to #182, although I don't think earlier builds on GitHub actions ran into this. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `fiftyone/core/odm/database.py` Content: ``` 1 """ 2 Database connection. 3 4 | Copyright 2017-2020, Voxel51, Inc. 5 | `voxel51.com <https://voxel51.com/>`_ 6 | 7 """ 8 # pragma pylint: disable=redefined-builtin 9 # pragma pylint: disable=unused-wildcard-import 10 # pragma pylint: disable=wildcard-import 11 from __future__ import absolute_import 12 from __future__ import division 13 from __future__ import print_function 14 from __future__ import unicode_literals 15 16 # pragma pylint: enable=redefined-builtin 17 # pragma pylint: enable=unused-wildcard-import 18 # pragma pylint: enable=wildcard-import 19 20 21 from mongoengine import connect 22 import pymongo 23 24 _DEFAULT_DATABASE = "fiftyone" 25 26 27 _db = connect(_DEFAULT_DATABASE) 28 29 30 def get_db_conn(): 31 """Creates a connection to the database""" 32 return pymongo.MongoClient()[_DEFAULT_DATABASE] 33 34 35 def drop_database(): 36 """Drops the database.""" 37 client = pymongo.MongoClient() 38 client.drop_database(_DEFAULT_DATABASE) 39 ``` Path: `fiftyone/core/service.py` Content: ``` 1 """ 2 FiftyOne Services. 3 4 | Copyright 2017-2020, Voxel51, Inc. 5 | `voxel51.com <https://voxel51.com/>`_ 6 | 7 """ 8 # pragma pylint: disable=redefined-builtin 9 # pragma pylint: disable=unused-wildcard-import 10 # pragma pylint: disable=wildcard-import 11 from __future__ import absolute_import 12 from __future__ import division 13 from __future__ import print_function 14 from __future__ import unicode_literals 15 from builtins import * 16 17 # pragma pylint: enable=redefined-builtin 18 # pragma pylint: enable=unused-wildcard-import 19 # pragma pylint: enable=wildcard-import 20 21 import logging 22 import os 23 import re 24 import subprocess 25 import sys 26 27 from packaging.version import Version 28 import requests 29 30 import eta.core.utils as etau 31 32 import fiftyone.constants as foc 33 34 35 logger = logging.getLogger(__name__) 36 37 38 class Service(object): 39 """Interface for FiftyOne services. 40 41 All services must define a ``command`` property. 42 43 Services are run in an isolated Python subprocess (see ``_service_main.py``) 44 to ensure that they are shut down when the main Python process exits. The 45 ``command`` and ``working_dir`` properties control the execution of the 46 service in the subprocess. 47 """ 48 49 working_dir = "." 50 51 def __init__(self): 52 """Creates (starts) the Service.""" 53 self._system = os.system 54 self._is_server = os.environ.get( 55 "FIFTYONE_SERVER", False 56 ) or os.environ.get("FIFTYONE_DISABLE_SERVICES", False) 57 self.child = None 58 if not self._is_server: 59 self.start() 60 61 def __del__(self): 62 """Deletes (stops) the Service.""" 63 if not self._is_server: 64 try: 65 self.stop() 66 except: 67 # something probably failed due to interpreter shutdown, which 68 # will be handled by _service_main.py 69 pass 70 71 @property 72 def command(self): 73 raise NotImplementedError("subclasses must define `command`") 74 75 def start(self): 76 """Starts the Service.""" 77 service_main_path = os.path.join( 78 os.path.dirname(os.path.abspath(__file__)), 79 "..", 80 "_service_main.py", 81 ) 82 self.child = subprocess.Popen( 83 [sys.executable, service_main_path] + self.command, 84 cwd=self.working_dir, 85 stdin=subprocess.PIPE, 86 ) 87 88 def stop(self): 89 """Stops the Service.""" 90 self.child.stdin.close() 91 self.child.wait() 92 93 def wait(self): 94 """Waits for the Service to exit and returns its exit code.""" 95 return self.child.wait() 96 97 98 class DatabaseService(Service): 99 """Service that controls the underlying MongoDB database.""" 100 101 MIN_MONGO_VERSION = "3.6" 102 103 @property 104 def command(self): 105 return [ 106 DatabaseService.find_mongod(), 107 "--dbpath", 108 foc.DB_PATH, 109 "--logpath", 110 foc.DB_LOG_PATH, 111 ] 112 113 def start(self): 114 """Starts the DatabaseService.""" 115 for folder in (foc.DB_PATH, os.path.dirname(foc.DB_LOG_PATH)): 116 if not os.path.isdir(folder): 117 os.makedirs(folder) 118 119 super().start() 120 121 # Drop non-persistent datasets 122 import fiftyone.core.dataset as fod 123 124 fod.delete_non_persistent_datasets() 125 126 @staticmethod 127 def find_mongod(): 128 search_paths = [ 129 foc.FIFTYONE_DB_BIN_DIR, 130 os.path.join(foc.FIFTYONE_CONFIG_DIR, "bin"), 131 ] + os.environ["PATH"].split(os.pathsep) 132 searched = set() 133 attempts = [] 134 for folder in search_paths: 135 if folder in searched: 136 continue 137 searched.add(folder) 138 mongod_path = os.path.join(folder, "mongod") 139 if os.path.isfile(mongod_path): 140 ok, out, err = etau.communicate([mongod_path, "--version"]) 141 out = out.decode(errors="ignore").strip() 142 err = err.decode(errors="ignore").strip() 143 mongod_version = None 144 if ok: 145 match = re.search(r"db version.+?([\d\.]+)", out, re.I) 146 if match: 147 mongod_version = match.group(1) 148 if Version(mongod_version) >= Version( 149 DatabaseService.MIN_MONGO_VERSION 150 ): 151 return mongod_path 152 attempts.append((mongod_path, mongod_version, err)) 153 for path, version, err in attempts: 154 if version is not None: 155 logger.warn("%s: incompatible version %s" % (path, version)) 156 else: 157 logger.error("%s: failed to launch: %s" % (path, err)) 158 raise RuntimeError( 159 "Could not find mongod >= %s" % DatabaseService.MIN_MONGO_VERSION 160 ) 161 162 163 class ServerService(Service): 164 """Service that controls the FiftyOne web server.""" 165 166 working_dir = foc.SERVER_DIR 167 168 def __init__(self, port): 169 self._port = port 170 super(ServerService, self).__init__() 171 172 def start(self): 173 server_version = None 174 try: 175 server_version = requests.get( 176 "http://127.0.0.1:%i/fiftyone" % self._port, timeout=2 177 ).json()["version"] 178 except Exception: 179 # There is likely not a fiftyone server running (remote or local), 180 # so start a local server. If there actually is a fiftyone server 181 # running that didn't respond to /fiftyone, the local server will 182 # fail to start but the dashboard will still connect successfully. 183 super().start() 184 185 if server_version is not None: 186 logger.info("Connected to fiftyone on local port %i" % self._port) 187 if server_version != foc.VERSION: 188 logger.warn( 189 "Server version (%s) does not match client version (%s)" 190 % (server_version, foc.VERSION) 191 ) 192 193 @property 194 def command(self): 195 command = [ 196 "gunicorn", 197 "-w", 198 "1", 199 "--worker-class", 200 "eventlet", 201 "-b", 202 "127.0.0.1:%d" % self._port, 203 "main:app", 204 ] 205 if foc.DEV_INSTALL: 206 command += ["--reload"] 207 return command 208 209 @property 210 def port(self): 211 """Getter for the current port""" 212 return self._port 213 214 215 class AppService(Service): 216 """Service that controls the FiftyOne app.""" 217 218 working_dir = foc.FIFTYONE_APP_DIR 219 220 @property 221 def command(self): 222 with etau.WorkingDir(foc.FIFTYONE_APP_DIR): 223 if os.path.isfile("FiftyOne.AppImage"): 224 # linux 225 args = ["./FiftyOne.AppImage"] 226 elif os.path.isdir("FiftyOne.app"): 227 args = ["./FiftyOne.app/Contents/MacOS/FiftyOne"] 228 elif os.path.isfile("package.json"): 229 # dev build 230 args = ["yarn", "dev"] 231 else: 232 raise RuntimeError( 233 "Could not find FiftyOne dashboard in %r" 234 % foc.FIFTYONE_APP_DIR 235 ) 236 return args 237 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/fiftyone/core/odm/database.py b/fiftyone/core/odm/database.py --- a/fiftyone/core/odm/database.py +++ b/fiftyone/core/odm/database.py @@ -22,17 +22,23 @@ import pymongo _DEFAULT_DATABASE = "fiftyone" +_client = None -_db = connect(_DEFAULT_DATABASE) +def _connect(): + global _client + if _client is None: + connect(_DEFAULT_DATABASE) + _client = pymongo.MongoClient() def get_db_conn(): """Creates a connection to the database""" - return pymongo.MongoClient()[_DEFAULT_DATABASE] + _connect() + return _client[_DEFAULT_DATABASE] def drop_database(): """Drops the database.""" - client = pymongo.MongoClient() - client.drop_database(_DEFAULT_DATABASE) + _connect() + _client.drop_database(_DEFAULT_DATABASE) diff --git a/fiftyone/core/service.py b/fiftyone/core/service.py --- a/fiftyone/core/service.py +++ b/fiftyone/core/service.py @@ -118,6 +118,11 @@ super().start() + # Set up a default connection + import fiftyone.core.odm as foo + + foo.get_db_conn() + # Drop non-persistent datasets import fiftyone.core.dataset as fod
{"golden_diff": "diff --git a/fiftyone/core/odm/database.py b/fiftyone/core/odm/database.py\n--- a/fiftyone/core/odm/database.py\n+++ b/fiftyone/core/odm/database.py\n@@ -22,17 +22,23 @@\n import pymongo\n \n _DEFAULT_DATABASE = \"fiftyone\"\n+_client = None\n \n \n-_db = connect(_DEFAULT_DATABASE)\n+def _connect():\n+ global _client\n+ if _client is None:\n+ connect(_DEFAULT_DATABASE)\n+ _client = pymongo.MongoClient()\n \n \n def get_db_conn():\n \"\"\"Creates a connection to the database\"\"\"\n- return pymongo.MongoClient()[_DEFAULT_DATABASE]\n+ _connect()\n+ return _client[_DEFAULT_DATABASE]\n \n \n def drop_database():\n \"\"\"Drops the database.\"\"\"\n- client = pymongo.MongoClient()\n- client.drop_database(_DEFAULT_DATABASE)\n+ _connect()\n+ _client.drop_database(_DEFAULT_DATABASE)\ndiff --git a/fiftyone/core/service.py b/fiftyone/core/service.py\n--- a/fiftyone/core/service.py\n+++ b/fiftyone/core/service.py\n@@ -118,6 +118,11 @@\n \n super().start()\n \n+ # Set up a default connection\n+ import fiftyone.core.odm as foo\n+\n+ foo.get_db_conn()\n+\n # Drop non-persistent datasets\n import fiftyone.core.dataset as fod\n", "issue": "Documentation build takes a long time\nReproducible with `SPHINXOPTS=-E time ./docs/generate_docs.bash` - notably, it takes multiple minutes to build this page:\r\n```\r\nreading sources... [ 18%] api/fiftyone.core.odm \r\n```\r\n\r\nThis is at least part of the reason why documentation builds on GitHub actions are taking so long.\r\n\r\nPossibly related to #182, although I don't think earlier builds on GitHub actions ran into this.\n", "before_files": [{"content": "\"\"\"\nDatabase connection.\n\n| Copyright 2017-2020, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\n# pragma pylint: disable=redefined-builtin\n# pragma pylint: disable=unused-wildcard-import\n# pragma pylint: disable=wildcard-import\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\n# pragma pylint: enable=redefined-builtin\n# pragma pylint: enable=unused-wildcard-import\n# pragma pylint: enable=wildcard-import\n\n\nfrom mongoengine import connect\nimport pymongo\n\n_DEFAULT_DATABASE = \"fiftyone\"\n\n\n_db = connect(_DEFAULT_DATABASE)\n\n\ndef get_db_conn():\n \"\"\"Creates a connection to the database\"\"\"\n return pymongo.MongoClient()[_DEFAULT_DATABASE]\n\n\ndef drop_database():\n \"\"\"Drops the database.\"\"\"\n client = pymongo.MongoClient()\n client.drop_database(_DEFAULT_DATABASE)\n", "path": "fiftyone/core/odm/database.py"}, {"content": "\"\"\"\nFiftyOne Services.\n\n| Copyright 2017-2020, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\n# pragma pylint: disable=redefined-builtin\n# pragma pylint: disable=unused-wildcard-import\n# pragma pylint: disable=wildcard-import\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom builtins import *\n\n# pragma pylint: enable=redefined-builtin\n# pragma pylint: enable=unused-wildcard-import\n# pragma pylint: enable=wildcard-import\n\nimport logging\nimport os\nimport re\nimport subprocess\nimport sys\n\nfrom packaging.version import Version\nimport requests\n\nimport eta.core.utils as etau\n\nimport fiftyone.constants as foc\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Service(object):\n \"\"\"Interface for FiftyOne services.\n\n All services must define a ``command`` property.\n\n Services are run in an isolated Python subprocess (see ``_service_main.py``)\n to ensure that they are shut down when the main Python process exits. The\n ``command`` and ``working_dir`` properties control the execution of the\n service in the subprocess.\n \"\"\"\n\n working_dir = \".\"\n\n def __init__(self):\n \"\"\"Creates (starts) the Service.\"\"\"\n self._system = os.system\n self._is_server = os.environ.get(\n \"FIFTYONE_SERVER\", False\n ) or os.environ.get(\"FIFTYONE_DISABLE_SERVICES\", False)\n self.child = None\n if not self._is_server:\n self.start()\n\n def __del__(self):\n \"\"\"Deletes (stops) the Service.\"\"\"\n if not self._is_server:\n try:\n self.stop()\n except:\n # something probably failed due to interpreter shutdown, which\n # will be handled by _service_main.py\n pass\n\n @property\n def command(self):\n raise NotImplementedError(\"subclasses must define `command`\")\n\n def start(self):\n \"\"\"Starts the Service.\"\"\"\n service_main_path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n \"..\",\n \"_service_main.py\",\n )\n self.child = subprocess.Popen(\n [sys.executable, service_main_path] + self.command,\n cwd=self.working_dir,\n stdin=subprocess.PIPE,\n )\n\n def stop(self):\n \"\"\"Stops the Service.\"\"\"\n self.child.stdin.close()\n self.child.wait()\n\n def wait(self):\n \"\"\"Waits for the Service to exit and returns its exit code.\"\"\"\n return self.child.wait()\n\n\nclass DatabaseService(Service):\n \"\"\"Service that controls the underlying MongoDB database.\"\"\"\n\n MIN_MONGO_VERSION = \"3.6\"\n\n @property\n def command(self):\n return [\n DatabaseService.find_mongod(),\n \"--dbpath\",\n foc.DB_PATH,\n \"--logpath\",\n foc.DB_LOG_PATH,\n ]\n\n def start(self):\n \"\"\"Starts the DatabaseService.\"\"\"\n for folder in (foc.DB_PATH, os.path.dirname(foc.DB_LOG_PATH)):\n if not os.path.isdir(folder):\n os.makedirs(folder)\n\n super().start()\n\n # Drop non-persistent datasets\n import fiftyone.core.dataset as fod\n\n fod.delete_non_persistent_datasets()\n\n @staticmethod\n def find_mongod():\n search_paths = [\n foc.FIFTYONE_DB_BIN_DIR,\n os.path.join(foc.FIFTYONE_CONFIG_DIR, \"bin\"),\n ] + os.environ[\"PATH\"].split(os.pathsep)\n searched = set()\n attempts = []\n for folder in search_paths:\n if folder in searched:\n continue\n searched.add(folder)\n mongod_path = os.path.join(folder, \"mongod\")\n if os.path.isfile(mongod_path):\n ok, out, err = etau.communicate([mongod_path, \"--version\"])\n out = out.decode(errors=\"ignore\").strip()\n err = err.decode(errors=\"ignore\").strip()\n mongod_version = None\n if ok:\n match = re.search(r\"db version.+?([\\d\\.]+)\", out, re.I)\n if match:\n mongod_version = match.group(1)\n if Version(mongod_version) >= Version(\n DatabaseService.MIN_MONGO_VERSION\n ):\n return mongod_path\n attempts.append((mongod_path, mongod_version, err))\n for path, version, err in attempts:\n if version is not None:\n logger.warn(\"%s: incompatible version %s\" % (path, version))\n else:\n logger.error(\"%s: failed to launch: %s\" % (path, err))\n raise RuntimeError(\n \"Could not find mongod >= %s\" % DatabaseService.MIN_MONGO_VERSION\n )\n\n\nclass ServerService(Service):\n \"\"\"Service that controls the FiftyOne web server.\"\"\"\n\n working_dir = foc.SERVER_DIR\n\n def __init__(self, port):\n self._port = port\n super(ServerService, self).__init__()\n\n def start(self):\n server_version = None\n try:\n server_version = requests.get(\n \"http://127.0.0.1:%i/fiftyone\" % self._port, timeout=2\n ).json()[\"version\"]\n except Exception:\n # There is likely not a fiftyone server running (remote or local),\n # so start a local server. If there actually is a fiftyone server\n # running that didn't respond to /fiftyone, the local server will\n # fail to start but the dashboard will still connect successfully.\n super().start()\n\n if server_version is not None:\n logger.info(\"Connected to fiftyone on local port %i\" % self._port)\n if server_version != foc.VERSION:\n logger.warn(\n \"Server version (%s) does not match client version (%s)\"\n % (server_version, foc.VERSION)\n )\n\n @property\n def command(self):\n command = [\n \"gunicorn\",\n \"-w\",\n \"1\",\n \"--worker-class\",\n \"eventlet\",\n \"-b\",\n \"127.0.0.1:%d\" % self._port,\n \"main:app\",\n ]\n if foc.DEV_INSTALL:\n command += [\"--reload\"]\n return command\n\n @property\n def port(self):\n \"\"\"Getter for the current port\"\"\"\n return self._port\n\n\nclass AppService(Service):\n \"\"\"Service that controls the FiftyOne app.\"\"\"\n\n working_dir = foc.FIFTYONE_APP_DIR\n\n @property\n def command(self):\n with etau.WorkingDir(foc.FIFTYONE_APP_DIR):\n if os.path.isfile(\"FiftyOne.AppImage\"):\n # linux\n args = [\"./FiftyOne.AppImage\"]\n elif os.path.isdir(\"FiftyOne.app\"):\n args = [\"./FiftyOne.app/Contents/MacOS/FiftyOne\"]\n elif os.path.isfile(\"package.json\"):\n # dev build\n args = [\"yarn\", \"dev\"]\n else:\n raise RuntimeError(\n \"Could not find FiftyOne dashboard in %r\"\n % foc.FIFTYONE_APP_DIR\n )\n return args\n", "path": "fiftyone/core/service.py"}], "after_files": [{"content": "\"\"\"\nDatabase connection.\n\n| Copyright 2017-2020, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\n# pragma pylint: disable=redefined-builtin\n# pragma pylint: disable=unused-wildcard-import\n# pragma pylint: disable=wildcard-import\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\n# pragma pylint: enable=redefined-builtin\n# pragma pylint: enable=unused-wildcard-import\n# pragma pylint: enable=wildcard-import\n\n\nfrom mongoengine import connect\nimport pymongo\n\n_DEFAULT_DATABASE = \"fiftyone\"\n_client = None\n\n\ndef _connect():\n global _client\n if _client is None:\n connect(_DEFAULT_DATABASE)\n _client = pymongo.MongoClient()\n\n\ndef get_db_conn():\n \"\"\"Creates a connection to the database\"\"\"\n _connect()\n return _client[_DEFAULT_DATABASE]\n\n\ndef drop_database():\n \"\"\"Drops the database.\"\"\"\n _connect()\n _client.drop_database(_DEFAULT_DATABASE)\n", "path": "fiftyone/core/odm/database.py"}, {"content": "\"\"\"\nFiftyOne Services.\n\n| Copyright 2017-2020, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\n# pragma pylint: disable=redefined-builtin\n# pragma pylint: disable=unused-wildcard-import\n# pragma pylint: disable=wildcard-import\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom builtins import *\n\n# pragma pylint: enable=redefined-builtin\n# pragma pylint: enable=unused-wildcard-import\n# pragma pylint: enable=wildcard-import\n\nimport logging\nimport os\nimport re\nimport subprocess\nimport sys\n\nfrom packaging.version import Version\nimport requests\n\nimport eta.core.utils as etau\n\nimport fiftyone.constants as foc\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Service(object):\n \"\"\"Interface for FiftyOne services.\n\n All services must define a ``command`` property.\n\n Services are run in an isolated Python subprocess (see ``_service_main.py``)\n to ensure that they are shut down when the main Python process exits. The\n ``command`` and ``working_dir`` properties control the execution of the\n service in the subprocess.\n \"\"\"\n\n working_dir = \".\"\n\n def __init__(self):\n \"\"\"Creates (starts) the Service.\"\"\"\n self._system = os.system\n self._is_server = os.environ.get(\n \"FIFTYONE_SERVER\", False\n ) or os.environ.get(\"FIFTYONE_DISABLE_SERVICES\", False)\n self.child = None\n if not self._is_server:\n self.start()\n\n def __del__(self):\n \"\"\"Deletes (stops) the Service.\"\"\"\n if not self._is_server:\n try:\n self.stop()\n except:\n # something probably failed due to interpreter shutdown, which\n # will be handled by _service_main.py\n pass\n\n @property\n def command(self):\n raise NotImplementedError(\"subclasses must define `command`\")\n\n def start(self):\n \"\"\"Starts the Service.\"\"\"\n service_main_path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n \"..\",\n \"_service_main.py\",\n )\n self.child = subprocess.Popen(\n [sys.executable, service_main_path] + self.command,\n cwd=self.working_dir,\n stdin=subprocess.PIPE,\n )\n\n def stop(self):\n \"\"\"Stops the Service.\"\"\"\n self.child.stdin.close()\n self.child.wait()\n\n def wait(self):\n \"\"\"Waits for the Service to exit and returns its exit code.\"\"\"\n return self.child.wait()\n\n\nclass DatabaseService(Service):\n \"\"\"Service that controls the underlying MongoDB database.\"\"\"\n\n MIN_MONGO_VERSION = \"3.6\"\n\n @property\n def command(self):\n return [\n DatabaseService.find_mongod(),\n \"--dbpath\",\n foc.DB_PATH,\n \"--logpath\",\n foc.DB_LOG_PATH,\n ]\n\n def start(self):\n \"\"\"Starts the DatabaseService.\"\"\"\n for folder in (foc.DB_PATH, os.path.dirname(foc.DB_LOG_PATH)):\n if not os.path.isdir(folder):\n os.makedirs(folder)\n\n super().start()\n\n # Set up a default connection\n import fiftyone.core.odm as foo\n\n foo.get_db_conn()\n\n # Drop non-persistent datasets\n import fiftyone.core.dataset as fod\n\n fod.delete_non_persistent_datasets()\n\n @staticmethod\n def find_mongod():\n search_paths = [\n foc.FIFTYONE_DB_BIN_DIR,\n os.path.join(foc.FIFTYONE_CONFIG_DIR, \"bin\"),\n ] + os.environ[\"PATH\"].split(os.pathsep)\n searched = set()\n attempts = []\n for folder in search_paths:\n if folder in searched:\n continue\n searched.add(folder)\n mongod_path = os.path.join(folder, \"mongod\")\n if os.path.isfile(mongod_path):\n ok, out, err = etau.communicate([mongod_path, \"--version\"])\n out = out.decode(errors=\"ignore\").strip()\n err = err.decode(errors=\"ignore\").strip()\n mongod_version = None\n if ok:\n match = re.search(r\"db version.+?([\\d\\.]+)\", out, re.I)\n if match:\n mongod_version = match.group(1)\n if Version(mongod_version) >= Version(\n DatabaseService.MIN_MONGO_VERSION\n ):\n return mongod_path\n attempts.append((mongod_path, mongod_version, err))\n for path, version, err in attempts:\n if version is not None:\n logger.warn(\"%s: incompatible version %s\" % (path, version))\n else:\n logger.error(\"%s: failed to launch: %s\" % (path, err))\n raise RuntimeError(\n \"Could not find mongod >= %s\" % DatabaseService.MIN_MONGO_VERSION\n )\n\n\nclass ServerService(Service):\n \"\"\"Service that controls the FiftyOne web server.\"\"\"\n\n working_dir = foc.SERVER_DIR\n\n def __init__(self, port):\n self._port = port\n super(ServerService, self).__init__()\n\n def start(self):\n server_version = None\n try:\n server_version = requests.get(\n \"http://127.0.0.1:%i/fiftyone\" % self._port, timeout=2\n ).json()[\"version\"]\n except Exception:\n # There is likely not a fiftyone server running (remote or local),\n # so start a local server. If there actually is a fiftyone server\n # running that didn't respond to /fiftyone, the local server will\n # fail to start but the dashboard will still connect successfully.\n super().start()\n\n if server_version is not None:\n logger.info(\"Connected to fiftyone on local port %i\" % self._port)\n if server_version != foc.VERSION:\n logger.warn(\n \"Server version (%s) does not match client version (%s)\"\n % (server_version, foc.VERSION)\n )\n\n @property\n def command(self):\n command = [\n \"gunicorn\",\n \"-w\",\n \"1\",\n \"--worker-class\",\n \"eventlet\",\n \"-b\",\n \"127.0.0.1:%d\" % self._port,\n \"main:app\",\n ]\n if foc.DEV_INSTALL:\n command += [\"--reload\"]\n return command\n\n @property\n def port(self):\n \"\"\"Getter for the current port\"\"\"\n return self._port\n\n\nclass AppService(Service):\n \"\"\"Service that controls the FiftyOne app.\"\"\"\n\n working_dir = foc.FIFTYONE_APP_DIR\n\n @property\n def command(self):\n with etau.WorkingDir(foc.FIFTYONE_APP_DIR):\n if os.path.isfile(\"FiftyOne.AppImage\"):\n # linux\n args = [\"./FiftyOne.AppImage\"]\n elif os.path.isdir(\"FiftyOne.app\"):\n args = [\"./FiftyOne.app/Contents/MacOS/FiftyOne\"]\n elif os.path.isfile(\"package.json\"):\n # dev build\n args = [\"yarn\", \"dev\"]\n else:\n raise RuntimeError(\n \"Could not find FiftyOne dashboard in %r\"\n % foc.FIFTYONE_APP_DIR\n )\n return args\n", "path": "fiftyone/core/service.py"}]}
2,868
311
gh_patches_debug_23865
rasdani/github-patches
git_diff
angr__angr-2127
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- DefUseGraph.top_predecessors() may trigger an infinite recursion <!-- *Disclaimer: The angr suite is maintained by a small team of volunteers. While we cannot guarantee any timeliness for fixes and enhancements, we will do our best. For more real-time help with angr, from us and the community, join our [Slack.](http://angr.io/invite/)* --> **Describe the bug.** <!-- Please include a clear and concise description of what the bug is. --> Infinite recursion triggered by one of the test cases. Raw log is [here](https://dev.azure.com/angr/64bc1357-22a8-47d9-a89e-588cd888be53/_apis/build/builds/1354/logs/104). ``` ====================================================================== ERROR: test_def_use_graph.transplant_class.<locals>.C (test_top_predecessors_should_not_contain_duplicates) ---------------------------------------------------------------------- Traceback (most recent call last): File "/__w/1/s/build/src/angr/tests/test_def_use_graph.py", line 100, in test_top_predecessors_should_not_contain_duplicates result = def_use_graph.top_predecessors(D) File "/__w/1/s/build/src/angr/angr/analyses/reaching_definitions/def_use_graph.py", line 121, in top_predecessors return _top_predecessors(definition, self._graph, []) File "/__w/1/s/build/src/angr/angr/analyses/reaching_definitions/def_use_graph.py", line 118, in _top_predecessors result File "/__w/1/s/build/src/angr/angr/analyses/reaching_definitions/def_use_graph.py", line 116, in <lambda> lambda acc, definition: _top_predecessors(definition, graph, acc), File "/__w/1/s/build/src/angr/angr/analyses/reaching_definitions/def_use_graph.py", line 118, in _top_predecessors result File "/__w/1/s/build/src/angr/angr/analyses/reaching_definitions/def_use_graph.py", line 116, in <lambda> lambda acc, definition: _top_predecessors(definition, graph, acc), File "/__w/1/s/build/src/angr/angr/analyses/reaching_definitions/def_use_graph.py", line 118, in _top_predecessors result File "/__w/1/s/build/src/angr/angr/analyses/reaching_definitions/def_use_graph.py", line 116, in <lambda> lambda acc, definition: _top_predecessors(definition, graph, acc), ...snip... File "/__w/1/s/build/src/angr/angr/analyses/reaching_definitions/def_use_graph.py", line 116, in <lambda> lambda acc, definition: _top_predecessors(definition, graph, acc), File "/__w/1/s/build/src/angr/angr/analyses/reaching_definitions/def_use_graph.py", line 118, in _top_predecessors result File "/__w/1/s/build/src/angr/angr/analyses/reaching_definitions/def_use_graph.py", line 116, in <lambda> lambda acc, definition: _top_predecessors(definition, graph, acc), File "/__w/1/s/build/src/angr/angr/analyses/reaching_definitions/def_use_graph.py", line 110, in _top_predecessors predecessors = list(graph.predecessors(definition)) File "/__w/1/s/build/virtualenv/lib/python3.6/site-packages/networkx/classes/digraph.py", line 837, in predecessors return iter(self._pred[n]) File "/__w/1/s/build/src/angr/angr/analyses/reaching_definitions/definition.py", line 40, in __hash__ return hash((self.atom, self.codeloc, self.data)) RecursionError: maximum recursion depth exceeded while calling a Python object ``` **Environment Information.** <!-- Many common issues are caused by problems with the local Python environment. Before submitting, double-check that your versions of all modules in the angr suite (angr, cle, pyvex, ...) are up to date. Please include the output of `python -m angr.misc.bug_report` here. --> Latest master branches of all projects (except for CLE) by 1/5/2020 and `cle:feat/pe_fast_load`. **To Reproduce.** <!-- Please include *both a script to reproduce the crash, and attach the binary used, if possible* --> Cannot be reproduced reliably. But running the test case multiple times should trigger it (if lucky). **Additional context.** <!-- Add any other context about the problem here. --> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `angr/analyses/reaching_definitions/dep_graph.py` Content: ``` 1 from typing import Optional, Dict, List 2 from functools import reduce 3 4 import networkx 5 6 from .definition import Definition 7 8 9 def _is_definition(node): 10 return isinstance(node, Definition) 11 12 13 class DepGraph: 14 """ 15 The representation of a dependency graph: a directed graph, where nodes are definitions, and edges represent uses. 16 17 Mostly a wrapper around a <networkx.DiGraph>. 18 """ 19 20 def __init__(self, graph: Optional[networkx.DiGraph]=None): 21 """ 22 :param graph: A graph where nodes are definitions, and edges represent uses. 23 """ 24 # Used for memoization of the `transitive_closure` method. 25 self._transitive_closures: Dict = {} 26 27 if graph and not all(map(_is_definition, graph.nodes)): 28 raise TypeError("In a DepGraph, nodes need to be <%s>s." % Definition.__name__) 29 30 self._graph = graph if graph is not None else networkx.DiGraph() 31 32 @property 33 def graph(self) -> networkx.DiGraph: 34 return self._graph 35 36 def add_node(self, node: Definition) -> None: 37 """ 38 :param node: The definition to add to the definition-use graph. 39 """ 40 if not _is_definition(node): 41 raise TypeError("In a DepGraph, nodes need to be <%s>s." % Definition.__name__) 42 43 self._graph.add_node(node) 44 45 def add_edge(self, source: Definition, destination: Definition, **labels) -> None: 46 """ 47 The edge to add to the definition-use graph. Will create nodes that are not yet present. 48 49 :param source: The "source" definition, used by the "destination". 50 :param destination: The "destination" definition, using the variable defined by "source". 51 :param labels: Optional keyword arguments to represent edge labels. 52 """ 53 if not _is_definition(source) and not _is_definition(destination): 54 raise TypeError("In a DepGraph, edges need to be between <%s>s." % Definition.__name__) 55 56 self._graph.add_edge(source, destination, **labels) 57 58 def transitive_closure(self, definition: Definition) -> networkx.DiGraph: 59 """ 60 Compute the "transitive closure" of a given definition. 61 Obtained by transitively aggregating the ancestors of this definition in the graph. 62 63 Note: Each definition is memoized to avoid any kind of recomputation across the lifetime of this object. 64 65 :param definition: The Definition to get transitive closure for. 66 :return: A graph of the transitive closure of the given definition. 67 """ 68 69 def _transitive_closure(def_: Definition, graph: networkx.DiGraph, result: networkx.DiGraph): 70 if def_ in self._transitive_closures.keys(): 71 return self._transitive_closures[def_] 72 73 predecessors = list(graph.predecessors(def_)) 74 75 result.add_node(def_) 76 result.add_edges_from(list(map( 77 lambda e: (*e, graph.get_edge_data(*e)), 78 map( 79 lambda p: (p, def_), 80 predecessors 81 ) 82 ))) 83 84 closure = reduce( 85 lambda acc, definition: _transitive_closure(definition, graph, acc), 86 predecessors, 87 result 88 ) 89 90 self._transitive_closures[def_] = closure 91 return closure 92 93 return _transitive_closure(definition, self._graph, networkx.DiGraph()) 94 95 def top_predecessors(self, definition: Definition) -> List[Definition]: 96 """ 97 Recover the "entrypoint definitions" flowing into a given definition. 98 Obtained by transitively computing the top-level ancestors (nodes without predecessors) of this definition in 99 the graph. 100 101 :param definition: The <Definition> to return the top-level ancestors for. 102 :return: The list of top-level definitions flowing into the <node>. 103 """ 104 105 def _top_predecessors(def_: Definition, graph: networkx.DiGraph, result: List[Definition]): 106 predecessors = list(graph.predecessors(def_)) 107 108 if len(predecessors) == 0 and def_ not in result: 109 return result + [ def_ ] 110 111 return reduce( 112 lambda acc, definition: _top_predecessors(definition, graph, acc), 113 predecessors, 114 result 115 ) 116 117 return _top_predecessors(definition, self._graph, []) 118 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/angr/analyses/reaching_definitions/dep_graph.py b/angr/analyses/reaching_definitions/dep_graph.py --- a/angr/analyses/reaching_definitions/dep_graph.py +++ b/angr/analyses/reaching_definitions/dep_graph.py @@ -1,4 +1,4 @@ -from typing import Optional, Dict, List +from typing import Optional, Dict from functools import reduce import networkx @@ -91,27 +91,3 @@ return closure return _transitive_closure(definition, self._graph, networkx.DiGraph()) - - def top_predecessors(self, definition: Definition) -> List[Definition]: - """ - Recover the "entrypoint definitions" flowing into a given definition. - Obtained by transitively computing the top-level ancestors (nodes without predecessors) of this definition in - the graph. - - :param definition: The <Definition> to return the top-level ancestors for. - :return: The list of top-level definitions flowing into the <node>. - """ - - def _top_predecessors(def_: Definition, graph: networkx.DiGraph, result: List[Definition]): - predecessors = list(graph.predecessors(def_)) - - if len(predecessors) == 0 and def_ not in result: - return result + [ def_ ] - - return reduce( - lambda acc, definition: _top_predecessors(definition, graph, acc), - predecessors, - result - ) - - return _top_predecessors(definition, self._graph, [])
{"golden_diff": "diff --git a/angr/analyses/reaching_definitions/dep_graph.py b/angr/analyses/reaching_definitions/dep_graph.py\n--- a/angr/analyses/reaching_definitions/dep_graph.py\n+++ b/angr/analyses/reaching_definitions/dep_graph.py\n@@ -1,4 +1,4 @@\n-from typing import Optional, Dict, List\n+from typing import Optional, Dict\n from functools import reduce\n \n import networkx\n@@ -91,27 +91,3 @@\n return closure\n \n return _transitive_closure(definition, self._graph, networkx.DiGraph())\n-\n- def top_predecessors(self, definition: Definition) -> List[Definition]:\n- \"\"\"\n- Recover the \"entrypoint definitions\" flowing into a given definition.\n- Obtained by transitively computing the top-level ancestors (nodes without predecessors) of this definition in\n- the graph.\n-\n- :param definition: The <Definition> to return the top-level ancestors for.\n- :return: The list of top-level definitions flowing into the <node>.\n- \"\"\"\n-\n- def _top_predecessors(def_: Definition, graph: networkx.DiGraph, result: List[Definition]):\n- predecessors = list(graph.predecessors(def_))\n-\n- if len(predecessors) == 0 and def_ not in result:\n- return result + [ def_ ]\n-\n- return reduce(\n- lambda acc, definition: _top_predecessors(definition, graph, acc),\n- predecessors,\n- result\n- )\n-\n- return _top_predecessors(definition, self._graph, [])\n", "issue": "DefUseGraph.top_predecessors() may trigger an infinite recursion\n<!--\r\n*Disclaimer:\r\nThe angr suite is maintained by a small team of volunteers.\r\nWhile we cannot guarantee any timeliness for fixes and enhancements, we will do our best.\r\nFor more real-time help with angr, from us and the community, join our [Slack.](http://angr.io/invite/)*\r\n-->\r\n\r\n**Describe the bug.**\r\n<!--\r\nPlease include a clear and concise description of what the bug is.\r\n-->\r\n\r\nInfinite recursion triggered by one of the test cases. Raw log is [here](https://dev.azure.com/angr/64bc1357-22a8-47d9-a89e-588cd888be53/_apis/build/builds/1354/logs/104).\r\n\r\n```\r\n======================================================================\r\nERROR: test_def_use_graph.transplant_class.<locals>.C (test_top_predecessors_should_not_contain_duplicates)\r\n----------------------------------------------------------------------\r\nTraceback (most recent call last):\r\n File \"/__w/1/s/build/src/angr/tests/test_def_use_graph.py\", line 100, in test_top_predecessors_should_not_contain_duplicates\r\n result = def_use_graph.top_predecessors(D)\r\n File \"/__w/1/s/build/src/angr/angr/analyses/reaching_definitions/def_use_graph.py\", line 121, in top_predecessors\r\n return _top_predecessors(definition, self._graph, [])\r\n File \"/__w/1/s/build/src/angr/angr/analyses/reaching_definitions/def_use_graph.py\", line 118, in _top_predecessors\r\n result\r\n File \"/__w/1/s/build/src/angr/angr/analyses/reaching_definitions/def_use_graph.py\", line 116, in <lambda>\r\n lambda acc, definition: _top_predecessors(definition, graph, acc),\r\n File \"/__w/1/s/build/src/angr/angr/analyses/reaching_definitions/def_use_graph.py\", line 118, in _top_predecessors\r\n result\r\n File \"/__w/1/s/build/src/angr/angr/analyses/reaching_definitions/def_use_graph.py\", line 116, in <lambda>\r\n lambda acc, definition: _top_predecessors(definition, graph, acc),\r\n File \"/__w/1/s/build/src/angr/angr/analyses/reaching_definitions/def_use_graph.py\", line 118, in _top_predecessors\r\n result\r\n File \"/__w/1/s/build/src/angr/angr/analyses/reaching_definitions/def_use_graph.py\", line 116, in <lambda>\r\n lambda acc, definition: _top_predecessors(definition, graph, acc),\r\n...snip...\r\n File \"/__w/1/s/build/src/angr/angr/analyses/reaching_definitions/def_use_graph.py\", line 116, in <lambda>\r\n lambda acc, definition: _top_predecessors(definition, graph, acc),\r\n File \"/__w/1/s/build/src/angr/angr/analyses/reaching_definitions/def_use_graph.py\", line 118, in _top_predecessors\r\n result\r\n File \"/__w/1/s/build/src/angr/angr/analyses/reaching_definitions/def_use_graph.py\", line 116, in <lambda>\r\n lambda acc, definition: _top_predecessors(definition, graph, acc),\r\n File \"/__w/1/s/build/src/angr/angr/analyses/reaching_definitions/def_use_graph.py\", line 110, in _top_predecessors\r\n predecessors = list(graph.predecessors(definition))\r\n File \"/__w/1/s/build/virtualenv/lib/python3.6/site-packages/networkx/classes/digraph.py\", line 837, in predecessors\r\n return iter(self._pred[n])\r\n File \"/__w/1/s/build/src/angr/angr/analyses/reaching_definitions/definition.py\", line 40, in __hash__\r\n return hash((self.atom, self.codeloc, self.data))\r\nRecursionError: maximum recursion depth exceeded while calling a Python object\r\n```\r\n\r\n\r\n**Environment Information.**\r\n<!--\r\nMany common issues are caused by problems with the local Python environment.\r\nBefore submitting, double-check that your versions of all modules in the angr suite (angr, cle, pyvex, ...) are up to date.\r\nPlease include the output of `python -m angr.misc.bug_report` here.\r\n-->\r\n\r\nLatest master branches of all projects (except for CLE) by 1/5/2020 and `cle:feat/pe_fast_load`.\r\n\r\n**To Reproduce.**\r\n<!--\r\nPlease include *both a script to reproduce the crash, and attach the binary used, if possible*\r\n-->\r\n\r\nCannot be reproduced reliably. But running the test case multiple times should trigger it (if lucky).\r\n\r\n**Additional context.**\r\n<!--\r\nAdd any other context about the problem here.\r\n-->\r\n\n", "before_files": [{"content": "from typing import Optional, Dict, List\nfrom functools import reduce\n\nimport networkx\n\nfrom .definition import Definition\n\n\ndef _is_definition(node):\n return isinstance(node, Definition)\n\n\nclass DepGraph:\n \"\"\"\n The representation of a dependency graph: a directed graph, where nodes are definitions, and edges represent uses.\n\n Mostly a wrapper around a <networkx.DiGraph>.\n \"\"\"\n\n def __init__(self, graph: Optional[networkx.DiGraph]=None):\n \"\"\"\n :param graph: A graph where nodes are definitions, and edges represent uses.\n \"\"\"\n # Used for memoization of the `transitive_closure` method.\n self._transitive_closures: Dict = {}\n\n if graph and not all(map(_is_definition, graph.nodes)):\n raise TypeError(\"In a DepGraph, nodes need to be <%s>s.\" % Definition.__name__)\n\n self._graph = graph if graph is not None else networkx.DiGraph()\n\n @property\n def graph(self) -> networkx.DiGraph:\n return self._graph\n\n def add_node(self, node: Definition) -> None:\n \"\"\"\n :param node: The definition to add to the definition-use graph.\n \"\"\"\n if not _is_definition(node):\n raise TypeError(\"In a DepGraph, nodes need to be <%s>s.\" % Definition.__name__)\n\n self._graph.add_node(node)\n\n def add_edge(self, source: Definition, destination: Definition, **labels) -> None:\n \"\"\"\n The edge to add to the definition-use graph. Will create nodes that are not yet present.\n\n :param source: The \"source\" definition, used by the \"destination\".\n :param destination: The \"destination\" definition, using the variable defined by \"source\".\n :param labels: Optional keyword arguments to represent edge labels.\n \"\"\"\n if not _is_definition(source) and not _is_definition(destination):\n raise TypeError(\"In a DepGraph, edges need to be between <%s>s.\" % Definition.__name__)\n\n self._graph.add_edge(source, destination, **labels)\n\n def transitive_closure(self, definition: Definition) -> networkx.DiGraph:\n \"\"\"\n Compute the \"transitive closure\" of a given definition.\n Obtained by transitively aggregating the ancestors of this definition in the graph.\n\n Note: Each definition is memoized to avoid any kind of recomputation across the lifetime of this object.\n\n :param definition: The Definition to get transitive closure for.\n :return: A graph of the transitive closure of the given definition.\n \"\"\"\n\n def _transitive_closure(def_: Definition, graph: networkx.DiGraph, result: networkx.DiGraph):\n if def_ in self._transitive_closures.keys():\n return self._transitive_closures[def_]\n\n predecessors = list(graph.predecessors(def_))\n\n result.add_node(def_)\n result.add_edges_from(list(map(\n lambda e: (*e, graph.get_edge_data(*e)),\n map(\n lambda p: (p, def_),\n predecessors\n )\n )))\n\n closure = reduce(\n lambda acc, definition: _transitive_closure(definition, graph, acc),\n predecessors,\n result\n )\n\n self._transitive_closures[def_] = closure\n return closure\n\n return _transitive_closure(definition, self._graph, networkx.DiGraph())\n\n def top_predecessors(self, definition: Definition) -> List[Definition]:\n \"\"\"\n Recover the \"entrypoint definitions\" flowing into a given definition.\n Obtained by transitively computing the top-level ancestors (nodes without predecessors) of this definition in\n the graph.\n\n :param definition: The <Definition> to return the top-level ancestors for.\n :return: The list of top-level definitions flowing into the <node>.\n \"\"\"\n\n def _top_predecessors(def_: Definition, graph: networkx.DiGraph, result: List[Definition]):\n predecessors = list(graph.predecessors(def_))\n\n if len(predecessors) == 0 and def_ not in result:\n return result + [ def_ ]\n\n return reduce(\n lambda acc, definition: _top_predecessors(definition, graph, acc),\n predecessors,\n result\n )\n\n return _top_predecessors(definition, self._graph, [])\n", "path": "angr/analyses/reaching_definitions/dep_graph.py"}], "after_files": [{"content": "from typing import Optional, Dict\nfrom functools import reduce\n\nimport networkx\n\nfrom .definition import Definition\n\n\ndef _is_definition(node):\n return isinstance(node, Definition)\n\n\nclass DepGraph:\n \"\"\"\n The representation of a dependency graph: a directed graph, where nodes are definitions, and edges represent uses.\n\n Mostly a wrapper around a <networkx.DiGraph>.\n \"\"\"\n\n def __init__(self, graph: Optional[networkx.DiGraph]=None):\n \"\"\"\n :param graph: A graph where nodes are definitions, and edges represent uses.\n \"\"\"\n # Used for memoization of the `transitive_closure` method.\n self._transitive_closures: Dict = {}\n\n if graph and not all(map(_is_definition, graph.nodes)):\n raise TypeError(\"In a DepGraph, nodes need to be <%s>s.\" % Definition.__name__)\n\n self._graph = graph if graph is not None else networkx.DiGraph()\n\n @property\n def graph(self) -> networkx.DiGraph:\n return self._graph\n\n def add_node(self, node: Definition) -> None:\n \"\"\"\n :param node: The definition to add to the definition-use graph.\n \"\"\"\n if not _is_definition(node):\n raise TypeError(\"In a DepGraph, nodes need to be <%s>s.\" % Definition.__name__)\n\n self._graph.add_node(node)\n\n def add_edge(self, source: Definition, destination: Definition, **labels) -> None:\n \"\"\"\n The edge to add to the definition-use graph. Will create nodes that are not yet present.\n\n :param source: The \"source\" definition, used by the \"destination\".\n :param destination: The \"destination\" definition, using the variable defined by \"source\".\n :param labels: Optional keyword arguments to represent edge labels.\n \"\"\"\n if not _is_definition(source) and not _is_definition(destination):\n raise TypeError(\"In a DepGraph, edges need to be between <%s>s.\" % Definition.__name__)\n\n self._graph.add_edge(source, destination, **labels)\n\n def transitive_closure(self, definition: Definition) -> networkx.DiGraph:\n \"\"\"\n Compute the \"transitive closure\" of a given definition.\n Obtained by transitively aggregating the ancestors of this definition in the graph.\n\n Note: Each definition is memoized to avoid any kind of recomputation across the lifetime of this object.\n\n :param definition: The Definition to get transitive closure for.\n :return: A graph of the transitive closure of the given definition.\n \"\"\"\n\n def _transitive_closure(def_: Definition, graph: networkx.DiGraph, result: networkx.DiGraph):\n if def_ in self._transitive_closures.keys():\n return self._transitive_closures[def_]\n\n predecessors = list(graph.predecessors(def_))\n\n result.add_node(def_)\n result.add_edges_from(list(map(\n lambda e: (*e, graph.get_edge_data(*e)),\n map(\n lambda p: (p, def_),\n predecessors\n )\n )))\n\n closure = reduce(\n lambda acc, definition: _transitive_closure(definition, graph, acc),\n predecessors,\n result\n )\n\n self._transitive_closures[def_] = closure\n return closure\n\n return _transitive_closure(definition, self._graph, networkx.DiGraph())\n", "path": "angr/analyses/reaching_definitions/dep_graph.py"}]}
2,575
357
gh_patches_debug_22279
rasdani/github-patches
git_diff
chainer__chainer-243
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add type check to NonparameterizedConvolution2D function Related to #123 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `chainer/functions/nonparameterized_convolution_2d.py` Content: ``` 1 from chainer import cuda 2 from chainer import function 3 from chainer.functions import convolution_2d as conv2d_module 4 5 6 class NonparameterizedConvolution2D(function.Function): 7 8 """Two-dimensional nonparameterized convolution class. 9 10 Args: 11 stride (int or (int, int)): Stride of filter applications. 12 ``stride=s`` and ``stride=(s, s)`` are equivalent. 13 pad (int or (int, int)): Spatial padding width for input arrays. 14 ``pad=p`` and ``pad=(p, p)`` are equivalent. 15 use_cudnn (bool): If True, then this function uses CuDNN if available. 16 17 .. seealso:: :class:`Convolution2D` 18 19 """ 20 def __init__(self, stride=1, pad=0, use_cudnn=True): 21 self.stride = stride 22 self.pad = pad 23 24 self.use_cudnn = use_cudnn 25 26 def forward(self, x): 27 W = x[1] 28 b = None 29 if len(x) == 3: 30 b = x[2] 31 func = conv2d_module.Convolution2D( 32 W.shape[1], W.shape[0], W.shape[2:], 33 stride=self.stride, pad=self.pad, use_cudnn=self.use_cudnn, 34 initialW=W, initial_bias=b) 35 self.func = func 36 if any(isinstance(i, cuda.GPUArray) for i in x): 37 func.to_gpu() 38 return func.forward(x[:1]) 39 40 def backward(self, x, gy): 41 func = self.func 42 func.zero_grads() 43 gx = func.backward(x[:1], gy) 44 if func.gb is None: 45 return (gx[0], func.gW) 46 return (gx[0], func.gW, func.gb) 47 48 49 def convolution_2d(x, W, b=None, stride=1, pad=0, use_cudnn=True): 50 """Two-dimensional convolution function. 51 52 Args: 53 x (~chainer.Variable): Input variable. 54 W (~chainer.Variable): Weight variable. 55 b (~chainer.Variable): Bias variable. 56 stride (int or (int, int)): Stride of filter applications. 57 ``stride=s`` and ``stride=(s, s)`` are equivalent. 58 pad (int or (int, int)): Spatial padding width for input arrays. 59 ``pad=p`` and ``pad=(p, p)`` are equivalent. 60 use_cudnn (bool): If True, then this function uses CuDNN if available. 61 62 Returns: 63 ~chainer.Variable: Output variable. 64 65 .. seealso:: :class:`Convolution2D` 66 67 """ 68 return NonparameterizedConvolution2D( 69 stride=stride, pad=pad, use_cudnn=use_cudnn)(x, W, b) 70 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/chainer/functions/nonparameterized_convolution_2d.py b/chainer/functions/nonparameterized_convolution_2d.py --- a/chainer/functions/nonparameterized_convolution_2d.py +++ b/chainer/functions/nonparameterized_convolution_2d.py @@ -1,6 +1,9 @@ +import numpy + from chainer import cuda from chainer import function from chainer.functions import convolution_2d as conv2d_module +from chainer.utils import type_check class NonparameterizedConvolution2D(function.Function): @@ -23,6 +26,30 @@ self.use_cudnn = use_cudnn + def check_type_forward(self, in_types): + type_check.expect( + 2 <= in_types.size(), + in_types.size() <= 3, + ) + + x_type = in_types[0] + w_type = in_types[1] + type_check.expect( + x_type.dtype == numpy.float32, + w_type.dtype == numpy.float32, + x_type.ndim == 4, + w_type.ndim == 4, + x_type.shape[1] == w_type.shape[1], + ) + + if in_types.size().eval() == 3: + b_type = in_types[2] + type_check.expect( + b_type.dtype == numpy.float32, + b_type.ndim == 1, + b_type.shape[0] == w_type.shape[0], + ) + def forward(self, x): W = x[1] b = None
{"golden_diff": "diff --git a/chainer/functions/nonparameterized_convolution_2d.py b/chainer/functions/nonparameterized_convolution_2d.py\n--- a/chainer/functions/nonparameterized_convolution_2d.py\n+++ b/chainer/functions/nonparameterized_convolution_2d.py\n@@ -1,6 +1,9 @@\n+import numpy\n+\n from chainer import cuda\n from chainer import function\n from chainer.functions import convolution_2d as conv2d_module\n+from chainer.utils import type_check\n \n \n class NonparameterizedConvolution2D(function.Function):\n@@ -23,6 +26,30 @@\n \n self.use_cudnn = use_cudnn\n \n+ def check_type_forward(self, in_types):\n+ type_check.expect(\n+ 2 <= in_types.size(),\n+ in_types.size() <= 3,\n+ )\n+\n+ x_type = in_types[0]\n+ w_type = in_types[1]\n+ type_check.expect(\n+ x_type.dtype == numpy.float32,\n+ w_type.dtype == numpy.float32,\n+ x_type.ndim == 4,\n+ w_type.ndim == 4,\n+ x_type.shape[1] == w_type.shape[1],\n+ )\n+\n+ if in_types.size().eval() == 3:\n+ b_type = in_types[2]\n+ type_check.expect(\n+ b_type.dtype == numpy.float32,\n+ b_type.ndim == 1,\n+ b_type.shape[0] == w_type.shape[0],\n+ )\n+\n def forward(self, x):\n W = x[1]\n b = None\n", "issue": "Add type check to NonparameterizedConvolution2D function\nRelated to #123 \n\n", "before_files": [{"content": "from chainer import cuda\nfrom chainer import function\nfrom chainer.functions import convolution_2d as conv2d_module\n\n\nclass NonparameterizedConvolution2D(function.Function):\n\n \"\"\"Two-dimensional nonparameterized convolution class.\n\n Args:\n stride (int or (int, int)): Stride of filter applications.\n ``stride=s`` and ``stride=(s, s)`` are equivalent.\n pad (int or (int, int)): Spatial padding width for input arrays.\n ``pad=p`` and ``pad=(p, p)`` are equivalent.\n use_cudnn (bool): If True, then this function uses CuDNN if available.\n\n .. seealso:: :class:`Convolution2D`\n\n \"\"\"\n def __init__(self, stride=1, pad=0, use_cudnn=True):\n self.stride = stride\n self.pad = pad\n\n self.use_cudnn = use_cudnn\n\n def forward(self, x):\n W = x[1]\n b = None\n if len(x) == 3:\n b = x[2]\n func = conv2d_module.Convolution2D(\n W.shape[1], W.shape[0], W.shape[2:],\n stride=self.stride, pad=self.pad, use_cudnn=self.use_cudnn,\n initialW=W, initial_bias=b)\n self.func = func\n if any(isinstance(i, cuda.GPUArray) for i in x):\n func.to_gpu()\n return func.forward(x[:1])\n\n def backward(self, x, gy):\n func = self.func\n func.zero_grads()\n gx = func.backward(x[:1], gy)\n if func.gb is None:\n return (gx[0], func.gW)\n return (gx[0], func.gW, func.gb)\n\n\ndef convolution_2d(x, W, b=None, stride=1, pad=0, use_cudnn=True):\n \"\"\"Two-dimensional convolution function.\n\n Args:\n x (~chainer.Variable): Input variable.\n W (~chainer.Variable): Weight variable.\n b (~chainer.Variable): Bias variable.\n stride (int or (int, int)): Stride of filter applications.\n ``stride=s`` and ``stride=(s, s)`` are equivalent.\n pad (int or (int, int)): Spatial padding width for input arrays.\n ``pad=p`` and ``pad=(p, p)`` are equivalent.\n use_cudnn (bool): If True, then this function uses CuDNN if available.\n\n Returns:\n ~chainer.Variable: Output variable.\n\n .. seealso:: :class:`Convolution2D`\n\n \"\"\"\n return NonparameterizedConvolution2D(\n stride=stride, pad=pad, use_cudnn=use_cudnn)(x, W, b)\n", "path": "chainer/functions/nonparameterized_convolution_2d.py"}], "after_files": [{"content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.functions import convolution_2d as conv2d_module\nfrom chainer.utils import type_check\n\n\nclass NonparameterizedConvolution2D(function.Function):\n\n \"\"\"Two-dimensional nonparameterized convolution class.\n\n Args:\n stride (int or (int, int)): Stride of filter applications.\n ``stride=s`` and ``stride=(s, s)`` are equivalent.\n pad (int or (int, int)): Spatial padding width for input arrays.\n ``pad=p`` and ``pad=(p, p)`` are equivalent.\n use_cudnn (bool): If True, then this function uses CuDNN if available.\n\n .. seealso:: :class:`Convolution2D`\n\n \"\"\"\n def __init__(self, stride=1, pad=0, use_cudnn=True):\n self.stride = stride\n self.pad = pad\n\n self.use_cudnn = use_cudnn\n\n def check_type_forward(self, in_types):\n type_check.expect(\n 2 <= in_types.size(),\n in_types.size() <= 3,\n )\n\n x_type = in_types[0]\n w_type = in_types[1]\n type_check.expect(\n x_type.dtype == numpy.float32,\n w_type.dtype == numpy.float32,\n x_type.ndim == 4,\n w_type.ndim == 4,\n x_type.shape[1] == w_type.shape[1],\n )\n\n if in_types.size().eval() == 3:\n b_type = in_types[2]\n type_check.expect(\n b_type.dtype == numpy.float32,\n b_type.ndim == 1,\n b_type.shape[0] == w_type.shape[0],\n )\n\n def forward(self, x):\n W = x[1]\n b = None\n if len(x) == 3:\n b = x[2]\n func = conv2d_module.Convolution2D(\n W.shape[1], W.shape[0], W.shape[2:],\n stride=self.stride, pad=self.pad, use_cudnn=self.use_cudnn,\n initialW=W, initial_bias=b)\n self.func = func\n if any(isinstance(i, cuda.GPUArray) for i in x):\n func.to_gpu()\n return func.forward(x[:1])\n\n def backward(self, x, gy):\n func = self.func\n func.zero_grads()\n gx = func.backward(x[:1], gy)\n if func.gb is None:\n return (gx[0], func.gW)\n return (gx[0], func.gW, func.gb)\n\n\ndef convolution_2d(x, W, b=None, stride=1, pad=0, use_cudnn=True):\n \"\"\"Two-dimensional convolution function.\n\n Args:\n x (~chainer.Variable): Input variable.\n W (~chainer.Variable): Weight variable.\n b (~chainer.Variable): Bias variable.\n stride (int or (int, int)): Stride of filter applications.\n ``stride=s`` and ``stride=(s, s)`` are equivalent.\n pad (int or (int, int)): Spatial padding width for input arrays.\n ``pad=p`` and ``pad=(p, p)`` are equivalent.\n use_cudnn (bool): If True, then this function uses CuDNN if available.\n\n Returns:\n ~chainer.Variable: Output variable.\n\n .. seealso:: :class:`Convolution2D`\n\n \"\"\"\n return NonparameterizedConvolution2D(\n stride=stride, pad=pad, use_cudnn=use_cudnn)(x, W, b)\n", "path": "chainer/functions/nonparameterized_convolution_2d.py"}]}
1,039
361
gh_patches_debug_23091
rasdani/github-patches
git_diff
pytorch__ignite-984
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Deprecate ignite.contrib.handlers.custom_events.CustomPeriodicEvent ## 🚀 Feature Custom events `CustomPeriodicEvent` from contrib seem heavy and unusable. Idea is to - [ ] raise a warning about deprecation since v0.4.0 and removing since v0.5.0 and suggest to use filtered events. - [ ] remove all docs about them --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ignite/contrib/handlers/custom_events.py` Content: ``` 1 from ignite.engine import Events, State, EventEnum 2 3 4 class CustomPeriodicEvent: 5 """Handler to define a custom periodic events as a number of elapsed iterations/epochs for an engine. 6 7 When custom periodic event is created and attached to an engine, the following events are fired: 8 1) K iterations is specified: 9 - `Events.ITERATIONS_<K>_STARTED` 10 - `Events.ITERATIONS_<K>_COMPLETED` 11 12 1) K epochs is specified: 13 - `Events.EPOCHS_<K>_STARTED` 14 - `Events.EPOCHS_<K>_COMPLETED` 15 16 17 Examples: 18 19 .. code-block:: python 20 21 from ignite.engine import Engine, Events 22 from ignite.contrib.handlers import CustomPeriodicEvent 23 24 # Let's define an event every 1000 iterations 25 cpe1 = CustomPeriodicEvent(n_iterations=1000) 26 cpe1.attach(trainer) 27 28 # Let's define an event every 10 epochs 29 cpe2 = CustomPeriodicEvent(n_epochs=10) 30 cpe2.attach(trainer) 31 32 @trainer.on(cpe1.Events.ITERATIONS_1000_COMPLETED) 33 def on_every_1000_iterations(engine): 34 # run a computation after 1000 iterations 35 # ... 36 print(engine.state.iterations_1000) 37 38 @trainer.on(cpe2.Events.EPOCHS_10_STARTED) 39 def on_every_10_epochs(engine): 40 # run a computation every 10 epochs 41 # ... 42 print(engine.state.epochs_10) 43 44 45 Args: 46 n_iterations (int, optional): number iterations of the custom periodic event 47 n_epochs (int, optional): number iterations of the custom periodic event. Argument is optional, but only one, 48 either n_iterations or n_epochs should defined. 49 50 """ 51 52 def __init__(self, n_iterations=None, n_epochs=None): 53 54 if n_iterations is not None and (not isinstance(n_iterations, int) or n_iterations < 1): 55 raise ValueError("Argument n_iterations should be positive integer number") 56 57 if n_epochs is not None and (not isinstance(n_epochs, int) or n_epochs < 1): 58 raise ValueError("Argument n_epochs should be positive integer number") 59 60 if (n_iterations is None and n_epochs is None) or (n_iterations and n_epochs): 61 raise ValueError("Either n_iterations or n_epochs should defined") 62 63 if n_iterations: 64 prefix = "iterations" 65 self.state_attr = "iteration" 66 self.period = n_iterations 67 68 if n_epochs: 69 prefix = "epochs" 70 self.state_attr = "epoch" 71 self.period = n_epochs 72 73 self.custom_state_attr = "{}_{}".format(prefix, self.period) 74 event_name = "{}_{}".format(prefix.upper(), self.period) 75 setattr( 76 self, 77 "Events", 78 EventEnum("Events", " ".join(["{}_STARTED".format(event_name), "{}_COMPLETED".format(event_name)])), 79 ) 80 81 # Update State.event_to_attr 82 for e in self.Events: 83 State.event_to_attr[e] = self.custom_state_attr 84 85 # Create aliases 86 self._periodic_event_started = getattr(self.Events, "{}_STARTED".format(event_name)) 87 self._periodic_event_completed = getattr(self.Events, "{}_COMPLETED".format(event_name)) 88 89 def _on_started(self, engine): 90 setattr(engine.state, self.custom_state_attr, 0) 91 92 def _on_periodic_event_started(self, engine): 93 if getattr(engine.state, self.state_attr) % self.period == 1: 94 setattr(engine.state, self.custom_state_attr, getattr(engine.state, self.custom_state_attr) + 1) 95 engine.fire_event(self._periodic_event_started) 96 97 def _on_periodic_event_completed(self, engine): 98 if getattr(engine.state, self.state_attr) % self.period == 0: 99 engine.fire_event(self._periodic_event_completed) 100 101 def attach(self, engine): 102 engine.register_events(*self.Events) 103 104 engine.add_event_handler(Events.STARTED, self._on_started) 105 engine.add_event_handler( 106 getattr(Events, "{}_STARTED".format(self.state_attr.upper())), self._on_periodic_event_started 107 ) 108 engine.add_event_handler( 109 getattr(Events, "{}_COMPLETED".format(self.state_attr.upper())), self._on_periodic_event_completed 110 ) 111 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ignite/contrib/handlers/custom_events.py b/ignite/contrib/handlers/custom_events.py --- a/ignite/contrib/handlers/custom_events.py +++ b/ignite/contrib/handlers/custom_events.py @@ -1,8 +1,11 @@ from ignite.engine import Events, State, EventEnum +import warnings class CustomPeriodicEvent: - """Handler to define a custom periodic events as a number of elapsed iterations/epochs for an engine. + """DEPRECATED. Use filtered events instead. + Handler to define a custom periodic events as a number of elapsed iterations/epochs + for an engine. When custom periodic event is created and attached to an engine, the following events are fired: 1) K iterations is specified: @@ -51,6 +54,11 @@ def __init__(self, n_iterations=None, n_epochs=None): + warnings.warn( + "CustomPeriodicEvent is deprecated since 0.4.0 and will be removed in 0.5.0. Use filtered events instead.", + DeprecationWarning, + ) + if n_iterations is not None and (not isinstance(n_iterations, int) or n_iterations < 1): raise ValueError("Argument n_iterations should be positive integer number")
{"golden_diff": "diff --git a/ignite/contrib/handlers/custom_events.py b/ignite/contrib/handlers/custom_events.py\n--- a/ignite/contrib/handlers/custom_events.py\n+++ b/ignite/contrib/handlers/custom_events.py\n@@ -1,8 +1,11 @@\n from ignite.engine import Events, State, EventEnum\n+import warnings\n \n \n class CustomPeriodicEvent:\n- \"\"\"Handler to define a custom periodic events as a number of elapsed iterations/epochs for an engine.\n+ \"\"\"DEPRECATED. Use filtered events instead.\n+ Handler to define a custom periodic events as a number of elapsed iterations/epochs\n+ for an engine.\n \n When custom periodic event is created and attached to an engine, the following events are fired:\n 1) K iterations is specified:\n@@ -51,6 +54,11 @@\n \n def __init__(self, n_iterations=None, n_epochs=None):\n \n+ warnings.warn(\n+ \"CustomPeriodicEvent is deprecated since 0.4.0 and will be removed in 0.5.0. Use filtered events instead.\",\n+ DeprecationWarning,\n+ )\n+\n if n_iterations is not None and (not isinstance(n_iterations, int) or n_iterations < 1):\n raise ValueError(\"Argument n_iterations should be positive integer number\")\n", "issue": "Deprecate ignite.contrib.handlers.custom_events.CustomPeriodicEvent\n## \ud83d\ude80 Feature\r\n\r\nCustom events `CustomPeriodicEvent` from contrib seem heavy and unusable. \r\n\r\nIdea is to \r\n\r\n- [ ] raise a warning about deprecation since v0.4.0 and removing since v0.5.0 and suggest to use filtered events.\r\n- [ ] remove all docs about them \r\n\n", "before_files": [{"content": "from ignite.engine import Events, State, EventEnum\n\n\nclass CustomPeriodicEvent:\n \"\"\"Handler to define a custom periodic events as a number of elapsed iterations/epochs for an engine.\n\n When custom periodic event is created and attached to an engine, the following events are fired:\n 1) K iterations is specified:\n - `Events.ITERATIONS_<K>_STARTED`\n - `Events.ITERATIONS_<K>_COMPLETED`\n\n 1) K epochs is specified:\n - `Events.EPOCHS_<K>_STARTED`\n - `Events.EPOCHS_<K>_COMPLETED`\n\n\n Examples:\n\n .. code-block:: python\n\n from ignite.engine import Engine, Events\n from ignite.contrib.handlers import CustomPeriodicEvent\n\n # Let's define an event every 1000 iterations\n cpe1 = CustomPeriodicEvent(n_iterations=1000)\n cpe1.attach(trainer)\n\n # Let's define an event every 10 epochs\n cpe2 = CustomPeriodicEvent(n_epochs=10)\n cpe2.attach(trainer)\n\n @trainer.on(cpe1.Events.ITERATIONS_1000_COMPLETED)\n def on_every_1000_iterations(engine):\n # run a computation after 1000 iterations\n # ...\n print(engine.state.iterations_1000)\n\n @trainer.on(cpe2.Events.EPOCHS_10_STARTED)\n def on_every_10_epochs(engine):\n # run a computation every 10 epochs\n # ...\n print(engine.state.epochs_10)\n\n\n Args:\n n_iterations (int, optional): number iterations of the custom periodic event\n n_epochs (int, optional): number iterations of the custom periodic event. Argument is optional, but only one,\n either n_iterations or n_epochs should defined.\n\n \"\"\"\n\n def __init__(self, n_iterations=None, n_epochs=None):\n\n if n_iterations is not None and (not isinstance(n_iterations, int) or n_iterations < 1):\n raise ValueError(\"Argument n_iterations should be positive integer number\")\n\n if n_epochs is not None and (not isinstance(n_epochs, int) or n_epochs < 1):\n raise ValueError(\"Argument n_epochs should be positive integer number\")\n\n if (n_iterations is None and n_epochs is None) or (n_iterations and n_epochs):\n raise ValueError(\"Either n_iterations or n_epochs should defined\")\n\n if n_iterations:\n prefix = \"iterations\"\n self.state_attr = \"iteration\"\n self.period = n_iterations\n\n if n_epochs:\n prefix = \"epochs\"\n self.state_attr = \"epoch\"\n self.period = n_epochs\n\n self.custom_state_attr = \"{}_{}\".format(prefix, self.period)\n event_name = \"{}_{}\".format(prefix.upper(), self.period)\n setattr(\n self,\n \"Events\",\n EventEnum(\"Events\", \" \".join([\"{}_STARTED\".format(event_name), \"{}_COMPLETED\".format(event_name)])),\n )\n\n # Update State.event_to_attr\n for e in self.Events:\n State.event_to_attr[e] = self.custom_state_attr\n\n # Create aliases\n self._periodic_event_started = getattr(self.Events, \"{}_STARTED\".format(event_name))\n self._periodic_event_completed = getattr(self.Events, \"{}_COMPLETED\".format(event_name))\n\n def _on_started(self, engine):\n setattr(engine.state, self.custom_state_attr, 0)\n\n def _on_periodic_event_started(self, engine):\n if getattr(engine.state, self.state_attr) % self.period == 1:\n setattr(engine.state, self.custom_state_attr, getattr(engine.state, self.custom_state_attr) + 1)\n engine.fire_event(self._periodic_event_started)\n\n def _on_periodic_event_completed(self, engine):\n if getattr(engine.state, self.state_attr) % self.period == 0:\n engine.fire_event(self._periodic_event_completed)\n\n def attach(self, engine):\n engine.register_events(*self.Events)\n\n engine.add_event_handler(Events.STARTED, self._on_started)\n engine.add_event_handler(\n getattr(Events, \"{}_STARTED\".format(self.state_attr.upper())), self._on_periodic_event_started\n )\n engine.add_event_handler(\n getattr(Events, \"{}_COMPLETED\".format(self.state_attr.upper())), self._on_periodic_event_completed\n )\n", "path": "ignite/contrib/handlers/custom_events.py"}], "after_files": [{"content": "from ignite.engine import Events, State, EventEnum\nimport warnings\n\n\nclass CustomPeriodicEvent:\n \"\"\"DEPRECATED. Use filtered events instead.\n Handler to define a custom periodic events as a number of elapsed iterations/epochs\n for an engine.\n\n When custom periodic event is created and attached to an engine, the following events are fired:\n 1) K iterations is specified:\n - `Events.ITERATIONS_<K>_STARTED`\n - `Events.ITERATIONS_<K>_COMPLETED`\n\n 1) K epochs is specified:\n - `Events.EPOCHS_<K>_STARTED`\n - `Events.EPOCHS_<K>_COMPLETED`\n\n\n Examples:\n\n .. code-block:: python\n\n from ignite.engine import Engine, Events\n from ignite.contrib.handlers import CustomPeriodicEvent\n\n # Let's define an event every 1000 iterations\n cpe1 = CustomPeriodicEvent(n_iterations=1000)\n cpe1.attach(trainer)\n\n # Let's define an event every 10 epochs\n cpe2 = CustomPeriodicEvent(n_epochs=10)\n cpe2.attach(trainer)\n\n @trainer.on(cpe1.Events.ITERATIONS_1000_COMPLETED)\n def on_every_1000_iterations(engine):\n # run a computation after 1000 iterations\n # ...\n print(engine.state.iterations_1000)\n\n @trainer.on(cpe2.Events.EPOCHS_10_STARTED)\n def on_every_10_epochs(engine):\n # run a computation every 10 epochs\n # ...\n print(engine.state.epochs_10)\n\n\n Args:\n n_iterations (int, optional): number iterations of the custom periodic event\n n_epochs (int, optional): number iterations of the custom periodic event. Argument is optional, but only one,\n either n_iterations or n_epochs should defined.\n\n \"\"\"\n\n def __init__(self, n_iterations=None, n_epochs=None):\n\n warnings.warn(\n \"CustomPeriodicEvent is deprecated since 0.4.0 and will be removed in 0.5.0. Use filtered events instead.\",\n DeprecationWarning,\n )\n\n if n_iterations is not None and (not isinstance(n_iterations, int) or n_iterations < 1):\n raise ValueError(\"Argument n_iterations should be positive integer number\")\n\n if n_epochs is not None and (not isinstance(n_epochs, int) or n_epochs < 1):\n raise ValueError(\"Argument n_epochs should be positive integer number\")\n\n if (n_iterations is None and n_epochs is None) or (n_iterations and n_epochs):\n raise ValueError(\"Either n_iterations or n_epochs should defined\")\n\n if n_iterations:\n prefix = \"iterations\"\n self.state_attr = \"iteration\"\n self.period = n_iterations\n\n if n_epochs:\n prefix = \"epochs\"\n self.state_attr = \"epoch\"\n self.period = n_epochs\n\n self.custom_state_attr = \"{}_{}\".format(prefix, self.period)\n event_name = \"{}_{}\".format(prefix.upper(), self.period)\n setattr(\n self,\n \"Events\",\n EventEnum(\"Events\", \" \".join([\"{}_STARTED\".format(event_name), \"{}_COMPLETED\".format(event_name)])),\n )\n\n # Update State.event_to_attr\n for e in self.Events:\n State.event_to_attr[e] = self.custom_state_attr\n\n # Create aliases\n self._periodic_event_started = getattr(self.Events, \"{}_STARTED\".format(event_name))\n self._periodic_event_completed = getattr(self.Events, \"{}_COMPLETED\".format(event_name))\n\n def _on_started(self, engine):\n setattr(engine.state, self.custom_state_attr, 0)\n\n def _on_periodic_event_started(self, engine):\n if getattr(engine.state, self.state_attr) % self.period == 1:\n setattr(engine.state, self.custom_state_attr, getattr(engine.state, self.custom_state_attr) + 1)\n engine.fire_event(self._periodic_event_started)\n\n def _on_periodic_event_completed(self, engine):\n if getattr(engine.state, self.state_attr) % self.period == 0:\n engine.fire_event(self._periodic_event_completed)\n\n def attach(self, engine):\n engine.register_events(*self.Events)\n\n engine.add_event_handler(Events.STARTED, self._on_started)\n engine.add_event_handler(\n getattr(Events, \"{}_STARTED\".format(self.state_attr.upper())), self._on_periodic_event_started\n )\n engine.add_event_handler(\n getattr(Events, \"{}_COMPLETED\".format(self.state_attr.upper())), self._on_periodic_event_completed\n )\n", "path": "ignite/contrib/handlers/custom_events.py"}]}
1,533
286
gh_patches_debug_13028
rasdani/github-patches
git_diff
aws-cloudformation__cfn-lint-778
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Update rule E2541 to not error when action names are not strings *Issue #, if available:* Fix #752 *Description of changes:* - Update rule E2541 to only check for duplicate names when the action name is a string By submitting this pull request, I confirm that you can use, modify, copy, and redistribute this contribution, under the terms of your choice. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/cfnlint/transform.py` Content: ``` 1 """ 2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 4 Permission is hereby granted, free of charge, to any person obtaining a copy of this 5 software and associated documentation files (the "Software"), to deal in the Software 6 without restriction, including without limitation the rights to use, copy, modify, 7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to 8 permit persons to whom the Software is furnished to do so. 9 10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, 11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 16 """ 17 import os 18 import logging 19 import six 20 import samtranslator 21 from samtranslator.parser import parser 22 from samtranslator.translator.translator import Translator 23 from samtranslator.public.exceptions import InvalidDocumentException 24 25 import cfnlint.helpers 26 LOGGER = logging.getLogger('cfnlint') 27 28 class Transform(object): 29 """ 30 Application Serverless Module tranform Wrappor. Based on code from AWS SAM CLI: 31 https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/commands/validate/lib/sam_template_validator.py 32 """ 33 34 def __init__(self, filename, template, region): 35 """ 36 Initialize Transform class 37 """ 38 self._filename = filename 39 self._template = template 40 self._region = region 41 42 self._managed_policy_map = self.load_managed_policies() 43 self._sam_parser = parser.Parser() 44 45 def template(self): 46 """Get the template""" 47 return self._template 48 49 def load_managed_policies(self): 50 """ 51 Load the ManagedPolicies locally, based on the AWS-CLI: 52 https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/lib/samlib/default_managed_policies.json 53 """ 54 return cfnlint.helpers.load_resources('data/Serverless/ManagedPolicies.json') 55 56 def _replace_local_codeuri(self): 57 """ 58 Replaces the CodeUri in AWS::Serverless::Function and DefinitionUri in AWS::Serverless::Api to a fake 59 S3 Uri. This is to support running the SAM Translator with valid values for these fields. If this in not done, 60 the template is invalid in the eyes of SAM Translator (the translator does not support local paths) 61 """ 62 63 all_resources = self._template.get('Resources', {}) 64 65 for _, resource in all_resources.items(): 66 67 resource_type = resource.get('Type') 68 resource_dict = resource.get('Properties') 69 70 if resource_type == 'AWS::Serverless::Function': 71 72 Transform._update_to_s3_uri('CodeUri', resource_dict) 73 if resource_type in ['AWS::Serverless::LayerVersion']: 74 if resource_dict.get('ContentUri'): 75 Transform._update_to_s3_uri('ContentUri', resource_dict) 76 if resource_type == 'AWS::Serverless::Application': 77 if resource_dict.get('Location'): 78 resource_dict['Location'] = '' 79 Transform._update_to_s3_uri('Location', resource_dict) 80 if resource_type == 'AWS::Serverless::Api': 81 if 'DefinitionBody' not in resource_dict: 82 Transform._update_to_s3_uri('DefinitionUri', resource_dict) 83 84 def transform_template(self): 85 """ 86 Transform the Template using the Serverless Application Model. 87 """ 88 matches = [] 89 90 try: 91 # Output the SAM Translator version in debug mode 92 LOGGER.debug('SAM Translator: %s', samtranslator.__version__) 93 94 sam_translator = Translator(managed_policy_map=self._managed_policy_map, 95 sam_parser=self._sam_parser) 96 97 self._replace_local_codeuri() 98 99 # Tell SAM to use the region we're linting in, this has to be controlled using the default AWS mechanisms, see also: 100 # https://github.com/awslabs/serverless-application-model/blob/master/samtranslator/translator/arn_generator.py 101 LOGGER.debug('Setting AWS_DEFAULT_REGION to %s', self._region) 102 os.environ['AWS_DEFAULT_REGION'] = self._region 103 104 self._template = cfnlint.helpers.convert_dict( 105 sam_translator.translate(sam_template=self._template, parameter_values={})) 106 107 LOGGER.debug('Transformed template: %s', self._template) 108 except InvalidDocumentException as e: 109 message = 'Error transforming template: {0}' 110 for cause in e.causes: 111 matches.append(cfnlint.Match( 112 1, 1, 113 1, 1, 114 self._filename, cfnlint.TransformError(), message.format(cause.message))) 115 except Exception as e: # pylint: disable=W0703 116 LOGGER.debug('Error transforming template: %s', str(e)) 117 LOGGER.debug('Stack trace: %s', e, exc_info=True) 118 message = 'Error transforming template: {0}' 119 matches.append(cfnlint.Match( 120 1, 1, 121 1, 1, 122 self._filename, cfnlint.TransformError(), message.format(str(e)))) 123 124 return matches 125 126 @staticmethod 127 def is_s3_uri(uri): 128 """ 129 Checks the uri and determines if it is a valid S3 Uri 130 Parameters 131 ---------- 132 uri str, required 133 Uri to check 134 Returns 135 ------- 136 bool 137 Returns True if the uri given is an S3 uri, otherwise False 138 """ 139 return isinstance(uri, six.string_types) and uri.startswith('s3://') 140 141 @staticmethod 142 def _update_to_s3_uri(property_key, resource_property_dict, s3_uri_value='s3://bucket/value'): 143 """ 144 Updates the 'property_key' in the 'resource_property_dict' to the value of 's3_uri_value' 145 Note: The function will mutate the resource_property_dict that is pass in 146 Parameters 147 ---------- 148 property_key str, required 149 Key in the resource_property_dict 150 resource_property_dict dict, required 151 Property dictionary of a Resource in the template to replace 152 s3_uri_value str, optional 153 Value to update the value of the property_key to 154 """ 155 uri_property = resource_property_dict.get(property_key, '.') 156 157 # ignore if dict or already an S3 Uri 158 if isinstance(uri_property, dict) or Transform.is_s3_uri(uri_property): 159 return 160 161 resource_property_dict[property_key] = s3_uri_value 162 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/cfnlint/transform.py b/src/cfnlint/transform.py --- a/src/cfnlint/transform.py +++ b/src/cfnlint/transform.py @@ -78,8 +78,10 @@ resource_dict['Location'] = '' Transform._update_to_s3_uri('Location', resource_dict) if resource_type == 'AWS::Serverless::Api': - if 'DefinitionBody' not in resource_dict: + if 'DefinitionBody' not in resource_dict and 'Auth' not in resource_dict: Transform._update_to_s3_uri('DefinitionUri', resource_dict) + else: + resource_dict['DefinitionBody'] = '' def transform_template(self): """
{"golden_diff": "diff --git a/src/cfnlint/transform.py b/src/cfnlint/transform.py\n--- a/src/cfnlint/transform.py\n+++ b/src/cfnlint/transform.py\n@@ -78,8 +78,10 @@\n resource_dict['Location'] = ''\n Transform._update_to_s3_uri('Location', resource_dict)\n if resource_type == 'AWS::Serverless::Api':\n- if 'DefinitionBody' not in resource_dict:\n+ if 'DefinitionBody' not in resource_dict and 'Auth' not in resource_dict:\n Transform._update_to_s3_uri('DefinitionUri', resource_dict)\n+ else:\n+ resource_dict['DefinitionBody'] = ''\n \n def transform_template(self):\n \"\"\"\n", "issue": "Update rule E2541 to not error when action names are not strings\n*Issue #, if available:*\r\nFix #752\r\n*Description of changes:*\r\n- Update rule E2541 to only check for duplicate names when the action name is a string\r\n\r\nBy submitting this pull request, I confirm that you can use, modify, copy, and redistribute this contribution, under the terms of your choice.\r\n\n", "before_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport os\nimport logging\nimport six\nimport samtranslator\nfrom samtranslator.parser import parser\nfrom samtranslator.translator.translator import Translator\nfrom samtranslator.public.exceptions import InvalidDocumentException\n\nimport cfnlint.helpers\nLOGGER = logging.getLogger('cfnlint')\n\nclass Transform(object):\n \"\"\"\n Application Serverless Module tranform Wrappor. Based on code from AWS SAM CLI:\n https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/commands/validate/lib/sam_template_validator.py\n \"\"\"\n\n def __init__(self, filename, template, region):\n \"\"\"\n Initialize Transform class\n \"\"\"\n self._filename = filename\n self._template = template\n self._region = region\n\n self._managed_policy_map = self.load_managed_policies()\n self._sam_parser = parser.Parser()\n\n def template(self):\n \"\"\"Get the template\"\"\"\n return self._template\n\n def load_managed_policies(self):\n \"\"\"\n Load the ManagedPolicies locally, based on the AWS-CLI:\n https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/lib/samlib/default_managed_policies.json\n \"\"\"\n return cfnlint.helpers.load_resources('data/Serverless/ManagedPolicies.json')\n\n def _replace_local_codeuri(self):\n \"\"\"\n Replaces the CodeUri in AWS::Serverless::Function and DefinitionUri in AWS::Serverless::Api to a fake\n S3 Uri. This is to support running the SAM Translator with valid values for these fields. If this in not done,\n the template is invalid in the eyes of SAM Translator (the translator does not support local paths)\n \"\"\"\n\n all_resources = self._template.get('Resources', {})\n\n for _, resource in all_resources.items():\n\n resource_type = resource.get('Type')\n resource_dict = resource.get('Properties')\n\n if resource_type == 'AWS::Serverless::Function':\n\n Transform._update_to_s3_uri('CodeUri', resource_dict)\n if resource_type in ['AWS::Serverless::LayerVersion']:\n if resource_dict.get('ContentUri'):\n Transform._update_to_s3_uri('ContentUri', resource_dict)\n if resource_type == 'AWS::Serverless::Application':\n if resource_dict.get('Location'):\n resource_dict['Location'] = ''\n Transform._update_to_s3_uri('Location', resource_dict)\n if resource_type == 'AWS::Serverless::Api':\n if 'DefinitionBody' not in resource_dict:\n Transform._update_to_s3_uri('DefinitionUri', resource_dict)\n\n def transform_template(self):\n \"\"\"\n Transform the Template using the Serverless Application Model.\n \"\"\"\n matches = []\n\n try:\n # Output the SAM Translator version in debug mode\n LOGGER.debug('SAM Translator: %s', samtranslator.__version__)\n\n sam_translator = Translator(managed_policy_map=self._managed_policy_map,\n sam_parser=self._sam_parser)\n\n self._replace_local_codeuri()\n\n # Tell SAM to use the region we're linting in, this has to be controlled using the default AWS mechanisms, see also:\n # https://github.com/awslabs/serverless-application-model/blob/master/samtranslator/translator/arn_generator.py\n LOGGER.debug('Setting AWS_DEFAULT_REGION to %s', self._region)\n os.environ['AWS_DEFAULT_REGION'] = self._region\n\n self._template = cfnlint.helpers.convert_dict(\n sam_translator.translate(sam_template=self._template, parameter_values={}))\n\n LOGGER.debug('Transformed template: %s', self._template)\n except InvalidDocumentException as e:\n message = 'Error transforming template: {0}'\n for cause in e.causes:\n matches.append(cfnlint.Match(\n 1, 1,\n 1, 1,\n self._filename, cfnlint.TransformError(), message.format(cause.message)))\n except Exception as e: # pylint: disable=W0703\n LOGGER.debug('Error transforming template: %s', str(e))\n LOGGER.debug('Stack trace: %s', e, exc_info=True)\n message = 'Error transforming template: {0}'\n matches.append(cfnlint.Match(\n 1, 1,\n 1, 1,\n self._filename, cfnlint.TransformError(), message.format(str(e))))\n\n return matches\n\n @staticmethod\n def is_s3_uri(uri):\n \"\"\"\n Checks the uri and determines if it is a valid S3 Uri\n Parameters\n ----------\n uri str, required\n Uri to check\n Returns\n -------\n bool\n Returns True if the uri given is an S3 uri, otherwise False\n \"\"\"\n return isinstance(uri, six.string_types) and uri.startswith('s3://')\n\n @staticmethod\n def _update_to_s3_uri(property_key, resource_property_dict, s3_uri_value='s3://bucket/value'):\n \"\"\"\n Updates the 'property_key' in the 'resource_property_dict' to the value of 's3_uri_value'\n Note: The function will mutate the resource_property_dict that is pass in\n Parameters\n ----------\n property_key str, required\n Key in the resource_property_dict\n resource_property_dict dict, required\n Property dictionary of a Resource in the template to replace\n s3_uri_value str, optional\n Value to update the value of the property_key to\n \"\"\"\n uri_property = resource_property_dict.get(property_key, '.')\n\n # ignore if dict or already an S3 Uri\n if isinstance(uri_property, dict) or Transform.is_s3_uri(uri_property):\n return\n\n resource_property_dict[property_key] = s3_uri_value\n", "path": "src/cfnlint/transform.py"}], "after_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport os\nimport logging\nimport six\nimport samtranslator\nfrom samtranslator.parser import parser\nfrom samtranslator.translator.translator import Translator\nfrom samtranslator.public.exceptions import InvalidDocumentException\n\nimport cfnlint.helpers\nLOGGER = logging.getLogger('cfnlint')\n\nclass Transform(object):\n \"\"\"\n Application Serverless Module tranform Wrappor. Based on code from AWS SAM CLI:\n https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/commands/validate/lib/sam_template_validator.py\n \"\"\"\n\n def __init__(self, filename, template, region):\n \"\"\"\n Initialize Transform class\n \"\"\"\n self._filename = filename\n self._template = template\n self._region = region\n\n self._managed_policy_map = self.load_managed_policies()\n self._sam_parser = parser.Parser()\n\n def template(self):\n \"\"\"Get the template\"\"\"\n return self._template\n\n def load_managed_policies(self):\n \"\"\"\n Load the ManagedPolicies locally, based on the AWS-CLI:\n https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/lib/samlib/default_managed_policies.json\n \"\"\"\n return cfnlint.helpers.load_resources('data/Serverless/ManagedPolicies.json')\n\n def _replace_local_codeuri(self):\n \"\"\"\n Replaces the CodeUri in AWS::Serverless::Function and DefinitionUri in AWS::Serverless::Api to a fake\n S3 Uri. This is to support running the SAM Translator with valid values for these fields. If this in not done,\n the template is invalid in the eyes of SAM Translator (the translator does not support local paths)\n \"\"\"\n\n all_resources = self._template.get('Resources', {})\n\n for _, resource in all_resources.items():\n\n resource_type = resource.get('Type')\n resource_dict = resource.get('Properties')\n\n if resource_type == 'AWS::Serverless::Function':\n\n Transform._update_to_s3_uri('CodeUri', resource_dict)\n if resource_type in ['AWS::Serverless::LayerVersion']:\n if resource_dict.get('ContentUri'):\n Transform._update_to_s3_uri('ContentUri', resource_dict)\n if resource_type == 'AWS::Serverless::Application':\n if resource_dict.get('Location'):\n resource_dict['Location'] = ''\n Transform._update_to_s3_uri('Location', resource_dict)\n if resource_type == 'AWS::Serverless::Api':\n if 'DefinitionBody' not in resource_dict and 'Auth' not in resource_dict:\n Transform._update_to_s3_uri('DefinitionUri', resource_dict)\n else:\n resource_dict['DefinitionBody'] = ''\n\n def transform_template(self):\n \"\"\"\n Transform the Template using the Serverless Application Model.\n \"\"\"\n matches = []\n\n try:\n # Output the SAM Translator version in debug mode\n LOGGER.debug('SAM Translator: %s', samtranslator.__version__)\n\n sam_translator = Translator(managed_policy_map=self._managed_policy_map,\n sam_parser=self._sam_parser)\n\n self._replace_local_codeuri()\n\n # Tell SAM to use the region we're linting in, this has to be controlled using the default AWS mechanisms, see also:\n # https://github.com/awslabs/serverless-application-model/blob/master/samtranslator/translator/arn_generator.py\n LOGGER.debug('Setting AWS_DEFAULT_REGION to %s', self._region)\n os.environ['AWS_DEFAULT_REGION'] = self._region\n\n self._template = cfnlint.helpers.convert_dict(\n sam_translator.translate(sam_template=self._template, parameter_values={}))\n\n LOGGER.debug('Transformed template: %s', self._template)\n except InvalidDocumentException as e:\n message = 'Error transforming template: {0}'\n for cause in e.causes:\n matches.append(cfnlint.Match(\n 1, 1,\n 1, 1,\n self._filename, cfnlint.TransformError(), message.format(cause.message)))\n except Exception as e: # pylint: disable=W0703\n LOGGER.debug('Error transforming template: %s', str(e))\n LOGGER.debug('Stack trace: %s', e, exc_info=True)\n message = 'Error transforming template: {0}'\n matches.append(cfnlint.Match(\n 1, 1,\n 1, 1,\n self._filename, cfnlint.TransformError(), message.format(str(e))))\n\n return matches\n\n @staticmethod\n def is_s3_uri(uri):\n \"\"\"\n Checks the uri and determines if it is a valid S3 Uri\n Parameters\n ----------\n uri str, required\n Uri to check\n Returns\n -------\n bool\n Returns True if the uri given is an S3 uri, otherwise False\n \"\"\"\n return isinstance(uri, six.string_types) and uri.startswith('s3://')\n\n @staticmethod\n def _update_to_s3_uri(property_key, resource_property_dict, s3_uri_value='s3://bucket/value'):\n \"\"\"\n Updates the 'property_key' in the 'resource_property_dict' to the value of 's3_uri_value'\n Note: The function will mutate the resource_property_dict that is pass in\n Parameters\n ----------\n property_key str, required\n Key in the resource_property_dict\n resource_property_dict dict, required\n Property dictionary of a Resource in the template to replace\n s3_uri_value str, optional\n Value to update the value of the property_key to\n \"\"\"\n uri_property = resource_property_dict.get(property_key, '.')\n\n # ignore if dict or already an S3 Uri\n if isinstance(uri_property, dict) or Transform.is_s3_uri(uri_property):\n return\n\n resource_property_dict[property_key] = s3_uri_value\n", "path": "src/cfnlint/transform.py"}]}
2,161
158
gh_patches_debug_720
rasdani/github-patches
git_diff
liqd__a4-opin-766
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- cannot delete user in django admin if user has not uploaded avatar --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `euth/users/signals.py` Content: ``` 1 from django.db.models import signals 2 from django.dispatch import receiver 3 4 from adhocracy4.images import services 5 6 from . import models 7 8 9 @receiver(signals.post_init, sender=models.User) 10 def backup_image_path(sender, instance, **kwargs): 11 instance._current_image_file = instance.avatar 12 13 14 @receiver(signals.post_save, sender=models.User) 15 def delete_old_image(sender, instance, **kwargs): 16 if hasattr(instance, '_current_image_file'): 17 if instance._current_image_file != instance.avatar: 18 services.delete_images([instance._current_image_file]) 19 20 21 @receiver(signals.post_delete, sender=models.User) 22 def delete_images_for_User(sender, instance, **kwargs): 23 services.delete_images([instance.avatar]) 24 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/euth/users/signals.py b/euth/users/signals.py --- a/euth/users/signals.py +++ b/euth/users/signals.py @@ -20,4 +20,4 @@ @receiver(signals.post_delete, sender=models.User) def delete_images_for_User(sender, instance, **kwargs): - services.delete_images([instance.avatar]) + services.delete_images([instance._avatar])
{"golden_diff": "diff --git a/euth/users/signals.py b/euth/users/signals.py\n--- a/euth/users/signals.py\n+++ b/euth/users/signals.py\n@@ -20,4 +20,4 @@\n \n @receiver(signals.post_delete, sender=models.User)\n def delete_images_for_User(sender, instance, **kwargs):\n- services.delete_images([instance.avatar])\n+ services.delete_images([instance._avatar])\n", "issue": "cannot delete user in django admin if user has not uploaded avatar \n\n", "before_files": [{"content": "from django.db.models import signals\nfrom django.dispatch import receiver\n\nfrom adhocracy4.images import services\n\nfrom . import models\n\n\n@receiver(signals.post_init, sender=models.User)\ndef backup_image_path(sender, instance, **kwargs):\n instance._current_image_file = instance.avatar\n\n\n@receiver(signals.post_save, sender=models.User)\ndef delete_old_image(sender, instance, **kwargs):\n if hasattr(instance, '_current_image_file'):\n if instance._current_image_file != instance.avatar:\n services.delete_images([instance._current_image_file])\n\n\n@receiver(signals.post_delete, sender=models.User)\ndef delete_images_for_User(sender, instance, **kwargs):\n services.delete_images([instance.avatar])\n", "path": "euth/users/signals.py"}], "after_files": [{"content": "from django.db.models import signals\nfrom django.dispatch import receiver\n\nfrom adhocracy4.images import services\n\nfrom . import models\n\n\n@receiver(signals.post_init, sender=models.User)\ndef backup_image_path(sender, instance, **kwargs):\n instance._current_image_file = instance.avatar\n\n\n@receiver(signals.post_save, sender=models.User)\ndef delete_old_image(sender, instance, **kwargs):\n if hasattr(instance, '_current_image_file'):\n if instance._current_image_file != instance.avatar:\n services.delete_images([instance._current_image_file])\n\n\n@receiver(signals.post_delete, sender=models.User)\ndef delete_images_for_User(sender, instance, **kwargs):\n services.delete_images([instance._avatar])\n", "path": "euth/users/signals.py"}]}
464
91
gh_patches_debug_951
rasdani/github-patches
git_diff
pytorch__ignite-844
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Typehint of ignite._utils._to_hours_mins_secs not satisfied with float ## 🐛 Bug description That is a so tiny bug. The `typehint` of the following function of `ignite._utils` is not satisfied with a `float` argument ``` python def _to_hours_mins_secs(time_taken: Union[float, int]) -> Tuple[int, int, int]: """Convert seconds to hours, mins, and seconds.""" mins, secs = divmod(time_taken, 60) hours, mins = divmod(mins, 60) return hours, mins, secs ``` We have ```python >>> divmod(10.0,2) (5.0, 0.0) ``` ## Environment - PyTorch Version (e.g., 1.4): 1.4 - Ignite Version (e.g., 0.3.0): 0.3.0 - OS (e.g., Linux): Linux - How you installed Ignite (`conda`, `pip`, source): conda - Python version: 3.7 - Any other relevant information: --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ignite/_utils.py` Content: ``` 1 from typing import Union, Tuple 2 3 # For compatibilty 4 from ignite.utils import convert_tensor, apply_to_tensor, apply_to_type, to_onehot 5 6 7 def _to_hours_mins_secs(time_taken: Union[float, int]) -> Tuple[int, int, int]: 8 """Convert seconds to hours, mins, and seconds.""" 9 mins, secs = divmod(time_taken, 60) 10 hours, mins = divmod(mins, 60) 11 return hours, mins, secs 12 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ignite/_utils.py b/ignite/_utils.py --- a/ignite/_utils.py +++ b/ignite/_utils.py @@ -8,4 +8,4 @@ """Convert seconds to hours, mins, and seconds.""" mins, secs = divmod(time_taken, 60) hours, mins = divmod(mins, 60) - return hours, mins, secs + return round(hours), round(mins), round(secs)
{"golden_diff": "diff --git a/ignite/_utils.py b/ignite/_utils.py\n--- a/ignite/_utils.py\n+++ b/ignite/_utils.py\n@@ -8,4 +8,4 @@\n \"\"\"Convert seconds to hours, mins, and seconds.\"\"\"\n mins, secs = divmod(time_taken, 60)\n hours, mins = divmod(mins, 60)\n- return hours, mins, secs\n+ return round(hours), round(mins), round(secs)\n", "issue": "Typehint of ignite._utils._to_hours_mins_secs not satisfied with float\n## \ud83d\udc1b Bug description\r\n\r\nThat is a so tiny bug. The `typehint` of the following function of `ignite._utils` is not satisfied with a `float` argument\r\n``` python\r\ndef _to_hours_mins_secs(time_taken: Union[float, int]) -> Tuple[int, int, int]:\r\n \"\"\"Convert seconds to hours, mins, and seconds.\"\"\"\r\n mins, secs = divmod(time_taken, 60)\r\n hours, mins = divmod(mins, 60)\r\n return hours, mins, secs\r\n```\r\nWe have\r\n```python\r\n>>> divmod(10.0,2)\r\n(5.0, 0.0)\r\n```\r\n\r\n## Environment\r\n\r\n - PyTorch Version (e.g., 1.4): 1.4\r\n - Ignite Version (e.g., 0.3.0): 0.3.0\r\n - OS (e.g., Linux): Linux\r\n - How you installed Ignite (`conda`, `pip`, source): conda\r\n - Python version: 3.7\r\n - Any other relevant information:\r\n\r\n\n", "before_files": [{"content": "from typing import Union, Tuple\n\n# For compatibilty\nfrom ignite.utils import convert_tensor, apply_to_tensor, apply_to_type, to_onehot\n\n\ndef _to_hours_mins_secs(time_taken: Union[float, int]) -> Tuple[int, int, int]:\n \"\"\"Convert seconds to hours, mins, and seconds.\"\"\"\n mins, secs = divmod(time_taken, 60)\n hours, mins = divmod(mins, 60)\n return hours, mins, secs\n", "path": "ignite/_utils.py"}], "after_files": [{"content": "from typing import Union, Tuple\n\n# For compatibilty\nfrom ignite.utils import convert_tensor, apply_to_tensor, apply_to_type, to_onehot\n\n\ndef _to_hours_mins_secs(time_taken: Union[float, int]) -> Tuple[int, int, int]:\n \"\"\"Convert seconds to hours, mins, and seconds.\"\"\"\n mins, secs = divmod(time_taken, 60)\n hours, mins = divmod(mins, 60)\n return round(hours), round(mins), round(secs)\n", "path": "ignite/_utils.py"}]}
630
105
gh_patches_debug_38190
rasdani/github-patches
git_diff
PennyLaneAI__pennylane-4540
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [BUG] pennylane.tape.Unwarp does not unwrap Hamiltonian coefficients ### Expected behavior `qml.tape.unwrap` convert all ArrayBox in a tape to usual arrays. ### Actual behavior Coefficients of the `Hamiltonian` object instance (accessible via `ham.coeffs`) are not unwrapped. ### Additional information _No response_ ### Source code _No response_ ### Tracebacks _No response_ ### System information ```shell Name: PennyLane Version: 0.26.0.dev0 Summary: PennyLane is a Python quantum machine learning library by Xanadu Inc. Home-page: https://github.com/XanaduAI/pennylane Author: Author-email: License: Apache License 2.0 Location: /home/chae-yeun/work/Xanadu/devel-py39/lib/python3.9/site-packages Requires: appdirs, autograd, autoray, cachetools, networkx, numpy, pennylane-lightning, retworkx, scipy, semantic-version, toml Required-by: PennyLane-Lightning Platform info: Linux-5.14.0-1042-oem-x86_64-with-glibc2.35 Python version: 3.9.13 Numpy version: 1.23.2 Scipy version: 1.9.0 Installed devices: - lightning.qubit (PennyLane-Lightning-0.26.0.dev1) - default.gaussian (PennyLane-0.26.0.dev0) - default.mixed (PennyLane-0.26.0.dev0) - default.qubit (PennyLane-0.26.0.dev0) - default.qubit.autograd (PennyLane-0.26.0.dev0) - default.qubit.jax (PennyLane-0.26.0.dev0) - default.qubit.tf (PennyLane-0.26.0.dev0) - default.qubit.torch (PennyLane-0.26.0.dev0) - default.qutrit (PennyLane-0.26.0.dev0) ``` ### Existing GitHub issues - [X] I have searched existing GitHub issues to make sure the issue does not already exist. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pennylane/transforms/convert_to_numpy_parameters.py` Content: ``` 1 # Copyright 2018-2023 Xanadu Quantum Technologies Inc. 2 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 7 # http://www.apache.org/licenses/LICENSE-2.0 8 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 """ 15 This file contains preprocessings steps that may be called internally 16 during execution. 17 """ 18 import copy 19 20 import pennylane as qml 21 from pennylane import math 22 from pennylane.tape import QuantumScript 23 24 25 def _convert_op_to_numpy_data(op: qml.operation.Operator) -> qml.operation.Operator: 26 if math.get_interface(*op.data) == "numpy": 27 return op 28 # Use operator method to change parameters when it become available 29 copied_op = copy.copy(op) 30 copied_op.data = math.unwrap(op.data) 31 return copied_op 32 33 34 def _convert_measurement_to_numpy_data( 35 m: qml.measurements.MeasurementProcess, 36 ) -> qml.measurements.MeasurementProcess: 37 if m.obs is None or math.get_interface(*m.obs.data) == "numpy": 38 return m 39 # Use measurement method to change parameters when it becomes available 40 copied_m = copy.copy(m) 41 if isinstance(copied_m.obs, qml.operation.Tensor): 42 copied_m.obs.data = math.unwrap([o.data for o in m.obs.obs]) 43 else: 44 copied_m.obs.data = math.unwrap(m.obs.data) 45 return copied_m 46 47 48 # pylint: disable=protected-access 49 def convert_to_numpy_parameters(circuit: QuantumScript) -> QuantumScript: 50 """Transforms a circuit to one with purely numpy parameters. 51 52 Args: 53 circuit (QuantumScript): a circuit with parameters of any interface 54 55 Returns: 56 QuantumScript: A circuit with purely numpy parameters 57 58 .. seealso:: 59 60 :class:`pennylane.tape.Unwrap` modifies a :class:`~.pennylane.tape.QuantumScript` in place instead of creating 61 a new class. It will also set all parameters on the circuit, not just ones that need to be unwrapped. 62 63 >>> ops = [qml.S(0), qml.RX(torch.tensor(0.1234), 0)] 64 >>> measurements = [qml.state(), qml.expval(qml.Hermitian(torch.eye(2), 0))] 65 >>> circuit = qml.tape.QuantumScript(ops, measurements ) 66 >>> new_circuit = convert_to_numpy_parameters(circuit) 67 >>> new_circuit.circuit 68 [S(wires=[0]), 69 RX(0.1234000027179718, wires=[0]), 70 state(wires=[]), 71 expval(Hermitian(array([[1., 0.], 72 [0., 1.]], dtype=float32), wires=[0]))] 73 74 If the component's data does not need to be transformed, it is left uncopied. 75 76 >>> circuit[0] is new_circuit[0] 77 True 78 >>> circuit[1] is new_circuit[1] 79 False 80 >>> circuit[2] is new_circuit[2] 81 True 82 >>> circuit[3] is new_circuit[3] 83 False 84 85 """ 86 new_ops = (_convert_op_to_numpy_data(op) for op in circuit.operations) 87 new_measurements = (_convert_measurement_to_numpy_data(m) for m in circuit.measurements) 88 new_circuit = circuit.__class__(new_ops, new_measurements, shots=circuit.shots) 89 # must preserve trainable params as we lose information about the machine learning interface 90 new_circuit.trainable_params = circuit.trainable_params 91 new_circuit._qfunc_output = circuit._qfunc_output 92 return new_circuit 93 ``` Path: `pennylane/ops/functions/bind_new_parameters.py` Content: ``` 1 # Copyright 2018-2023 Xanadu Quantum Technologies Inc. 2 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 7 # http://www.apache.org/licenses/LICENSE-2.0 8 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 """ 15 This module contains the qml.bind_new_parameters function. 16 """ 17 # pylint: disable=missing-docstring 18 19 from typing import Sequence 20 import copy 21 from functools import singledispatch 22 23 import pennylane as qml 24 from pennylane.typing import TensorLike 25 from pennylane.operation import Operator, Tensor 26 27 from ..identity import Identity 28 from ..qubit import Projector 29 from ..op_math import CompositeOp, SymbolicOp, ScalarSymbolicOp, Adjoint, Pow, SProd 30 31 32 @singledispatch 33 def bind_new_parameters(op: Operator, params: Sequence[TensorLike]) -> Operator: 34 """Create a new operator with updated parameters 35 36 This function takes an :class:`~.Operator` and new parameters as input and 37 returns a new operator of the same type with the new parameters. This function 38 does not mutate the original operator. 39 40 Args: 41 op (.Operator): Operator to update 42 params (Sequence[TensorLike]): New parameters to create operator with. This 43 must have the same shape as `op.data`. 44 45 Returns: 46 .Operator: New operator with updated parameters 47 """ 48 try: 49 return op.__class__(*params, wires=op.wires, **copy.deepcopy(op.hyperparameters)) 50 except TypeError: 51 # operation is doing something different with its call signature. 52 new_op = copy.deepcopy(op) 53 new_op.data = tuple(params) 54 return new_op 55 56 57 @bind_new_parameters.register 58 def bind_new_parameters_approx_time_evolution( 59 op: qml.ApproxTimeEvolution, params: Sequence[TensorLike] 60 ): 61 new_hamiltonian = bind_new_parameters(op.hyperparameters["hamiltonian"], params[:-1]) 62 time = params[-1] 63 n = op.hyperparameters["n"] 64 65 return qml.ApproxTimeEvolution(new_hamiltonian, time, n) 66 67 68 @bind_new_parameters.register 69 def bind_new_parameters_commuting_evolution( 70 op: qml.CommutingEvolution, params: Sequence[TensorLike] 71 ): 72 new_hamiltonian = bind_new_parameters(op.hyperparameters["hamiltonian"], params[1:]) 73 freq = op.hyperparameters["frequencies"] 74 shifts = op.hyperparameters["shifts"] 75 time = params[0] 76 77 return qml.CommutingEvolution(new_hamiltonian, time, frequencies=freq, shifts=shifts) 78 79 80 @bind_new_parameters.register 81 def bind_new_parameters_fermionic_double_excitation( 82 op: qml.FermionicDoubleExcitation, params: Sequence[TensorLike] 83 ): 84 wires1 = op.hyperparameters["wires1"] 85 wires2 = op.hyperparameters["wires2"] 86 87 return qml.FermionicDoubleExcitation(params[0], wires1=wires1, wires2=wires2) 88 89 90 @bind_new_parameters.register 91 def bind_new_parameters_angle_embedding(op: qml.AngleEmbedding, params: Sequence[TensorLike]): 92 rotation = op.hyperparameters["rotation"].basis 93 return qml.AngleEmbedding(params[0], wires=op.wires, rotation=rotation) 94 95 96 @bind_new_parameters.register 97 def bind_new_parameters_identity(op: Identity, params: Sequence[TensorLike]): 98 return qml.Identity(*params, wires=op.wires) 99 100 101 @bind_new_parameters.register 102 def bind_new_parameters_composite_op(op: CompositeOp, params: Sequence[TensorLike]): 103 new_operands = [] 104 105 for operand in op.operands: 106 op_num_params = operand.num_params 107 sub_params = params[:op_num_params] 108 params = params[op_num_params:] 109 new_operands.append(bind_new_parameters(operand, sub_params)) 110 111 return op.__class__(*new_operands) 112 113 114 @bind_new_parameters.register 115 def bind_new_parameters_symbolic_op(op: SymbolicOp, params: Sequence[TensorLike]): 116 new_base = bind_new_parameters(op.base, params) 117 new_hyperparameters = copy.deepcopy(op.hyperparameters) 118 _ = new_hyperparameters.pop("base") 119 120 return op.__class__(new_base, **new_hyperparameters) 121 122 123 @bind_new_parameters.register 124 def bind_new_parameters_adjoint(op: Adjoint, params: Sequence[TensorLike]): 125 # Need a separate dispatch for `Adjoint` because using a more general class 126 # signature results in a call to `Adjoint.__new__` which doesn't raise an 127 # error but does return an unusable object. 128 return Adjoint(bind_new_parameters(op.base, params)) 129 130 131 @bind_new_parameters.register 132 def bind_new_parameters_projector(op: Projector, params: Sequence[TensorLike]): 133 # Need a separate dispatch for `Projector` because using a more general class 134 # signature results in a call to `Projector.__new__` which doesn't raise an 135 # error but does return an unusable object. 136 return Projector(*params, wires=op.wires) 137 138 139 @bind_new_parameters.register 140 def bind_new_parameters_scalar_symbolic_op(op: ScalarSymbolicOp, params: Sequence[TensorLike]): 141 new_scalar = params[0] 142 params = params[1:] 143 144 new_base = bind_new_parameters(op.base, params) 145 new_hyperparameters = copy.deepcopy(op.hyperparameters) 146 _ = new_hyperparameters.pop("base") 147 148 return op.__class__(new_base, new_scalar, **new_hyperparameters) 149 150 151 @bind_new_parameters.register 152 def bind_new_parameters_sprod(op: SProd, params: Sequence[TensorLike]): 153 # Separate dispatch for `SProd` since its constructor has a different interface 154 new_scalar = params[0] 155 params = params[1:] 156 new_base = bind_new_parameters(op.base, params) 157 158 return SProd(new_scalar, new_base) 159 160 161 @bind_new_parameters.register 162 def bind_new_parameters_pow(op: Pow, params: Sequence[TensorLike]): 163 # Need a separate dispatch for `Pow` because using a more general class 164 # signature results in a call to `Pow.__new__` which doesn't raise an 165 # error but does return an unusable object. 166 return Pow(bind_new_parameters(op.base, params), op.scalar) 167 168 169 @bind_new_parameters.register 170 def bind_new_parameters_hamiltonian(op: qml.Hamiltonian, params: Sequence[TensorLike]): 171 return qml.Hamiltonian(params, op.ops) 172 173 174 @bind_new_parameters.register 175 def bind_new_parameters_tensor(op: Tensor, params: Sequence[TensorLike]): 176 new_obs = [] 177 178 for obs in op.obs: 179 sub_params = params[: obs.num_params] 180 params = params[obs.num_params :] 181 new_obs.append(bind_new_parameters(obs, sub_params)) 182 183 return Tensor(*new_obs) 184 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pennylane/ops/functions/bind_new_parameters.py b/pennylane/ops/functions/bind_new_parameters.py --- a/pennylane/ops/functions/bind_new_parameters.py +++ b/pennylane/ops/functions/bind_new_parameters.py @@ -47,7 +47,7 @@ """ try: return op.__class__(*params, wires=op.wires, **copy.deepcopy(op.hyperparameters)) - except TypeError: + except (TypeError, ValueError): # operation is doing something different with its call signature. new_op = copy.deepcopy(op) new_op.data = tuple(params) @@ -168,7 +168,10 @@ @bind_new_parameters.register def bind_new_parameters_hamiltonian(op: qml.Hamiltonian, params: Sequence[TensorLike]): - return qml.Hamiltonian(params, op.ops) + new_H = qml.Hamiltonian(params, op.ops) + if op.grouping_indices is not None: + new_H.grouping_indices = op.grouping_indices + return new_H @bind_new_parameters.register diff --git a/pennylane/transforms/convert_to_numpy_parameters.py b/pennylane/transforms/convert_to_numpy_parameters.py --- a/pennylane/transforms/convert_to_numpy_parameters.py +++ b/pennylane/transforms/convert_to_numpy_parameters.py @@ -15,34 +15,32 @@ This file contains preprocessings steps that may be called internally during execution. """ -import copy - import pennylane as qml from pennylane import math from pennylane.tape import QuantumScript +# pylint: disable=no-member def _convert_op_to_numpy_data(op: qml.operation.Operator) -> qml.operation.Operator: if math.get_interface(*op.data) == "numpy": return op # Use operator method to change parameters when it become available - copied_op = copy.copy(op) - copied_op.data = math.unwrap(op.data) - return copied_op + return qml.ops.functions.bind_new_parameters(op, math.unwrap(op.data)) +# pylint: disable=no-member def _convert_measurement_to_numpy_data( m: qml.measurements.MeasurementProcess, ) -> qml.measurements.MeasurementProcess: - if m.obs is None or math.get_interface(*m.obs.data) == "numpy": + if m.obs is None: + if m.eigvals() is None or math.get_interface(m.eigvals()) == "numpy": + return m + return type(m)(wires=m.wires, eigvals=math.unwrap(m.eigvals())) + + if math.get_interface(*m.obs.data) == "numpy": return m - # Use measurement method to change parameters when it becomes available - copied_m = copy.copy(m) - if isinstance(copied_m.obs, qml.operation.Tensor): - copied_m.obs.data = math.unwrap([o.data for o in m.obs.obs]) - else: - copied_m.obs.data = math.unwrap(m.obs.data) - return copied_m + new_obs = qml.ops.functions.bind_new_parameters(m.obs, math.unwrap(m.obs.data)) + return type(m)(obs=new_obs) # pylint: disable=protected-access
{"golden_diff": "diff --git a/pennylane/ops/functions/bind_new_parameters.py b/pennylane/ops/functions/bind_new_parameters.py\n--- a/pennylane/ops/functions/bind_new_parameters.py\n+++ b/pennylane/ops/functions/bind_new_parameters.py\n@@ -47,7 +47,7 @@\n \"\"\"\n try:\n return op.__class__(*params, wires=op.wires, **copy.deepcopy(op.hyperparameters))\n- except TypeError:\n+ except (TypeError, ValueError):\n # operation is doing something different with its call signature.\n new_op = copy.deepcopy(op)\n new_op.data = tuple(params)\n@@ -168,7 +168,10 @@\n \n @bind_new_parameters.register\n def bind_new_parameters_hamiltonian(op: qml.Hamiltonian, params: Sequence[TensorLike]):\n- return qml.Hamiltonian(params, op.ops)\n+ new_H = qml.Hamiltonian(params, op.ops)\n+ if op.grouping_indices is not None:\n+ new_H.grouping_indices = op.grouping_indices\n+ return new_H\n \n \n @bind_new_parameters.register\ndiff --git a/pennylane/transforms/convert_to_numpy_parameters.py b/pennylane/transforms/convert_to_numpy_parameters.py\n--- a/pennylane/transforms/convert_to_numpy_parameters.py\n+++ b/pennylane/transforms/convert_to_numpy_parameters.py\n@@ -15,34 +15,32 @@\n This file contains preprocessings steps that may be called internally\n during execution.\n \"\"\"\n-import copy\n-\n import pennylane as qml\n from pennylane import math\n from pennylane.tape import QuantumScript\n \n \n+# pylint: disable=no-member\n def _convert_op_to_numpy_data(op: qml.operation.Operator) -> qml.operation.Operator:\n if math.get_interface(*op.data) == \"numpy\":\n return op\n # Use operator method to change parameters when it become available\n- copied_op = copy.copy(op)\n- copied_op.data = math.unwrap(op.data)\n- return copied_op\n+ return qml.ops.functions.bind_new_parameters(op, math.unwrap(op.data))\n \n \n+# pylint: disable=no-member\n def _convert_measurement_to_numpy_data(\n m: qml.measurements.MeasurementProcess,\n ) -> qml.measurements.MeasurementProcess:\n- if m.obs is None or math.get_interface(*m.obs.data) == \"numpy\":\n+ if m.obs is None:\n+ if m.eigvals() is None or math.get_interface(m.eigvals()) == \"numpy\":\n+ return m\n+ return type(m)(wires=m.wires, eigvals=math.unwrap(m.eigvals()))\n+\n+ if math.get_interface(*m.obs.data) == \"numpy\":\n return m\n- # Use measurement method to change parameters when it becomes available\n- copied_m = copy.copy(m)\n- if isinstance(copied_m.obs, qml.operation.Tensor):\n- copied_m.obs.data = math.unwrap([o.data for o in m.obs.obs])\n- else:\n- copied_m.obs.data = math.unwrap(m.obs.data)\n- return copied_m\n+ new_obs = qml.ops.functions.bind_new_parameters(m.obs, math.unwrap(m.obs.data))\n+ return type(m)(obs=new_obs)\n \n \n # pylint: disable=protected-access\n", "issue": "[BUG] pennylane.tape.Unwarp does not unwrap Hamiltonian coefficients\n### Expected behavior\r\n\r\n`qml.tape.unwrap` convert all ArrayBox in a tape to usual arrays.\r\n\r\n### Actual behavior\r\n\r\nCoefficients of the `Hamiltonian` object instance (accessible via `ham.coeffs`) are not unwrapped.\r\n\r\n### Additional information\r\n\r\n_No response_\r\n\r\n### Source code\r\n\r\n_No response_\r\n\r\n### Tracebacks\r\n\r\n_No response_\r\n\r\n### System information\r\n\r\n```shell\r\nName: PennyLane\r\nVersion: 0.26.0.dev0\r\nSummary: PennyLane is a Python quantum machine learning library by Xanadu Inc.\r\nHome-page: https://github.com/XanaduAI/pennylane\r\nAuthor: \r\nAuthor-email: \r\nLicense: Apache License 2.0\r\nLocation: /home/chae-yeun/work/Xanadu/devel-py39/lib/python3.9/site-packages\r\nRequires: appdirs, autograd, autoray, cachetools, networkx, numpy, pennylane-lightning, retworkx, scipy, semantic-version, toml\r\nRequired-by: PennyLane-Lightning\r\n\r\nPlatform info: Linux-5.14.0-1042-oem-x86_64-with-glibc2.35\r\nPython version: 3.9.13\r\nNumpy version: 1.23.2\r\nScipy version: 1.9.0\r\nInstalled devices:\r\n- lightning.qubit (PennyLane-Lightning-0.26.0.dev1)\r\n- default.gaussian (PennyLane-0.26.0.dev0)\r\n- default.mixed (PennyLane-0.26.0.dev0)\r\n- default.qubit (PennyLane-0.26.0.dev0)\r\n- default.qubit.autograd (PennyLane-0.26.0.dev0)\r\n- default.qubit.jax (PennyLane-0.26.0.dev0)\r\n- default.qubit.tf (PennyLane-0.26.0.dev0)\r\n- default.qubit.torch (PennyLane-0.26.0.dev0)\r\n- default.qutrit (PennyLane-0.26.0.dev0)\r\n```\r\n\r\n\r\n### Existing GitHub issues\r\n\r\n- [X] I have searched existing GitHub issues to make sure the issue does not already exist.\n", "before_files": [{"content": "# Copyright 2018-2023 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis file contains preprocessings steps that may be called internally\nduring execution.\n\"\"\"\nimport copy\n\nimport pennylane as qml\nfrom pennylane import math\nfrom pennylane.tape import QuantumScript\n\n\ndef _convert_op_to_numpy_data(op: qml.operation.Operator) -> qml.operation.Operator:\n if math.get_interface(*op.data) == \"numpy\":\n return op\n # Use operator method to change parameters when it become available\n copied_op = copy.copy(op)\n copied_op.data = math.unwrap(op.data)\n return copied_op\n\n\ndef _convert_measurement_to_numpy_data(\n m: qml.measurements.MeasurementProcess,\n) -> qml.measurements.MeasurementProcess:\n if m.obs is None or math.get_interface(*m.obs.data) == \"numpy\":\n return m\n # Use measurement method to change parameters when it becomes available\n copied_m = copy.copy(m)\n if isinstance(copied_m.obs, qml.operation.Tensor):\n copied_m.obs.data = math.unwrap([o.data for o in m.obs.obs])\n else:\n copied_m.obs.data = math.unwrap(m.obs.data)\n return copied_m\n\n\n# pylint: disable=protected-access\ndef convert_to_numpy_parameters(circuit: QuantumScript) -> QuantumScript:\n \"\"\"Transforms a circuit to one with purely numpy parameters.\n\n Args:\n circuit (QuantumScript): a circuit with parameters of any interface\n\n Returns:\n QuantumScript: A circuit with purely numpy parameters\n\n .. seealso::\n\n :class:`pennylane.tape.Unwrap` modifies a :class:`~.pennylane.tape.QuantumScript` in place instead of creating\n a new class. It will also set all parameters on the circuit, not just ones that need to be unwrapped.\n\n >>> ops = [qml.S(0), qml.RX(torch.tensor(0.1234), 0)]\n >>> measurements = [qml.state(), qml.expval(qml.Hermitian(torch.eye(2), 0))]\n >>> circuit = qml.tape.QuantumScript(ops, measurements )\n >>> new_circuit = convert_to_numpy_parameters(circuit)\n >>> new_circuit.circuit\n [S(wires=[0]),\n RX(0.1234000027179718, wires=[0]),\n state(wires=[]),\n expval(Hermitian(array([[1., 0.],\n [0., 1.]], dtype=float32), wires=[0]))]\n\n If the component's data does not need to be transformed, it is left uncopied.\n\n >>> circuit[0] is new_circuit[0]\n True\n >>> circuit[1] is new_circuit[1]\n False\n >>> circuit[2] is new_circuit[2]\n True\n >>> circuit[3] is new_circuit[3]\n False\n\n \"\"\"\n new_ops = (_convert_op_to_numpy_data(op) for op in circuit.operations)\n new_measurements = (_convert_measurement_to_numpy_data(m) for m in circuit.measurements)\n new_circuit = circuit.__class__(new_ops, new_measurements, shots=circuit.shots)\n # must preserve trainable params as we lose information about the machine learning interface\n new_circuit.trainable_params = circuit.trainable_params\n new_circuit._qfunc_output = circuit._qfunc_output\n return new_circuit\n", "path": "pennylane/transforms/convert_to_numpy_parameters.py"}, {"content": "# Copyright 2018-2023 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis module contains the qml.bind_new_parameters function.\n\"\"\"\n# pylint: disable=missing-docstring\n\nfrom typing import Sequence\nimport copy\nfrom functools import singledispatch\n\nimport pennylane as qml\nfrom pennylane.typing import TensorLike\nfrom pennylane.operation import Operator, Tensor\n\nfrom ..identity import Identity\nfrom ..qubit import Projector\nfrom ..op_math import CompositeOp, SymbolicOp, ScalarSymbolicOp, Adjoint, Pow, SProd\n\n\n@singledispatch\ndef bind_new_parameters(op: Operator, params: Sequence[TensorLike]) -> Operator:\n \"\"\"Create a new operator with updated parameters\n\n This function takes an :class:`~.Operator` and new parameters as input and\n returns a new operator of the same type with the new parameters. This function\n does not mutate the original operator.\n\n Args:\n op (.Operator): Operator to update\n params (Sequence[TensorLike]): New parameters to create operator with. This\n must have the same shape as `op.data`.\n\n Returns:\n .Operator: New operator with updated parameters\n \"\"\"\n try:\n return op.__class__(*params, wires=op.wires, **copy.deepcopy(op.hyperparameters))\n except TypeError:\n # operation is doing something different with its call signature.\n new_op = copy.deepcopy(op)\n new_op.data = tuple(params)\n return new_op\n\n\n@bind_new_parameters.register\ndef bind_new_parameters_approx_time_evolution(\n op: qml.ApproxTimeEvolution, params: Sequence[TensorLike]\n):\n new_hamiltonian = bind_new_parameters(op.hyperparameters[\"hamiltonian\"], params[:-1])\n time = params[-1]\n n = op.hyperparameters[\"n\"]\n\n return qml.ApproxTimeEvolution(new_hamiltonian, time, n)\n\n\n@bind_new_parameters.register\ndef bind_new_parameters_commuting_evolution(\n op: qml.CommutingEvolution, params: Sequence[TensorLike]\n):\n new_hamiltonian = bind_new_parameters(op.hyperparameters[\"hamiltonian\"], params[1:])\n freq = op.hyperparameters[\"frequencies\"]\n shifts = op.hyperparameters[\"shifts\"]\n time = params[0]\n\n return qml.CommutingEvolution(new_hamiltonian, time, frequencies=freq, shifts=shifts)\n\n\n@bind_new_parameters.register\ndef bind_new_parameters_fermionic_double_excitation(\n op: qml.FermionicDoubleExcitation, params: Sequence[TensorLike]\n):\n wires1 = op.hyperparameters[\"wires1\"]\n wires2 = op.hyperparameters[\"wires2\"]\n\n return qml.FermionicDoubleExcitation(params[0], wires1=wires1, wires2=wires2)\n\n\n@bind_new_parameters.register\ndef bind_new_parameters_angle_embedding(op: qml.AngleEmbedding, params: Sequence[TensorLike]):\n rotation = op.hyperparameters[\"rotation\"].basis\n return qml.AngleEmbedding(params[0], wires=op.wires, rotation=rotation)\n\n\n@bind_new_parameters.register\ndef bind_new_parameters_identity(op: Identity, params: Sequence[TensorLike]):\n return qml.Identity(*params, wires=op.wires)\n\n\n@bind_new_parameters.register\ndef bind_new_parameters_composite_op(op: CompositeOp, params: Sequence[TensorLike]):\n new_operands = []\n\n for operand in op.operands:\n op_num_params = operand.num_params\n sub_params = params[:op_num_params]\n params = params[op_num_params:]\n new_operands.append(bind_new_parameters(operand, sub_params))\n\n return op.__class__(*new_operands)\n\n\n@bind_new_parameters.register\ndef bind_new_parameters_symbolic_op(op: SymbolicOp, params: Sequence[TensorLike]):\n new_base = bind_new_parameters(op.base, params)\n new_hyperparameters = copy.deepcopy(op.hyperparameters)\n _ = new_hyperparameters.pop(\"base\")\n\n return op.__class__(new_base, **new_hyperparameters)\n\n\n@bind_new_parameters.register\ndef bind_new_parameters_adjoint(op: Adjoint, params: Sequence[TensorLike]):\n # Need a separate dispatch for `Adjoint` because using a more general class\n # signature results in a call to `Adjoint.__new__` which doesn't raise an\n # error but does return an unusable object.\n return Adjoint(bind_new_parameters(op.base, params))\n\n\n@bind_new_parameters.register\ndef bind_new_parameters_projector(op: Projector, params: Sequence[TensorLike]):\n # Need a separate dispatch for `Projector` because using a more general class\n # signature results in a call to `Projector.__new__` which doesn't raise an\n # error but does return an unusable object.\n return Projector(*params, wires=op.wires)\n\n\n@bind_new_parameters.register\ndef bind_new_parameters_scalar_symbolic_op(op: ScalarSymbolicOp, params: Sequence[TensorLike]):\n new_scalar = params[0]\n params = params[1:]\n\n new_base = bind_new_parameters(op.base, params)\n new_hyperparameters = copy.deepcopy(op.hyperparameters)\n _ = new_hyperparameters.pop(\"base\")\n\n return op.__class__(new_base, new_scalar, **new_hyperparameters)\n\n\n@bind_new_parameters.register\ndef bind_new_parameters_sprod(op: SProd, params: Sequence[TensorLike]):\n # Separate dispatch for `SProd` since its constructor has a different interface\n new_scalar = params[0]\n params = params[1:]\n new_base = bind_new_parameters(op.base, params)\n\n return SProd(new_scalar, new_base)\n\n\n@bind_new_parameters.register\ndef bind_new_parameters_pow(op: Pow, params: Sequence[TensorLike]):\n # Need a separate dispatch for `Pow` because using a more general class\n # signature results in a call to `Pow.__new__` which doesn't raise an\n # error but does return an unusable object.\n return Pow(bind_new_parameters(op.base, params), op.scalar)\n\n\n@bind_new_parameters.register\ndef bind_new_parameters_hamiltonian(op: qml.Hamiltonian, params: Sequence[TensorLike]):\n return qml.Hamiltonian(params, op.ops)\n\n\n@bind_new_parameters.register\ndef bind_new_parameters_tensor(op: Tensor, params: Sequence[TensorLike]):\n new_obs = []\n\n for obs in op.obs:\n sub_params = params[: obs.num_params]\n params = params[obs.num_params :]\n new_obs.append(bind_new_parameters(obs, sub_params))\n\n return Tensor(*new_obs)\n", "path": "pennylane/ops/functions/bind_new_parameters.py"}], "after_files": [{"content": "# Copyright 2018-2023 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis file contains preprocessings steps that may be called internally\nduring execution.\n\"\"\"\nimport pennylane as qml\nfrom pennylane import math\nfrom pennylane.tape import QuantumScript\n\n\n# pylint: disable=no-member\ndef _convert_op_to_numpy_data(op: qml.operation.Operator) -> qml.operation.Operator:\n if math.get_interface(*op.data) == \"numpy\":\n return op\n # Use operator method to change parameters when it become available\n return qml.ops.functions.bind_new_parameters(op, math.unwrap(op.data))\n\n\n# pylint: disable=no-member\ndef _convert_measurement_to_numpy_data(\n m: qml.measurements.MeasurementProcess,\n) -> qml.measurements.MeasurementProcess:\n if m.obs is None:\n if m.eigvals() is None or math.get_interface(m.eigvals()) == \"numpy\":\n return m\n return type(m)(wires=m.wires, eigvals=math.unwrap(m.eigvals()))\n\n if math.get_interface(*m.obs.data) == \"numpy\":\n return m\n new_obs = qml.ops.functions.bind_new_parameters(m.obs, math.unwrap(m.obs.data))\n return type(m)(obs=new_obs)\n\n\n# pylint: disable=protected-access\ndef convert_to_numpy_parameters(circuit: QuantumScript) -> QuantumScript:\n \"\"\"Transforms a circuit to one with purely numpy parameters.\n\n Args:\n circuit (QuantumScript): a circuit with parameters of any interface\n\n Returns:\n QuantumScript: A circuit with purely numpy parameters\n\n .. seealso::\n\n :class:`pennylane.tape.Unwrap` modifies a :class:`~.pennylane.tape.QuantumScript` in place instead of creating\n a new class. It will also set all parameters on the circuit, not just ones that need to be unwrapped.\n\n >>> ops = [qml.S(0), qml.RX(torch.tensor(0.1234), 0)]\n >>> measurements = [qml.state(), qml.expval(qml.Hermitian(torch.eye(2), 0))]\n >>> circuit = qml.tape.QuantumScript(ops, measurements )\n >>> new_circuit = convert_to_numpy_parameters(circuit)\n >>> new_circuit.circuit\n [S(wires=[0]),\n RX(0.1234000027179718, wires=[0]),\n state(wires=[]),\n expval(Hermitian(array([[1., 0.],\n [0., 1.]], dtype=float32), wires=[0]))]\n\n If the component's data does not need to be transformed, it is left uncopied.\n\n >>> circuit[0] is new_circuit[0]\n True\n >>> circuit[1] is new_circuit[1]\n False\n >>> circuit[2] is new_circuit[2]\n True\n >>> circuit[3] is new_circuit[3]\n False\n\n \"\"\"\n new_ops = (_convert_op_to_numpy_data(op) for op in circuit.operations)\n new_measurements = (_convert_measurement_to_numpy_data(m) for m in circuit.measurements)\n new_circuit = circuit.__class__(new_ops, new_measurements, shots=circuit.shots)\n # must preserve trainable params as we lose information about the machine learning interface\n new_circuit.trainable_params = circuit.trainable_params\n new_circuit._qfunc_output = circuit._qfunc_output\n return new_circuit\n", "path": "pennylane/transforms/convert_to_numpy_parameters.py"}, {"content": "# Copyright 2018-2023 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis module contains the qml.bind_new_parameters function.\n\"\"\"\n# pylint: disable=missing-docstring\n\nfrom typing import Sequence\nimport copy\nfrom functools import singledispatch\n\nimport pennylane as qml\nfrom pennylane.typing import TensorLike\nfrom pennylane.operation import Operator, Tensor\n\nfrom ..identity import Identity\nfrom ..qubit import Projector\nfrom ..op_math import CompositeOp, SymbolicOp, ScalarSymbolicOp, Adjoint, Pow, SProd\n\n\n@singledispatch\ndef bind_new_parameters(op: Operator, params: Sequence[TensorLike]) -> Operator:\n \"\"\"Create a new operator with updated parameters\n\n This function takes an :class:`~.Operator` and new parameters as input and\n returns a new operator of the same type with the new parameters. This function\n does not mutate the original operator.\n\n Args:\n op (.Operator): Operator to update\n params (Sequence[TensorLike]): New parameters to create operator with. This\n must have the same shape as `op.data`.\n\n Returns:\n .Operator: New operator with updated parameters\n \"\"\"\n try:\n return op.__class__(*params, wires=op.wires, **copy.deepcopy(op.hyperparameters))\n except (TypeError, ValueError):\n # operation is doing something different with its call signature.\n new_op = copy.deepcopy(op)\n new_op.data = tuple(params)\n return new_op\n\n\n@bind_new_parameters.register\ndef bind_new_parameters_approx_time_evolution(\n op: qml.ApproxTimeEvolution, params: Sequence[TensorLike]\n):\n new_hamiltonian = bind_new_parameters(op.hyperparameters[\"hamiltonian\"], params[:-1])\n time = params[-1]\n n = op.hyperparameters[\"n\"]\n\n return qml.ApproxTimeEvolution(new_hamiltonian, time, n)\n\n\n@bind_new_parameters.register\ndef bind_new_parameters_commuting_evolution(\n op: qml.CommutingEvolution, params: Sequence[TensorLike]\n):\n new_hamiltonian = bind_new_parameters(op.hyperparameters[\"hamiltonian\"], params[1:])\n freq = op.hyperparameters[\"frequencies\"]\n shifts = op.hyperparameters[\"shifts\"]\n time = params[0]\n\n return qml.CommutingEvolution(new_hamiltonian, time, frequencies=freq, shifts=shifts)\n\n\n@bind_new_parameters.register\ndef bind_new_parameters_fermionic_double_excitation(\n op: qml.FermionicDoubleExcitation, params: Sequence[TensorLike]\n):\n wires1 = op.hyperparameters[\"wires1\"]\n wires2 = op.hyperparameters[\"wires2\"]\n\n return qml.FermionicDoubleExcitation(params[0], wires1=wires1, wires2=wires2)\n\n\n@bind_new_parameters.register\ndef bind_new_parameters_angle_embedding(op: qml.AngleEmbedding, params: Sequence[TensorLike]):\n rotation = op.hyperparameters[\"rotation\"].basis\n return qml.AngleEmbedding(params[0], wires=op.wires, rotation=rotation)\n\n\n@bind_new_parameters.register\ndef bind_new_parameters_identity(op: Identity, params: Sequence[TensorLike]):\n return qml.Identity(*params, wires=op.wires)\n\n\n@bind_new_parameters.register\ndef bind_new_parameters_composite_op(op: CompositeOp, params: Sequence[TensorLike]):\n new_operands = []\n\n for operand in op.operands:\n op_num_params = operand.num_params\n sub_params = params[:op_num_params]\n params = params[op_num_params:]\n new_operands.append(bind_new_parameters(operand, sub_params))\n\n return op.__class__(*new_operands)\n\n\n@bind_new_parameters.register\ndef bind_new_parameters_symbolic_op(op: SymbolicOp, params: Sequence[TensorLike]):\n new_base = bind_new_parameters(op.base, params)\n new_hyperparameters = copy.deepcopy(op.hyperparameters)\n _ = new_hyperparameters.pop(\"base\")\n\n return op.__class__(new_base, **new_hyperparameters)\n\n\n@bind_new_parameters.register\ndef bind_new_parameters_adjoint(op: Adjoint, params: Sequence[TensorLike]):\n # Need a separate dispatch for `Adjoint` because using a more general class\n # signature results in a call to `Adjoint.__new__` which doesn't raise an\n # error but does return an unusable object.\n return Adjoint(bind_new_parameters(op.base, params))\n\n\n@bind_new_parameters.register\ndef bind_new_parameters_projector(op: Projector, params: Sequence[TensorLike]):\n # Need a separate dispatch for `Projector` because using a more general class\n # signature results in a call to `Projector.__new__` which doesn't raise an\n # error but does return an unusable object.\n return Projector(*params, wires=op.wires)\n\n\n@bind_new_parameters.register\ndef bind_new_parameters_scalar_symbolic_op(op: ScalarSymbolicOp, params: Sequence[TensorLike]):\n new_scalar = params[0]\n params = params[1:]\n\n new_base = bind_new_parameters(op.base, params)\n new_hyperparameters = copy.deepcopy(op.hyperparameters)\n _ = new_hyperparameters.pop(\"base\")\n\n return op.__class__(new_base, new_scalar, **new_hyperparameters)\n\n\n@bind_new_parameters.register\ndef bind_new_parameters_sprod(op: SProd, params: Sequence[TensorLike]):\n # Separate dispatch for `SProd` since its constructor has a different interface\n new_scalar = params[0]\n params = params[1:]\n new_base = bind_new_parameters(op.base, params)\n\n return SProd(new_scalar, new_base)\n\n\n@bind_new_parameters.register\ndef bind_new_parameters_pow(op: Pow, params: Sequence[TensorLike]):\n # Need a separate dispatch for `Pow` because using a more general class\n # signature results in a call to `Pow.__new__` which doesn't raise an\n # error but does return an unusable object.\n return Pow(bind_new_parameters(op.base, params), op.scalar)\n\n\n@bind_new_parameters.register\ndef bind_new_parameters_hamiltonian(op: qml.Hamiltonian, params: Sequence[TensorLike]):\n new_H = qml.Hamiltonian(params, op.ops)\n if op.grouping_indices is not None:\n new_H.grouping_indices = op.grouping_indices\n return new_H\n\n\n@bind_new_parameters.register\ndef bind_new_parameters_tensor(op: Tensor, params: Sequence[TensorLike]):\n new_obs = []\n\n for obs in op.obs:\n sub_params = params[: obs.num_params]\n params = params[obs.num_params :]\n new_obs.append(bind_new_parameters(obs, sub_params))\n\n return Tensor(*new_obs)\n", "path": "pennylane/ops/functions/bind_new_parameters.py"}]}
3,895
724
gh_patches_debug_16924
rasdani/github-patches
git_diff
pwr-Solaar__Solaar-732
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Backtrace when run without gtk installed If you try to bring up the graphical interface without gtk installed, you get a backtrace: ``` Traceback (most recent call last): File "/usr/bin/solaar", line 57, in <module> solaar.gtk.main() File "/usr/lib/python3.7/site-packages/solaar/gtk.py", line 90, in main gi.require_version('Gtk', '3.0') File "/usr/lib64/python3.7/site-packages/gi/__init__.py", line 127, in require_version raise ValueError('Namespace %s not available' % namespace) ValueError: Namespace Gtk not available ``` It does appear that there is code to handle missing modules, but gtk doesn't get imported directly so it wouldn't trigger. Maybe something like this? ``` diff --git a/lib/solaar/gtk.py b/lib/solaar/gtk.py index f728420..20683af 100644 --- a/lib/solaar/gtk.py +++ b/lib/solaar/gtk.py @@ -87,8 +87,11 @@ def main(): if not args: return if args.action: return _cli.run(args.action, args.hidraw_path) - gi = _require('gi', 'python-gi') - gi.require_version('Gtk', '3.0') + try: + gi.require_version('Gtk', '3.0') + except ValueError: + import sys + sys.exit("%s: Gtk (version 3) must be installed in order to run the graphical interface." % (NAME)) _require('gi.repository.Gtk', 'gir1.2-gtk-3.0') try: ``` Can send a PR if desired, but I don't know if that's the right approach. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lib/solaar/gtk.py` Content: ``` 1 #!/usr/bin/env python3 2 # -*- python-mode -*- 3 # -*- coding: UTF-8 -*- 4 5 ## Copyright (C) 2012-2013 Daniel Pavel 6 ## 7 ## This program is free software; you can redistribute it and/or modify 8 ## it under the terms of the GNU General Public License as published by 9 ## the Free Software Foundation; either version 2 of the License, or 10 ## (at your option) any later version. 11 ## 12 ## This program is distributed in the hope that it will be useful, 13 ## but WITHOUT ANY WARRANTY; without even the implied warranty of 14 ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 ## GNU General Public License for more details. 16 ## 17 ## You should have received a copy of the GNU General Public License along 18 ## with this program; if not, write to the Free Software Foundation, Inc., 19 ## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 20 21 from __future__ import absolute_import, division, print_function, unicode_literals 22 23 import importlib 24 25 26 from solaar import __version__, NAME 27 import solaar.i18n as _i18n 28 import solaar.cli as _cli 29 30 # 31 # 32 # 33 34 def _require(module, os_package): 35 try: 36 return importlib.import_module(module) 37 except ImportError: 38 import sys 39 sys.exit("%s: missing required system package %s" % (NAME, os_package)) 40 41 42 def _parse_arguments(): 43 import argparse 44 arg_parser = argparse.ArgumentParser(prog=NAME.lower()) 45 arg_parser.add_argument('-d', '--debug', action='count', default=0, 46 help='print logging messages, for debugging purposes (may be repeated for extra verbosity)') 47 arg_parser.add_argument('-D', '--hidraw', action='store', dest='hidraw_path', metavar='PATH', 48 help='unifying receiver to use; the first detected receiver if unspecified. Example: /dev/hidraw2') 49 arg_parser.add_argument('--restart-on-wake-up', action='store_true', 50 help='restart Solaar on sleep wake-up (experimental)') 51 arg_parser.add_argument('-w', '--window', choices=('show','hide','only'), help='start with window showing / hidden / only (no tray icon)') 52 arg_parser.add_argument('-V', '--version', action='version', version='%(prog)s ' + __version__) 53 arg_parser.add_argument('--help-actions', action='store_true', 54 help='print help for the optional actions') 55 arg_parser.add_argument('action', nargs=argparse.REMAINDER, choices=_cli.actions, 56 help='optional actions to perform') 57 58 args = arg_parser.parse_args() 59 60 if args.help_actions: 61 _cli.print_help() 62 return 63 64 if args.window is None: 65 args.window = 'show' # default behaviour is to show main window 66 67 import logging 68 if args.debug > 0: 69 log_level = logging.WARNING - 10 * args.debug 70 log_format='%(asctime)s,%(msecs)03d %(levelname)8s [%(threadName)s] %(name)s: %(message)s' 71 logging.basicConfig(level=max(log_level, logging.DEBUG), format=log_format, datefmt='%H:%M:%S') 72 else: 73 logging.root.addHandler(logging.NullHandler()) 74 logging.root.setLevel(logging.ERROR) 75 76 if not args.action: 77 if logging.root.isEnabledFor(logging.INFO): 78 logging.info("language %s (%s), translations path %s", _i18n.language, _i18n.encoding, _i18n.path) 79 80 return args 81 82 83 def main(): 84 _require('pyudev', 'python3-pyudev') 85 86 # handle ^C in console 87 import signal 88 signal.signal(signal.SIGINT, signal.SIG_DFL) 89 90 args = _parse_arguments() 91 if not args: return 92 if args.action: 93 # if any argument, run comandline and exit 94 return _cli.run(args.action, args.hidraw_path) 95 96 gi = _require('gi', 'python3-gi or python3-gobject') 97 gi.require_version('Gtk', '3.0') 98 _require('gi.repository.Gtk', 'gir1.2-gtk-3.0') 99 100 try: 101 import solaar.ui as ui 102 import solaar.listener as listener 103 listener.setup_scanner(ui.status_changed, ui.error_dialog) 104 105 import solaar.upower as _upower 106 if args.restart_on_wake_up: 107 _upower.watch(listener.start_all, listener.stop_all) 108 else: 109 _upower.watch(lambda: listener.ping_all(True)) 110 111 # main UI event loop 112 ui.run_loop(listener.start_all, listener.stop_all, args.window!='only', args.window!='hide') 113 except Exception as e: 114 import sys 115 sys.exit('%s: error: %s' % (NAME.lower(), e)) 116 117 118 if __name__ == '__main__': 119 main() 120 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lib/solaar/gtk.py b/lib/solaar/gtk.py --- a/lib/solaar/gtk.py +++ b/lib/solaar/gtk.py @@ -31,10 +31,12 @@ # # -def _require(module, os_package): +def _require(module, os_package, gi=None, gi_package=None, gi_version=None): try: + if gi is not None: + gi.require_version(gi_package,gi_version) return importlib.import_module(module) - except ImportError: + except (ImportError, ValueError): import sys sys.exit("%s: missing required system package %s" % (NAME, os_package)) @@ -94,8 +96,7 @@ return _cli.run(args.action, args.hidraw_path) gi = _require('gi', 'python3-gi or python3-gobject') - gi.require_version('Gtk', '3.0') - _require('gi.repository.Gtk', 'gir1.2-gtk-3.0') + _require('gi.repository.Gtk', 'gir1.2-gtk-3.0', gi, 'Gtk', '3.0') try: import solaar.ui as ui
{"golden_diff": "diff --git a/lib/solaar/gtk.py b/lib/solaar/gtk.py\n--- a/lib/solaar/gtk.py\n+++ b/lib/solaar/gtk.py\n@@ -31,10 +31,12 @@\n #\n #\n \n-def _require(module, os_package):\n+def _require(module, os_package, gi=None, gi_package=None, gi_version=None):\n \ttry:\n+\t\tif gi is not None:\n+\t\t\tgi.require_version(gi_package,gi_version)\n \t\treturn importlib.import_module(module)\n-\texcept ImportError:\n+\texcept (ImportError, ValueError):\n \t\timport sys\n \t\tsys.exit(\"%s: missing required system package %s\" % (NAME, os_package))\n \n@@ -94,8 +96,7 @@\n \t\treturn _cli.run(args.action, args.hidraw_path)\n \n \tgi = _require('gi', 'python3-gi or python3-gobject')\n-\tgi.require_version('Gtk', '3.0')\n-\t_require('gi.repository.Gtk', 'gir1.2-gtk-3.0')\n+\t_require('gi.repository.Gtk', 'gir1.2-gtk-3.0', gi, 'Gtk', '3.0')\n \n \ttry:\n \t\timport solaar.ui as ui\n", "issue": "Backtrace when run without gtk installed\nIf you try to bring up the graphical interface without gtk installed, you get a backtrace:\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/bin/solaar\", line 57, in <module>\r\n solaar.gtk.main()\r\n File \"/usr/lib/python3.7/site-packages/solaar/gtk.py\", line 90, in main\r\n gi.require_version('Gtk', '3.0')\r\n File \"/usr/lib64/python3.7/site-packages/gi/__init__.py\", line 127, in require_version\r\n raise ValueError('Namespace %s not available' % namespace)\r\nValueError: Namespace Gtk not available\r\n```\r\nIt does appear that there is code to handle missing modules, but gtk doesn't get imported directly so it wouldn't trigger. Maybe something like this?\r\n```\r\ndiff --git a/lib/solaar/gtk.py b/lib/solaar/gtk.py\r\nindex f728420..20683af 100644\r\n--- a/lib/solaar/gtk.py\r\n+++ b/lib/solaar/gtk.py\r\n@@ -87,8 +87,11 @@ def main():\r\n if not args: return\r\n if args.action: return _cli.run(args.action, args.hidraw_path)\r\n\r\n- gi = _require('gi', 'python-gi')\r\n- gi.require_version('Gtk', '3.0')\r\n+ try:\r\n+ gi.require_version('Gtk', '3.0')\r\n+ except ValueError:\r\n+ import sys\r\n+ sys.exit(\"%s: Gtk (version 3) must be installed in order to run the graphical interface.\" % (NAME))\r\n _require('gi.repository.Gtk', 'gir1.2-gtk-3.0')\r\n\r\n try:\r\n```\r\nCan send a PR if desired, but I don't know if that's the right approach.\n", "before_files": [{"content": "#!/usr/bin/env python3\n# -*- python-mode -*-\n# -*- coding: UTF-8 -*-\n\n## Copyright (C) 2012-2013 Daniel Pavel\n##\n## This program is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation; either version 2 of the License, or\n## (at your option) any later version.\n##\n## This program is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n## GNU General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License along\n## with this program; if not, write to the Free Software Foundation, Inc.,\n## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport importlib\n\n\nfrom solaar import __version__, NAME\nimport solaar.i18n as _i18n\nimport solaar.cli as _cli\n\n#\n#\n#\n\ndef _require(module, os_package):\n\ttry:\n\t\treturn importlib.import_module(module)\n\texcept ImportError:\n\t\timport sys\n\t\tsys.exit(\"%s: missing required system package %s\" % (NAME, os_package))\n\n\ndef _parse_arguments():\n\timport argparse\n\targ_parser = argparse.ArgumentParser(prog=NAME.lower())\n\targ_parser.add_argument('-d', '--debug', action='count', default=0,\n\t\t\t\t\t\t\thelp='print logging messages, for debugging purposes (may be repeated for extra verbosity)')\n\targ_parser.add_argument('-D', '--hidraw', action='store', dest='hidraw_path', metavar='PATH',\n\t\t\t\t\t\t\thelp='unifying receiver to use; the first detected receiver if unspecified. Example: /dev/hidraw2')\n\targ_parser.add_argument('--restart-on-wake-up', action='store_true',\n\t\t\t\t\t\t\thelp='restart Solaar on sleep wake-up (experimental)')\n\targ_parser.add_argument('-w', '--window', choices=('show','hide','only'), help='start with window showing / hidden / only (no tray icon)')\n\targ_parser.add_argument('-V', '--version', action='version', version='%(prog)s ' + __version__)\n\targ_parser.add_argument('--help-actions', action='store_true',\n\t\t\t\t\t\t\thelp='print help for the optional actions')\n\targ_parser.add_argument('action', nargs=argparse.REMAINDER, choices=_cli.actions,\n\t\t\t\t\t\t\thelp='optional actions to perform')\n\n\targs = arg_parser.parse_args()\n\n\tif args.help_actions:\n\t\t_cli.print_help()\n\t\treturn\n\n\tif args.window is None:\n\t\targs.window = 'show' # default behaviour is to show main window\n\n\timport logging\n\tif args.debug > 0:\n\t\tlog_level = logging.WARNING - 10 * args.debug\n\t\tlog_format='%(asctime)s,%(msecs)03d %(levelname)8s [%(threadName)s] %(name)s: %(message)s'\n\t\tlogging.basicConfig(level=max(log_level, logging.DEBUG), format=log_format, datefmt='%H:%M:%S')\n\telse:\n\t\tlogging.root.addHandler(logging.NullHandler())\n\t\tlogging.root.setLevel(logging.ERROR)\n\n\tif not args.action:\n\t\tif logging.root.isEnabledFor(logging.INFO):\n\t\t\tlogging.info(\"language %s (%s), translations path %s\", _i18n.language, _i18n.encoding, _i18n.path)\n\n\treturn args\n\n\ndef main():\n\t_require('pyudev', 'python3-pyudev')\n\n\t# handle ^C in console\n\timport signal\n\tsignal.signal(signal.SIGINT, signal.SIG_DFL)\n\n\targs = _parse_arguments()\n\tif not args: return\n\tif args.action:\n\t\t# if any argument, run comandline and exit\n\t\treturn _cli.run(args.action, args.hidraw_path)\n\n\tgi = _require('gi', 'python3-gi or python3-gobject')\n\tgi.require_version('Gtk', '3.0')\n\t_require('gi.repository.Gtk', 'gir1.2-gtk-3.0')\n\n\ttry:\n\t\timport solaar.ui as ui\n\t\timport solaar.listener as listener\n\t\tlistener.setup_scanner(ui.status_changed, ui.error_dialog)\n\n\t\timport solaar.upower as _upower\n\t\tif args.restart_on_wake_up:\n\t\t\t_upower.watch(listener.start_all, listener.stop_all)\n\t\telse:\n\t\t\t_upower.watch(lambda: listener.ping_all(True))\n\n\t\t# main UI event loop\n\t\tui.run_loop(listener.start_all, listener.stop_all, args.window!='only', args.window!='hide')\n\texcept Exception as e:\n\t\timport sys\n\t\tsys.exit('%s: error: %s' % (NAME.lower(), e))\n\n\nif __name__ == '__main__':\n\tmain()\n", "path": "lib/solaar/gtk.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# -*- python-mode -*-\n# -*- coding: UTF-8 -*-\n\n## Copyright (C) 2012-2013 Daniel Pavel\n##\n## This program is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation; either version 2 of the License, or\n## (at your option) any later version.\n##\n## This program is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n## GNU General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License along\n## with this program; if not, write to the Free Software Foundation, Inc.,\n## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport importlib\n\n\nfrom solaar import __version__, NAME\nimport solaar.i18n as _i18n\nimport solaar.cli as _cli\n\n#\n#\n#\n\ndef _require(module, os_package, gi=None, gi_package=None, gi_version=None):\n\ttry:\n\t\tif gi is not None:\n\t\t\tgi.require_version(gi_package,gi_version)\n\t\treturn importlib.import_module(module)\n\texcept (ImportError, ValueError):\n\t\timport sys\n\t\tsys.exit(\"%s: missing required system package %s\" % (NAME, os_package))\n\n\ndef _parse_arguments():\n\timport argparse\n\targ_parser = argparse.ArgumentParser(prog=NAME.lower())\n\targ_parser.add_argument('-d', '--debug', action='count', default=0,\n\t\t\t\t\t\t\thelp='print logging messages, for debugging purposes (may be repeated for extra verbosity)')\n\targ_parser.add_argument('-D', '--hidraw', action='store', dest='hidraw_path', metavar='PATH',\n\t\t\t\t\t\t\thelp='unifying receiver to use; the first detected receiver if unspecified. Example: /dev/hidraw2')\n\targ_parser.add_argument('--restart-on-wake-up', action='store_true',\n\t\t\t\t\t\t\thelp='restart Solaar on sleep wake-up (experimental)')\n\targ_parser.add_argument('-w', '--window', choices=('show','hide','only'), help='start with window showing / hidden / only (no tray icon)')\n\targ_parser.add_argument('-V', '--version', action='version', version='%(prog)s ' + __version__)\n\targ_parser.add_argument('--help-actions', action='store_true',\n\t\t\t\t\t\t\thelp='print help for the optional actions')\n\targ_parser.add_argument('action', nargs=argparse.REMAINDER, choices=_cli.actions,\n\t\t\t\t\t\t\thelp='optional actions to perform')\n\n\targs = arg_parser.parse_args()\n\n\tif args.help_actions:\n\t\t_cli.print_help()\n\t\treturn\n\n\tif args.window is None:\n\t\targs.window = 'show' # default behaviour is to show main window\n\n\timport logging\n\tif args.debug > 0:\n\t\tlog_level = logging.WARNING - 10 * args.debug\n\t\tlog_format='%(asctime)s,%(msecs)03d %(levelname)8s [%(threadName)s] %(name)s: %(message)s'\n\t\tlogging.basicConfig(level=max(log_level, logging.DEBUG), format=log_format, datefmt='%H:%M:%S')\n\telse:\n\t\tlogging.root.addHandler(logging.NullHandler())\n\t\tlogging.root.setLevel(logging.ERROR)\n\n\tif not args.action:\n\t\tif logging.root.isEnabledFor(logging.INFO):\n\t\t\tlogging.info(\"language %s (%s), translations path %s\", _i18n.language, _i18n.encoding, _i18n.path)\n\n\treturn args\n\n\ndef main():\n\t_require('pyudev', 'python3-pyudev')\n\n\t# handle ^C in console\n\timport signal\n\tsignal.signal(signal.SIGINT, signal.SIG_DFL)\n\n\targs = _parse_arguments()\n\tif not args: return\n\tif args.action:\n\t\t# if any argument, run comandline and exit\n\t\treturn _cli.run(args.action, args.hidraw_path)\n\n\tgi = _require('gi', 'python3-gi or python3-gobject')\n\t_require('gi.repository.Gtk', 'gir1.2-gtk-3.0', gi, 'Gtk', '3.0')\n\n\ttry:\n\t\timport solaar.ui as ui\n\t\timport solaar.listener as listener\n\t\tlistener.setup_scanner(ui.status_changed, ui.error_dialog)\n\n\t\timport solaar.upower as _upower\n\t\tif args.restart_on_wake_up:\n\t\t\t_upower.watch(listener.start_all, listener.stop_all)\n\t\telse:\n\t\t\t_upower.watch(lambda: listener.ping_all(True))\n\n\t\t# main UI event loop\n\t\tui.run_loop(listener.start_all, listener.stop_all, args.window!='only', args.window!='hide')\n\texcept Exception as e:\n\t\timport sys\n\t\tsys.exit('%s: error: %s' % (NAME.lower(), e))\n\n\nif __name__ == '__main__':\n\tmain()\n", "path": "lib/solaar/gtk.py"}]}
2,011
280
gh_patches_debug_32601
rasdani/github-patches
git_diff
ansible-collections__amazon.aws-2029
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- The cloudwatchlogs_log_group_info module generates throttling exceptions ### Summary When running the amazon.aws.cloudwatchlogs_log_group_info module on AWS accounts having more than (roughly) 50 log groups we get ThrottlingExceptions once every 20 calls or so. I noticed that both the describe and the list-tags boto calls in the cloudwatchlogs_log_group_info module have no throttling handling configured and use the default throttling handling policy, ie. none. ### Issue Type Bug Report ### Component Name amazon.aws.cloudwatchlogs_log_group_info ### Ansible Version ```console (paste below) ansible [core 2.13.13] config file = None configured module search path = ['/home/rundeck/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/local/lib/python3.8/dist-packages/ansible ansible collection location = /home/rundeck/.ansible/collections:/usr/share/ansible/collections executable location = /usr/local/bin/ansible python version = 3.8.10 (default, Nov 22 2023, 10:22:35) [GCC 9.4.0] jinja version = 3.1.3 libyaml = True ``` ### Collection Versions ```console (paste below) Collection Version --------------------- ------- amazon.aws 7.3.0 community.general 8.3.0 nutanix.ncp 1.7.0 servicenow.servicenow 1.0.6 # /usr/local/lib/python3.8/dist-packages/ansible_collections Collection Version ----------------------------- ------- amazon.aws 3.5.0 ansible.netcommon 3.1.3 ansible.posix 1.4.0 ansible.utils 2.8.0 ansible.windows 1.12.0 arista.eos 5.0.1 awx.awx 21.10.0 azure.azcollection 1.14.0 check_point.mgmt 2.3.0 chocolatey.chocolatey 1.3.1 cisco.aci 2.3.0 cisco.asa 3.1.0 cisco.dnac 6.6.1 cisco.intersight 1.0.22 cisco.ios 3.3.2 cisco.iosxr 3.3.1 cisco.ise 2.5.9 cisco.meraki 2.13.0 cisco.mso 2.1.0 cisco.nso 1.0.3 cisco.nxos 3.2.0 cisco.ucs 1.8.0 cloud.common 2.1.2 cloudscale_ch.cloud 2.2.3 community.aws 3.6.0 [...] ``` ### AWS SDK versions ```console (paste below) WARNING: Package(s) not found: boto Name: boto3 Version: 1.34.45 Summary: The AWS SDK for Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services Author-email: None License: Apache License 2.0 Location: /usr/local/lib/python3.8/dist-packages Requires: botocore, jmespath, s3transfer Required-by: --- Name: botocore Version: 1.34.45 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services Author-email: None License: Apache License 2.0 Location: /usr/local/lib/python3.8/dist-packages Requires: python-dateutil, jmespath, urllib3 Required-by: s3transfer, boto3, awscli ``` ### Configuration ```console (paste below) (no Ansible configuration) ``` ### OS / Environment NAME="Ubuntu" VERSION="20.04.6 LTS (Focal Fossa)" ID=ubuntu ID_LIKE=debian PRETTY_NAME="Ubuntu 20.04.6 LTS" ### Steps to Reproduce ```yaml - name: "Role based get all log groups in {{ selected_region }}" amazon.aws.cloudwatchlogs_log_group_info: region: "{{ selected_region }}" aws_access_key: "{{ aws_access_key }}" aws_secret_key: "{{ aws_secret_key }}" aws_session_token: "{{ aws_session_token }}" log_group_name: "{{ log_group_prefix }}" register: log_groups ``` ### Expected Results Should get all log group information and tags without error. Optionally, allow us to add an option to skip getting the tags for each log group would allow engineers to work around this issue. ### Actual Results ```console (paste below) fatal: [127.0.0.1]: FAILED! => {"boto3_version": "1.34.45", "botocore_version": "1.34.45", "changed": false, "error": {"code": "ThrottlingException", "message": "Rate exceeded"}, "msg": "Unable to describe tags for log group /aws/codebuild/tankmaintenanceplanning-pipeline-tsa: An error occurred (ThrottlingException) when calling the ListTagsLogGroup operation (reached max retries: 4): Rate exceeded" ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `plugins/modules/cloudwatchlogs_log_group_info.py` Content: ``` 1 #!/usr/bin/python 2 # -*- coding: utf-8 -*- 3 4 # Copyright: Ansible Project 5 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) 6 7 DOCUMENTATION = r""" 8 --- 9 module: cloudwatchlogs_log_group_info 10 version_added: 5.0.0 11 short_description: Get information about log_group in CloudWatchLogs 12 description: 13 - Lists the specified log groups. You can list all your log groups or filter the results by prefix. 14 - This module was originally added to C(community.aws) in release 1.0.0. 15 author: 16 - Willian Ricardo (@willricardo) <[email protected]> 17 options: 18 log_group_name: 19 description: 20 - The name or prefix of the log group to filter by. 21 type: str 22 extends_documentation_fragment: 23 - amazon.aws.common.modules 24 - amazon.aws.region.modules 25 - amazon.aws.boto3 26 """ 27 28 EXAMPLES = r""" 29 # Note: These examples do not set authentication details, see the AWS Guide for details. 30 - amazon.aws.cloudwatchlogs_log_group_info: 31 log_group_name: test-log-group 32 """ 33 34 RETURN = r""" 35 log_groups: 36 description: Return the list of complex objects representing log groups 37 returned: success 38 type: complex 39 contains: 40 log_group_name: 41 description: The name of the log group. 42 returned: always 43 type: str 44 creation_time: 45 description: The creation time of the log group. 46 returned: always 47 type: int 48 retention_in_days: 49 description: The number of days to retain the log events in the specified log group. 50 returned: always 51 type: int 52 metric_filter_count: 53 description: The number of metric filters. 54 returned: always 55 type: int 56 arn: 57 description: The Amazon Resource Name (ARN) of the log group. 58 returned: always 59 type: str 60 stored_bytes: 61 description: The number of bytes stored. 62 returned: always 63 type: str 64 kms_key_id: 65 description: The Amazon Resource Name (ARN) of the CMK to use when encrypting log data. 66 returned: always 67 type: str 68 tags: 69 description: A dictionary representing the tags on the log group. 70 returned: always 71 type: dict 72 version_added: 4.0.0 73 version_added_collection: community.aws 74 """ 75 76 try: 77 import botocore 78 except ImportError: 79 pass # Handled by AnsibleAWSModule 80 81 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict 82 83 from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule 84 from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code 85 86 87 def describe_log_group(client, log_group_name, module): 88 params = {} 89 if log_group_name: 90 params["logGroupNamePrefix"] = log_group_name 91 try: 92 paginator = client.get_paginator("describe_log_groups") 93 desc_log_group = paginator.paginate(**params).build_full_result() 94 except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: 95 module.fail_json_aws(e, msg=f"Unable to describe log group {log_group_name}") 96 97 for log_group in desc_log_group["logGroups"]: 98 log_group_name = log_group["logGroupName"] 99 try: 100 tags = client.list_tags_log_group(logGroupName=log_group_name) 101 except is_boto3_error_code("AccessDeniedException"): 102 tags = {} 103 module.warn(f"Permission denied listing tags for log group {log_group_name}") 104 except ( 105 botocore.exceptions.ClientError, 106 botocore.exceptions.BotoCoreError, 107 ) as e: # pylint: disable=duplicate-except 108 module.fail_json_aws(e, msg=f"Unable to describe tags for log group {log_group_name}") 109 log_group["tags"] = tags.get("tags", {}) 110 111 return desc_log_group 112 113 114 def main(): 115 argument_spec = dict( 116 log_group_name=dict(), 117 ) 118 119 module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) 120 121 try: 122 logs = module.client("logs") 123 except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: 124 module.fail_json_aws(e, msg="Failed to connect to AWS") 125 126 desc_log_group = describe_log_group(client=logs, log_group_name=module.params["log_group_name"], module=module) 127 final_log_group_snake = [] 128 129 for log_group in desc_log_group["logGroups"]: 130 final_log_group_snake.append(camel_dict_to_snake_dict(log_group, ignore_list=["tags"])) 131 132 desc_log_group_result = dict(changed=False, log_groups=final_log_group_snake) 133 module.exit_json(**desc_log_group_result) 134 135 136 if __name__ == "__main__": 137 main() 138 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/plugins/modules/cloudwatchlogs_log_group_info.py b/plugins/modules/cloudwatchlogs_log_group_info.py --- a/plugins/modules/cloudwatchlogs_log_group_info.py +++ b/plugins/modules/cloudwatchlogs_log_group_info.py @@ -82,6 +82,18 @@ from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + + [email protected]_backoff() +def list_tags_log_group_with_backoff(client, log_group_name): + return client.list_tags_log_group(logGroupName=log_group_name) + + [email protected]_backoff() +def describe_log_groups_with_backoff(client, **kwargs): + paginator = client.get_paginator("describe_log_groups") + return paginator.paginate(**kwargs).build_full_result() def describe_log_group(client, log_group_name, module): @@ -89,15 +101,14 @@ if log_group_name: params["logGroupNamePrefix"] = log_group_name try: - paginator = client.get_paginator("describe_log_groups") - desc_log_group = paginator.paginate(**params).build_full_result() + desc_log_group = describe_log_groups_with_backoff(client, **params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg=f"Unable to describe log group {log_group_name}") for log_group in desc_log_group["logGroups"]: log_group_name = log_group["logGroupName"] try: - tags = client.list_tags_log_group(logGroupName=log_group_name) + tags = list_tags_log_group_with_backoff(client, log_group_name) except is_boto3_error_code("AccessDeniedException"): tags = {} module.warn(f"Permission denied listing tags for log group {log_group_name}")
{"golden_diff": "diff --git a/plugins/modules/cloudwatchlogs_log_group_info.py b/plugins/modules/cloudwatchlogs_log_group_info.py\n--- a/plugins/modules/cloudwatchlogs_log_group_info.py\n+++ b/plugins/modules/cloudwatchlogs_log_group_info.py\n@@ -82,6 +82,18 @@\n \n from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule\n from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code\n+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry\n+\n+\[email protected]_backoff()\n+def list_tags_log_group_with_backoff(client, log_group_name):\n+ return client.list_tags_log_group(logGroupName=log_group_name)\n+\n+\[email protected]_backoff()\n+def describe_log_groups_with_backoff(client, **kwargs):\n+ paginator = client.get_paginator(\"describe_log_groups\")\n+ return paginator.paginate(**kwargs).build_full_result()\n \n \n def describe_log_group(client, log_group_name, module):\n@@ -89,15 +101,14 @@\n if log_group_name:\n params[\"logGroupNamePrefix\"] = log_group_name\n try:\n- paginator = client.get_paginator(\"describe_log_groups\")\n- desc_log_group = paginator.paginate(**params).build_full_result()\n+ desc_log_group = describe_log_groups_with_backoff(client, **params)\n except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:\n module.fail_json_aws(e, msg=f\"Unable to describe log group {log_group_name}\")\n \n for log_group in desc_log_group[\"logGroups\"]:\n log_group_name = log_group[\"logGroupName\"]\n try:\n- tags = client.list_tags_log_group(logGroupName=log_group_name)\n+ tags = list_tags_log_group_with_backoff(client, log_group_name)\n except is_boto3_error_code(\"AccessDeniedException\"):\n tags = {}\n module.warn(f\"Permission denied listing tags for log group {log_group_name}\")\n", "issue": "The cloudwatchlogs_log_group_info module generates throttling exceptions\n### Summary\n\nWhen running the amazon.aws.cloudwatchlogs_log_group_info module on AWS accounts having more than (roughly) 50 log groups we get ThrottlingExceptions once every 20 calls or so. I noticed that both the describe and the list-tags boto calls in the cloudwatchlogs_log_group_info module have no throttling handling configured and use the default throttling handling policy, ie. none. \n\n### Issue Type\n\nBug Report\n\n### Component Name\n\namazon.aws.cloudwatchlogs_log_group_info\n\n### Ansible Version\n\n```console (paste below)\r\nansible [core 2.13.13]\r\n config file = None\r\n configured module search path = ['/home/rundeck/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']\r\n ansible python module location = /usr/local/lib/python3.8/dist-packages/ansible\r\n ansible collection location = /home/rundeck/.ansible/collections:/usr/share/ansible/collections\r\n executable location = /usr/local/bin/ansible\r\n python version = 3.8.10 (default, Nov 22 2023, 10:22:35) [GCC 9.4.0]\r\n jinja version = 3.1.3\r\n libyaml = True\r\n\r\n```\r\n\n\n### Collection Versions\n\n```console (paste below)\r\nCollection Version\r\n--------------------- -------\r\namazon.aws 7.3.0\r\ncommunity.general 8.3.0\r\nnutanix.ncp 1.7.0\r\nservicenow.servicenow 1.0.6\r\n\r\n# /usr/local/lib/python3.8/dist-packages/ansible_collections\r\nCollection Version\r\n----------------------------- -------\r\namazon.aws 3.5.0\r\nansible.netcommon 3.1.3\r\nansible.posix 1.4.0\r\nansible.utils 2.8.0\r\nansible.windows 1.12.0\r\narista.eos 5.0.1\r\nawx.awx 21.10.0\r\nazure.azcollection 1.14.0\r\ncheck_point.mgmt 2.3.0\r\nchocolatey.chocolatey 1.3.1\r\ncisco.aci 2.3.0\r\ncisco.asa 3.1.0\r\ncisco.dnac 6.6.1\r\ncisco.intersight 1.0.22\r\ncisco.ios 3.3.2\r\ncisco.iosxr 3.3.1\r\ncisco.ise 2.5.9\r\ncisco.meraki 2.13.0\r\ncisco.mso 2.1.0\r\ncisco.nso 1.0.3\r\ncisco.nxos 3.2.0\r\ncisco.ucs 1.8.0\r\ncloud.common 2.1.2\r\ncloudscale_ch.cloud 2.2.3\r\ncommunity.aws 3.6.0\r\n[...]\r\n```\r\n\n\n### AWS SDK versions\n\n```console (paste below)\r\nWARNING: Package(s) not found: boto\r\nName: boto3\r\nVersion: 1.34.45\r\nSummary: The AWS SDK for Python\r\nHome-page: https://github.com/boto/boto3\r\nAuthor: Amazon Web Services\r\nAuthor-email: None\r\nLicense: Apache License 2.0\r\nLocation: /usr/local/lib/python3.8/dist-packages\r\nRequires: botocore, jmespath, s3transfer\r\nRequired-by:\r\n---\r\nName: botocore\r\nVersion: 1.34.45\r\nSummary: Low-level, data-driven core of boto 3.\r\nHome-page: https://github.com/boto/botocore\r\nAuthor: Amazon Web Services\r\nAuthor-email: None\r\nLicense: Apache License 2.0\r\nLocation: /usr/local/lib/python3.8/dist-packages\r\nRequires: python-dateutil, jmespath, urllib3\r\nRequired-by: s3transfer, boto3, awscli\r\n```\r\n\n\n### Configuration\n\n```console (paste below)\r\n(no Ansible configuration)\r\n```\r\n\n\n### OS / Environment\n\nNAME=\"Ubuntu\"\r\nVERSION=\"20.04.6 LTS (Focal Fossa)\"\r\nID=ubuntu\r\nID_LIKE=debian\r\nPRETTY_NAME=\"Ubuntu 20.04.6 LTS\"\n\n### Steps to Reproduce\n\n```yaml \r\n- name: \"Role based get all log groups in {{ selected_region }}\"\r\n amazon.aws.cloudwatchlogs_log_group_info:\r\n region: \"{{ selected_region }}\"\r\n aws_access_key: \"{{ aws_access_key }}\"\r\n aws_secret_key: \"{{ aws_secret_key }}\"\r\n aws_session_token: \"{{ aws_session_token }}\"\r\n log_group_name: \"{{ log_group_prefix }}\"\r\n register: log_groups\r\n```\r\n\n\n### Expected Results\n\nShould get all log group information and tags without error. Optionally, allow us to add an option to skip getting the tags for each log group would allow engineers to work around this issue. \n\n### Actual Results\n\n```console (paste below)\r\nfatal: [127.0.0.1]: FAILED! => {\"boto3_version\": \"1.34.45\", \"botocore_version\": \"1.34.45\", \"changed\": false, \"error\": {\"code\": \"ThrottlingException\", \"message\": \"Rate exceeded\"}, \"msg\": \"Unable to describe tags for log group /aws/codebuild/tankmaintenanceplanning-pipeline-tsa: An error occurred (ThrottlingException) when calling the ListTagsLogGroup operation (reached max retries: 4): Rate exceeded\"\r\n```\r\n\n\n### Code of Conduct\n\n- [X] I agree to follow the Ansible Code of Conduct\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: Ansible Project\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nDOCUMENTATION = r\"\"\"\n---\nmodule: cloudwatchlogs_log_group_info\nversion_added: 5.0.0\nshort_description: Get information about log_group in CloudWatchLogs\ndescription:\n - Lists the specified log groups. You can list all your log groups or filter the results by prefix.\n - This module was originally added to C(community.aws) in release 1.0.0.\nauthor:\n - Willian Ricardo (@willricardo) <[email protected]>\noptions:\n log_group_name:\n description:\n - The name or prefix of the log group to filter by.\n type: str\nextends_documentation_fragment:\n - amazon.aws.common.modules\n - amazon.aws.region.modules\n - amazon.aws.boto3\n\"\"\"\n\nEXAMPLES = r\"\"\"\n# Note: These examples do not set authentication details, see the AWS Guide for details.\n- amazon.aws.cloudwatchlogs_log_group_info:\n log_group_name: test-log-group\n\"\"\"\n\nRETURN = r\"\"\"\nlog_groups:\n description: Return the list of complex objects representing log groups\n returned: success\n type: complex\n contains:\n log_group_name:\n description: The name of the log group.\n returned: always\n type: str\n creation_time:\n description: The creation time of the log group.\n returned: always\n type: int\n retention_in_days:\n description: The number of days to retain the log events in the specified log group.\n returned: always\n type: int\n metric_filter_count:\n description: The number of metric filters.\n returned: always\n type: int\n arn:\n description: The Amazon Resource Name (ARN) of the log group.\n returned: always\n type: str\n stored_bytes:\n description: The number of bytes stored.\n returned: always\n type: str\n kms_key_id:\n description: The Amazon Resource Name (ARN) of the CMK to use when encrypting log data.\n returned: always\n type: str\n tags:\n description: A dictionary representing the tags on the log group.\n returned: always\n type: dict\n version_added: 4.0.0\n version_added_collection: community.aws\n\"\"\"\n\ntry:\n import botocore\nexcept ImportError:\n pass # Handled by AnsibleAWSModule\n\nfrom ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict\n\nfrom ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule\nfrom ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code\n\n\ndef describe_log_group(client, log_group_name, module):\n params = {}\n if log_group_name:\n params[\"logGroupNamePrefix\"] = log_group_name\n try:\n paginator = client.get_paginator(\"describe_log_groups\")\n desc_log_group = paginator.paginate(**params).build_full_result()\n except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:\n module.fail_json_aws(e, msg=f\"Unable to describe log group {log_group_name}\")\n\n for log_group in desc_log_group[\"logGroups\"]:\n log_group_name = log_group[\"logGroupName\"]\n try:\n tags = client.list_tags_log_group(logGroupName=log_group_name)\n except is_boto3_error_code(\"AccessDeniedException\"):\n tags = {}\n module.warn(f\"Permission denied listing tags for log group {log_group_name}\")\n except (\n botocore.exceptions.ClientError,\n botocore.exceptions.BotoCoreError,\n ) as e: # pylint: disable=duplicate-except\n module.fail_json_aws(e, msg=f\"Unable to describe tags for log group {log_group_name}\")\n log_group[\"tags\"] = tags.get(\"tags\", {})\n\n return desc_log_group\n\n\ndef main():\n argument_spec = dict(\n log_group_name=dict(),\n )\n\n module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)\n\n try:\n logs = module.client(\"logs\")\n except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:\n module.fail_json_aws(e, msg=\"Failed to connect to AWS\")\n\n desc_log_group = describe_log_group(client=logs, log_group_name=module.params[\"log_group_name\"], module=module)\n final_log_group_snake = []\n\n for log_group in desc_log_group[\"logGroups\"]:\n final_log_group_snake.append(camel_dict_to_snake_dict(log_group, ignore_list=[\"tags\"]))\n\n desc_log_group_result = dict(changed=False, log_groups=final_log_group_snake)\n module.exit_json(**desc_log_group_result)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "plugins/modules/cloudwatchlogs_log_group_info.py"}], "after_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: Ansible Project\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nDOCUMENTATION = r\"\"\"\n---\nmodule: cloudwatchlogs_log_group_info\nversion_added: 5.0.0\nshort_description: Get information about log_group in CloudWatchLogs\ndescription:\n - Lists the specified log groups. You can list all your log groups or filter the results by prefix.\n - This module was originally added to C(community.aws) in release 1.0.0.\nauthor:\n - Willian Ricardo (@willricardo) <[email protected]>\noptions:\n log_group_name:\n description:\n - The name or prefix of the log group to filter by.\n type: str\nextends_documentation_fragment:\n - amazon.aws.common.modules\n - amazon.aws.region.modules\n - amazon.aws.boto3\n\"\"\"\n\nEXAMPLES = r\"\"\"\n# Note: These examples do not set authentication details, see the AWS Guide for details.\n- amazon.aws.cloudwatchlogs_log_group_info:\n log_group_name: test-log-group\n\"\"\"\n\nRETURN = r\"\"\"\nlog_groups:\n description: Return the list of complex objects representing log groups\n returned: success\n type: complex\n contains:\n log_group_name:\n description: The name of the log group.\n returned: always\n type: str\n creation_time:\n description: The creation time of the log group.\n returned: always\n type: int\n retention_in_days:\n description: The number of days to retain the log events in the specified log group.\n returned: always\n type: int\n metric_filter_count:\n description: The number of metric filters.\n returned: always\n type: int\n arn:\n description: The Amazon Resource Name (ARN) of the log group.\n returned: always\n type: str\n stored_bytes:\n description: The number of bytes stored.\n returned: always\n type: str\n kms_key_id:\n description: The Amazon Resource Name (ARN) of the CMK to use when encrypting log data.\n returned: always\n type: str\n tags:\n description: A dictionary representing the tags on the log group.\n returned: always\n type: dict\n version_added: 4.0.0\n version_added_collection: community.aws\n\"\"\"\n\ntry:\n import botocore\nexcept ImportError:\n pass # Handled by AnsibleAWSModule\n\nfrom ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict\n\nfrom ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule\nfrom ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code\nfrom ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry\n\n\[email protected]_backoff()\ndef list_tags_log_group_with_backoff(client, log_group_name):\n return client.list_tags_log_group(logGroupName=log_group_name)\n\n\[email protected]_backoff()\ndef describe_log_groups_with_backoff(client, **kwargs):\n paginator = client.get_paginator(\"describe_log_groups\")\n return paginator.paginate(**kwargs).build_full_result()\n\n\ndef describe_log_group(client, log_group_name, module):\n params = {}\n if log_group_name:\n params[\"logGroupNamePrefix\"] = log_group_name\n try:\n desc_log_group = describe_log_groups_with_backoff(client, **params)\n except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:\n module.fail_json_aws(e, msg=f\"Unable to describe log group {log_group_name}\")\n\n for log_group in desc_log_group[\"logGroups\"]:\n log_group_name = log_group[\"logGroupName\"]\n try:\n tags = list_tags_log_group_with_backoff(client, log_group_name)\n except is_boto3_error_code(\"AccessDeniedException\"):\n tags = {}\n module.warn(f\"Permission denied listing tags for log group {log_group_name}\")\n except (\n botocore.exceptions.ClientError,\n botocore.exceptions.BotoCoreError,\n ) as e: # pylint: disable=duplicate-except\n module.fail_json_aws(e, msg=f\"Unable to describe tags for log group {log_group_name}\")\n log_group[\"tags\"] = tags.get(\"tags\", {})\n\n return desc_log_group\n\n\ndef main():\n argument_spec = dict(\n log_group_name=dict(),\n )\n\n module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)\n\n try:\n logs = module.client(\"logs\")\n except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:\n module.fail_json_aws(e, msg=\"Failed to connect to AWS\")\n\n desc_log_group = describe_log_group(client=logs, log_group_name=module.params[\"log_group_name\"], module=module)\n final_log_group_snake = []\n\n for log_group in desc_log_group[\"logGroups\"]:\n final_log_group_snake.append(camel_dict_to_snake_dict(log_group, ignore_list=[\"tags\"]))\n\n desc_log_group_result = dict(changed=False, log_groups=final_log_group_snake)\n module.exit_json(**desc_log_group_result)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "plugins/modules/cloudwatchlogs_log_group_info.py"}]}
2,902
432
gh_patches_debug_64715
rasdani/github-patches
git_diff
Lightning-Universe__lightning-flash-1243
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Docs page describing Beta meaning ## 📚 Documentation Add a page in our docs describing that beta means that one or all of the following are true: - the feature has unstable dependencies - the feature may change without notice in future versions - the feature is not compatible with other flash / pl features - the performance of the feature has not been verified Anything else? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docs/extensions/stability.py` Content: ``` 1 # Copyright The PyTorch Lightning team. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 from docutils import nodes 15 from docutils.parsers.rst import Directive 16 from docutils.statemachine import StringList 17 18 ADMONITION_TEMPLATE = """ 19 .. raw:: html 20 21 <div class="admonition warning {type}"> 22 <p class="admonition-title">{title}</p> 23 <p>This {scope} is currently in Beta. The interfaces and functionality may change without warning in future 24 releases.</p> 25 </div> 26 """ 27 28 29 class Beta(Directive): 30 has_content = True 31 required_arguments = 1 32 optional_arguments = 0 33 34 def run(self): 35 36 scope = self.arguments[0] 37 38 admonition_rst = ADMONITION_TEMPLATE.format(type="beta", title="Beta", scope=scope) 39 admonition_list = StringList(admonition_rst.split("\n")) 40 admonition = nodes.paragraph() 41 self.state.nested_parse(admonition_list, self.content_offset, admonition) 42 return [admonition] 43 44 45 def setup(app): 46 app.add_directive("beta", Beta) 47 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/extensions/stability.py b/docs/extensions/stability.py --- a/docs/extensions/stability.py +++ b/docs/extensions/stability.py @@ -20,8 +20,14 @@ <div class="admonition warning {type}"> <p class="admonition-title">{title}</p> - <p>This {scope} is currently in Beta. The interfaces and functionality may change without warning in future - releases.</p> + <p> + +This {scope} is currently in Beta. The API and functionality may change without warning in future +releases. :ref:`More details <stability>`. + +.. raw:: html + + </p> </div> """
{"golden_diff": "diff --git a/docs/extensions/stability.py b/docs/extensions/stability.py\n--- a/docs/extensions/stability.py\n+++ b/docs/extensions/stability.py\n@@ -20,8 +20,14 @@\n \n <div class=\"admonition warning {type}\">\n <p class=\"admonition-title\">{title}</p>\n- <p>This {scope} is currently in Beta. The interfaces and functionality may change without warning in future\n- releases.</p>\n+ <p>\n+\n+This {scope} is currently in Beta. The API and functionality may change without warning in future\n+releases. :ref:`More details <stability>`.\n+\n+.. raw:: html\n+\n+ </p>\n </div>\n \"\"\"\n", "issue": "Docs page describing Beta meaning\n## \ud83d\udcda Documentation\r\n\r\nAdd a page in our docs describing that beta means that one or all of the following are true:\r\n- the feature has unstable dependencies\r\n- the feature may change without notice in future versions\r\n- the feature is not compatible with other flash / pl features\r\n- the performance of the feature has not been verified\r\n\r\nAnything else?\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom docutils import nodes\nfrom docutils.parsers.rst import Directive\nfrom docutils.statemachine import StringList\n\nADMONITION_TEMPLATE = \"\"\"\n.. raw:: html\n\n <div class=\"admonition warning {type}\">\n <p class=\"admonition-title\">{title}</p>\n <p>This {scope} is currently in Beta. The interfaces and functionality may change without warning in future\n releases.</p>\n </div>\n\"\"\"\n\n\nclass Beta(Directive):\n has_content = True\n required_arguments = 1\n optional_arguments = 0\n\n def run(self):\n\n scope = self.arguments[0]\n\n admonition_rst = ADMONITION_TEMPLATE.format(type=\"beta\", title=\"Beta\", scope=scope)\n admonition_list = StringList(admonition_rst.split(\"\\n\"))\n admonition = nodes.paragraph()\n self.state.nested_parse(admonition_list, self.content_offset, admonition)\n return [admonition]\n\n\ndef setup(app):\n app.add_directive(\"beta\", Beta)\n", "path": "docs/extensions/stability.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom docutils import nodes\nfrom docutils.parsers.rst import Directive\nfrom docutils.statemachine import StringList\n\nADMONITION_TEMPLATE = \"\"\"\n.. raw:: html\n\n <div class=\"admonition warning {type}\">\n <p class=\"admonition-title\">{title}</p>\n <p>\n\nThis {scope} is currently in Beta. The API and functionality may change without warning in future\nreleases. :ref:`More details <stability>`.\n\n.. raw:: html\n\n </p>\n </div>\n\"\"\"\n\n\nclass Beta(Directive):\n has_content = True\n required_arguments = 1\n optional_arguments = 0\n\n def run(self):\n\n scope = self.arguments[0]\n\n admonition_rst = ADMONITION_TEMPLATE.format(type=\"beta\", title=\"Beta\", scope=scope)\n admonition_list = StringList(admonition_rst.split(\"\\n\"))\n admonition = nodes.paragraph()\n self.state.nested_parse(admonition_list, self.content_offset, admonition)\n return [admonition]\n\n\ndef setup(app):\n app.add_directive(\"beta\", Beta)\n", "path": "docs/extensions/stability.py"}]}
780
161