problem_id
stringlengths
18
22
source
stringclasses
1 value
task_type
stringclasses
1 value
in_source_id
stringlengths
13
58
prompt
stringlengths
1.1k
10.2k
golden_diff
stringlengths
151
4.94k
verification_info
stringlengths
582
21k
num_tokens
int64
271
2.05k
num_tokens_diff
int64
47
1.02k
gh_patches_debug_35130
rasdani/github-patches
git_diff
mlflow__mlflow-6206
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Timeout value too small: when downloading large model files, timeout is reached https://github.com/mlflow/mlflow/blob/d40780be361f4bd2741c2e8fcbd428c1d693edcf/mlflow/store/artifact/http_artifact_repo.py#L63 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mlflow/store/artifact/http_artifact_repo.py` Content: ``` 1 import os 2 import posixpath 3 4 from mlflow.entities import FileInfo 5 from mlflow.store.artifact.artifact_repo import ArtifactRepository, verify_artifact_path 6 from mlflow.tracking._tracking_service.utils import _get_default_host_creds 7 from mlflow.utils.file_utils import relative_path_to_artifact_path 8 from mlflow.utils.rest_utils import augmented_raise_for_status, http_request 9 10 11 class HttpArtifactRepository(ArtifactRepository): 12 """Stores artifacts in a remote artifact storage using HTTP requests""" 13 14 @property 15 def _host_creds(self): 16 return _get_default_host_creds(self.artifact_uri) 17 18 def log_artifact(self, local_file, artifact_path=None): 19 verify_artifact_path(artifact_path) 20 21 file_name = os.path.basename(local_file) 22 paths = (artifact_path, file_name) if artifact_path else (file_name,) 23 endpoint = posixpath.join("/", *paths) 24 with open(local_file, "rb") as f: 25 resp = http_request(self._host_creds, endpoint, "PUT", data=f, timeout=600) 26 augmented_raise_for_status(resp) 27 28 def log_artifacts(self, local_dir, artifact_path=None): 29 local_dir = os.path.abspath(local_dir) 30 for root, _, filenames in os.walk(local_dir): 31 if root == local_dir: 32 artifact_dir = artifact_path 33 else: 34 rel_path = os.path.relpath(root, local_dir) 35 rel_path = relative_path_to_artifact_path(rel_path) 36 artifact_dir = ( 37 posixpath.join(artifact_path, rel_path) if artifact_path else rel_path 38 ) 39 for f in filenames: 40 self.log_artifact(os.path.join(root, f), artifact_dir) 41 42 def list_artifacts(self, path=None): 43 endpoint = "/mlflow-artifacts/artifacts" 44 url, tail = self.artifact_uri.split(endpoint, maxsplit=1) 45 root = tail.lstrip("/") 46 params = {"path": posixpath.join(root, path) if path else root} 47 host_creds = _get_default_host_creds(url) 48 resp = http_request(host_creds, endpoint, "GET", params=params, timeout=10) 49 augmented_raise_for_status(resp) 50 file_infos = [] 51 for f in resp.json().get("files", []): 52 file_info = FileInfo( 53 posixpath.join(path, f["path"]) if path else f["path"], 54 f["is_dir"], 55 int(f["file_size"]) if ("file_size" in f) else None, 56 ) 57 file_infos.append(file_info) 58 59 return sorted(file_infos, key=lambda f: f.path) 60 61 def _download_file(self, remote_file_path, local_path): 62 endpoint = posixpath.join("/", remote_file_path) 63 resp = http_request(self._host_creds, endpoint, "GET", stream=True, timeout=10) 64 augmented_raise_for_status(resp) 65 with open(local_path, "wb") as f: 66 chunk_size = 1024 * 1024 # 1 MB 67 for chunk in resp.iter_content(chunk_size=chunk_size): 68 f.write(chunk) 69 70 def delete_artifacts(self, artifact_path=None): 71 endpoint = posixpath.join("/", artifact_path) if artifact_path else "/" 72 resp = http_request(self._host_creds, endpoint, "DELETE", stream=True, timeout=10) 73 augmented_raise_for_status(resp) 74 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mlflow/store/artifact/http_artifact_repo.py b/mlflow/store/artifact/http_artifact_repo.py --- a/mlflow/store/artifact/http_artifact_repo.py +++ b/mlflow/store/artifact/http_artifact_repo.py @@ -22,7 +22,7 @@ paths = (artifact_path, file_name) if artifact_path else (file_name,) endpoint = posixpath.join("/", *paths) with open(local_file, "rb") as f: - resp = http_request(self._host_creds, endpoint, "PUT", data=f, timeout=600) + resp = http_request(self._host_creds, endpoint, "PUT", data=f) augmented_raise_for_status(resp) def log_artifacts(self, local_dir, artifact_path=None): @@ -45,7 +45,7 @@ root = tail.lstrip("/") params = {"path": posixpath.join(root, path) if path else root} host_creds = _get_default_host_creds(url) - resp = http_request(host_creds, endpoint, "GET", params=params, timeout=10) + resp = http_request(host_creds, endpoint, "GET", params=params) augmented_raise_for_status(resp) file_infos = [] for f in resp.json().get("files", []): @@ -60,7 +60,7 @@ def _download_file(self, remote_file_path, local_path): endpoint = posixpath.join("/", remote_file_path) - resp = http_request(self._host_creds, endpoint, "GET", stream=True, timeout=10) + resp = http_request(self._host_creds, endpoint, "GET", stream=True) augmented_raise_for_status(resp) with open(local_path, "wb") as f: chunk_size = 1024 * 1024 # 1 MB @@ -69,5 +69,5 @@ def delete_artifacts(self, artifact_path=None): endpoint = posixpath.join("/", artifact_path) if artifact_path else "/" - resp = http_request(self._host_creds, endpoint, "DELETE", stream=True, timeout=10) + resp = http_request(self._host_creds, endpoint, "DELETE", stream=True) augmented_raise_for_status(resp)
{"golden_diff": "diff --git a/mlflow/store/artifact/http_artifact_repo.py b/mlflow/store/artifact/http_artifact_repo.py\n--- a/mlflow/store/artifact/http_artifact_repo.py\n+++ b/mlflow/store/artifact/http_artifact_repo.py\n@@ -22,7 +22,7 @@\n paths = (artifact_path, file_name) if artifact_path else (file_name,)\n endpoint = posixpath.join(\"/\", *paths)\n with open(local_file, \"rb\") as f:\n- resp = http_request(self._host_creds, endpoint, \"PUT\", data=f, timeout=600)\n+ resp = http_request(self._host_creds, endpoint, \"PUT\", data=f)\n augmented_raise_for_status(resp)\n \n def log_artifacts(self, local_dir, artifact_path=None):\n@@ -45,7 +45,7 @@\n root = tail.lstrip(\"/\")\n params = {\"path\": posixpath.join(root, path) if path else root}\n host_creds = _get_default_host_creds(url)\n- resp = http_request(host_creds, endpoint, \"GET\", params=params, timeout=10)\n+ resp = http_request(host_creds, endpoint, \"GET\", params=params)\n augmented_raise_for_status(resp)\n file_infos = []\n for f in resp.json().get(\"files\", []):\n@@ -60,7 +60,7 @@\n \n def _download_file(self, remote_file_path, local_path):\n endpoint = posixpath.join(\"/\", remote_file_path)\n- resp = http_request(self._host_creds, endpoint, \"GET\", stream=True, timeout=10)\n+ resp = http_request(self._host_creds, endpoint, \"GET\", stream=True)\n augmented_raise_for_status(resp)\n with open(local_path, \"wb\") as f:\n chunk_size = 1024 * 1024 # 1 MB\n@@ -69,5 +69,5 @@\n \n def delete_artifacts(self, artifact_path=None):\n endpoint = posixpath.join(\"/\", artifact_path) if artifact_path else \"/\"\n- resp = http_request(self._host_creds, endpoint, \"DELETE\", stream=True, timeout=10)\n+ resp = http_request(self._host_creds, endpoint, \"DELETE\", stream=True)\n augmented_raise_for_status(resp)\n", "issue": "Timeout value too small: when downloading large model files, timeout is reached\nhttps://github.com/mlflow/mlflow/blob/d40780be361f4bd2741c2e8fcbd428c1d693edcf/mlflow/store/artifact/http_artifact_repo.py#L63\n", "before_files": [{"content": "import os\nimport posixpath\n\nfrom mlflow.entities import FileInfo\nfrom mlflow.store.artifact.artifact_repo import ArtifactRepository, verify_artifact_path\nfrom mlflow.tracking._tracking_service.utils import _get_default_host_creds\nfrom mlflow.utils.file_utils import relative_path_to_artifact_path\nfrom mlflow.utils.rest_utils import augmented_raise_for_status, http_request\n\n\nclass HttpArtifactRepository(ArtifactRepository):\n \"\"\"Stores artifacts in a remote artifact storage using HTTP requests\"\"\"\n\n @property\n def _host_creds(self):\n return _get_default_host_creds(self.artifact_uri)\n\n def log_artifact(self, local_file, artifact_path=None):\n verify_artifact_path(artifact_path)\n\n file_name = os.path.basename(local_file)\n paths = (artifact_path, file_name) if artifact_path else (file_name,)\n endpoint = posixpath.join(\"/\", *paths)\n with open(local_file, \"rb\") as f:\n resp = http_request(self._host_creds, endpoint, \"PUT\", data=f, timeout=600)\n augmented_raise_for_status(resp)\n\n def log_artifacts(self, local_dir, artifact_path=None):\n local_dir = os.path.abspath(local_dir)\n for root, _, filenames in os.walk(local_dir):\n if root == local_dir:\n artifact_dir = artifact_path\n else:\n rel_path = os.path.relpath(root, local_dir)\n rel_path = relative_path_to_artifact_path(rel_path)\n artifact_dir = (\n posixpath.join(artifact_path, rel_path) if artifact_path else rel_path\n )\n for f in filenames:\n self.log_artifact(os.path.join(root, f), artifact_dir)\n\n def list_artifacts(self, path=None):\n endpoint = \"/mlflow-artifacts/artifacts\"\n url, tail = self.artifact_uri.split(endpoint, maxsplit=1)\n root = tail.lstrip(\"/\")\n params = {\"path\": posixpath.join(root, path) if path else root}\n host_creds = _get_default_host_creds(url)\n resp = http_request(host_creds, endpoint, \"GET\", params=params, timeout=10)\n augmented_raise_for_status(resp)\n file_infos = []\n for f in resp.json().get(\"files\", []):\n file_info = FileInfo(\n posixpath.join(path, f[\"path\"]) if path else f[\"path\"],\n f[\"is_dir\"],\n int(f[\"file_size\"]) if (\"file_size\" in f) else None,\n )\n file_infos.append(file_info)\n\n return sorted(file_infos, key=lambda f: f.path)\n\n def _download_file(self, remote_file_path, local_path):\n endpoint = posixpath.join(\"/\", remote_file_path)\n resp = http_request(self._host_creds, endpoint, \"GET\", stream=True, timeout=10)\n augmented_raise_for_status(resp)\n with open(local_path, \"wb\") as f:\n chunk_size = 1024 * 1024 # 1 MB\n for chunk in resp.iter_content(chunk_size=chunk_size):\n f.write(chunk)\n\n def delete_artifacts(self, artifact_path=None):\n endpoint = posixpath.join(\"/\", artifact_path) if artifact_path else \"/\"\n resp = http_request(self._host_creds, endpoint, \"DELETE\", stream=True, timeout=10)\n augmented_raise_for_status(resp)\n", "path": "mlflow/store/artifact/http_artifact_repo.py"}], "after_files": [{"content": "import os\nimport posixpath\n\nfrom mlflow.entities import FileInfo\nfrom mlflow.store.artifact.artifact_repo import ArtifactRepository, verify_artifact_path\nfrom mlflow.tracking._tracking_service.utils import _get_default_host_creds\nfrom mlflow.utils.file_utils import relative_path_to_artifact_path\nfrom mlflow.utils.rest_utils import augmented_raise_for_status, http_request\n\n\nclass HttpArtifactRepository(ArtifactRepository):\n \"\"\"Stores artifacts in a remote artifact storage using HTTP requests\"\"\"\n\n @property\n def _host_creds(self):\n return _get_default_host_creds(self.artifact_uri)\n\n def log_artifact(self, local_file, artifact_path=None):\n verify_artifact_path(artifact_path)\n\n file_name = os.path.basename(local_file)\n paths = (artifact_path, file_name) if artifact_path else (file_name,)\n endpoint = posixpath.join(\"/\", *paths)\n with open(local_file, \"rb\") as f:\n resp = http_request(self._host_creds, endpoint, \"PUT\", data=f)\n augmented_raise_for_status(resp)\n\n def log_artifacts(self, local_dir, artifact_path=None):\n local_dir = os.path.abspath(local_dir)\n for root, _, filenames in os.walk(local_dir):\n if root == local_dir:\n artifact_dir = artifact_path\n else:\n rel_path = os.path.relpath(root, local_dir)\n rel_path = relative_path_to_artifact_path(rel_path)\n artifact_dir = (\n posixpath.join(artifact_path, rel_path) if artifact_path else rel_path\n )\n for f in filenames:\n self.log_artifact(os.path.join(root, f), artifact_dir)\n\n def list_artifacts(self, path=None):\n endpoint = \"/mlflow-artifacts/artifacts\"\n url, tail = self.artifact_uri.split(endpoint, maxsplit=1)\n root = tail.lstrip(\"/\")\n params = {\"path\": posixpath.join(root, path) if path else root}\n host_creds = _get_default_host_creds(url)\n resp = http_request(host_creds, endpoint, \"GET\", params=params)\n augmented_raise_for_status(resp)\n file_infos = []\n for f in resp.json().get(\"files\", []):\n file_info = FileInfo(\n posixpath.join(path, f[\"path\"]) if path else f[\"path\"],\n f[\"is_dir\"],\n int(f[\"file_size\"]) if (\"file_size\" in f) else None,\n )\n file_infos.append(file_info)\n\n return sorted(file_infos, key=lambda f: f.path)\n\n def _download_file(self, remote_file_path, local_path):\n endpoint = posixpath.join(\"/\", remote_file_path)\n resp = http_request(self._host_creds, endpoint, \"GET\", stream=True)\n augmented_raise_for_status(resp)\n with open(local_path, \"wb\") as f:\n chunk_size = 1024 * 1024 # 1 MB\n for chunk in resp.iter_content(chunk_size=chunk_size):\n f.write(chunk)\n\n def delete_artifacts(self, artifact_path=None):\n endpoint = posixpath.join(\"/\", artifact_path) if artifact_path else \"/\"\n resp = http_request(self._host_creds, endpoint, \"DELETE\", stream=True)\n augmented_raise_for_status(resp)\n", "path": "mlflow/store/artifact/http_artifact_repo.py"}]}
1,197
502
gh_patches_debug_15814
rasdani/github-patches
git_diff
svthalia__concrexit-1977
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Error in pizza admin related to title_en Sentry Issue: [CONCREXIT-9W](https://sentry.io/organizations/thalia/issues/2692657224/?referrer=github_integration) ``` FieldError: Related Field got invalid lookup: title_en (16 additional frame(s) were not displayed) ... File "django/db/models/sql/query.py", line 1393, in add_q clause, _ = self._add_q(q_object, self.used_aliases) File "django/db/models/sql/query.py", line 1412, in _add_q child_clause, needed_inner = self.build_filter( File "django/db/models/sql/query.py", line 1265, in build_filter return self._add_q( File "django/db/models/sql/query.py", line 1412, in _add_q child_clause, needed_inner = self.build_filter( File "django/db/models/sql/query.py", line 1339, in build_filter raise FieldError('Related Field got invalid lookup: {}'.format(lookups[0])) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `website/pizzas/admin.py` Content: ``` 1 """Registers admin interfaces for the pizzas module.""" 2 from django.conf import settings 3 from django.contrib import admin 4 from django.core.exceptions import PermissionDenied 5 from django.forms import Field 6 from django.urls import reverse, path 7 from django.utils.html import format_html 8 from django.utils.translation import gettext_lazy as _ 9 10 from events import services 11 from events.services import is_organiser 12 from payments.widgets import PaymentWidget 13 from pizzas import admin_views 14 from utils.admin import DoNextModelAdmin 15 from .models import FoodOrder, FoodEvent, Product 16 17 18 @admin.register(Product) 19 class ProductAdmin(admin.ModelAdmin): 20 """Manage the products.""" 21 22 list_display = ("name", "price", "available") 23 list_filter = ("available", "restricted") 24 search_fields = ("name",) 25 26 27 @admin.register(FoodEvent) 28 class FoodEventAdmin(admin.ModelAdmin): 29 """Manage the pizza events.""" 30 31 list_display = ("title", "start", "end", "notification_enabled", "orders") 32 date_hierarchy = "start" 33 exclude = ("end_reminder",) 34 search_fields = [f"event__title_{l[0]}" for l in settings.LANGUAGES] 35 autocomplete_fields = ("event",) 36 37 def notification_enabled(self, obj): 38 return obj.send_notification 39 40 notification_enabled.short_description = _("reminder") 41 notification_enabled.admin_order_field = "send_notification" 42 notification_enabled.boolean = True 43 44 def has_change_permission(self, request, obj=None): 45 """Only allow access to the change form if the user is an organiser.""" 46 if obj is not None and not services.is_organiser(request.member, obj.event): 47 return False 48 return super().has_change_permission(request, obj) 49 50 def has_delete_permission(self, request, obj=None): 51 """Only allow access to delete if the user is an organiser.""" 52 if obj is not None and not services.is_organiser(request.member, obj.event): 53 return False 54 return super().has_delete_permission(request, obj) 55 56 def orders(self, obj): 57 url = reverse("admin:pizzas_foodevent_details", kwargs={"pk": obj.pk}) 58 return format_html('<a href="{url}">{text}</a>', url=url, text=_("Orders")) 59 60 def get_urls(self): 61 urls = super().get_urls() 62 custom_urls = [ 63 path( 64 "<int:pk>/details/", 65 self.admin_site.admin_view( 66 admin_views.PizzaOrderDetails.as_view(admin=self) 67 ), 68 name="pizzas_foodevent_details", 69 ), 70 path( 71 "<int:pk>/overview/", 72 self.admin_site.admin_view( 73 admin_views.PizzaOrderSummary.as_view(admin=self) 74 ), 75 name="pizzas_foodevent_overview", 76 ), 77 ] 78 return custom_urls + urls 79 80 81 @admin.register(FoodOrder) 82 class FoodOrderAdmin(DoNextModelAdmin): 83 """Manage the orders.""" 84 85 list_display = ( 86 "food_event", 87 "member_first_name", 88 "member_last_name", 89 "product", 90 "payment", 91 ) 92 93 fields = ( 94 "food_event", 95 "member", 96 "name", 97 "product", 98 "payment", 99 ) 100 101 def formfield_for_dbfield(self, db_field, request, obj=None, **kwargs): 102 """Payment field widget.""" 103 field = super().formfield_for_dbfield(db_field, request, **kwargs) 104 if db_field.name == "payment": 105 return Field( 106 widget=PaymentWidget(obj=obj), initial=field.initial, required=False, 107 ) 108 return field 109 110 def save_model(self, request, obj, form, change): 111 """You can only save the orders if you have permission.""" 112 if not is_organiser(request.member, obj.food_event.event): 113 raise PermissionDenied 114 return super().save_model(request, obj, form, change) 115 116 def has_view_permission(self, request, order=None): 117 """Only give view permission if the user is an organiser.""" 118 if order is not None and not is_organiser( 119 request.member, order.food_event.event 120 ): 121 return False 122 return super().has_view_permission(request, order) 123 124 def has_change_permission(self, request, order=None): 125 """Only give change permission if the user is an organiser.""" 126 if order is not None and not is_organiser( 127 request.member, order.food_event.event 128 ): 129 return False 130 return super().has_change_permission(request, order) 131 132 def has_delete_permission(self, request, order=None): 133 """Only give delete permission if the user is an organiser.""" 134 if order is not None and not is_organiser( 135 request.member, order.food_event.event 136 ): 137 return False 138 return super().has_delete_permission(request, order) 139 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/website/pizzas/admin.py b/website/pizzas/admin.py --- a/website/pizzas/admin.py +++ b/website/pizzas/admin.py @@ -1,5 +1,4 @@ """Registers admin interfaces for the pizzas module.""" -from django.conf import settings from django.contrib import admin from django.core.exceptions import PermissionDenied from django.forms import Field @@ -31,7 +30,7 @@ list_display = ("title", "start", "end", "notification_enabled", "orders") date_hierarchy = "start" exclude = ("end_reminder",) - search_fields = [f"event__title_{l[0]}" for l in settings.LANGUAGES] + search_fields = ("event__title",) autocomplete_fields = ("event",) def notification_enabled(self, obj):
{"golden_diff": "diff --git a/website/pizzas/admin.py b/website/pizzas/admin.py\n--- a/website/pizzas/admin.py\n+++ b/website/pizzas/admin.py\n@@ -1,5 +1,4 @@\n \"\"\"Registers admin interfaces for the pizzas module.\"\"\"\n-from django.conf import settings\n from django.contrib import admin\n from django.core.exceptions import PermissionDenied\n from django.forms import Field\n@@ -31,7 +30,7 @@\n list_display = (\"title\", \"start\", \"end\", \"notification_enabled\", \"orders\")\n date_hierarchy = \"start\"\n exclude = (\"end_reminder\",)\n- search_fields = [f\"event__title_{l[0]}\" for l in settings.LANGUAGES]\n+ search_fields = (\"event__title\",)\n autocomplete_fields = (\"event\",)\n \n def notification_enabled(self, obj):\n", "issue": "Error in pizza admin related to title_en\nSentry Issue: [CONCREXIT-9W](https://sentry.io/organizations/thalia/issues/2692657224/?referrer=github_integration)\n\n```\nFieldError: Related Field got invalid lookup: title_en\n(16 additional frame(s) were not displayed)\n...\n File \"django/db/models/sql/query.py\", line 1393, in add_q\n clause, _ = self._add_q(q_object, self.used_aliases)\n File \"django/db/models/sql/query.py\", line 1412, in _add_q\n child_clause, needed_inner = self.build_filter(\n File \"django/db/models/sql/query.py\", line 1265, in build_filter\n return self._add_q(\n File \"django/db/models/sql/query.py\", line 1412, in _add_q\n child_clause, needed_inner = self.build_filter(\n File \"django/db/models/sql/query.py\", line 1339, in build_filter\n raise FieldError('Related Field got invalid lookup: {}'.format(lookups[0]))\n```\n", "before_files": [{"content": "\"\"\"Registers admin interfaces for the pizzas module.\"\"\"\nfrom django.conf import settings\nfrom django.contrib import admin\nfrom django.core.exceptions import PermissionDenied\nfrom django.forms import Field\nfrom django.urls import reverse, path\nfrom django.utils.html import format_html\nfrom django.utils.translation import gettext_lazy as _\n\nfrom events import services\nfrom events.services import is_organiser\nfrom payments.widgets import PaymentWidget\nfrom pizzas import admin_views\nfrom utils.admin import DoNextModelAdmin\nfrom .models import FoodOrder, FoodEvent, Product\n\n\[email protected](Product)\nclass ProductAdmin(admin.ModelAdmin):\n \"\"\"Manage the products.\"\"\"\n\n list_display = (\"name\", \"price\", \"available\")\n list_filter = (\"available\", \"restricted\")\n search_fields = (\"name\",)\n\n\[email protected](FoodEvent)\nclass FoodEventAdmin(admin.ModelAdmin):\n \"\"\"Manage the pizza events.\"\"\"\n\n list_display = (\"title\", \"start\", \"end\", \"notification_enabled\", \"orders\")\n date_hierarchy = \"start\"\n exclude = (\"end_reminder\",)\n search_fields = [f\"event__title_{l[0]}\" for l in settings.LANGUAGES]\n autocomplete_fields = (\"event\",)\n\n def notification_enabled(self, obj):\n return obj.send_notification\n\n notification_enabled.short_description = _(\"reminder\")\n notification_enabled.admin_order_field = \"send_notification\"\n notification_enabled.boolean = True\n\n def has_change_permission(self, request, obj=None):\n \"\"\"Only allow access to the change form if the user is an organiser.\"\"\"\n if obj is not None and not services.is_organiser(request.member, obj.event):\n return False\n return super().has_change_permission(request, obj)\n\n def has_delete_permission(self, request, obj=None):\n \"\"\"Only allow access to delete if the user is an organiser.\"\"\"\n if obj is not None and not services.is_organiser(request.member, obj.event):\n return False\n return super().has_delete_permission(request, obj)\n\n def orders(self, obj):\n url = reverse(\"admin:pizzas_foodevent_details\", kwargs={\"pk\": obj.pk})\n return format_html('<a href=\"{url}\">{text}</a>', url=url, text=_(\"Orders\"))\n\n def get_urls(self):\n urls = super().get_urls()\n custom_urls = [\n path(\n \"<int:pk>/details/\",\n self.admin_site.admin_view(\n admin_views.PizzaOrderDetails.as_view(admin=self)\n ),\n name=\"pizzas_foodevent_details\",\n ),\n path(\n \"<int:pk>/overview/\",\n self.admin_site.admin_view(\n admin_views.PizzaOrderSummary.as_view(admin=self)\n ),\n name=\"pizzas_foodevent_overview\",\n ),\n ]\n return custom_urls + urls\n\n\[email protected](FoodOrder)\nclass FoodOrderAdmin(DoNextModelAdmin):\n \"\"\"Manage the orders.\"\"\"\n\n list_display = (\n \"food_event\",\n \"member_first_name\",\n \"member_last_name\",\n \"product\",\n \"payment\",\n )\n\n fields = (\n \"food_event\",\n \"member\",\n \"name\",\n \"product\",\n \"payment\",\n )\n\n def formfield_for_dbfield(self, db_field, request, obj=None, **kwargs):\n \"\"\"Payment field widget.\"\"\"\n field = super().formfield_for_dbfield(db_field, request, **kwargs)\n if db_field.name == \"payment\":\n return Field(\n widget=PaymentWidget(obj=obj), initial=field.initial, required=False,\n )\n return field\n\n def save_model(self, request, obj, form, change):\n \"\"\"You can only save the orders if you have permission.\"\"\"\n if not is_organiser(request.member, obj.food_event.event):\n raise PermissionDenied\n return super().save_model(request, obj, form, change)\n\n def has_view_permission(self, request, order=None):\n \"\"\"Only give view permission if the user is an organiser.\"\"\"\n if order is not None and not is_organiser(\n request.member, order.food_event.event\n ):\n return False\n return super().has_view_permission(request, order)\n\n def has_change_permission(self, request, order=None):\n \"\"\"Only give change permission if the user is an organiser.\"\"\"\n if order is not None and not is_organiser(\n request.member, order.food_event.event\n ):\n return False\n return super().has_change_permission(request, order)\n\n def has_delete_permission(self, request, order=None):\n \"\"\"Only give delete permission if the user is an organiser.\"\"\"\n if order is not None and not is_organiser(\n request.member, order.food_event.event\n ):\n return False\n return super().has_delete_permission(request, order)\n", "path": "website/pizzas/admin.py"}], "after_files": [{"content": "\"\"\"Registers admin interfaces for the pizzas module.\"\"\"\nfrom django.contrib import admin\nfrom django.core.exceptions import PermissionDenied\nfrom django.forms import Field\nfrom django.urls import reverse, path\nfrom django.utils.html import format_html\nfrom django.utils.translation import gettext_lazy as _\n\nfrom events import services\nfrom events.services import is_organiser\nfrom payments.widgets import PaymentWidget\nfrom pizzas import admin_views\nfrom utils.admin import DoNextModelAdmin\nfrom .models import FoodOrder, FoodEvent, Product\n\n\[email protected](Product)\nclass ProductAdmin(admin.ModelAdmin):\n \"\"\"Manage the products.\"\"\"\n\n list_display = (\"name\", \"price\", \"available\")\n list_filter = (\"available\", \"restricted\")\n search_fields = (\"name\",)\n\n\[email protected](FoodEvent)\nclass FoodEventAdmin(admin.ModelAdmin):\n \"\"\"Manage the pizza events.\"\"\"\n\n list_display = (\"title\", \"start\", \"end\", \"notification_enabled\", \"orders\")\n date_hierarchy = \"start\"\n exclude = (\"end_reminder\",)\n search_fields = (\"event__title\",)\n autocomplete_fields = (\"event\",)\n\n def notification_enabled(self, obj):\n return obj.send_notification\n\n notification_enabled.short_description = _(\"reminder\")\n notification_enabled.admin_order_field = \"send_notification\"\n notification_enabled.boolean = True\n\n def has_change_permission(self, request, obj=None):\n \"\"\"Only allow access to the change form if the user is an organiser.\"\"\"\n if obj is not None and not services.is_organiser(request.member, obj.event):\n return False\n return super().has_change_permission(request, obj)\n\n def has_delete_permission(self, request, obj=None):\n \"\"\"Only allow access to delete if the user is an organiser.\"\"\"\n if obj is not None and not services.is_organiser(request.member, obj.event):\n return False\n return super().has_delete_permission(request, obj)\n\n def orders(self, obj):\n url = reverse(\"admin:pizzas_foodevent_details\", kwargs={\"pk\": obj.pk})\n return format_html('<a href=\"{url}\">{text}</a>', url=url, text=_(\"Orders\"))\n\n def get_urls(self):\n urls = super().get_urls()\n custom_urls = [\n path(\n \"<int:pk>/details/\",\n self.admin_site.admin_view(\n admin_views.PizzaOrderDetails.as_view(admin=self)\n ),\n name=\"pizzas_foodevent_details\",\n ),\n path(\n \"<int:pk>/overview/\",\n self.admin_site.admin_view(\n admin_views.PizzaOrderSummary.as_view(admin=self)\n ),\n name=\"pizzas_foodevent_overview\",\n ),\n ]\n return custom_urls + urls\n\n\[email protected](FoodOrder)\nclass FoodOrderAdmin(DoNextModelAdmin):\n \"\"\"Manage the orders.\"\"\"\n\n list_display = (\n \"food_event\",\n \"member_first_name\",\n \"member_last_name\",\n \"product\",\n \"payment\",\n )\n\n fields = (\n \"food_event\",\n \"member\",\n \"name\",\n \"product\",\n \"payment\",\n )\n\n def formfield_for_dbfield(self, db_field, request, obj=None, **kwargs):\n \"\"\"Payment field widget.\"\"\"\n field = super().formfield_for_dbfield(db_field, request, **kwargs)\n if db_field.name == \"payment\":\n return Field(\n widget=PaymentWidget(obj=obj), initial=field.initial, required=False,\n )\n return field\n\n def save_model(self, request, obj, form, change):\n \"\"\"You can only save the orders if you have permission.\"\"\"\n if not is_organiser(request.member, obj.food_event.event):\n raise PermissionDenied\n return super().save_model(request, obj, form, change)\n\n def has_view_permission(self, request, order=None):\n \"\"\"Only give view permission if the user is an organiser.\"\"\"\n if order is not None and not is_organiser(\n request.member, order.food_event.event\n ):\n return False\n return super().has_view_permission(request, order)\n\n def has_change_permission(self, request, order=None):\n \"\"\"Only give change permission if the user is an organiser.\"\"\"\n if order is not None and not is_organiser(\n request.member, order.food_event.event\n ):\n return False\n return super().has_change_permission(request, order)\n\n def has_delete_permission(self, request, order=None):\n \"\"\"Only give delete permission if the user is an organiser.\"\"\"\n if order is not None and not is_organiser(\n request.member, order.food_event.event\n ):\n return False\n return super().has_delete_permission(request, order)\n", "path": "website/pizzas/admin.py"}]}
1,832
182
gh_patches_debug_10832
rasdani/github-patches
git_diff
pyqtgraph__pyqtgraph-309
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- unexpected 'str' during Arrow test testing on Windows Python 3.4.4/Qt5.5.1/ PyQtgraph github of 20160102, I have thefollowing error on "Arrow" test: ``` Using PyQt5 (default graphics system) Using PyQt5 (default graphics system) Using PyQt5 (default graphics system) Using PyQt5 (default graphics system) Traceback (most recent call last): File "D:\WinPython\basedir34\buildQt5\winpython-3.4.4.amd64\python-3.4.4.amd64 \lib\site-packages\pyqtgraph\examples\Arrow.py", line 50, in <module> anim = a.makeAnimation(loop=-1) File "D:\WinPython\basedir34\buildQt5\winpython-3.4.4.amd64\python-3.4.4.amd64 \lib\site-packages\pyqtgraph\graphicsItems\CurvePoint.py", line 94, in makeAnima tion anim = QtCore.QPropertyAnimation(self, prop) TypeError: arguments did not match any overloaded call: QPropertyAnimation(QObject parent=None): too many arguments QPropertyAnimation(QObject, QByteArray, QObject parent=None): argument 2 has u nexpected type 'str' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pyqtgraph/graphicsItems/CurvePoint.py` Content: ``` 1 from ..Qt import QtGui, QtCore 2 from . import ArrowItem 3 import numpy as np 4 from ..Point import Point 5 import weakref 6 from .GraphicsObject import GraphicsObject 7 8 __all__ = ['CurvePoint', 'CurveArrow'] 9 class CurvePoint(GraphicsObject): 10 """A GraphicsItem that sets its location to a point on a PlotCurveItem. 11 Also rotates to be tangent to the curve. 12 The position along the curve is a Qt property, and thus can be easily animated. 13 14 Note: This class does not display anything; see CurveArrow for an applied example 15 """ 16 17 def __init__(self, curve, index=0, pos=None, rotate=True): 18 """Position can be set either as an index referring to the sample number or 19 the position 0.0 - 1.0 20 If *rotate* is True, then the item rotates to match the tangent of the curve. 21 """ 22 23 GraphicsObject.__init__(self) 24 #QObjectWorkaround.__init__(self) 25 self._rotate = rotate 26 self.curve = weakref.ref(curve) 27 self.setParentItem(curve) 28 self.setProperty('position', 0.0) 29 self.setProperty('index', 0) 30 31 if hasattr(self, 'ItemHasNoContents'): 32 self.setFlags(self.flags() | self.ItemHasNoContents) 33 34 if pos is not None: 35 self.setPos(pos) 36 else: 37 self.setIndex(index) 38 39 def setPos(self, pos): 40 self.setProperty('position', float(pos))## cannot use numpy types here, MUST be python float. 41 42 def setIndex(self, index): 43 self.setProperty('index', int(index)) ## cannot use numpy types here, MUST be python int. 44 45 def event(self, ev): 46 if not isinstance(ev, QtCore.QDynamicPropertyChangeEvent) or self.curve() is None: 47 return False 48 49 if ev.propertyName() == 'index': 50 index = self.property('index') 51 if 'QVariant' in repr(index): 52 index = index.toInt()[0] 53 elif ev.propertyName() == 'position': 54 index = None 55 else: 56 return False 57 58 (x, y) = self.curve().getData() 59 if index is None: 60 #print ev.propertyName(), self.property('position').toDouble()[0], self.property('position').typeName() 61 pos = self.property('position') 62 if 'QVariant' in repr(pos): ## need to support 2 APIs :( 63 pos = pos.toDouble()[0] 64 index = (len(x)-1) * np.clip(pos, 0.0, 1.0) 65 66 if index != int(index): ## interpolate floating-point values 67 i1 = int(index) 68 i2 = np.clip(i1+1, 0, len(x)-1) 69 s2 = index-i1 70 s1 = 1.0-s2 71 newPos = (x[i1]*s1+x[i2]*s2, y[i1]*s1+y[i2]*s2) 72 else: 73 index = int(index) 74 i1 = np.clip(index-1, 0, len(x)-1) 75 i2 = np.clip(index+1, 0, len(x)-1) 76 newPos = (x[index], y[index]) 77 78 p1 = self.parentItem().mapToScene(QtCore.QPointF(x[i1], y[i1])) 79 p2 = self.parentItem().mapToScene(QtCore.QPointF(x[i2], y[i2])) 80 ang = np.arctan2(p2.y()-p1.y(), p2.x()-p1.x()) ## returns radians 81 self.resetTransform() 82 if self._rotate: 83 self.rotate(180+ ang * 180 / np.pi) ## takes degrees 84 QtGui.QGraphicsItem.setPos(self, *newPos) 85 return True 86 87 def boundingRect(self): 88 return QtCore.QRectF() 89 90 def paint(self, *args): 91 pass 92 93 def makeAnimation(self, prop='position', start=0.0, end=1.0, duration=10000, loop=1): 94 anim = QtCore.QPropertyAnimation(self, prop) 95 anim.setDuration(duration) 96 anim.setStartValue(start) 97 anim.setEndValue(end) 98 anim.setLoopCount(loop) 99 return anim 100 101 102 class CurveArrow(CurvePoint): 103 """Provides an arrow that points to any specific sample on a PlotCurveItem. 104 Provides properties that can be animated.""" 105 106 def __init__(self, curve, index=0, pos=None, **opts): 107 CurvePoint.__init__(self, curve, index=index, pos=pos) 108 if opts.get('pxMode', True): 109 opts['pxMode'] = False 110 self.setFlags(self.flags() | self.ItemIgnoresTransformations) 111 opts['angle'] = 0 112 self.arrow = ArrowItem.ArrowItem(**opts) 113 self.arrow.setParentItem(self) 114 115 def setStyle(self, **opts): 116 return self.arrow.setStyle(**opts) 117 118 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pyqtgraph/graphicsItems/CurvePoint.py b/pyqtgraph/graphicsItems/CurvePoint.py --- a/pyqtgraph/graphicsItems/CurvePoint.py +++ b/pyqtgraph/graphicsItems/CurvePoint.py @@ -91,6 +91,11 @@ pass def makeAnimation(self, prop='position', start=0.0, end=1.0, duration=10000, loop=1): + # In Python 3, a bytes object needs to be used as a property name in + # QPropertyAnimation. PyQt stopped automatically encoding a str when a + # QByteArray was expected in v5.5 (see qbytearray.sip). + if not isinstance(prop, bytes): + prop = prop.encode('latin-1') anim = QtCore.QPropertyAnimation(self, prop) anim.setDuration(duration) anim.setStartValue(start)
{"golden_diff": "diff --git a/pyqtgraph/graphicsItems/CurvePoint.py b/pyqtgraph/graphicsItems/CurvePoint.py\n--- a/pyqtgraph/graphicsItems/CurvePoint.py\n+++ b/pyqtgraph/graphicsItems/CurvePoint.py\n@@ -91,6 +91,11 @@\n pass\n \n def makeAnimation(self, prop='position', start=0.0, end=1.0, duration=10000, loop=1):\n+ # In Python 3, a bytes object needs to be used as a property name in\n+ # QPropertyAnimation. PyQt stopped automatically encoding a str when a\n+ # QByteArray was expected in v5.5 (see qbytearray.sip).\n+ if not isinstance(prop, bytes):\n+ prop = prop.encode('latin-1')\n anim = QtCore.QPropertyAnimation(self, prop)\n anim.setDuration(duration)\n anim.setStartValue(start)\n", "issue": "unexpected 'str' during Arrow test\ntesting on Windows Python 3.4.4/Qt5.5.1/ PyQtgraph github of 20160102, I have thefollowing error on \"Arrow\" test:\n\n```\nUsing PyQt5 (default graphics system)\nUsing PyQt5 (default graphics system)\nUsing PyQt5 (default graphics system)\nUsing PyQt5 (default graphics system)\nTraceback (most recent call last):\n File \"D:\\WinPython\\basedir34\\buildQt5\\winpython-3.4.4.amd64\\python-3.4.4.amd64\n\\lib\\site-packages\\pyqtgraph\\examples\\Arrow.py\", line 50, in <module>\n anim = a.makeAnimation(loop=-1)\n File \"D:\\WinPython\\basedir34\\buildQt5\\winpython-3.4.4.amd64\\python-3.4.4.amd64\n\\lib\\site-packages\\pyqtgraph\\graphicsItems\\CurvePoint.py\", line 94, in makeAnima\ntion\n anim = QtCore.QPropertyAnimation(self, prop)\nTypeError: arguments did not match any overloaded call:\n QPropertyAnimation(QObject parent=None): too many arguments\n QPropertyAnimation(QObject, QByteArray, QObject parent=None): argument 2 has u\nnexpected type 'str'\n\n```\n\n", "before_files": [{"content": "from ..Qt import QtGui, QtCore\nfrom . import ArrowItem\nimport numpy as np\nfrom ..Point import Point\nimport weakref\nfrom .GraphicsObject import GraphicsObject\n\n__all__ = ['CurvePoint', 'CurveArrow']\nclass CurvePoint(GraphicsObject):\n \"\"\"A GraphicsItem that sets its location to a point on a PlotCurveItem.\n Also rotates to be tangent to the curve.\n The position along the curve is a Qt property, and thus can be easily animated.\n \n Note: This class does not display anything; see CurveArrow for an applied example\n \"\"\"\n \n def __init__(self, curve, index=0, pos=None, rotate=True):\n \"\"\"Position can be set either as an index referring to the sample number or\n the position 0.0 - 1.0\n If *rotate* is True, then the item rotates to match the tangent of the curve.\n \"\"\"\n \n GraphicsObject.__init__(self)\n #QObjectWorkaround.__init__(self)\n self._rotate = rotate\n self.curve = weakref.ref(curve)\n self.setParentItem(curve)\n self.setProperty('position', 0.0)\n self.setProperty('index', 0)\n \n if hasattr(self, 'ItemHasNoContents'):\n self.setFlags(self.flags() | self.ItemHasNoContents)\n \n if pos is not None:\n self.setPos(pos)\n else:\n self.setIndex(index)\n \n def setPos(self, pos):\n self.setProperty('position', float(pos))## cannot use numpy types here, MUST be python float.\n \n def setIndex(self, index):\n self.setProperty('index', int(index)) ## cannot use numpy types here, MUST be python int.\n \n def event(self, ev):\n if not isinstance(ev, QtCore.QDynamicPropertyChangeEvent) or self.curve() is None:\n return False\n \n if ev.propertyName() == 'index':\n index = self.property('index')\n if 'QVariant' in repr(index):\n index = index.toInt()[0]\n elif ev.propertyName() == 'position':\n index = None\n else:\n return False\n \n (x, y) = self.curve().getData()\n if index is None:\n #print ev.propertyName(), self.property('position').toDouble()[0], self.property('position').typeName()\n pos = self.property('position')\n if 'QVariant' in repr(pos): ## need to support 2 APIs :(\n pos = pos.toDouble()[0]\n index = (len(x)-1) * np.clip(pos, 0.0, 1.0)\n \n if index != int(index): ## interpolate floating-point values\n i1 = int(index)\n i2 = np.clip(i1+1, 0, len(x)-1)\n s2 = index-i1\n s1 = 1.0-s2\n newPos = (x[i1]*s1+x[i2]*s2, y[i1]*s1+y[i2]*s2)\n else:\n index = int(index)\n i1 = np.clip(index-1, 0, len(x)-1)\n i2 = np.clip(index+1, 0, len(x)-1)\n newPos = (x[index], y[index])\n \n p1 = self.parentItem().mapToScene(QtCore.QPointF(x[i1], y[i1]))\n p2 = self.parentItem().mapToScene(QtCore.QPointF(x[i2], y[i2]))\n ang = np.arctan2(p2.y()-p1.y(), p2.x()-p1.x()) ## returns radians\n self.resetTransform()\n if self._rotate:\n self.rotate(180+ ang * 180 / np.pi) ## takes degrees\n QtGui.QGraphicsItem.setPos(self, *newPos)\n return True\n \n def boundingRect(self):\n return QtCore.QRectF()\n \n def paint(self, *args):\n pass\n \n def makeAnimation(self, prop='position', start=0.0, end=1.0, duration=10000, loop=1):\n anim = QtCore.QPropertyAnimation(self, prop)\n anim.setDuration(duration)\n anim.setStartValue(start)\n anim.setEndValue(end)\n anim.setLoopCount(loop)\n return anim\n\n\nclass CurveArrow(CurvePoint):\n \"\"\"Provides an arrow that points to any specific sample on a PlotCurveItem.\n Provides properties that can be animated.\"\"\"\n \n def __init__(self, curve, index=0, pos=None, **opts):\n CurvePoint.__init__(self, curve, index=index, pos=pos)\n if opts.get('pxMode', True):\n opts['pxMode'] = False\n self.setFlags(self.flags() | self.ItemIgnoresTransformations)\n opts['angle'] = 0\n self.arrow = ArrowItem.ArrowItem(**opts)\n self.arrow.setParentItem(self)\n \n def setStyle(self, **opts):\n return self.arrow.setStyle(**opts)\n \n", "path": "pyqtgraph/graphicsItems/CurvePoint.py"}], "after_files": [{"content": "from ..Qt import QtGui, QtCore\nfrom . import ArrowItem\nimport numpy as np\nfrom ..Point import Point\nimport weakref\nfrom .GraphicsObject import GraphicsObject\n\n__all__ = ['CurvePoint', 'CurveArrow']\nclass CurvePoint(GraphicsObject):\n \"\"\"A GraphicsItem that sets its location to a point on a PlotCurveItem.\n Also rotates to be tangent to the curve.\n The position along the curve is a Qt property, and thus can be easily animated.\n \n Note: This class does not display anything; see CurveArrow for an applied example\n \"\"\"\n \n def __init__(self, curve, index=0, pos=None, rotate=True):\n \"\"\"Position can be set either as an index referring to the sample number or\n the position 0.0 - 1.0\n If *rotate* is True, then the item rotates to match the tangent of the curve.\n \"\"\"\n \n GraphicsObject.__init__(self)\n #QObjectWorkaround.__init__(self)\n self._rotate = rotate\n self.curve = weakref.ref(curve)\n self.setParentItem(curve)\n self.setProperty('position', 0.0)\n self.setProperty('index', 0)\n \n if hasattr(self, 'ItemHasNoContents'):\n self.setFlags(self.flags() | self.ItemHasNoContents)\n \n if pos is not None:\n self.setPos(pos)\n else:\n self.setIndex(index)\n \n def setPos(self, pos):\n self.setProperty('position', float(pos))## cannot use numpy types here, MUST be python float.\n \n def setIndex(self, index):\n self.setProperty('index', int(index)) ## cannot use numpy types here, MUST be python int.\n \n def event(self, ev):\n if not isinstance(ev, QtCore.QDynamicPropertyChangeEvent) or self.curve() is None:\n return False\n \n if ev.propertyName() == 'index':\n index = self.property('index')\n if 'QVariant' in repr(index):\n index = index.toInt()[0]\n elif ev.propertyName() == 'position':\n index = None\n else:\n return False\n \n (x, y) = self.curve().getData()\n if index is None:\n #print ev.propertyName(), self.property('position').toDouble()[0], self.property('position').typeName()\n pos = self.property('position')\n if 'QVariant' in repr(pos): ## need to support 2 APIs :(\n pos = pos.toDouble()[0]\n index = (len(x)-1) * np.clip(pos, 0.0, 1.0)\n \n if index != int(index): ## interpolate floating-point values\n i1 = int(index)\n i2 = np.clip(i1+1, 0, len(x)-1)\n s2 = index-i1\n s1 = 1.0-s2\n newPos = (x[i1]*s1+x[i2]*s2, y[i1]*s1+y[i2]*s2)\n else:\n index = int(index)\n i1 = np.clip(index-1, 0, len(x)-1)\n i2 = np.clip(index+1, 0, len(x)-1)\n newPos = (x[index], y[index])\n \n p1 = self.parentItem().mapToScene(QtCore.QPointF(x[i1], y[i1]))\n p2 = self.parentItem().mapToScene(QtCore.QPointF(x[i2], y[i2]))\n ang = np.arctan2(p2.y()-p1.y(), p2.x()-p1.x()) ## returns radians\n self.resetTransform()\n if self._rotate:\n self.rotate(180+ ang * 180 / np.pi) ## takes degrees\n QtGui.QGraphicsItem.setPos(self, *newPos)\n return True\n \n def boundingRect(self):\n return QtCore.QRectF()\n \n def paint(self, *args):\n pass\n \n def makeAnimation(self, prop='position', start=0.0, end=1.0, duration=10000, loop=1):\n # In Python 3, a bytes object needs to be used as a property name in\n # QPropertyAnimation. PyQt stopped automatically encoding a str when a\n # QByteArray was expected in v5.5 (see qbytearray.sip).\n if not isinstance(prop, bytes):\n prop = prop.encode('latin-1')\n anim = QtCore.QPropertyAnimation(self, prop)\n anim.setDuration(duration)\n anim.setStartValue(start)\n anim.setEndValue(end)\n anim.setLoopCount(loop)\n return anim\n\n\nclass CurveArrow(CurvePoint):\n \"\"\"Provides an arrow that points to any specific sample on a PlotCurveItem.\n Provides properties that can be animated.\"\"\"\n \n def __init__(self, curve, index=0, pos=None, **opts):\n CurvePoint.__init__(self, curve, index=index, pos=pos)\n if opts.get('pxMode', True):\n opts['pxMode'] = False\n self.setFlags(self.flags() | self.ItemIgnoresTransformations)\n opts['angle'] = 0\n self.arrow = ArrowItem.ArrowItem(**opts)\n self.arrow.setParentItem(self)\n \n def setStyle(self, **opts):\n return self.arrow.setStyle(**opts)\n \n", "path": "pyqtgraph/graphicsItems/CurvePoint.py"}]}
1,894
202
gh_patches_debug_33750
rasdani/github-patches
git_diff
conan-io__conan-4349
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Fix experimental make generator CONAN_CPPFLAGS and CONAN_INCLUDE_PATHS Following the conversation here https://github.com/conan-io/conan/issues/4286#issuecomment-454194188 We have finally reached the conclusion of having ``cpp_info.cppflags`` converted to ``CONAN_CXXFLAGS`` in the ``make`` generator to be consistent with orhter generators such as ``cmake``. Also the flag ``CONAN_INCLUDE_PATHS`` should be renamed to ``CONAN_INCLUDE_DIRS`` for the same reason. In another issue we would probably introduce a ``cpp_info.cxxflags`` that would be an internal alias of ``cpp_info.cppflags`` to avoid this confusion without breaking. cc/ @solvingj --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `conans/client/generators/make.py` Content: ``` 1 from conans.model import Generator 2 from conans.paths import BUILD_INFO_MAKE 3 4 5 class MakeGenerator(Generator): 6 7 def __init__(self, conanfile): 8 Generator.__init__(self, conanfile) 9 self.makefile_newline = "\n" 10 self.makefile_line_continuation = " \\\n" 11 self.assignment_if_absent = " ?= " 12 self.assignment_append = " += " 13 14 @property 15 def filename(self): 16 return BUILD_INFO_MAKE 17 18 @property 19 def content(self): 20 21 content = [ 22 "#-------------------------------------------------------------------#", 23 "# Makefile variables from Conan Dependencies #", 24 "#-------------------------------------------------------------------#", 25 "", 26 ] 27 28 for line_as_list in self.create_deps_content(): 29 content.append("".join(line_as_list)) 30 31 content.append("#-------------------------------------------------------------------#") 32 content.append(self.makefile_newline) 33 return self.makefile_newline.join(content) 34 35 def create_deps_content(self): 36 deps_content = self.create_content_from_deps() 37 deps_content.extend(self.create_combined_content()) 38 return deps_content 39 40 def create_content_from_deps(self): 41 content = [] 42 for pkg_name, cpp_info in self.deps_build_info.dependencies: 43 content.extend(self.create_content_from_dep(pkg_name, cpp_info)) 44 return content 45 46 def create_content_from_dep(self, pkg_name, cpp_info): 47 48 vars_info = [("ROOT", self.assignment_if_absent, [cpp_info.rootpath]), 49 ("SYSROOT", self.assignment_if_absent, [cpp_info.sysroot]), 50 ("INCLUDE_PATHS", self.assignment_append, cpp_info.include_paths), 51 ("LIB_PATHS", self.assignment_append, cpp_info.lib_paths), 52 ("BIN_PATHS", self.assignment_append, cpp_info.bin_paths), 53 ("BUILD_PATHS", self.assignment_append, cpp_info.build_paths), 54 ("RES_PATHS", self.assignment_append, cpp_info.res_paths), 55 ("LIBS", self.assignment_append, cpp_info.libs), 56 ("DEFINES", self.assignment_append, cpp_info.defines), 57 ("CFLAGS", self.assignment_append, cpp_info.cflags), 58 ("CPPFLAGS", self.assignment_append, cpp_info.cppflags), 59 ("SHAREDLINKFLAGS", self.assignment_append, cpp_info.sharedlinkflags), 60 ("EXELINKFLAGS", self.assignment_append, cpp_info.exelinkflags)] 61 62 return [self.create_makefile_var_pkg(var_name, pkg_name, operator, info) 63 for var_name, operator, info in vars_info] 64 65 def create_combined_content(self): 66 content = [] 67 for var_name in self.all_dep_vars(): 68 content.append(self.create_makefile_var_global(var_name, self.assignment_append, 69 self.create_combined_var_list(var_name))) 70 return content 71 72 def create_combined_var_list(self, var_name): 73 make_vars = [] 74 for pkg_name, _ in self.deps_build_info.dependencies: 75 pkg_var = self.create_makefile_var_name_pkg(var_name, pkg_name) 76 make_vars.append("$({pkg_var})".format(pkg_var=pkg_var)) 77 return make_vars 78 79 def create_makefile_var_global(self, var_name, operator, values): 80 make_var = [self.create_makefile_var_name_global(var_name)] 81 make_var.extend(self.create_makefile_var_common(operator, values)) 82 return make_var 83 84 def create_makefile_var_pkg(self, var_name, pkg_name, operator, values): 85 make_var = [self.create_makefile_var_name_pkg(var_name, pkg_name)] 86 make_var.extend(self.create_makefile_var_common(operator, values)) 87 return make_var 88 89 def create_makefile_var_common(self, operator, values): 90 return [operator, self.makefile_line_continuation, self.create_makefile_var_value(values), 91 self.makefile_newline] 92 93 @staticmethod 94 def create_makefile_var_name_global(var_name): 95 return "CONAN_{var}".format(var=var_name).upper() 96 97 @staticmethod 98 def create_makefile_var_name_pkg(var_name, pkg_name): 99 return "CONAN_{var}_{lib}".format(var=var_name, lib=pkg_name).upper() 100 101 def create_makefile_var_value(self, values): 102 formatted_values = [value.replace("\\", "/") for value in values] 103 return self.makefile_line_continuation.join(formatted_values) 104 105 @staticmethod 106 def all_dep_vars(): 107 return ["rootpath", "sysroot", "include_paths", "lib_paths", "bin_paths", "build_paths", 108 "res_paths", "libs", "defines", "cflags", "cppflags", "sharedlinkflags", 109 "exelinkflags"] 110 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/conans/client/generators/make.py b/conans/client/generators/make.py --- a/conans/client/generators/make.py +++ b/conans/client/generators/make.py @@ -47,15 +47,15 @@ vars_info = [("ROOT", self.assignment_if_absent, [cpp_info.rootpath]), ("SYSROOT", self.assignment_if_absent, [cpp_info.sysroot]), - ("INCLUDE_PATHS", self.assignment_append, cpp_info.include_paths), - ("LIB_PATHS", self.assignment_append, cpp_info.lib_paths), - ("BIN_PATHS", self.assignment_append, cpp_info.bin_paths), - ("BUILD_PATHS", self.assignment_append, cpp_info.build_paths), - ("RES_PATHS", self.assignment_append, cpp_info.res_paths), + ("INCLUDE_DIRS", self.assignment_append, cpp_info.include_paths), + ("LIB_DIRS", self.assignment_append, cpp_info.lib_paths), + ("BIN_DIRS", self.assignment_append, cpp_info.bin_paths), + ("BUILD_DIRS", self.assignment_append, cpp_info.build_paths), + ("RES_DIRS", self.assignment_append, cpp_info.res_paths), ("LIBS", self.assignment_append, cpp_info.libs), ("DEFINES", self.assignment_append, cpp_info.defines), ("CFLAGS", self.assignment_append, cpp_info.cflags), - ("CPPFLAGS", self.assignment_append, cpp_info.cppflags), + ("CXXFLAGS", self.assignment_append, cpp_info.cppflags), ("SHAREDLINKFLAGS", self.assignment_append, cpp_info.sharedlinkflags), ("EXELINKFLAGS", self.assignment_append, cpp_info.exelinkflags)] @@ -104,6 +104,6 @@ @staticmethod def all_dep_vars(): - return ["rootpath", "sysroot", "include_paths", "lib_paths", "bin_paths", "build_paths", - "res_paths", "libs", "defines", "cflags", "cppflags", "sharedlinkflags", + return ["rootpath", "sysroot", "include_dirs", "lib_dirs", "bin_dirs", "build_dirs", + "res_dirs", "libs", "defines", "cflags", "cxxflags", "sharedlinkflags", "exelinkflags"]
{"golden_diff": "diff --git a/conans/client/generators/make.py b/conans/client/generators/make.py\n--- a/conans/client/generators/make.py\n+++ b/conans/client/generators/make.py\n@@ -47,15 +47,15 @@\n \n vars_info = [(\"ROOT\", self.assignment_if_absent, [cpp_info.rootpath]),\n (\"SYSROOT\", self.assignment_if_absent, [cpp_info.sysroot]),\n- (\"INCLUDE_PATHS\", self.assignment_append, cpp_info.include_paths),\n- (\"LIB_PATHS\", self.assignment_append, cpp_info.lib_paths),\n- (\"BIN_PATHS\", self.assignment_append, cpp_info.bin_paths),\n- (\"BUILD_PATHS\", self.assignment_append, cpp_info.build_paths),\n- (\"RES_PATHS\", self.assignment_append, cpp_info.res_paths),\n+ (\"INCLUDE_DIRS\", self.assignment_append, cpp_info.include_paths),\n+ (\"LIB_DIRS\", self.assignment_append, cpp_info.lib_paths),\n+ (\"BIN_DIRS\", self.assignment_append, cpp_info.bin_paths),\n+ (\"BUILD_DIRS\", self.assignment_append, cpp_info.build_paths),\n+ (\"RES_DIRS\", self.assignment_append, cpp_info.res_paths),\n (\"LIBS\", self.assignment_append, cpp_info.libs),\n (\"DEFINES\", self.assignment_append, cpp_info.defines),\n (\"CFLAGS\", self.assignment_append, cpp_info.cflags),\n- (\"CPPFLAGS\", self.assignment_append, cpp_info.cppflags),\n+ (\"CXXFLAGS\", self.assignment_append, cpp_info.cppflags),\n (\"SHAREDLINKFLAGS\", self.assignment_append, cpp_info.sharedlinkflags),\n (\"EXELINKFLAGS\", self.assignment_append, cpp_info.exelinkflags)]\n \n@@ -104,6 +104,6 @@\n \n @staticmethod\n def all_dep_vars():\n- return [\"rootpath\", \"sysroot\", \"include_paths\", \"lib_paths\", \"bin_paths\", \"build_paths\",\n- \"res_paths\", \"libs\", \"defines\", \"cflags\", \"cppflags\", \"sharedlinkflags\",\n+ return [\"rootpath\", \"sysroot\", \"include_dirs\", \"lib_dirs\", \"bin_dirs\", \"build_dirs\",\n+ \"res_dirs\", \"libs\", \"defines\", \"cflags\", \"cxxflags\", \"sharedlinkflags\",\n \"exelinkflags\"]\n", "issue": "Fix experimental make generator CONAN_CPPFLAGS and CONAN_INCLUDE_PATHS\nFollowing the conversation here https://github.com/conan-io/conan/issues/4286#issuecomment-454194188\r\n\r\nWe have finally reached the conclusion of having ``cpp_info.cppflags`` converted to ``CONAN_CXXFLAGS`` in the ``make`` generator to be consistent with orhter generators such as ``cmake``.\r\n\r\nAlso the flag ``CONAN_INCLUDE_PATHS`` should be renamed to ``CONAN_INCLUDE_DIRS`` for the same reason.\r\n\r\nIn another issue we would probably introduce a ``cpp_info.cxxflags`` that would be an internal alias of ``cpp_info.cppflags`` to avoid this confusion without breaking.\r\n\r\ncc/ @solvingj \n", "before_files": [{"content": "from conans.model import Generator\nfrom conans.paths import BUILD_INFO_MAKE\n\n\nclass MakeGenerator(Generator):\n\n def __init__(self, conanfile):\n Generator.__init__(self, conanfile)\n self.makefile_newline = \"\\n\"\n self.makefile_line_continuation = \" \\\\\\n\"\n self.assignment_if_absent = \" ?= \"\n self.assignment_append = \" += \"\n\n @property\n def filename(self):\n return BUILD_INFO_MAKE\n\n @property\n def content(self):\n\n content = [\n \"#-------------------------------------------------------------------#\",\n \"# Makefile variables from Conan Dependencies #\",\n \"#-------------------------------------------------------------------#\",\n \"\",\n ]\n\n for line_as_list in self.create_deps_content():\n content.append(\"\".join(line_as_list))\n\n content.append(\"#-------------------------------------------------------------------#\")\n content.append(self.makefile_newline)\n return self.makefile_newline.join(content)\n\n def create_deps_content(self):\n deps_content = self.create_content_from_deps()\n deps_content.extend(self.create_combined_content())\n return deps_content\n\n def create_content_from_deps(self):\n content = []\n for pkg_name, cpp_info in self.deps_build_info.dependencies:\n content.extend(self.create_content_from_dep(pkg_name, cpp_info))\n return content\n\n def create_content_from_dep(self, pkg_name, cpp_info):\n\n vars_info = [(\"ROOT\", self.assignment_if_absent, [cpp_info.rootpath]),\n (\"SYSROOT\", self.assignment_if_absent, [cpp_info.sysroot]),\n (\"INCLUDE_PATHS\", self.assignment_append, cpp_info.include_paths),\n (\"LIB_PATHS\", self.assignment_append, cpp_info.lib_paths),\n (\"BIN_PATHS\", self.assignment_append, cpp_info.bin_paths),\n (\"BUILD_PATHS\", self.assignment_append, cpp_info.build_paths),\n (\"RES_PATHS\", self.assignment_append, cpp_info.res_paths),\n (\"LIBS\", self.assignment_append, cpp_info.libs),\n (\"DEFINES\", self.assignment_append, cpp_info.defines),\n (\"CFLAGS\", self.assignment_append, cpp_info.cflags),\n (\"CPPFLAGS\", self.assignment_append, cpp_info.cppflags),\n (\"SHAREDLINKFLAGS\", self.assignment_append, cpp_info.sharedlinkflags),\n (\"EXELINKFLAGS\", self.assignment_append, cpp_info.exelinkflags)]\n\n return [self.create_makefile_var_pkg(var_name, pkg_name, operator, info)\n for var_name, operator, info in vars_info]\n\n def create_combined_content(self):\n content = []\n for var_name in self.all_dep_vars():\n content.append(self.create_makefile_var_global(var_name, self.assignment_append,\n self.create_combined_var_list(var_name)))\n return content\n\n def create_combined_var_list(self, var_name):\n make_vars = []\n for pkg_name, _ in self.deps_build_info.dependencies:\n pkg_var = self.create_makefile_var_name_pkg(var_name, pkg_name)\n make_vars.append(\"$({pkg_var})\".format(pkg_var=pkg_var))\n return make_vars\n\n def create_makefile_var_global(self, var_name, operator, values):\n make_var = [self.create_makefile_var_name_global(var_name)]\n make_var.extend(self.create_makefile_var_common(operator, values))\n return make_var\n\n def create_makefile_var_pkg(self, var_name, pkg_name, operator, values):\n make_var = [self.create_makefile_var_name_pkg(var_name, pkg_name)]\n make_var.extend(self.create_makefile_var_common(operator, values))\n return make_var\n\n def create_makefile_var_common(self, operator, values):\n return [operator, self.makefile_line_continuation, self.create_makefile_var_value(values),\n self.makefile_newline]\n\n @staticmethod\n def create_makefile_var_name_global(var_name):\n return \"CONAN_{var}\".format(var=var_name).upper()\n\n @staticmethod\n def create_makefile_var_name_pkg(var_name, pkg_name):\n return \"CONAN_{var}_{lib}\".format(var=var_name, lib=pkg_name).upper()\n\n def create_makefile_var_value(self, values):\n formatted_values = [value.replace(\"\\\\\", \"/\") for value in values]\n return self.makefile_line_continuation.join(formatted_values)\n\n @staticmethod\n def all_dep_vars():\n return [\"rootpath\", \"sysroot\", \"include_paths\", \"lib_paths\", \"bin_paths\", \"build_paths\",\n \"res_paths\", \"libs\", \"defines\", \"cflags\", \"cppflags\", \"sharedlinkflags\",\n \"exelinkflags\"]\n", "path": "conans/client/generators/make.py"}], "after_files": [{"content": "from conans.model import Generator\nfrom conans.paths import BUILD_INFO_MAKE\n\n\nclass MakeGenerator(Generator):\n\n def __init__(self, conanfile):\n Generator.__init__(self, conanfile)\n self.makefile_newline = \"\\n\"\n self.makefile_line_continuation = \" \\\\\\n\"\n self.assignment_if_absent = \" ?= \"\n self.assignment_append = \" += \"\n\n @property\n def filename(self):\n return BUILD_INFO_MAKE\n\n @property\n def content(self):\n\n content = [\n \"#-------------------------------------------------------------------#\",\n \"# Makefile variables from Conan Dependencies #\",\n \"#-------------------------------------------------------------------#\",\n \"\",\n ]\n\n for line_as_list in self.create_deps_content():\n content.append(\"\".join(line_as_list))\n\n content.append(\"#-------------------------------------------------------------------#\")\n content.append(self.makefile_newline)\n return self.makefile_newline.join(content)\n\n def create_deps_content(self):\n deps_content = self.create_content_from_deps()\n deps_content.extend(self.create_combined_content())\n return deps_content\n\n def create_content_from_deps(self):\n content = []\n for pkg_name, cpp_info in self.deps_build_info.dependencies:\n content.extend(self.create_content_from_dep(pkg_name, cpp_info))\n return content\n\n def create_content_from_dep(self, pkg_name, cpp_info):\n\n vars_info = [(\"ROOT\", self.assignment_if_absent, [cpp_info.rootpath]),\n (\"SYSROOT\", self.assignment_if_absent, [cpp_info.sysroot]),\n (\"INCLUDE_DIRS\", self.assignment_append, cpp_info.include_paths),\n (\"LIB_DIRS\", self.assignment_append, cpp_info.lib_paths),\n (\"BIN_DIRS\", self.assignment_append, cpp_info.bin_paths),\n (\"BUILD_DIRS\", self.assignment_append, cpp_info.build_paths),\n (\"RES_DIRS\", self.assignment_append, cpp_info.res_paths),\n (\"LIBS\", self.assignment_append, cpp_info.libs),\n (\"DEFINES\", self.assignment_append, cpp_info.defines),\n (\"CFLAGS\", self.assignment_append, cpp_info.cflags),\n (\"CXXFLAGS\", self.assignment_append, cpp_info.cppflags),\n (\"SHAREDLINKFLAGS\", self.assignment_append, cpp_info.sharedlinkflags),\n (\"EXELINKFLAGS\", self.assignment_append, cpp_info.exelinkflags)]\n\n return [self.create_makefile_var_pkg(var_name, pkg_name, operator, info)\n for var_name, operator, info in vars_info]\n\n def create_combined_content(self):\n content = []\n for var_name in self.all_dep_vars():\n content.append(self.create_makefile_var_global(var_name, self.assignment_append,\n self.create_combined_var_list(var_name)))\n return content\n\n def create_combined_var_list(self, var_name):\n make_vars = []\n for pkg_name, _ in self.deps_build_info.dependencies:\n pkg_var = self.create_makefile_var_name_pkg(var_name, pkg_name)\n make_vars.append(\"$({pkg_var})\".format(pkg_var=pkg_var))\n return make_vars\n\n def create_makefile_var_global(self, var_name, operator, values):\n make_var = [self.create_makefile_var_name_global(var_name)]\n make_var.extend(self.create_makefile_var_common(operator, values))\n return make_var\n\n def create_makefile_var_pkg(self, var_name, pkg_name, operator, values):\n make_var = [self.create_makefile_var_name_pkg(var_name, pkg_name)]\n make_var.extend(self.create_makefile_var_common(operator, values))\n return make_var\n\n def create_makefile_var_common(self, operator, values):\n return [operator, self.makefile_line_continuation, self.create_makefile_var_value(values),\n self.makefile_newline]\n\n @staticmethod\n def create_makefile_var_name_global(var_name):\n return \"CONAN_{var}\".format(var=var_name).upper()\n\n @staticmethod\n def create_makefile_var_name_pkg(var_name, pkg_name):\n return \"CONAN_{var}_{lib}\".format(var=var_name, lib=pkg_name).upper()\n\n def create_makefile_var_value(self, values):\n formatted_values = [value.replace(\"\\\\\", \"/\") for value in values]\n return self.makefile_line_continuation.join(formatted_values)\n\n @staticmethod\n def all_dep_vars():\n return [\"rootpath\", \"sysroot\", \"include_dirs\", \"lib_dirs\", \"bin_dirs\", \"build_dirs\",\n \"res_dirs\", \"libs\", \"defines\", \"cflags\", \"cxxflags\", \"sharedlinkflags\",\n \"exelinkflags\"]\n", "path": "conans/client/generators/make.py"}]}
1,622
498
gh_patches_debug_13502
rasdani/github-patches
git_diff
mne-tools__mne-bids-111
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- prune dependencies that we do not really depend on As became apparent in a discussion with @agramfort and @jasmainak, we probably do not need the `environment.yml` and instead should rely on minimal dependencies such as numpy, scipy, and matplotlib. if we decide to keep the `environment.yml` for convenience during installation, we should at least prune it. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mne_bids/datasets.py` Content: ``` 1 """Helper functions to fetch data to work with.""" 2 # Authors: Mainak Jas <[email protected]> 3 # Alexandre Gramfort <[email protected]> 4 # Teon Brooks <[email protected]> 5 # Stefan Appelhoff <[email protected]> 6 # 7 # License: BSD (3-clause) 8 9 import os 10 import os.path as op 11 import shutil 12 import tarfile 13 import requests 14 15 from mne.utils import _fetch_file 16 17 18 def fetch_faces_data(data_path=None, repo='ds000117', subject_ids=[1]): 19 """Dataset fetcher for OpenfMRI dataset ds000117. 20 21 Parameters 22 ---------- 23 data_path : str | None 24 Path to the folder where data is stored. Defaults to 25 '~/mne_data/mne_bids_examples' 26 repo : str 27 The folder name. Defaults to 'ds000117'. 28 subject_ids : list of int 29 The subjects to fetch. Defaults to [1], downloading subject 1. 30 31 Returns 32 ------- 33 data_path : str 34 Path to the folder where data is stored. 35 36 """ 37 if not data_path: 38 home = os.path.expanduser('~') 39 data_path = os.path.join(home, 'mne_data', 'mne_bids_examples') 40 if not os.path.exists(data_path): 41 os.makedirs(data_path) 42 43 for subject_id in subject_ids: 44 src_url = ('http://openfmri.s3.amazonaws.com/tarballs/' 45 'ds117_R0.1.1_sub%03d_raw.tgz' % subject_id) 46 tar_fname = op.join(data_path, repo + '.tgz') 47 target_dir = op.join(data_path, repo) 48 if not op.exists(target_dir): 49 if not op.exists(tar_fname): 50 _fetch_file(url=src_url, file_name=tar_fname, 51 print_destination=True, resume=True, timeout=10.) 52 tf = tarfile.open(tar_fname) 53 print('Extracting files. This may take a while ...') 54 tf.extractall(path=data_path) 55 shutil.move(op.join(data_path, 'ds117'), target_dir) 56 os.remove(tar_fname) 57 58 return data_path 59 60 61 def fetch_brainvision_testing_data(data_path=None): 62 """Download the MNE-Python testing data for the BrainVision format. 63 64 Parameters 65 ---------- 66 data_path : str | None 67 Path to the folder where data is stored. Defaults to 68 '~/mne_data/mne_bids_examples' 69 70 Returns 71 ------- 72 data_path : str 73 Path to the folder where data is stored. 74 75 """ 76 if not data_path: 77 home = os.path.expanduser('~') 78 data_path = os.path.join(home, 'mne_data', 'mne_bids_examples') 79 if not os.path.exists(data_path): 80 os.makedirs(data_path) 81 82 base_url = 'https://github.com/mne-tools/mne-python/' 83 base_url += 'raw/master/mne/io/brainvision/tests/data/test' 84 file_endings = ['.vhdr', '.vmrk', '.eeg', ] 85 86 for f_ending in file_endings: 87 url = base_url + f_ending 88 response = requests.get(url) 89 90 fname = os.path.join(data_path, 'test' + f_ending) 91 with open(fname, 'wb') as fout: 92 fout.write(response.content) 93 94 return data_path 95 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mne_bids/datasets.py b/mne_bids/datasets.py --- a/mne_bids/datasets.py +++ b/mne_bids/datasets.py @@ -10,7 +10,7 @@ import os.path as op import shutil import tarfile -import requests +from six.moves import urllib from mne.utils import _fetch_file @@ -85,10 +85,10 @@ for f_ending in file_endings: url = base_url + f_ending - response = requests.get(url) + response = urllib.request.urlopen(url) fname = os.path.join(data_path, 'test' + f_ending) with open(fname, 'wb') as fout: - fout.write(response.content) + fout.write(response.read()) return data_path
{"golden_diff": "diff --git a/mne_bids/datasets.py b/mne_bids/datasets.py\n--- a/mne_bids/datasets.py\n+++ b/mne_bids/datasets.py\n@@ -10,7 +10,7 @@\n import os.path as op\n import shutil\n import tarfile\n-import requests\n+from six.moves import urllib\n \n from mne.utils import _fetch_file\n \n@@ -85,10 +85,10 @@\n \n for f_ending in file_endings:\n url = base_url + f_ending\n- response = requests.get(url)\n+ response = urllib.request.urlopen(url)\n \n fname = os.path.join(data_path, 'test' + f_ending)\n with open(fname, 'wb') as fout:\n- fout.write(response.content)\n+ fout.write(response.read())\n \n return data_path\n", "issue": "prune dependencies that we do not really depend on\nAs became apparent in a discussion with @agramfort and @jasmainak, we probably do not need the `environment.yml` and instead should rely on minimal dependencies such as numpy, scipy, and matplotlib.\r\n\r\nif we decide to keep the `environment.yml` for convenience during installation, we should at least prune it.\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"Helper functions to fetch data to work with.\"\"\"\n# Authors: Mainak Jas <[email protected]>\n# Alexandre Gramfort <[email protected]>\n# Teon Brooks <[email protected]>\n# Stefan Appelhoff <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport os\nimport os.path as op\nimport shutil\nimport tarfile\nimport requests\n\nfrom mne.utils import _fetch_file\n\n\ndef fetch_faces_data(data_path=None, repo='ds000117', subject_ids=[1]):\n \"\"\"Dataset fetcher for OpenfMRI dataset ds000117.\n\n Parameters\n ----------\n data_path : str | None\n Path to the folder where data is stored. Defaults to\n '~/mne_data/mne_bids_examples'\n repo : str\n The folder name. Defaults to 'ds000117'.\n subject_ids : list of int\n The subjects to fetch. Defaults to [1], downloading subject 1.\n\n Returns\n -------\n data_path : str\n Path to the folder where data is stored.\n\n \"\"\"\n if not data_path:\n home = os.path.expanduser('~')\n data_path = os.path.join(home, 'mne_data', 'mne_bids_examples')\n if not os.path.exists(data_path):\n os.makedirs(data_path)\n\n for subject_id in subject_ids:\n src_url = ('http://openfmri.s3.amazonaws.com/tarballs/'\n 'ds117_R0.1.1_sub%03d_raw.tgz' % subject_id)\n tar_fname = op.join(data_path, repo + '.tgz')\n target_dir = op.join(data_path, repo)\n if not op.exists(target_dir):\n if not op.exists(tar_fname):\n _fetch_file(url=src_url, file_name=tar_fname,\n print_destination=True, resume=True, timeout=10.)\n tf = tarfile.open(tar_fname)\n print('Extracting files. This may take a while ...')\n tf.extractall(path=data_path)\n shutil.move(op.join(data_path, 'ds117'), target_dir)\n os.remove(tar_fname)\n\n return data_path\n\n\ndef fetch_brainvision_testing_data(data_path=None):\n \"\"\"Download the MNE-Python testing data for the BrainVision format.\n\n Parameters\n ----------\n data_path : str | None\n Path to the folder where data is stored. Defaults to\n '~/mne_data/mne_bids_examples'\n\n Returns\n -------\n data_path : str\n Path to the folder where data is stored.\n\n \"\"\"\n if not data_path:\n home = os.path.expanduser('~')\n data_path = os.path.join(home, 'mne_data', 'mne_bids_examples')\n if not os.path.exists(data_path):\n os.makedirs(data_path)\n\n base_url = 'https://github.com/mne-tools/mne-python/'\n base_url += 'raw/master/mne/io/brainvision/tests/data/test'\n file_endings = ['.vhdr', '.vmrk', '.eeg', ]\n\n for f_ending in file_endings:\n url = base_url + f_ending\n response = requests.get(url)\n\n fname = os.path.join(data_path, 'test' + f_ending)\n with open(fname, 'wb') as fout:\n fout.write(response.content)\n\n return data_path\n", "path": "mne_bids/datasets.py"}], "after_files": [{"content": "\"\"\"Helper functions to fetch data to work with.\"\"\"\n# Authors: Mainak Jas <[email protected]>\n# Alexandre Gramfort <[email protected]>\n# Teon Brooks <[email protected]>\n# Stefan Appelhoff <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport os\nimport os.path as op\nimport shutil\nimport tarfile\nfrom six.moves import urllib\n\nfrom mne.utils import _fetch_file\n\n\ndef fetch_faces_data(data_path=None, repo='ds000117', subject_ids=[1]):\n \"\"\"Dataset fetcher for OpenfMRI dataset ds000117.\n\n Parameters\n ----------\n data_path : str | None\n Path to the folder where data is stored. Defaults to\n '~/mne_data/mne_bids_examples'\n repo : str\n The folder name. Defaults to 'ds000117'.\n subject_ids : list of int\n The subjects to fetch. Defaults to [1], downloading subject 1.\n\n Returns\n -------\n data_path : str\n Path to the folder where data is stored.\n\n \"\"\"\n if not data_path:\n home = os.path.expanduser('~')\n data_path = os.path.join(home, 'mne_data', 'mne_bids_examples')\n if not os.path.exists(data_path):\n os.makedirs(data_path)\n\n for subject_id in subject_ids:\n src_url = ('http://openfmri.s3.amazonaws.com/tarballs/'\n 'ds117_R0.1.1_sub%03d_raw.tgz' % subject_id)\n tar_fname = op.join(data_path, repo + '.tgz')\n target_dir = op.join(data_path, repo)\n if not op.exists(target_dir):\n if not op.exists(tar_fname):\n _fetch_file(url=src_url, file_name=tar_fname,\n print_destination=True, resume=True, timeout=10.)\n tf = tarfile.open(tar_fname)\n print('Extracting files. This may take a while ...')\n tf.extractall(path=data_path)\n shutil.move(op.join(data_path, 'ds117'), target_dir)\n os.remove(tar_fname)\n\n return data_path\n\n\ndef fetch_brainvision_testing_data(data_path=None):\n \"\"\"Download the MNE-Python testing data for the BrainVision format.\n\n Parameters\n ----------\n data_path : str | None\n Path to the folder where data is stored. Defaults to\n '~/mne_data/mne_bids_examples'\n\n Returns\n -------\n data_path : str\n Path to the folder where data is stored.\n\n \"\"\"\n if not data_path:\n home = os.path.expanduser('~')\n data_path = os.path.join(home, 'mne_data', 'mne_bids_examples')\n if not os.path.exists(data_path):\n os.makedirs(data_path)\n\n base_url = 'https://github.com/mne-tools/mne-python/'\n base_url += 'raw/master/mne/io/brainvision/tests/data/test'\n file_endings = ['.vhdr', '.vmrk', '.eeg', ]\n\n for f_ending in file_endings:\n url = base_url + f_ending\n response = urllib.request.urlopen(url)\n\n fname = os.path.join(data_path, 'test' + f_ending)\n with open(fname, 'wb') as fout:\n fout.write(response.read())\n\n return data_path\n", "path": "mne_bids/datasets.py"}]}
1,290
185
gh_patches_debug_17256
rasdani/github-patches
git_diff
apluslms__a-plus-1352
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Login should not take the user to the front page Common scenario: the user is browsing a particular course module. They aren’t logged in. They decide to log in, but doing so takes them to the A+ front page, from which they have to navigate back to where they were. Inconvenient. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `course/templatetags/base.py` Content: ``` 1 from datetime import datetime 2 3 from django import template 4 from django.conf import settings 5 from django.utils.safestring import mark_safe 6 from django.utils.text import format_lazy 7 from django.utils.translation import get_language, gettext_lazy as _ 8 from lib.helpers import remove_query_param_from_url, settings_text, update_url_params 9 from exercise.submission_models import PendingSubmission 10 from site_alert.models import SiteAlert 11 12 13 register = template.Library() 14 15 16 def pick_localized(message): 17 if message and isinstance(message, dict): 18 return (message.get(get_language()) or 19 message.get(settings.LANGUAGE_CODE[:2]) or 20 list(message.values())[0]) 21 return message 22 23 24 def get_date(cont, key): 25 data = cont.get(key) 26 if data and not isinstance(data, datetime): 27 data = datetime.strptime(data, '%Y-%m-%d') 28 cont[key] = data 29 return data 30 31 32 @register.simple_tag 33 def brand_name(): 34 return mark_safe(settings.BRAND_NAME) 35 36 37 @register.simple_tag 38 def brand_name_long(): 39 return mark_safe(settings.BRAND_NAME_LONG) 40 41 42 @register.simple_tag 43 def brand_institution_name(): 44 return mark_safe(settings_text('BRAND_INSTITUTION_NAME')) 45 46 47 @register.simple_tag 48 def course_alert(instance): 49 exercises = PendingSubmission.objects.get_exercise_names_if_grader_is_unstable(instance) 50 if exercises: 51 message = format_lazy( 52 _('GRADER_PROBLEMS_ALERT -- {exercises}'), 53 exercises=exercises, 54 ) 55 return mark_safe(format_lazy('<div class="alert alert-danger sticky-alert">{message}</div>', message=message)) 56 return '' 57 58 59 @register.simple_tag 60 def site_alert(): 61 alerts = SiteAlert.objects.filter(status=SiteAlert.STATUS.ACTIVE) 62 return mark_safe( 63 ''.join( 64 '<div class="alert alert-danger">{}</div>'.format(pick_localized(alert.alert)) 65 for alert in alerts 66 ) 67 ) 68 69 70 @register.simple_tag 71 def site_advert(): # pylint: disable=inconsistent-return-statements 72 advert = settings.SITEWIDE_ADVERT 73 if not advert or not isinstance(advert, dict): 74 return 75 not_before = get_date(advert, 'not-before') 76 not_after = get_date(advert, 'not-after') 77 if not_before or not_after: 78 now = datetime.now() 79 if not_before and not_before > now: 80 return 81 if not_after and not_after < now: 82 return 83 return {k: pick_localized(advert.get(k)) 84 for k in ('title', 'text', 'href', 'image')} 85 86 87 @register.simple_tag 88 def tracking_html(): 89 return mark_safe(settings.TRACKING_HTML) 90 91 92 @register.filter 93 def localized_url(path, language=None): 94 base_url = settings.BASE_URL 95 if base_url.endswith('/'): 96 base_url = base_url[:-1] 97 path = remove_query_param_from_url(path, 'hl') 98 if not language: 99 language = settings.LANGUAGE_CODE.split('-')[0] 100 path = update_url_params(path, { 'hl': language }) 101 return base_url + path 102 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/course/templatetags/base.py b/course/templatetags/base.py --- a/course/templatetags/base.py +++ b/course/templatetags/base.py @@ -2,6 +2,8 @@ from django import template from django.conf import settings +from django.urls import resolve +from django.urls.exceptions import Resolver404 from django.utils.safestring import mark_safe from django.utils.text import format_lazy from django.utils.translation import get_language, gettext_lazy as _ @@ -28,6 +30,15 @@ cont[key] = data return data [email protected]_tag(takes_context=True) +def login_next(context): + request = context["request"] + try: + matched_url_name = resolve(request.path).url_name + next_path = f"?next={request.path}" if matched_url_name != 'logout' else "" + return next_path + except Resolver404: + return "" @register.simple_tag def brand_name():
{"golden_diff": "diff --git a/course/templatetags/base.py b/course/templatetags/base.py\n--- a/course/templatetags/base.py\n+++ b/course/templatetags/base.py\n@@ -2,6 +2,8 @@\n \n from django import template\n from django.conf import settings\n+from django.urls import resolve\n+from django.urls.exceptions import Resolver404\n from django.utils.safestring import mark_safe\n from django.utils.text import format_lazy\n from django.utils.translation import get_language, gettext_lazy as _\n@@ -28,6 +30,15 @@\n cont[key] = data\n return data\n \[email protected]_tag(takes_context=True)\n+def login_next(context):\n+ request = context[\"request\"]\n+ try:\n+ matched_url_name = resolve(request.path).url_name\n+ next_path = f\"?next={request.path}\" if matched_url_name != 'logout' else \"\"\n+ return next_path\n+ except Resolver404:\n+ return \"\"\n \n @register.simple_tag\n def brand_name():\n", "issue": "Login should not take the user to the front page\nCommon scenario: the user is browsing a particular course module. They aren\u2019t logged in. They decide to log in, but doing so takes them to the A+ front page, from which they have to navigate back to where they were. Inconvenient.\n", "before_files": [{"content": "from datetime import datetime\n\nfrom django import template\nfrom django.conf import settings\nfrom django.utils.safestring import mark_safe\nfrom django.utils.text import format_lazy\nfrom django.utils.translation import get_language, gettext_lazy as _\nfrom lib.helpers import remove_query_param_from_url, settings_text, update_url_params\nfrom exercise.submission_models import PendingSubmission\nfrom site_alert.models import SiteAlert\n\n\nregister = template.Library()\n\n\ndef pick_localized(message):\n if message and isinstance(message, dict):\n return (message.get(get_language()) or\n message.get(settings.LANGUAGE_CODE[:2]) or\n list(message.values())[0])\n return message\n\n\ndef get_date(cont, key):\n data = cont.get(key)\n if data and not isinstance(data, datetime):\n data = datetime.strptime(data, '%Y-%m-%d')\n cont[key] = data\n return data\n\n\[email protected]_tag\ndef brand_name():\n return mark_safe(settings.BRAND_NAME)\n\n\[email protected]_tag\ndef brand_name_long():\n return mark_safe(settings.BRAND_NAME_LONG)\n\n\[email protected]_tag\ndef brand_institution_name():\n return mark_safe(settings_text('BRAND_INSTITUTION_NAME'))\n\n\[email protected]_tag\ndef course_alert(instance):\n exercises = PendingSubmission.objects.get_exercise_names_if_grader_is_unstable(instance)\n if exercises:\n message = format_lazy(\n _('GRADER_PROBLEMS_ALERT -- {exercises}'),\n exercises=exercises,\n )\n return mark_safe(format_lazy('<div class=\"alert alert-danger sticky-alert\">{message}</div>', message=message))\n return ''\n\n\[email protected]_tag\ndef site_alert():\n alerts = SiteAlert.objects.filter(status=SiteAlert.STATUS.ACTIVE)\n return mark_safe(\n ''.join(\n '<div class=\"alert alert-danger\">{}</div>'.format(pick_localized(alert.alert))\n for alert in alerts\n )\n )\n\n\[email protected]_tag\ndef site_advert(): # pylint: disable=inconsistent-return-statements\n advert = settings.SITEWIDE_ADVERT\n if not advert or not isinstance(advert, dict):\n return\n not_before = get_date(advert, 'not-before')\n not_after = get_date(advert, 'not-after')\n if not_before or not_after:\n now = datetime.now()\n if not_before and not_before > now:\n return\n if not_after and not_after < now:\n return\n return {k: pick_localized(advert.get(k))\n for k in ('title', 'text', 'href', 'image')}\n\n\[email protected]_tag\ndef tracking_html():\n return mark_safe(settings.TRACKING_HTML)\n\n\[email protected]\ndef localized_url(path, language=None):\n base_url = settings.BASE_URL\n if base_url.endswith('/'):\n base_url = base_url[:-1]\n path = remove_query_param_from_url(path, 'hl')\n if not language:\n language = settings.LANGUAGE_CODE.split('-')[0]\n path = update_url_params(path, { 'hl': language })\n return base_url + path\n", "path": "course/templatetags/base.py"}], "after_files": [{"content": "from datetime import datetime\n\nfrom django import template\nfrom django.conf import settings\nfrom django.urls import resolve\nfrom django.urls.exceptions import Resolver404\nfrom django.utils.safestring import mark_safe\nfrom django.utils.text import format_lazy\nfrom django.utils.translation import get_language, gettext_lazy as _\nfrom lib.helpers import remove_query_param_from_url, settings_text, update_url_params\nfrom exercise.submission_models import PendingSubmission\nfrom site_alert.models import SiteAlert\n\n\nregister = template.Library()\n\n\ndef pick_localized(message):\n if message and isinstance(message, dict):\n return (message.get(get_language()) or\n message.get(settings.LANGUAGE_CODE[:2]) or\n list(message.values())[0])\n return message\n\n\ndef get_date(cont, key):\n data = cont.get(key)\n if data and not isinstance(data, datetime):\n data = datetime.strptime(data, '%Y-%m-%d')\n cont[key] = data\n return data\n\[email protected]_tag(takes_context=True)\ndef login_next(context):\n request = context[\"request\"]\n try:\n matched_url_name = resolve(request.path).url_name\n next_path = f\"?next={request.path}\" if matched_url_name != 'logout' else \"\"\n return next_path\n except Resolver404:\n return \"\"\n\[email protected]_tag\ndef brand_name():\n return mark_safe(settings.BRAND_NAME)\n\n\[email protected]_tag\ndef brand_name_long():\n return mark_safe(settings.BRAND_NAME_LONG)\n\n\[email protected]_tag\ndef brand_institution_name():\n return mark_safe(settings_text('BRAND_INSTITUTION_NAME'))\n\n\[email protected]_tag\ndef course_alert(instance):\n exercises = PendingSubmission.objects.get_exercise_names_if_grader_is_unstable(instance)\n if exercises:\n message = format_lazy(\n _('GRADER_PROBLEMS_ALERT -- {exercises}'),\n exercises=exercises,\n )\n return mark_safe(format_lazy('<div class=\"alert alert-danger sticky-alert\">{message}</div>', message=message))\n return ''\n\n\[email protected]_tag\ndef site_alert():\n alerts = SiteAlert.objects.filter(status=SiteAlert.STATUS.ACTIVE)\n return mark_safe(\n ''.join(\n '<div class=\"alert alert-danger\">{}</div>'.format(pick_localized(alert.alert))\n for alert in alerts\n )\n )\n\n\[email protected]_tag\ndef site_advert(): # pylint: disable=inconsistent-return-statements\n advert = settings.SITEWIDE_ADVERT\n if not advert or not isinstance(advert, dict):\n return\n not_before = get_date(advert, 'not-before')\n not_after = get_date(advert, 'not-after')\n if not_before or not_after:\n now = datetime.now()\n if not_before and not_before > now:\n return\n if not_after and not_after < now:\n return\n return {k: pick_localized(advert.get(k))\n for k in ('title', 'text', 'href', 'image')}\n\n\[email protected]_tag\ndef tracking_html():\n return mark_safe(settings.TRACKING_HTML)\n\n\[email protected]\ndef localized_url(path, language=None):\n base_url = settings.BASE_URL\n if base_url.endswith('/'):\n base_url = base_url[:-1]\n path = remove_query_param_from_url(path, 'hl')\n if not language:\n language = settings.LANGUAGE_CODE.split('-')[0]\n path = update_url_params(path, { 'hl': language })\n return base_url + path\n", "path": "course/templatetags/base.py"}]}
1,187
229
gh_patches_debug_1790
rasdani/github-patches
git_diff
scikit-hep__pyhf-933
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Docs build broken with Sphinx v3.1.1 # Description After the new Sphinx patch release [`v3.1.1`](https://github.com/sphinx-doc/sphinx/releases/tag/v3.1.1) was released there is an error with building the docs due to `autodocumenting`: ``` WARNING: don't know which module to import for autodocumenting 'optimize.opt_jax.jax_optimizer' (try placing a "module" or "currentmodule" directive in the document, or giving an explicit module name) WARNING: don't know which module to import for autodocumenting 'optimize.opt_minuit.minuit_optimizer' (try placing a "module" or "currentmodule" directive in the document, or giving an explicit module name) WARNING: don't know which module to import for autodocumenting 'optimize.opt_pytorch.pytorch_optimizer' (try placing a "module" or "currentmodule" directive in the document, or giving an explicit module name) WARNING: don't know which module to import for autodocumenting 'optimize.opt_scipy.scipy_optimizer' (try placing a "module" or "currentmodule" directive in the document, or giving an explicit module name) WARNING: don't know which module to import for autodocumenting 'optimize.opt_tflow.tflow_optimizer' (try placing a "module" or "currentmodule" directive in the document, or giving an explicit module name) WARNING: don't know which module to import for autodocumenting 'tensor.jax_backend.jax_backend' (try placing a "module" or "currentmodule" directive in the document, or giving an explicit module name) WARNING: don't know which module to import for autodocumenting 'tensor.numpy_backend.numpy_backend' (try placing a "module" or "currentmodule" directive in the document, or giving an explicit module name) WARNING: don't know which module to import for autodocumenting 'tensor.pytorch_backend.pytorch_backend' (try placing a "module" or "currentmodule" directive in the document, or giving an explicit module name) WARNING: don't know which module to import for autodocumenting 'tensor.tensorflow_backend.tensorflow_backend' (try placing a "module" or "currentmodule" directive in the document, or giving an explicit module name) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 from setuptools import setup 2 3 extras_require = { 4 'tensorflow': [ 5 'tensorflow~=2.0', 6 'tensorflow-probability~=0.10', # TODO: Temp patch until tfp v0.11 7 ], 8 'torch': ['torch~=1.2'], 9 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'], 10 'xmlio': ['uproot~=3.6'], # Future proof against uproot4 API changes 11 'minuit': ['iminuit'], 12 } 13 extras_require['backends'] = sorted( 14 set( 15 extras_require['tensorflow'] 16 + extras_require['torch'] 17 + extras_require['jax'] 18 + extras_require['minuit'] 19 ) 20 ) 21 extras_require['contrib'] = sorted(set(['matplotlib'])) 22 extras_require['lint'] = sorted(set(['pyflakes', 'black'])) 23 24 extras_require['test'] = sorted( 25 set( 26 extras_require['backends'] 27 + extras_require['xmlio'] 28 + extras_require['contrib'] 29 + [ 30 'pytest~=3.5', 31 'pytest-cov>=2.5.1', 32 'pytest-mock', 33 'pytest-benchmark[histogram]', 34 'pytest-console-scripts', 35 'pytest-mpl', 36 'pydocstyle', 37 'coverage>=4.0', # coveralls 38 'papermill~=2.0', 39 'nteract-scrapbook~=0.2', 40 'jupyter', 41 'uproot~=3.3', 42 'graphviz', 43 'jsonpatch', 44 ] 45 ) 46 ) 47 extras_require['docs'] = sorted( 48 set( 49 [ 50 'sphinx~=3.0.0', # Sphinx v3.1.X regressions break docs 51 'sphinxcontrib-bibtex', 52 'sphinx-click', 53 'sphinx_rtd_theme', 54 'nbsphinx', 55 'ipywidgets', 56 'sphinx-issues', 57 'sphinx-copybutton>0.2.9', 58 ] 59 ) 60 ) 61 extras_require['develop'] = sorted( 62 set( 63 extras_require['docs'] 64 + extras_require['lint'] 65 + extras_require['test'] 66 + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'check-manifest', 'twine'] 67 ) 68 ) 69 extras_require['complete'] = sorted(set(sum(extras_require.values(), []))) 70 71 72 setup( 73 extras_require=extras_require, 74 use_scm_version=lambda: {'local_scheme': lambda version: ''}, 75 ) 76 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -47,7 +47,7 @@ extras_require['docs'] = sorted( set( [ - 'sphinx~=3.0.0', # Sphinx v3.1.X regressions break docs + 'sphinx>=3.1.2', 'sphinxcontrib-bibtex', 'sphinx-click', 'sphinx_rtd_theme',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -47,7 +47,7 @@\n extras_require['docs'] = sorted(\n set(\n [\n- 'sphinx~=3.0.0', # Sphinx v3.1.X regressions break docs\n+ 'sphinx>=3.1.2',\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n", "issue": "Docs build broken with Sphinx v3.1.1\n# Description\r\n\r\nAfter the new Sphinx patch release [`v3.1.1`](https://github.com/sphinx-doc/sphinx/releases/tag/v3.1.1) was released there is an error with building the docs due to `autodocumenting`:\r\n\r\n```\r\n\r\nWARNING: don't know which module to import for autodocumenting 'optimize.opt_jax.jax_optimizer' (try placing a \"module\" or \"currentmodule\" directive in the document, or giving an explicit module name)\r\nWARNING: don't know which module to import for autodocumenting 'optimize.opt_minuit.minuit_optimizer' (try placing a \"module\" or \"currentmodule\" directive in the document, or giving an explicit module name)\r\nWARNING: don't know which module to import for autodocumenting 'optimize.opt_pytorch.pytorch_optimizer' (try placing a \"module\" or \"currentmodule\" directive in the document, or giving an explicit module name)\r\nWARNING: don't know which module to import for autodocumenting 'optimize.opt_scipy.scipy_optimizer' (try placing a \"module\" or \"currentmodule\" directive in the document, or giving an explicit module name)\r\nWARNING: don't know which module to import for autodocumenting 'optimize.opt_tflow.tflow_optimizer' (try placing a \"module\" or \"currentmodule\" directive in the document, or giving an explicit module name)\r\nWARNING: don't know which module to import for autodocumenting 'tensor.jax_backend.jax_backend' (try placing a \"module\" or \"currentmodule\" directive in the document, or giving an explicit module name)\r\nWARNING: don't know which module to import for autodocumenting 'tensor.numpy_backend.numpy_backend' (try placing a \"module\" or \"currentmodule\" directive in the document, or giving an explicit module name)\r\nWARNING: don't know which module to import for autodocumenting 'tensor.pytorch_backend.pytorch_backend' (try placing a \"module\" or \"currentmodule\" directive in the document, or giving an explicit module name)\r\nWARNING: don't know which module to import for autodocumenting 'tensor.tensorflow_backend.tensorflow_backend' (try placing a \"module\" or \"currentmodule\" directive in the document, or giving an explicit module name)\r\n```\n", "before_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'tensorflow': [\n 'tensorflow~=2.0',\n 'tensorflow-probability~=0.10', # TODO: Temp patch until tfp v0.11\n ],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot~=3.6'], # Future proof against uproot4 API changes\n 'minuit': ['iminuit'],\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted(set(['matplotlib']))\nextras_require['lint'] = sorted(set(['pyflakes', 'black']))\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'uproot~=3.3',\n 'graphviz',\n 'jsonpatch',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n [\n 'sphinx~=3.0.0', # Sphinx v3.1.X regressions break docs\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'check-manifest', 'twine']\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'tensorflow': [\n 'tensorflow~=2.0',\n 'tensorflow-probability~=0.10', # TODO: Temp patch until tfp v0.11\n ],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot~=3.6'], # Future proof against uproot4 API changes\n 'minuit': ['iminuit'],\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted(set(['matplotlib']))\nextras_require['lint'] = sorted(set(['pyflakes', 'black']))\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'uproot~=3.3',\n 'graphviz',\n 'jsonpatch',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n [\n 'sphinx>=3.1.2',\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'check-manifest', 'twine']\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]}
1,449
105
gh_patches_debug_9087
rasdani/github-patches
git_diff
wagtail__wagtail-2585
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- RoutablePage throws a TypeError if index route is not set RoutablePage should not raise TypeError if index route is not set. [route method](https://github.com/torchbox/wagtail/blob/master/wagtail/contrib/wagtailroutablepage/models.py#L97) properly calls parent class method if route is not found, but `serve` method has different signature, so it fails. There is also a discussion of similar issue on Google groups: https://groups.google.com/forum/#!msg/wagtail/Q9FymA-MOxM/1UkQ0hRGAAAJ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `wagtail/contrib/wagtailroutablepage/models.py` Content: ``` 1 from __future__ import absolute_import, unicode_literals 2 3 from django.conf.urls import url 4 from django.core.urlresolvers import RegexURLResolver 5 from django.http import Http404 6 7 from wagtail.wagtailcore.models import Page 8 from wagtail.wagtailcore.url_routing import RouteResult 9 10 _creation_counter = 0 11 12 13 def route(pattern, name=None): 14 def decorator(view_func): 15 global _creation_counter 16 _creation_counter += 1 17 18 # Make sure page has _routablepage_routes attribute 19 if not hasattr(view_func, '_routablepage_routes'): 20 view_func._routablepage_routes = [] 21 22 # Add new route to view 23 view_func._routablepage_routes.append(( 24 url(pattern, view_func, name=(name or view_func.__name__)), 25 _creation_counter, 26 )) 27 28 return view_func 29 30 return decorator 31 32 33 class RoutablePageMixin(object): 34 """ 35 This class can be mixed in to a Page model, allowing extra routes to be 36 added to it. 37 """ 38 #: Set this to a tuple of ``django.conf.urls.url`` objects. 39 subpage_urls = None 40 41 @classmethod 42 def get_subpage_urls(cls): 43 routes = [] 44 for attr in dir(cls): 45 val = getattr(cls, attr) 46 if hasattr(val, '_routablepage_routes'): 47 routes.extend(val._routablepage_routes) 48 49 return tuple([ 50 route[0] 51 for route in sorted(routes, key=lambda route: route[1]) 52 ]) 53 54 @classmethod 55 def get_resolver(cls): 56 if '_routablepage_urlresolver' not in cls.__dict__: 57 subpage_urls = cls.get_subpage_urls() 58 cls._routablepage_urlresolver = RegexURLResolver(r'^/', subpage_urls) 59 60 return cls._routablepage_urlresolver 61 62 def reverse_subpage(self, name, args=None, kwargs=None): 63 """ 64 This method takes a route name/arguments and returns a URL path. 65 """ 66 args = args or [] 67 kwargs = kwargs or {} 68 69 return self.get_resolver().reverse(name, *args, **kwargs) 70 71 def resolve_subpage(self, path): 72 """ 73 This method takes a URL path and finds the view to call. 74 """ 75 view, args, kwargs = self.get_resolver().resolve(path) 76 77 # Bind the method 78 view = view.__get__(self, type(self)) 79 80 return view, args, kwargs 81 82 def route(self, request, path_components): 83 """ 84 This hooks the subpage URLs into Wagtail's routing. 85 """ 86 if self.live: 87 try: 88 path = '/' 89 if path_components: 90 path += '/'.join(path_components) + '/' 91 92 view, args, kwargs = self.resolve_subpage(path) 93 return RouteResult(self, args=(view, args, kwargs)) 94 except Http404: 95 pass 96 97 return super(RoutablePageMixin, self).route(request, path_components) 98 99 def serve(self, request, view, args, kwargs): 100 return view(request, *args, **kwargs) 101 102 def serve_preview(self, request, mode_name): 103 view, args, kwargs = self.resolve_subpage('/') 104 return view(request, *args, **kwargs) 105 106 107 class RoutablePage(RoutablePageMixin, Page): 108 """ 109 This class extends Page by adding methods which allows extra routes to be 110 added to it. 111 """ 112 113 class Meta: 114 abstract = True 115 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/wagtail/contrib/wagtailroutablepage/models.py b/wagtail/contrib/wagtailroutablepage/models.py --- a/wagtail/contrib/wagtailroutablepage/models.py +++ b/wagtail/contrib/wagtailroutablepage/models.py @@ -96,7 +96,13 @@ return super(RoutablePageMixin, self).route(request, path_components) - def serve(self, request, view, args, kwargs): + def serve(self, request, view=None, args=None, kwargs=None): + if args is None: + args = [] + if kwargs is None: + kwargs = {} + if view is None: + return super(RoutablePageMixin, self).serve(request, *args, **kwargs) return view(request, *args, **kwargs) def serve_preview(self, request, mode_name):
{"golden_diff": "diff --git a/wagtail/contrib/wagtailroutablepage/models.py b/wagtail/contrib/wagtailroutablepage/models.py\n--- a/wagtail/contrib/wagtailroutablepage/models.py\n+++ b/wagtail/contrib/wagtailroutablepage/models.py\n@@ -96,7 +96,13 @@\n \n return super(RoutablePageMixin, self).route(request, path_components)\n \n- def serve(self, request, view, args, kwargs):\n+ def serve(self, request, view=None, args=None, kwargs=None):\n+ if args is None:\n+ args = []\n+ if kwargs is None:\n+ kwargs = {}\n+ if view is None:\n+ return super(RoutablePageMixin, self).serve(request, *args, **kwargs)\n return view(request, *args, **kwargs)\n \n def serve_preview(self, request, mode_name):\n", "issue": "RoutablePage throws a TypeError if index route is not set\nRoutablePage should not raise TypeError if index route is not set. \n\n[route method](https://github.com/torchbox/wagtail/blob/master/wagtail/contrib/wagtailroutablepage/models.py#L97) properly calls parent class method if route is not found, but `serve` method has different signature, so it fails.\n\nThere is also a discussion of similar issue on Google groups:\nhttps://groups.google.com/forum/#!msg/wagtail/Q9FymA-MOxM/1UkQ0hRGAAAJ\n\n", "before_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nfrom django.conf.urls import url\nfrom django.core.urlresolvers import RegexURLResolver\nfrom django.http import Http404\n\nfrom wagtail.wagtailcore.models import Page\nfrom wagtail.wagtailcore.url_routing import RouteResult\n\n_creation_counter = 0\n\n\ndef route(pattern, name=None):\n def decorator(view_func):\n global _creation_counter\n _creation_counter += 1\n\n # Make sure page has _routablepage_routes attribute\n if not hasattr(view_func, '_routablepage_routes'):\n view_func._routablepage_routes = []\n\n # Add new route to view\n view_func._routablepage_routes.append((\n url(pattern, view_func, name=(name or view_func.__name__)),\n _creation_counter,\n ))\n\n return view_func\n\n return decorator\n\n\nclass RoutablePageMixin(object):\n \"\"\"\n This class can be mixed in to a Page model, allowing extra routes to be\n added to it.\n \"\"\"\n #: Set this to a tuple of ``django.conf.urls.url`` objects.\n subpage_urls = None\n\n @classmethod\n def get_subpage_urls(cls):\n routes = []\n for attr in dir(cls):\n val = getattr(cls, attr)\n if hasattr(val, '_routablepage_routes'):\n routes.extend(val._routablepage_routes)\n\n return tuple([\n route[0]\n for route in sorted(routes, key=lambda route: route[1])\n ])\n\n @classmethod\n def get_resolver(cls):\n if '_routablepage_urlresolver' not in cls.__dict__:\n subpage_urls = cls.get_subpage_urls()\n cls._routablepage_urlresolver = RegexURLResolver(r'^/', subpage_urls)\n\n return cls._routablepage_urlresolver\n\n def reverse_subpage(self, name, args=None, kwargs=None):\n \"\"\"\n This method takes a route name/arguments and returns a URL path.\n \"\"\"\n args = args or []\n kwargs = kwargs or {}\n\n return self.get_resolver().reverse(name, *args, **kwargs)\n\n def resolve_subpage(self, path):\n \"\"\"\n This method takes a URL path and finds the view to call.\n \"\"\"\n view, args, kwargs = self.get_resolver().resolve(path)\n\n # Bind the method\n view = view.__get__(self, type(self))\n\n return view, args, kwargs\n\n def route(self, request, path_components):\n \"\"\"\n This hooks the subpage URLs into Wagtail's routing.\n \"\"\"\n if self.live:\n try:\n path = '/'\n if path_components:\n path += '/'.join(path_components) + '/'\n\n view, args, kwargs = self.resolve_subpage(path)\n return RouteResult(self, args=(view, args, kwargs))\n except Http404:\n pass\n\n return super(RoutablePageMixin, self).route(request, path_components)\n\n def serve(self, request, view, args, kwargs):\n return view(request, *args, **kwargs)\n\n def serve_preview(self, request, mode_name):\n view, args, kwargs = self.resolve_subpage('/')\n return view(request, *args, **kwargs)\n\n\nclass RoutablePage(RoutablePageMixin, Page):\n \"\"\"\n This class extends Page by adding methods which allows extra routes to be\n added to it.\n \"\"\"\n\n class Meta:\n abstract = True\n", "path": "wagtail/contrib/wagtailroutablepage/models.py"}], "after_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nfrom django.conf.urls import url\nfrom django.core.urlresolvers import RegexURLResolver\nfrom django.http import Http404\n\nfrom wagtail.wagtailcore.models import Page\nfrom wagtail.wagtailcore.url_routing import RouteResult\n\n_creation_counter = 0\n\n\ndef route(pattern, name=None):\n def decorator(view_func):\n global _creation_counter\n _creation_counter += 1\n\n # Make sure page has _routablepage_routes attribute\n if not hasattr(view_func, '_routablepage_routes'):\n view_func._routablepage_routes = []\n\n # Add new route to view\n view_func._routablepage_routes.append((\n url(pattern, view_func, name=(name or view_func.__name__)),\n _creation_counter,\n ))\n\n return view_func\n\n return decorator\n\n\nclass RoutablePageMixin(object):\n \"\"\"\n This class can be mixed in to a Page model, allowing extra routes to be\n added to it.\n \"\"\"\n #: Set this to a tuple of ``django.conf.urls.url`` objects.\n subpage_urls = None\n\n @classmethod\n def get_subpage_urls(cls):\n routes = []\n for attr in dir(cls):\n val = getattr(cls, attr)\n if hasattr(val, '_routablepage_routes'):\n routes.extend(val._routablepage_routes)\n\n return tuple([\n route[0]\n for route in sorted(routes, key=lambda route: route[1])\n ])\n\n @classmethod\n def get_resolver(cls):\n if '_routablepage_urlresolver' not in cls.__dict__:\n subpage_urls = cls.get_subpage_urls()\n cls._routablepage_urlresolver = RegexURLResolver(r'^/', subpage_urls)\n\n return cls._routablepage_urlresolver\n\n def reverse_subpage(self, name, args=None, kwargs=None):\n \"\"\"\n This method takes a route name/arguments and returns a URL path.\n \"\"\"\n args = args or []\n kwargs = kwargs or {}\n\n return self.get_resolver().reverse(name, *args, **kwargs)\n\n def resolve_subpage(self, path):\n \"\"\"\n This method takes a URL path and finds the view to call.\n \"\"\"\n view, args, kwargs = self.get_resolver().resolve(path)\n\n # Bind the method\n view = view.__get__(self, type(self))\n\n return view, args, kwargs\n\n def route(self, request, path_components):\n \"\"\"\n This hooks the subpage URLs into Wagtail's routing.\n \"\"\"\n if self.live:\n try:\n path = '/'\n if path_components:\n path += '/'.join(path_components) + '/'\n\n view, args, kwargs = self.resolve_subpage(path)\n return RouteResult(self, args=(view, args, kwargs))\n except Http404:\n pass\n\n return super(RoutablePageMixin, self).route(request, path_components)\n\n def serve(self, request, view=None, args=None, kwargs=None):\n if args is None:\n args = []\n if kwargs is None:\n kwargs = {}\n if view is None:\n return super(RoutablePageMixin, self).serve(request, *args, **kwargs)\n return view(request, *args, **kwargs)\n\n def serve_preview(self, request, mode_name):\n view, args, kwargs = self.resolve_subpage('/')\n return view(request, *args, **kwargs)\n\n\nclass RoutablePage(RoutablePageMixin, Page):\n \"\"\"\n This class extends Page by adding methods which allows extra routes to be\n added to it.\n \"\"\"\n\n class Meta:\n abstract = True\n", "path": "wagtail/contrib/wagtailroutablepage/models.py"}]}
1,399
206
gh_patches_debug_9881
rasdani/github-patches
git_diff
Kinto__kinto-1862
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Updated Error on Version File Missing 4XX errors are client errors. In that case, if the file is not on the server, the client is not guilty Raise error instead of returning 404 on __version__ if version file is missing https://github.com/Kinto/kinto/blame/master/kinto/core/views/version.py#L49 suggested by @peterbe 4XX errors are client errors. In that case, if the file is not on the server, the client is not guilty ;) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kinto/core/views/version.py` Content: ``` 1 import json 2 import os 3 4 import colander 5 from pyramid import httpexceptions 6 from pyramid.security import NO_PERMISSION_REQUIRED 7 from kinto.core import Service 8 9 HERE = os.path.dirname(__file__) 10 ORIGIN = os.path.dirname(HERE) 11 12 13 class VersionResponseSchema(colander.MappingSchema): 14 body = colander.SchemaNode(colander.Mapping(unknown="preserve")) 15 16 17 version_response_schemas = { 18 "200": VersionResponseSchema(description="Return the running Instance version information.") 19 } 20 21 22 version = Service(name="version", path="/__version__", description="Version") 23 24 25 @version.get( 26 permission=NO_PERMISSION_REQUIRED, 27 tags=["Utilities"], 28 operation_id="__version__", 29 response_schemas=version_response_schemas, 30 ) 31 def version_view(request): 32 try: 33 return version_view.__json__ 34 except AttributeError: 35 pass 36 37 location = request.registry.settings["version_json_path"] 38 files = [ 39 location, # Default is current working dir. 40 os.path.join(ORIGIN, "version.json"), # Relative to the package root. 41 os.path.join(HERE, "version.json"), # Relative to this file. 42 ] 43 for version_file in files: 44 if os.path.exists(version_file): 45 with open(version_file) as f: 46 version_view.__json__ = json.load(f) 47 return version_view.__json__ # First one wins. 48 49 raise httpexceptions.HTTPNotFound() 50 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kinto/core/views/version.py b/kinto/core/views/version.py --- a/kinto/core/views/version.py +++ b/kinto/core/views/version.py @@ -2,7 +2,6 @@ import os import colander -from pyramid import httpexceptions from pyramid.security import NO_PERMISSION_REQUIRED from kinto.core import Service @@ -46,4 +45,4 @@ version_view.__json__ = json.load(f) return version_view.__json__ # First one wins. - raise httpexceptions.HTTPNotFound() + raise FileNotFoundError("Version file missing from {}".format(files.join(",")))
{"golden_diff": "diff --git a/kinto/core/views/version.py b/kinto/core/views/version.py\n--- a/kinto/core/views/version.py\n+++ b/kinto/core/views/version.py\n@@ -2,7 +2,6 @@\n import os\n \n import colander\n-from pyramid import httpexceptions\n from pyramid.security import NO_PERMISSION_REQUIRED\n from kinto.core import Service\n \n@@ -46,4 +45,4 @@\n version_view.__json__ = json.load(f)\n return version_view.__json__ # First one wins.\n \n- raise httpexceptions.HTTPNotFound()\n+ raise FileNotFoundError(\"Version file missing from {}\".format(files.join(\",\")))\n", "issue": "Updated Error on Version File Missing\n4XX errors are client errors. In that case, if the file is not on the server, the client is not guilty\nRaise error instead of returning 404 on __version__ if version file is missing\nhttps://github.com/Kinto/kinto/blame/master/kinto/core/views/version.py#L49\r\n\r\nsuggested by @peterbe \r\n\r\n4XX errors are client errors. In that case, if the file is not on the server, the client is not guilty ;)\n", "before_files": [{"content": "import json\nimport os\n\nimport colander\nfrom pyramid import httpexceptions\nfrom pyramid.security import NO_PERMISSION_REQUIRED\nfrom kinto.core import Service\n\nHERE = os.path.dirname(__file__)\nORIGIN = os.path.dirname(HERE)\n\n\nclass VersionResponseSchema(colander.MappingSchema):\n body = colander.SchemaNode(colander.Mapping(unknown=\"preserve\"))\n\n\nversion_response_schemas = {\n \"200\": VersionResponseSchema(description=\"Return the running Instance version information.\")\n}\n\n\nversion = Service(name=\"version\", path=\"/__version__\", description=\"Version\")\n\n\[email protected](\n permission=NO_PERMISSION_REQUIRED,\n tags=[\"Utilities\"],\n operation_id=\"__version__\",\n response_schemas=version_response_schemas,\n)\ndef version_view(request):\n try:\n return version_view.__json__\n except AttributeError:\n pass\n\n location = request.registry.settings[\"version_json_path\"]\n files = [\n location, # Default is current working dir.\n os.path.join(ORIGIN, \"version.json\"), # Relative to the package root.\n os.path.join(HERE, \"version.json\"), # Relative to this file.\n ]\n for version_file in files:\n if os.path.exists(version_file):\n with open(version_file) as f:\n version_view.__json__ = json.load(f)\n return version_view.__json__ # First one wins.\n\n raise httpexceptions.HTTPNotFound()\n", "path": "kinto/core/views/version.py"}], "after_files": [{"content": "import json\nimport os\n\nimport colander\nfrom pyramid.security import NO_PERMISSION_REQUIRED\nfrom kinto.core import Service\n\nHERE = os.path.dirname(__file__)\nORIGIN = os.path.dirname(HERE)\n\n\nclass VersionResponseSchema(colander.MappingSchema):\n body = colander.SchemaNode(colander.Mapping(unknown=\"preserve\"))\n\n\nversion_response_schemas = {\n \"200\": VersionResponseSchema(description=\"Return the running Instance version information.\")\n}\n\n\nversion = Service(name=\"version\", path=\"/__version__\", description=\"Version\")\n\n\[email protected](\n permission=NO_PERMISSION_REQUIRED,\n tags=[\"Utilities\"],\n operation_id=\"__version__\",\n response_schemas=version_response_schemas,\n)\ndef version_view(request):\n try:\n return version_view.__json__\n except AttributeError:\n pass\n\n location = request.registry.settings[\"version_json_path\"]\n files = [\n location, # Default is current working dir.\n os.path.join(ORIGIN, \"version.json\"), # Relative to the package root.\n os.path.join(HERE, \"version.json\"), # Relative to this file.\n ]\n for version_file in files:\n if os.path.exists(version_file):\n with open(version_file) as f:\n version_view.__json__ = json.load(f)\n return version_view.__json__ # First one wins.\n\n raise FileNotFoundError(\"Version file missing from {}\".format(files.join(\",\")))\n", "path": "kinto/core/views/version.py"}]}
762
134
gh_patches_debug_13538
rasdani/github-patches
git_diff
fossasia__open-event-server-4418
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [sentry] KeyError/v1/users/<int:user_id>/access-codes **I'm submitting a ...** (check one with "x") - [x] bug report - [ ] feature request - [ ] support request => Please do not submit support requests here, instead ask your query in out Gitter channel at https://gitter.im/fossasia/open-event-orga-server ``` KeyError: 'id' File "flask/app.py", line 1982, in wsgi_app response = self.full_dispatch_request() File "flask/app.py", line 1614, in full_dispatch_request rv = self.handle_user_exception(e) File "flask_cors/extension.py", line 161, in wrapped_function return cors_after_request(app.make_response(f(*args, **kwargs))) File "flask/app.py", line 1517, in handle_user_exception reraise(exc_type, exc_value, tb) File "flask/app.py", line 1612, in full_dispatch_request rv = self.dispatch_request() File "flask/app.py", line 1598, in dispatch_request return self.view_functions[rule.endpoint](**req.view_args) File "flask_rest_jsonapi/decorators.py", line 32, in wrapper return func(*args, **kwargs) File "flask/views.py", line 84, in view return self.dispatch_request(*args, **kwargs) File "flask_rest_jsonapi/resource.py", line 68, in dispatch_request response = method(*args, **kwargs) File "flask_rest_jsonapi/decorators.py", line 56, in wrapper return func(*args, **kwargs) File "flask_rest_jsonapi/resource.py", line 145, in get objects_count, objects = self._data_layer.get_collection(qs, kwargs) File "flask_rest_jsonapi/data_layers/alchemy.py", line 92, in get_collection query = self.query(view_kwargs) File "app/api/access_codes.py", line 56, in query if not has_access('is_user_itself', user_id=user.id): File "app/api/helpers/permission_manager.py", line 450, in has_access auth = permissions[access_level](lambda *a, **b: True, (), {}, (), **kwargs) File "app/api/helpers/permissions.py", line 43, in decorator return fn(*args, **kwargs) File "app/api/helpers/permission_manager.py", line 109, in is_user_itself if not user.is_admin and not user.is_super_admin and user.id != kwargs['id']: ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `app/api/access_codes.py` Content: ``` 1 from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship 2 from flask_rest_jsonapi.exceptions import ObjectNotFound 3 from app.api.helpers.exceptions import ForbiddenException 4 5 from app.api.bootstrap import api 6 from app.api.helpers.db import safe_query 7 from app.api.helpers.permission_manager import has_access 8 from app.api.helpers.permissions import jwt_required 9 from app.api.helpers.query import event_query 10 from app.api.helpers.utilities import require_relationship 11 from app.api.schema.access_codes import AccessCodeSchema 12 from app.models import db 13 from app.models.access_code import AccessCode 14 from app.models.ticket import Ticket 15 from app.models.user import User 16 17 18 class AccessCodeListPost(ResourceList): 19 """ 20 Create AccessCodes 21 """ 22 def before_post(self, args, kwargs, data): 23 """ 24 before post method to check for required relationships and permissions 25 :param args: 26 :param kwargs: 27 :param data: 28 :return: 29 """ 30 require_relationship(['event', 'user'], data) 31 if not has_access('is_coorganizer', event_id=data['event']): 32 raise ObjectNotFound({'parameter': 'event_id'}, 33 "Event: {} not found".format(data['event'])) 34 35 schema = AccessCodeSchema 36 methods = ['POST', ] 37 data_layer = {'session': db.session, 38 'model': AccessCode 39 } 40 41 42 class AccessCodeList(ResourceList): 43 """ 44 List AccessCodes 45 """ 46 def query(self, view_kwargs): 47 """ 48 Method to get access codes list based on different view_kwargs 49 :param view_kwargs: 50 :return: 51 """ 52 query_ = self.session.query(AccessCode) 53 query_ = event_query(self, query_, view_kwargs, permission='is_coorganizer') 54 if view_kwargs.get('user_id'): 55 user = safe_query(self, User, 'id', view_kwargs['user_id'], 'user_id') 56 if not has_access('is_user_itself', user_id=user.id): 57 raise ForbiddenException({'source': ''}, 'Access Forbidden') 58 query_ = query_.join(User).filter(User.id == user.id) 59 if view_kwargs.get('ticket_id'): 60 ticket = safe_query(self, Ticket, 'id', view_kwargs['ticket_id'], 'ticket_id') 61 if not has_access('is_coorganizer', event_id=ticket.event_id): 62 raise ForbiddenException({'source': ''}, 'Access Forbidden') 63 # access_code - ticket :: many-to-many relationship 64 query_ = AccessCode.query.filter(AccessCode.tickets.any(id=ticket.id)) 65 query_ 66 return query_ 67 68 view_kwargs = True 69 methods = ['GET', ] 70 schema = AccessCodeSchema 71 data_layer = {'session': db.session, 72 'model': AccessCode, 73 'methods': { 74 'query': query, 75 }} 76 77 78 class AccessCodeDetail(ResourceDetail): 79 """ 80 AccessCode detail by id 81 """ 82 83 decorators = (api.has_permission('is_coorganizer', fetch='event_id', 84 fetch_as="event_id", model=AccessCode, methods="GET, PATCH"), 85 api.has_permission('is_coorganizer_but_not_admin', fetch='event_id', 86 fetch_as="event_id", model=AccessCode, methods="DELETE"),) 87 schema = AccessCodeSchema 88 data_layer = {'session': db.session, 89 'model': AccessCode 90 } 91 92 93 class AccessCodeRelationshipRequired(ResourceRelationship): 94 """ 95 AccessCode Relationship Required 96 """ 97 decorators = (jwt_required,) 98 methods = ['GET', 'PATCH'] 99 schema = AccessCodeSchema 100 data_layer = {'session': db.session, 101 'model': AccessCode} 102 103 104 class AccessCodeRelationshipOptional(ResourceRelationship): 105 """ 106 AccessCode Relationship Optional 107 """ 108 decorators = (jwt_required,) 109 schema = AccessCodeSchema 110 data_layer = {'session': db.session, 111 'model': AccessCode} 112 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/app/api/access_codes.py b/app/api/access_codes.py --- a/app/api/access_codes.py +++ b/app/api/access_codes.py @@ -53,7 +53,7 @@ query_ = event_query(self, query_, view_kwargs, permission='is_coorganizer') if view_kwargs.get('user_id'): user = safe_query(self, User, 'id', view_kwargs['user_id'], 'user_id') - if not has_access('is_user_itself', user_id=user.id): + if not has_access('is_user_itself', id=user.id): raise ForbiddenException({'source': ''}, 'Access Forbidden') query_ = query_.join(User).filter(User.id == user.id) if view_kwargs.get('ticket_id'):
{"golden_diff": "diff --git a/app/api/access_codes.py b/app/api/access_codes.py\n--- a/app/api/access_codes.py\n+++ b/app/api/access_codes.py\n@@ -53,7 +53,7 @@\n query_ = event_query(self, query_, view_kwargs, permission='is_coorganizer')\n if view_kwargs.get('user_id'):\n user = safe_query(self, User, 'id', view_kwargs['user_id'], 'user_id')\n- if not has_access('is_user_itself', user_id=user.id):\n+ if not has_access('is_user_itself', id=user.id):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n query_ = query_.join(User).filter(User.id == user.id)\n if view_kwargs.get('ticket_id'):\n", "issue": "[sentry] KeyError/v1/users/<int:user_id>/access-codes\n**I'm submitting a ...** (check one with \"x\")\r\n- [x] bug report\r\n- [ ] feature request\r\n- [ ] support request => Please do not submit support requests here, instead ask your query in out Gitter channel at https://gitter.im/fossasia/open-event-orga-server\r\n\r\n```\r\nKeyError: 'id'\r\n File \"flask/app.py\", line 1982, in wsgi_app\r\n response = self.full_dispatch_request()\r\n File \"flask/app.py\", line 1614, in full_dispatch_request\r\n rv = self.handle_user_exception(e)\r\n File \"flask_cors/extension.py\", line 161, in wrapped_function\r\n return cors_after_request(app.make_response(f(*args, **kwargs)))\r\n File \"flask/app.py\", line 1517, in handle_user_exception\r\n reraise(exc_type, exc_value, tb)\r\n File \"flask/app.py\", line 1612, in full_dispatch_request\r\n rv = self.dispatch_request()\r\n File \"flask/app.py\", line 1598, in dispatch_request\r\n return self.view_functions[rule.endpoint](**req.view_args)\r\n File \"flask_rest_jsonapi/decorators.py\", line 32, in wrapper\r\n return func(*args, **kwargs)\r\n File \"flask/views.py\", line 84, in view\r\n return self.dispatch_request(*args, **kwargs)\r\n File \"flask_rest_jsonapi/resource.py\", line 68, in dispatch_request\r\n response = method(*args, **kwargs)\r\n File \"flask_rest_jsonapi/decorators.py\", line 56, in wrapper\r\n return func(*args, **kwargs)\r\n File \"flask_rest_jsonapi/resource.py\", line 145, in get\r\n objects_count, objects = self._data_layer.get_collection(qs, kwargs)\r\n File \"flask_rest_jsonapi/data_layers/alchemy.py\", line 92, in get_collection\r\n query = self.query(view_kwargs)\r\n File \"app/api/access_codes.py\", line 56, in query\r\n if not has_access('is_user_itself', user_id=user.id):\r\n File \"app/api/helpers/permission_manager.py\", line 450, in has_access\r\n auth = permissions[access_level](lambda *a, **b: True, (), {}, (), **kwargs)\r\n File \"app/api/helpers/permissions.py\", line 43, in decorator\r\n return fn(*args, **kwargs)\r\n File \"app/api/helpers/permission_manager.py\", line 109, in is_user_itself\r\n if not user.is_admin and not user.is_super_admin and user.id != kwargs['id']:\r\n```\n", "before_files": [{"content": "from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\nfrom flask_rest_jsonapi.exceptions import ObjectNotFound\nfrom app.api.helpers.exceptions import ForbiddenException\n\nfrom app.api.bootstrap import api\nfrom app.api.helpers.db import safe_query\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.permissions import jwt_required\nfrom app.api.helpers.query import event_query\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.schema.access_codes import AccessCodeSchema\nfrom app.models import db\nfrom app.models.access_code import AccessCode\nfrom app.models.ticket import Ticket\nfrom app.models.user import User\n\n\nclass AccessCodeListPost(ResourceList):\n \"\"\"\n Create AccessCodes\n \"\"\"\n def before_post(self, args, kwargs, data):\n \"\"\"\n before post method to check for required relationships and permissions\n :param args:\n :param kwargs:\n :param data:\n :return:\n \"\"\"\n require_relationship(['event', 'user'], data)\n if not has_access('is_coorganizer', event_id=data['event']):\n raise ObjectNotFound({'parameter': 'event_id'},\n \"Event: {} not found\".format(data['event']))\n\n schema = AccessCodeSchema\n methods = ['POST', ]\n data_layer = {'session': db.session,\n 'model': AccessCode\n }\n\n\nclass AccessCodeList(ResourceList):\n \"\"\"\n List AccessCodes\n \"\"\"\n def query(self, view_kwargs):\n \"\"\"\n Method to get access codes list based on different view_kwargs\n :param view_kwargs:\n :return:\n \"\"\"\n query_ = self.session.query(AccessCode)\n query_ = event_query(self, query_, view_kwargs, permission='is_coorganizer')\n if view_kwargs.get('user_id'):\n user = safe_query(self, User, 'id', view_kwargs['user_id'], 'user_id')\n if not has_access('is_user_itself', user_id=user.id):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n query_ = query_.join(User).filter(User.id == user.id)\n if view_kwargs.get('ticket_id'):\n ticket = safe_query(self, Ticket, 'id', view_kwargs['ticket_id'], 'ticket_id')\n if not has_access('is_coorganizer', event_id=ticket.event_id):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n # access_code - ticket :: many-to-many relationship\n query_ = AccessCode.query.filter(AccessCode.tickets.any(id=ticket.id))\n query_\n return query_\n\n view_kwargs = True\n methods = ['GET', ]\n schema = AccessCodeSchema\n data_layer = {'session': db.session,\n 'model': AccessCode,\n 'methods': {\n 'query': query,\n }}\n\n\nclass AccessCodeDetail(ResourceDetail):\n \"\"\"\n AccessCode detail by id\n \"\"\"\n\n decorators = (api.has_permission('is_coorganizer', fetch='event_id',\n fetch_as=\"event_id\", model=AccessCode, methods=\"GET, PATCH\"),\n api.has_permission('is_coorganizer_but_not_admin', fetch='event_id',\n fetch_as=\"event_id\", model=AccessCode, methods=\"DELETE\"),)\n schema = AccessCodeSchema\n data_layer = {'session': db.session,\n 'model': AccessCode\n }\n\n\nclass AccessCodeRelationshipRequired(ResourceRelationship):\n \"\"\"\n AccessCode Relationship Required\n \"\"\"\n decorators = (jwt_required,)\n methods = ['GET', 'PATCH']\n schema = AccessCodeSchema\n data_layer = {'session': db.session,\n 'model': AccessCode}\n\n\nclass AccessCodeRelationshipOptional(ResourceRelationship):\n \"\"\"\n AccessCode Relationship Optional\n \"\"\"\n decorators = (jwt_required,)\n schema = AccessCodeSchema\n data_layer = {'session': db.session,\n 'model': AccessCode}\n", "path": "app/api/access_codes.py"}], "after_files": [{"content": "from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\nfrom flask_rest_jsonapi.exceptions import ObjectNotFound\nfrom app.api.helpers.exceptions import ForbiddenException\n\nfrom app.api.bootstrap import api\nfrom app.api.helpers.db import safe_query\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.permissions import jwt_required\nfrom app.api.helpers.query import event_query\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.schema.access_codes import AccessCodeSchema\nfrom app.models import db\nfrom app.models.access_code import AccessCode\nfrom app.models.ticket import Ticket\nfrom app.models.user import User\n\n\nclass AccessCodeListPost(ResourceList):\n \"\"\"\n Create AccessCodes\n \"\"\"\n def before_post(self, args, kwargs, data):\n \"\"\"\n before post method to check for required relationships and permissions\n :param args:\n :param kwargs:\n :param data:\n :return:\n \"\"\"\n require_relationship(['event', 'user'], data)\n if not has_access('is_coorganizer', event_id=data['event']):\n raise ObjectNotFound({'parameter': 'event_id'},\n \"Event: {} not found\".format(data['event']))\n\n schema = AccessCodeSchema\n methods = ['POST', ]\n data_layer = {'session': db.session,\n 'model': AccessCode\n }\n\n\nclass AccessCodeList(ResourceList):\n \"\"\"\n List AccessCodes\n \"\"\"\n def query(self, view_kwargs):\n \"\"\"\n Method to get access codes list based on different view_kwargs\n :param view_kwargs:\n :return:\n \"\"\"\n query_ = self.session.query(AccessCode)\n query_ = event_query(self, query_, view_kwargs, permission='is_coorganizer')\n if view_kwargs.get('user_id'):\n user = safe_query(self, User, 'id', view_kwargs['user_id'], 'user_id')\n if not has_access('is_user_itself', id=user.id):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n query_ = query_.join(User).filter(User.id == user.id)\n if view_kwargs.get('ticket_id'):\n ticket = safe_query(self, Ticket, 'id', view_kwargs['ticket_id'], 'ticket_id')\n if not has_access('is_coorganizer', event_id=ticket.event_id):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n # access_code - ticket :: many-to-many relationship\n query_ = AccessCode.query.filter(AccessCode.tickets.any(id=ticket.id))\n query_\n return query_\n\n view_kwargs = True\n methods = ['GET', ]\n schema = AccessCodeSchema\n data_layer = {'session': db.session,\n 'model': AccessCode,\n 'methods': {\n 'query': query,\n }}\n\n\nclass AccessCodeDetail(ResourceDetail):\n \"\"\"\n AccessCode detail by id\n \"\"\"\n\n decorators = (api.has_permission('is_coorganizer', fetch='event_id',\n fetch_as=\"event_id\", model=AccessCode, methods=\"GET, PATCH\"),\n api.has_permission('is_coorganizer_but_not_admin', fetch='event_id',\n fetch_as=\"event_id\", model=AccessCode, methods=\"DELETE\"),)\n schema = AccessCodeSchema\n data_layer = {'session': db.session,\n 'model': AccessCode\n }\n\n\nclass AccessCodeRelationshipRequired(ResourceRelationship):\n \"\"\"\n AccessCode Relationship Required\n \"\"\"\n decorators = (jwt_required,)\n methods = ['GET', 'PATCH']\n schema = AccessCodeSchema\n data_layer = {'session': db.session,\n 'model': AccessCode}\n\n\nclass AccessCodeRelationshipOptional(ResourceRelationship):\n \"\"\"\n AccessCode Relationship Optional\n \"\"\"\n decorators = (jwt_required,)\n schema = AccessCodeSchema\n data_layer = {'session': db.session,\n 'model': AccessCode}\n", "path": "app/api/access_codes.py"}]}
1,917
166
gh_patches_debug_31495
rasdani/github-patches
git_diff
modin-project__modin-2784
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [ASV] add read_csv benchmark with dtype/names/parse_dates parameters --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `asv_bench/benchmarks/io/csv.py` Content: ``` 1 # Licensed to Modin Development Team under one or more contributor license agreements. 2 # See the NOTICE file distributed with this work for additional information regarding 3 # copyright ownership. The Modin Development Team licenses this file to you under the 4 # Apache License, Version 2.0 (the "License"); you may not use this file except in 5 # compliance with the License. You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software distributed under 10 # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF 11 # ANY KIND, either express or implied. See the License for the specific language 12 # governing permissions and limitations under the License. 13 14 import modin.pandas as pd 15 import numpy as np 16 17 from ..utils import ( 18 generate_dataframe, 19 RAND_LOW, 20 RAND_HIGH, 21 ASV_USE_IMPL, 22 ASV_DATASET_SIZE, 23 UNARY_OP_DATA_SIZE, 24 IMPL, 25 execute, 26 get_shape_id, 27 ) 28 29 # ray init 30 if ASV_USE_IMPL == "modin": 31 pd.DataFrame([]) 32 33 34 class BaseReadCsv: 35 # test data file can de created only once 36 def setup_cache(self, test_filename="io_test_file"): 37 test_filenames = {} 38 for shape in UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE]: 39 shape_id = get_shape_id(shape) 40 test_filenames[shape_id] = f"{test_filename}_{shape_id}.csv" 41 df = generate_dataframe("pandas", "str_int", *shape, RAND_LOW, RAND_HIGH) 42 df.to_csv(test_filenames[shape_id], index=False) 43 44 return test_filenames 45 46 def setup(self, test_filenames, shape, *args, **kwargs): 47 self.shape_id = get_shape_id(shape) 48 49 50 class TimeReadCsvSkiprows(BaseReadCsv): 51 param_names = ["shape", "skiprows"] 52 params = [ 53 UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE], 54 [ 55 None, 56 lambda x: x % 2, 57 np.arange(1, UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE][0][0] // 10), 58 np.arange(1, UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE][0][0], 2), 59 ], 60 ] 61 62 def time_skiprows(self, test_filenames, shape, skiprows): 63 execute( 64 IMPL[ASV_USE_IMPL].read_csv( 65 test_filenames[self.shape_id], skiprows=skiprows 66 ) 67 ) 68 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/asv_bench/benchmarks/io/csv.py b/asv_bench/benchmarks/io/csv.py --- a/asv_bench/benchmarks/io/csv.py +++ b/asv_bench/benchmarks/io/csv.py @@ -65,3 +65,70 @@ test_filenames[self.shape_id], skiprows=skiprows ) ) + + +class TimeReadCsvNamesDtype: + _dtypes_params = ["Int64", "Int64_Timestamp"] + _timestamp_columns = ["col1", "col2"] + + param_names = ["shape", "names", "dtype"] + params = [ + UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE], + ["array-like"], + _dtypes_params, + ] + + def _get_file_id(self, shape, dtype): + return get_shape_id(shape) + dtype + + def _add_timestamp_columns(self, df): + df = df.copy() + date_column = IMPL["pandas"].date_range( + "2000", + periods=df.shape[0], + freq="ms", + ) + for col in self._timestamp_columns: + df[col] = date_column + return df + + def setup_cache(self, test_filename="io_test_file_csv_names_dtype"): + # filenames with a metadata of saved dataframes + cache = {} + for shape in UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE]: + for dtype in self._dtypes_params: + df = generate_dataframe("pandas", "int", *shape, RAND_LOW, RAND_HIGH) + if dtype == "Int64_Timestamp": + df = self._add_timestamp_columns(df) + + file_id = self._get_file_id(shape, dtype) + cache[file_id] = ( + f"{test_filename}_{file_id}.csv", + df.columns.to_list(), + df.dtypes.to_dict(), + ) + df.to_csv(cache[file_id][0], index=False) + return cache + + def setup(self, cache, shape, names, dtype): + file_id = self._get_file_id(shape, dtype) + self.filename, self.names, self.dtype = cache[file_id] + + self.parse_dates = None + if dtype == "Int64_Timestamp": + # cached version of dtype should not change + self.dtype = self.dtype.copy() + for col in self._timestamp_columns: + del self.dtype[col] + self.parse_dates = self._timestamp_columns + + def time_read_csv_names_dtype(self, cache, shape, names, dtype): + execute( + IMPL[ASV_USE_IMPL].read_csv( + self.filename, + names=self.names, + header=0, + dtype=self.dtype, + parse_dates=self.parse_dates, + ) + )
{"golden_diff": "diff --git a/asv_bench/benchmarks/io/csv.py b/asv_bench/benchmarks/io/csv.py\n--- a/asv_bench/benchmarks/io/csv.py\n+++ b/asv_bench/benchmarks/io/csv.py\n@@ -65,3 +65,70 @@\n test_filenames[self.shape_id], skiprows=skiprows\n )\n )\n+\n+\n+class TimeReadCsvNamesDtype:\n+ _dtypes_params = [\"Int64\", \"Int64_Timestamp\"]\n+ _timestamp_columns = [\"col1\", \"col2\"]\n+\n+ param_names = [\"shape\", \"names\", \"dtype\"]\n+ params = [\n+ UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE],\n+ [\"array-like\"],\n+ _dtypes_params,\n+ ]\n+\n+ def _get_file_id(self, shape, dtype):\n+ return get_shape_id(shape) + dtype\n+\n+ def _add_timestamp_columns(self, df):\n+ df = df.copy()\n+ date_column = IMPL[\"pandas\"].date_range(\n+ \"2000\",\n+ periods=df.shape[0],\n+ freq=\"ms\",\n+ )\n+ for col in self._timestamp_columns:\n+ df[col] = date_column\n+ return df\n+\n+ def setup_cache(self, test_filename=\"io_test_file_csv_names_dtype\"):\n+ # filenames with a metadata of saved dataframes\n+ cache = {}\n+ for shape in UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE]:\n+ for dtype in self._dtypes_params:\n+ df = generate_dataframe(\"pandas\", \"int\", *shape, RAND_LOW, RAND_HIGH)\n+ if dtype == \"Int64_Timestamp\":\n+ df = self._add_timestamp_columns(df)\n+\n+ file_id = self._get_file_id(shape, dtype)\n+ cache[file_id] = (\n+ f\"{test_filename}_{file_id}.csv\",\n+ df.columns.to_list(),\n+ df.dtypes.to_dict(),\n+ )\n+ df.to_csv(cache[file_id][0], index=False)\n+ return cache\n+\n+ def setup(self, cache, shape, names, dtype):\n+ file_id = self._get_file_id(shape, dtype)\n+ self.filename, self.names, self.dtype = cache[file_id]\n+\n+ self.parse_dates = None\n+ if dtype == \"Int64_Timestamp\":\n+ # cached version of dtype should not change\n+ self.dtype = self.dtype.copy()\n+ for col in self._timestamp_columns:\n+ del self.dtype[col]\n+ self.parse_dates = self._timestamp_columns\n+\n+ def time_read_csv_names_dtype(self, cache, shape, names, dtype):\n+ execute(\n+ IMPL[ASV_USE_IMPL].read_csv(\n+ self.filename,\n+ names=self.names,\n+ header=0,\n+ dtype=self.dtype,\n+ parse_dates=self.parse_dates,\n+ )\n+ )\n", "issue": "[ASV] add read_csv benchmark with dtype/names/parse_dates parameters\n\n", "before_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\nimport modin.pandas as pd\nimport numpy as np\n\nfrom ..utils import (\n generate_dataframe,\n RAND_LOW,\n RAND_HIGH,\n ASV_USE_IMPL,\n ASV_DATASET_SIZE,\n UNARY_OP_DATA_SIZE,\n IMPL,\n execute,\n get_shape_id,\n)\n\n# ray init\nif ASV_USE_IMPL == \"modin\":\n pd.DataFrame([])\n\n\nclass BaseReadCsv:\n # test data file can de created only once\n def setup_cache(self, test_filename=\"io_test_file\"):\n test_filenames = {}\n for shape in UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE]:\n shape_id = get_shape_id(shape)\n test_filenames[shape_id] = f\"{test_filename}_{shape_id}.csv\"\n df = generate_dataframe(\"pandas\", \"str_int\", *shape, RAND_LOW, RAND_HIGH)\n df.to_csv(test_filenames[shape_id], index=False)\n\n return test_filenames\n\n def setup(self, test_filenames, shape, *args, **kwargs):\n self.shape_id = get_shape_id(shape)\n\n\nclass TimeReadCsvSkiprows(BaseReadCsv):\n param_names = [\"shape\", \"skiprows\"]\n params = [\n UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE],\n [\n None,\n lambda x: x % 2,\n np.arange(1, UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE][0][0] // 10),\n np.arange(1, UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE][0][0], 2),\n ],\n ]\n\n def time_skiprows(self, test_filenames, shape, skiprows):\n execute(\n IMPL[ASV_USE_IMPL].read_csv(\n test_filenames[self.shape_id], skiprows=skiprows\n )\n )\n", "path": "asv_bench/benchmarks/io/csv.py"}], "after_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\nimport modin.pandas as pd\nimport numpy as np\n\nfrom ..utils import (\n generate_dataframe,\n RAND_LOW,\n RAND_HIGH,\n ASV_USE_IMPL,\n ASV_DATASET_SIZE,\n UNARY_OP_DATA_SIZE,\n IMPL,\n execute,\n get_shape_id,\n)\n\n# ray init\nif ASV_USE_IMPL == \"modin\":\n pd.DataFrame([])\n\n\nclass BaseReadCsv:\n # test data file can de created only once\n def setup_cache(self, test_filename=\"io_test_file\"):\n test_filenames = {}\n for shape in UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE]:\n shape_id = get_shape_id(shape)\n test_filenames[shape_id] = f\"{test_filename}_{shape_id}.csv\"\n df = generate_dataframe(\"pandas\", \"str_int\", *shape, RAND_LOW, RAND_HIGH)\n df.to_csv(test_filenames[shape_id], index=False)\n\n return test_filenames\n\n def setup(self, test_filenames, shape, *args, **kwargs):\n self.shape_id = get_shape_id(shape)\n\n\nclass TimeReadCsvSkiprows(BaseReadCsv):\n param_names = [\"shape\", \"skiprows\"]\n params = [\n UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE],\n [\n None,\n lambda x: x % 2,\n np.arange(1, UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE][0][0] // 10),\n np.arange(1, UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE][0][0], 2),\n ],\n ]\n\n def time_skiprows(self, test_filenames, shape, skiprows):\n execute(\n IMPL[ASV_USE_IMPL].read_csv(\n test_filenames[self.shape_id], skiprows=skiprows\n )\n )\n\n\nclass TimeReadCsvNamesDtype:\n _dtypes_params = [\"Int64\", \"Int64_Timestamp\"]\n _timestamp_columns = [\"col1\", \"col2\"]\n\n param_names = [\"shape\", \"names\", \"dtype\"]\n params = [\n UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE],\n [\"array-like\"],\n _dtypes_params,\n ]\n\n def _get_file_id(self, shape, dtype):\n return get_shape_id(shape) + dtype\n\n def _add_timestamp_columns(self, df):\n df = df.copy()\n date_column = IMPL[\"pandas\"].date_range(\n \"2000\",\n periods=df.shape[0],\n freq=\"ms\",\n )\n for col in self._timestamp_columns:\n df[col] = date_column\n return df\n\n def setup_cache(self, test_filename=\"io_test_file_csv_names_dtype\"):\n # filenames with a metadata of saved dataframes\n cache = {}\n for shape in UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE]:\n for dtype in self._dtypes_params:\n df = generate_dataframe(\"pandas\", \"int\", *shape, RAND_LOW, RAND_HIGH)\n if dtype == \"Int64_Timestamp\":\n df = self._add_timestamp_columns(df)\n\n file_id = self._get_file_id(shape, dtype)\n cache[file_id] = (\n f\"{test_filename}_{file_id}.csv\",\n df.columns.to_list(),\n df.dtypes.to_dict(),\n )\n df.to_csv(cache[file_id][0], index=False)\n return cache\n\n def setup(self, cache, shape, names, dtype):\n file_id = self._get_file_id(shape, dtype)\n self.filename, self.names, self.dtype = cache[file_id]\n\n self.parse_dates = None\n if dtype == \"Int64_Timestamp\":\n # cached version of dtype should not change\n self.dtype = self.dtype.copy()\n for col in self._timestamp_columns:\n del self.dtype[col]\n self.parse_dates = self._timestamp_columns\n\n def time_read_csv_names_dtype(self, cache, shape, names, dtype):\n execute(\n IMPL[ASV_USE_IMPL].read_csv(\n self.filename,\n names=self.names,\n header=0,\n dtype=self.dtype,\n parse_dates=self.parse_dates,\n )\n )\n", "path": "asv_bench/benchmarks/io/csv.py"}]}
961
649
gh_patches_debug_1317
rasdani/github-patches
git_diff
sopel-irc__sopel-1325
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [Bugzilla] Error calling shutdown method for module bugzilla:None Noticed this in my logs. Bugzilla shutdown throwing none. On Sopel 6.5.3, Python 3.5.3. ``` Ping timeout reached after 120 seconds, closing connection Calling shutdown for 2 modules. calling reddit.shutdown calling bugzilla.shutdown Error calling shutdown method for module bugzilla:None Closed! Warning: Disconnected. Reconnecting in 20 seconds... Welcome to Sopel. Loading modules... ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sopel/modules/bugzilla.py` Content: ``` 1 # coding=utf-8 2 """Bugzilla issue reporting module 3 4 Copyright 2013-2015, Embolalia, embolalia.com 5 Licensed under the Eiffel Forum License 2. 6 """ 7 from __future__ import unicode_literals, absolute_import, print_function, division 8 9 import re 10 11 import xmltodict 12 13 from sopel import web, tools 14 from sopel.config.types import StaticSection, ListAttribute 15 from sopel.logger import get_logger 16 from sopel.module import rule 17 18 19 regex = None 20 LOGGER = get_logger(__name__) 21 22 23 class BugzillaSection(StaticSection): 24 domains = ListAttribute('domains') 25 """The domains of the Bugzilla instances from which to get information.""" 26 27 28 def configure(config): 29 config.define_section('bugzilla', BugzillaSection) 30 config.bugzilla.configure_setting( 31 'domains', 32 'Enter the domains of the Bugzillas you want extra information ' 33 'from (e.g. bugzilla.gnome.org)' 34 ) 35 36 37 def setup(bot): 38 global regex 39 bot.config.define_section('bugzilla', BugzillaSection) 40 41 if not bot.config.bugzilla.domains: 42 return 43 if not bot.memory.contains('url_callbacks'): 44 bot.memory['url_callbacks'] = tools.SopelMemory() 45 46 domains = '|'.join(bot.config.bugzilla.domains) 47 regex = re.compile((r'https?://(%s)' 48 '(/show_bug.cgi\?\S*?)' 49 '(id=\d+)') 50 % domains) 51 bot.memory['url_callbacks'][regex] = show_bug 52 53 54 def shutdown(bot): 55 del bot.memory['url_callbacks'][regex] 56 57 58 @rule(r'.*https?://(\S+?)' 59 '(/show_bug.cgi\?\S*?)' 60 '(id=\d+).*') 61 def show_bug(bot, trigger, match=None): 62 """Show information about a Bugzilla bug.""" 63 match = match or trigger 64 domain = match.group(1) 65 if domain not in bot.config.bugzilla.domains: 66 return 67 url = 'https://%s%sctype=xml&%s' % match.groups() 68 data = web.get(url, dont_decode=True) 69 bug = xmltodict.parse(data).get('bugzilla').get('bug') 70 error = bug.get('@error', None) # error="NotPermitted" 71 72 if error: 73 LOGGER.warning('Bugzilla error: %s' % error) 74 bot.say('[BUGZILLA] Unable to get infomation for ' 75 'linked bug (%s)' % error) 76 return 77 78 message = ('[BUGZILLA] %s | Product: %s | Component: %s | Version: %s | ' + 79 'Importance: %s | Status: %s | Assigned to: %s | ' + 80 'Reported: %s | Modified: %s') 81 82 resolution = bug.get('resolution') 83 if resolution is not None: 84 status = bug.get('bug_status') + ' ' + resolution 85 else: 86 status = bug.get('bug_status') 87 88 assigned_to = bug.get('assigned_to') 89 if isinstance(assigned_to, dict): 90 assigned_to = assigned_to.get('@name') 91 92 message = message % ( 93 bug.get('short_desc'), bug.get('product'), 94 bug.get('component'), bug.get('version'), 95 (bug.get('priority') + ' ' + bug.get('bug_severity')), 96 status, assigned_to, bug.get('creation_ts'), 97 bug.get('delta_ts')) 98 bot.say(message) 99 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sopel/modules/bugzilla.py b/sopel/modules/bugzilla.py --- a/sopel/modules/bugzilla.py +++ b/sopel/modules/bugzilla.py @@ -52,7 +52,12 @@ def shutdown(bot): - del bot.memory['url_callbacks'][regex] + try: + del bot.memory['url_callbacks'][regex] + except KeyError: + # bot.config.bugzilla.domains was probably just empty on startup + # everything's daijoubu + pass @rule(r'.*https?://(\S+?)'
{"golden_diff": "diff --git a/sopel/modules/bugzilla.py b/sopel/modules/bugzilla.py\n--- a/sopel/modules/bugzilla.py\n+++ b/sopel/modules/bugzilla.py\n@@ -52,7 +52,12 @@\n \n \n def shutdown(bot):\n- del bot.memory['url_callbacks'][regex]\n+ try:\n+ del bot.memory['url_callbacks'][regex]\n+ except KeyError:\n+ # bot.config.bugzilla.domains was probably just empty on startup\n+ # everything's daijoubu\n+ pass\n \n \n @rule(r'.*https?://(\\S+?)'\n", "issue": "[Bugzilla] Error calling shutdown method for module bugzilla:None\nNoticed this in my logs. Bugzilla shutdown throwing none. On Sopel 6.5.3, Python 3.5.3.\r\n\r\n```\r\nPing timeout reached after 120 seconds, closing connection\r\nCalling shutdown for 2 modules.\r\ncalling reddit.shutdown\r\ncalling bugzilla.shutdown\r\nError calling shutdown method for module bugzilla:None\r\nClosed!\r\nWarning: Disconnected. Reconnecting in 20 seconds...\r\n \r\nWelcome to Sopel. Loading modules...\r\n```\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"Bugzilla issue reporting module\n\nCopyright 2013-2015, Embolalia, embolalia.com\nLicensed under the Eiffel Forum License 2.\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport re\n\nimport xmltodict\n\nfrom sopel import web, tools\nfrom sopel.config.types import StaticSection, ListAttribute\nfrom sopel.logger import get_logger\nfrom sopel.module import rule\n\n\nregex = None\nLOGGER = get_logger(__name__)\n\n\nclass BugzillaSection(StaticSection):\n domains = ListAttribute('domains')\n \"\"\"The domains of the Bugzilla instances from which to get information.\"\"\"\n\n\ndef configure(config):\n config.define_section('bugzilla', BugzillaSection)\n config.bugzilla.configure_setting(\n 'domains',\n 'Enter the domains of the Bugzillas you want extra information '\n 'from (e.g. bugzilla.gnome.org)'\n )\n\n\ndef setup(bot):\n global regex\n bot.config.define_section('bugzilla', BugzillaSection)\n\n if not bot.config.bugzilla.domains:\n return\n if not bot.memory.contains('url_callbacks'):\n bot.memory['url_callbacks'] = tools.SopelMemory()\n\n domains = '|'.join(bot.config.bugzilla.domains)\n regex = re.compile((r'https?://(%s)'\n '(/show_bug.cgi\\?\\S*?)'\n '(id=\\d+)')\n % domains)\n bot.memory['url_callbacks'][regex] = show_bug\n\n\ndef shutdown(bot):\n del bot.memory['url_callbacks'][regex]\n\n\n@rule(r'.*https?://(\\S+?)'\n '(/show_bug.cgi\\?\\S*?)'\n '(id=\\d+).*')\ndef show_bug(bot, trigger, match=None):\n \"\"\"Show information about a Bugzilla bug.\"\"\"\n match = match or trigger\n domain = match.group(1)\n if domain not in bot.config.bugzilla.domains:\n return\n url = 'https://%s%sctype=xml&%s' % match.groups()\n data = web.get(url, dont_decode=True)\n bug = xmltodict.parse(data).get('bugzilla').get('bug')\n error = bug.get('@error', None) # error=\"NotPermitted\"\n\n if error:\n LOGGER.warning('Bugzilla error: %s' % error)\n bot.say('[BUGZILLA] Unable to get infomation for '\n 'linked bug (%s)' % error)\n return\n\n message = ('[BUGZILLA] %s | Product: %s | Component: %s | Version: %s | ' +\n 'Importance: %s | Status: %s | Assigned to: %s | ' +\n 'Reported: %s | Modified: %s')\n\n resolution = bug.get('resolution')\n if resolution is not None:\n status = bug.get('bug_status') + ' ' + resolution\n else:\n status = bug.get('bug_status')\n\n assigned_to = bug.get('assigned_to')\n if isinstance(assigned_to, dict):\n assigned_to = assigned_to.get('@name')\n\n message = message % (\n bug.get('short_desc'), bug.get('product'),\n bug.get('component'), bug.get('version'),\n (bug.get('priority') + ' ' + bug.get('bug_severity')),\n status, assigned_to, bug.get('creation_ts'),\n bug.get('delta_ts'))\n bot.say(message)\n", "path": "sopel/modules/bugzilla.py"}], "after_files": [{"content": "# coding=utf-8\n\"\"\"Bugzilla issue reporting module\n\nCopyright 2013-2015, Embolalia, embolalia.com\nLicensed under the Eiffel Forum License 2.\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport re\n\nimport xmltodict\n\nfrom sopel import web, tools\nfrom sopel.config.types import StaticSection, ListAttribute\nfrom sopel.logger import get_logger\nfrom sopel.module import rule\n\n\nregex = None\nLOGGER = get_logger(__name__)\n\n\nclass BugzillaSection(StaticSection):\n domains = ListAttribute('domains')\n \"\"\"The domains of the Bugzilla instances from which to get information.\"\"\"\n\n\ndef configure(config):\n config.define_section('bugzilla', BugzillaSection)\n config.bugzilla.configure_setting(\n 'domains',\n 'Enter the domains of the Bugzillas you want extra information '\n 'from (e.g. bugzilla.gnome.org)'\n )\n\n\ndef setup(bot):\n global regex\n bot.config.define_section('bugzilla', BugzillaSection)\n\n if not bot.config.bugzilla.domains:\n return\n if not bot.memory.contains('url_callbacks'):\n bot.memory['url_callbacks'] = tools.SopelMemory()\n\n domains = '|'.join(bot.config.bugzilla.domains)\n regex = re.compile((r'https?://(%s)'\n '(/show_bug.cgi\\?\\S*?)'\n '(id=\\d+)')\n % domains)\n bot.memory['url_callbacks'][regex] = show_bug\n\n\ndef shutdown(bot):\n try:\n del bot.memory['url_callbacks'][regex]\n except KeyError:\n # bot.config.bugzilla.domains was probably just empty on startup\n # everything's daijoubu\n pass\n\n\n@rule(r'.*https?://(\\S+?)'\n '(/show_bug.cgi\\?\\S*?)'\n '(id=\\d+).*')\ndef show_bug(bot, trigger, match=None):\n \"\"\"Show information about a Bugzilla bug.\"\"\"\n match = match or trigger\n domain = match.group(1)\n if domain not in bot.config.bugzilla.domains:\n return\n url = 'https://%s%sctype=xml&%s' % match.groups()\n data = web.get(url, dont_decode=True)\n bug = xmltodict.parse(data).get('bugzilla').get('bug')\n error = bug.get('@error', None) # error=\"NotPermitted\"\n\n if error:\n LOGGER.warning('Bugzilla error: %s' % error)\n bot.say('[BUGZILLA] Unable to get infomation for '\n 'linked bug (%s)' % error)\n return\n\n message = ('[BUGZILLA] %s | Product: %s | Component: %s | Version: %s | ' +\n 'Importance: %s | Status: %s | Assigned to: %s | ' +\n 'Reported: %s | Modified: %s')\n\n resolution = bug.get('resolution')\n if resolution is not None:\n status = bug.get('bug_status') + ' ' + resolution\n else:\n status = bug.get('bug_status')\n\n assigned_to = bug.get('assigned_to')\n if isinstance(assigned_to, dict):\n assigned_to = assigned_to.get('@name')\n\n message = message % (\n bug.get('short_desc'), bug.get('product'),\n bug.get('component'), bug.get('version'),\n (bug.get('priority') + ' ' + bug.get('bug_severity')),\n status, assigned_to, bug.get('creation_ts'),\n bug.get('delta_ts'))\n bot.say(message)\n", "path": "sopel/modules/bugzilla.py"}]}
1,334
138
gh_patches_debug_50359
rasdani/github-patches
git_diff
pyinstaller__pyinstaller-5239
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- gevent hook unnecessarily bundles ~17MB of "stuff" ## Description of the issue The included `gevent` hook bundles about 17MB of unnecessary files. These include: - HTML documentation - `__pycache__` folders - tests - Generated `.c` and `.h` files ## Context information (for bug reports) * Output of `pyinstaller --version`: `3.6` * Version of Python: `3.7.7` * Platform: macOS 10.15.4 / Ubuntu 18.04 ### A minimal example program which shows the error `test.py`: ``` import gevent ``` Commands ```shell $ pip install gevent pyinstaller [...] $ pyinstaller -D test.py [...] $ find dist/test/gevent -name "*.c" -or -name "*.h" -or -name "*.html" -or -path "*/__pycache__/*" -or -path "*/tests/*" | xargs du -shc [...] 17M total ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `PyInstaller/hooks/hook-gevent.py` Content: ``` 1 #----------------------------------------------------------------------------- 2 # Copyright (c) 2015-2020, PyInstaller Development Team. 3 # 4 # Distributed under the terms of the GNU General Public License (version 2 5 # or later) with exception for distributing the bootloader. 6 # 7 # The full license is in the file COPYING.txt, distributed with this software. 8 # 9 # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception) 10 #----------------------------------------------------------------------------- 11 12 from PyInstaller.utils.hooks import collect_all 13 14 excludedimports = ["gevent.testing", "gevent.tests"] 15 16 datas, binaries, hiddenimports = collect_all( 17 'gevent', 18 filter_submodules=lambda name: ( 19 "gevent.testing" not in name or "gevent.tests" not in name)) 20 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/PyInstaller/hooks/hook-gevent.py b/PyInstaller/hooks/hook-gevent.py --- a/PyInstaller/hooks/hook-gevent.py +++ b/PyInstaller/hooks/hook-gevent.py @@ -16,4 +16,6 @@ datas, binaries, hiddenimports = collect_all( 'gevent', filter_submodules=lambda name: ( - "gevent.testing" not in name or "gevent.tests" not in name)) + "gevent.testing" not in name or "gevent.tests" not in name), + include_py_files=False, + exclude_datas=["**/tests"])
{"golden_diff": "diff --git a/PyInstaller/hooks/hook-gevent.py b/PyInstaller/hooks/hook-gevent.py\n--- a/PyInstaller/hooks/hook-gevent.py\n+++ b/PyInstaller/hooks/hook-gevent.py\n@@ -16,4 +16,6 @@\n datas, binaries, hiddenimports = collect_all(\n 'gevent',\n filter_submodules=lambda name: (\n- \"gevent.testing\" not in name or \"gevent.tests\" not in name))\n+ \"gevent.testing\" not in name or \"gevent.tests\" not in name),\n+ include_py_files=False,\n+ exclude_datas=[\"**/tests\"])\n", "issue": "gevent hook unnecessarily bundles ~17MB of \"stuff\"\n## Description of the issue\r\n\r\nThe included `gevent` hook bundles about 17MB of unnecessary files.\r\nThese include:\r\n- HTML documentation\r\n- `__pycache__` folders\r\n- tests\r\n- Generated `.c` and `.h` files\r\n\r\n## Context information (for bug reports)\r\n\r\n* Output of `pyinstaller --version`: `3.6`\r\n* Version of Python: `3.7.7`\r\n* Platform: macOS 10.15.4 / Ubuntu 18.04\r\n\r\n### A minimal example program which shows the error\r\n\r\n`test.py`:\r\n```\r\nimport gevent\r\n```\r\n\r\nCommands\r\n```shell\r\n$ pip install gevent pyinstaller\r\n[...]\r\n$ pyinstaller -D test.py\r\n[...]\r\n$ find dist/test/gevent -name \"*.c\" -or -name \"*.h\" -or -name \"*.html\" -or -path \"*/__pycache__/*\" -or -path \"*/tests/*\" | xargs du -shc\r\n[...]\r\n 17M\ttotal\r\n```\r\n\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2015-2020, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\nfrom PyInstaller.utils.hooks import collect_all\n\nexcludedimports = [\"gevent.testing\", \"gevent.tests\"]\n\ndatas, binaries, hiddenimports = collect_all(\n 'gevent',\n filter_submodules=lambda name: (\n \"gevent.testing\" not in name or \"gevent.tests\" not in name))\n", "path": "PyInstaller/hooks/hook-gevent.py"}], "after_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2015-2020, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\nfrom PyInstaller.utils.hooks import collect_all\n\nexcludedimports = [\"gevent.testing\", \"gevent.tests\"]\n\ndatas, binaries, hiddenimports = collect_all(\n 'gevent',\n filter_submodules=lambda name: (\n \"gevent.testing\" not in name or \"gevent.tests\" not in name),\n include_py_files=False,\n exclude_datas=[\"**/tests\"])\n", "path": "PyInstaller/hooks/hook-gevent.py"}]}
692
143
gh_patches_debug_17518
rasdani/github-patches
git_diff
PaddlePaddle__PaddleSpeech-1644
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [vec][search] update to paddlespeech model --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `demos/audio_searching/src/encode.py` Content: ``` 1 # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 import numpy as np 15 from logs import LOGGER 16 17 from paddlespeech.cli import VectorExecutor 18 19 vector_executor = VectorExecutor() 20 21 22 def get_audio_embedding(path): 23 """ 24 Use vpr_inference to generate embedding of audio 25 """ 26 try: 27 embedding = vector_executor(audio_file=path) 28 embedding = embedding / np.linalg.norm(embedding) 29 embedding = embedding.tolist() 30 return embedding 31 except Exception as e: 32 LOGGER.error(f"Error with embedding:{e}") 33 return None 34 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/demos/audio_searching/src/encode.py b/demos/audio_searching/src/encode.py --- a/demos/audio_searching/src/encode.py +++ b/demos/audio_searching/src/encode.py @@ -12,8 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. import numpy as np -from logs import LOGGER +from logs import LOGGER from paddlespeech.cli import VectorExecutor vector_executor = VectorExecutor() @@ -24,7 +24,8 @@ Use vpr_inference to generate embedding of audio """ try: - embedding = vector_executor(audio_file=path) + embedding = vector_executor( + audio_file=path, model='ecapatdnn_voxceleb12') embedding = embedding / np.linalg.norm(embedding) embedding = embedding.tolist() return embedding
{"golden_diff": "diff --git a/demos/audio_searching/src/encode.py b/demos/audio_searching/src/encode.py\n--- a/demos/audio_searching/src/encode.py\n+++ b/demos/audio_searching/src/encode.py\n@@ -12,8 +12,8 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n import numpy as np\n-from logs import LOGGER\n \n+from logs import LOGGER\n from paddlespeech.cli import VectorExecutor\n \n vector_executor = VectorExecutor()\n@@ -24,7 +24,8 @@\n Use vpr_inference to generate embedding of audio\n \"\"\"\n try:\n- embedding = vector_executor(audio_file=path)\n+ embedding = vector_executor(\n+ audio_file=path, model='ecapatdnn_voxceleb12')\n embedding = embedding / np.linalg.norm(embedding)\n embedding = embedding.tolist()\n return embedding\n", "issue": "[vec][search] update to paddlespeech model\n\n", "before_files": [{"content": "# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport numpy as np\nfrom logs import LOGGER\n\nfrom paddlespeech.cli import VectorExecutor\n\nvector_executor = VectorExecutor()\n\n\ndef get_audio_embedding(path):\n \"\"\"\n Use vpr_inference to generate embedding of audio\n \"\"\"\n try:\n embedding = vector_executor(audio_file=path)\n embedding = embedding / np.linalg.norm(embedding)\n embedding = embedding.tolist()\n return embedding\n except Exception as e:\n LOGGER.error(f\"Error with embedding:{e}\")\n return None\n", "path": "demos/audio_searching/src/encode.py"}], "after_files": [{"content": "# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport numpy as np\n\nfrom logs import LOGGER\nfrom paddlespeech.cli import VectorExecutor\n\nvector_executor = VectorExecutor()\n\n\ndef get_audio_embedding(path):\n \"\"\"\n Use vpr_inference to generate embedding of audio\n \"\"\"\n try:\n embedding = vector_executor(\n audio_file=path, model='ecapatdnn_voxceleb12')\n embedding = embedding / np.linalg.norm(embedding)\n embedding = embedding.tolist()\n return embedding\n except Exception as e:\n LOGGER.error(f\"Error with embedding:{e}\")\n return None\n", "path": "demos/audio_searching/src/encode.py"}]}
579
195
gh_patches_debug_17425
rasdani/github-patches
git_diff
python-discord__site-716
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ValueError: Unknown format code 'X' for object of type 'str' Sentry Issue: [SITE-25](https://sentry.io/organizations/python-discord/issues/3211854811/?referrer=github_integration) ``` ValueError: Unknown format code 'X' for object of type 'str' (16 additional frame(s) were not displayed) ... File "django/template/defaulttags.py", line 211, in render nodelist.append(node.render_annotated(context)) File "django/template/base.py", line 905, in render_annotated return self.render(context) File "django/template/base.py", line 988, in render output = self.filter_expression.resolve(context) File "django/template/base.py", line 698, in resolve new_obj = func(obj, *arg_vals) File "pydis_site/apps/staff/templatetags/deletedmessage_filters.py", line 15, in hex_colour colour = f"#{color:0>6X}" ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pydis_site/apps/staff/templatetags/deletedmessage_filters.py` Content: ``` 1 from datetime import datetime 2 3 from django import template 4 5 register = template.Library() 6 7 8 @register.filter 9 def hex_colour(color: int) -> str: 10 """ 11 Converts an integer representation of a colour to the RGB hex value. 12 13 As we are using a Discord dark theme analogue, black colours are returned as white instead. 14 """ 15 colour = f"#{color:0>6X}" 16 return colour if colour != "#000000" else "#FFFFFF" 17 18 19 @register.filter 20 def footer_datetime(timestamp: str) -> datetime: 21 """Takes an embed timestamp and returns a timezone-aware datetime object.""" 22 return datetime.fromisoformat(timestamp) 23 24 25 @register.filter 26 def visible_newlines(text: str) -> str: 27 """Takes an embed timestamp and returns a timezone-aware datetime object.""" 28 return text.replace("\n", " <span class='has-text-grey'>↵</span><br>") 29 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pydis_site/apps/staff/templatetags/deletedmessage_filters.py b/pydis_site/apps/staff/templatetags/deletedmessage_filters.py --- a/pydis_site/apps/staff/templatetags/deletedmessage_filters.py +++ b/pydis_site/apps/staff/templatetags/deletedmessage_filters.py @@ -1,4 +1,5 @@ from datetime import datetime +from typing import Union from django import template @@ -6,13 +7,16 @@ @register.filter -def hex_colour(color: int) -> str: +def hex_colour(colour: Union[str, int]) -> str: """ - Converts an integer representation of a colour to the RGB hex value. + Converts the given representation of a colour to its RGB hex string. As we are using a Discord dark theme analogue, black colours are returned as white instead. """ - colour = f"#{color:0>6X}" + if isinstance(colour, str): + colour = colour if colour.startswith("#") else f"#{colour}" + else: + colour = f"#{colour:0>6X}" return colour if colour != "#000000" else "#FFFFFF"
{"golden_diff": "diff --git a/pydis_site/apps/staff/templatetags/deletedmessage_filters.py b/pydis_site/apps/staff/templatetags/deletedmessage_filters.py\n--- a/pydis_site/apps/staff/templatetags/deletedmessage_filters.py\n+++ b/pydis_site/apps/staff/templatetags/deletedmessage_filters.py\n@@ -1,4 +1,5 @@\n from datetime import datetime\n+from typing import Union\n \n from django import template\n \n@@ -6,13 +7,16 @@\n \n \n @register.filter\n-def hex_colour(color: int) -> str:\n+def hex_colour(colour: Union[str, int]) -> str:\n \"\"\"\n- Converts an integer representation of a colour to the RGB hex value.\n+ Converts the given representation of a colour to its RGB hex string.\n \n As we are using a Discord dark theme analogue, black colours are returned as white instead.\n \"\"\"\n- colour = f\"#{color:0>6X}\"\n+ if isinstance(colour, str):\n+ colour = colour if colour.startswith(\"#\") else f\"#{colour}\"\n+ else:\n+ colour = f\"#{colour:0>6X}\"\n return colour if colour != \"#000000\" else \"#FFFFFF\"\n", "issue": "ValueError: Unknown format code 'X' for object of type 'str'\nSentry Issue: [SITE-25](https://sentry.io/organizations/python-discord/issues/3211854811/?referrer=github_integration)\n\n```\nValueError: Unknown format code 'X' for object of type 'str'\n(16 additional frame(s) were not displayed)\n...\n File \"django/template/defaulttags.py\", line 211, in render\n nodelist.append(node.render_annotated(context))\n File \"django/template/base.py\", line 905, in render_annotated\n return self.render(context)\n File \"django/template/base.py\", line 988, in render\n output = self.filter_expression.resolve(context)\n File \"django/template/base.py\", line 698, in resolve\n new_obj = func(obj, *arg_vals)\n File \"pydis_site/apps/staff/templatetags/deletedmessage_filters.py\", line 15, in hex_colour\n colour = f\"#{color:0>6X}\"\n```\n", "before_files": [{"content": "from datetime import datetime\n\nfrom django import template\n\nregister = template.Library()\n\n\[email protected]\ndef hex_colour(color: int) -> str:\n \"\"\"\n Converts an integer representation of a colour to the RGB hex value.\n\n As we are using a Discord dark theme analogue, black colours are returned as white instead.\n \"\"\"\n colour = f\"#{color:0>6X}\"\n return colour if colour != \"#000000\" else \"#FFFFFF\"\n\n\[email protected]\ndef footer_datetime(timestamp: str) -> datetime:\n \"\"\"Takes an embed timestamp and returns a timezone-aware datetime object.\"\"\"\n return datetime.fromisoformat(timestamp)\n\n\[email protected]\ndef visible_newlines(text: str) -> str:\n \"\"\"Takes an embed timestamp and returns a timezone-aware datetime object.\"\"\"\n return text.replace(\"\\n\", \" <span class='has-text-grey'>\u21b5</span><br>\")\n", "path": "pydis_site/apps/staff/templatetags/deletedmessage_filters.py"}], "after_files": [{"content": "from datetime import datetime\nfrom typing import Union\n\nfrom django import template\n\nregister = template.Library()\n\n\[email protected]\ndef hex_colour(colour: Union[str, int]) -> str:\n \"\"\"\n Converts the given representation of a colour to its RGB hex string.\n\n As we are using a Discord dark theme analogue, black colours are returned as white instead.\n \"\"\"\n if isinstance(colour, str):\n colour = colour if colour.startswith(\"#\") else f\"#{colour}\"\n else:\n colour = f\"#{colour:0>6X}\"\n return colour if colour != \"#000000\" else \"#FFFFFF\"\n\n\[email protected]\ndef footer_datetime(timestamp: str) -> datetime:\n \"\"\"Takes an embed timestamp and returns a timezone-aware datetime object.\"\"\"\n return datetime.fromisoformat(timestamp)\n\n\[email protected]\ndef visible_newlines(text: str) -> str:\n \"\"\"Takes an embed timestamp and returns a timezone-aware datetime object.\"\"\"\n return text.replace(\"\\n\", \" <span class='has-text-grey'>\u21b5</span><br>\")\n", "path": "pydis_site/apps/staff/templatetags/deletedmessage_filters.py"}]}
751
273
gh_patches_debug_30245
rasdani/github-patches
git_diff
pyinstaller__pyinstaller-6569
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Setuptools 60.7.0 breaks the executable Building with setuptools==60.7.0 I get this error. There is no `jaraco` module (seems like this is the author of `setuptools) ``` (simulator_venv) ➜ agent git:(feature/SB-22361-improve-socket-error-handling) ✗ dist/sbsimulator/sbsimulator Traceback (most recent call last): File "PyInstaller/hooks/rthooks/pyi_rth_pkgres.py", line 16, in <module> File "/Users/arossert/Shared/Bitbucket/agent/simulator_venv/lib/python3.8/site-packages/pkg_resources/__init__.py", line 74, in <module> from pkg_resources.extern.jaraco.text import ( File "/Users/arossert/Shared/Bitbucket/agent/simulator_venv/lib/python3.8/site-packages/pkg_resources/extern/__init__.py", line 52, in create_module return self.load_module(spec.name) File "/Users/arossert/Shared/Bitbucket/agent/simulator_venv/lib/python3.8/site-packages/pkg_resources/extern/__init__.py", line 44, in load_module raise ImportError( ImportError: The 'jaraco' package is required; normally this is bundled with this package so if you get this warning, consult the packager of your distribution. [23421] Failed to execute script 'pyi_rth_pkgres' due to unhandled exception! ``` PyInstaller: 4.8 OS: Linux/Mac (not tested on Windows) Python: 3.8.10 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `PyInstaller/hooks/hook-pkg_resources.py` Content: ``` 1 #----------------------------------------------------------------------------- 2 # Copyright (c) 2005-2021, PyInstaller Development Team. 3 # 4 # Distributed under the terms of the GNU General Public License (version 2 5 # or later) with exception for distributing the bootloader. 6 # 7 # The full license is in the file COPYING.txt, distributed with this software. 8 # 9 # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception) 10 #----------------------------------------------------------------------------- 11 12 from PyInstaller.utils.hooks import collect_submodules 13 14 # pkg_resources keeps vendored modules in its _vendor subpackage, and does sys.meta_path based import magic to expose 15 # them as pkg_resources.extern.* 16 hiddenimports = collect_submodules('pkg_resources._vendor') 17 18 # pkg_resources v45.0 dropped support for Python 2 and added this module printing a warning. We could save some bytes if 19 # we would replace this by a fake module. 20 hiddenimports.append('pkg_resources.py2_warn') 21 22 excludedimports = ['__main__'] 23 24 # Some more hidden imports. See: 25 # https://github.com/pyinstaller/pyinstaller-hooks-contrib/issues/15#issuecomment-663699288 `packaging` can either be 26 # its own package, or embedded in `pkg_resources._vendor.packaging`, or both. Assume the worst and include both if 27 # present. 28 hiddenimports += collect_submodules('packaging') 29 30 hiddenimports += ['pkg_resources.markers'] 31 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/PyInstaller/hooks/hook-pkg_resources.py b/PyInstaller/hooks/hook-pkg_resources.py --- a/PyInstaller/hooks/hook-pkg_resources.py +++ b/PyInstaller/hooks/hook-pkg_resources.py @@ -9,7 +9,7 @@ # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception) #----------------------------------------------------------------------------- -from PyInstaller.utils.hooks import collect_submodules +from PyInstaller.utils.hooks import collect_submodules, is_module_satisfies # pkg_resources keeps vendored modules in its _vendor subpackage, and does sys.meta_path based import magic to expose # them as pkg_resources.extern.* @@ -28,3 +28,24 @@ hiddenimports += collect_submodules('packaging') hiddenimports += ['pkg_resources.markers'] + +# As of v60.7, setuptools vendored jaraco and has pkg_resources use it. Currently, the pkg_resources._vendor.jaraco +# namespace package cannot be automatically scanned due to limited support for pure namespace packages in our hook +# utilities. +# +# In setuptools 60.7.0, the vendored jaraco.text package included "Lorem Ipsum.txt" data file, which also has to be +# collected. However, the presence of the data file (and the resulting directory hierarchy) confuses the importer's +# redirection logic; instead of trying to work-around that, tell user to upgrade or downgrade their setuptools. +if is_module_satisfies("setuptools == 60.7.0"): + raise SystemExit( + "ERROR: Setuptools 60.7.0 is incompatible with PyInstaller. " + "Downgrade to an earlier version or upgrade to a later version." + ) +# In setuptools 60.7.1, the "Lorem Ipsum.txt" data file was dropped from the vendored jaraco.text package, so we can +# accommodate it with couple of hidden imports. +elif is_module_satisfies("setuptools >= 60.7.1"): + hiddenimports += [ + 'pkg_resources._vendor.jaraco.functools', + 'pkg_resources._vendor.jaraco.context', + 'pkg_resources._vendor.jaraco.text', + ]
{"golden_diff": "diff --git a/PyInstaller/hooks/hook-pkg_resources.py b/PyInstaller/hooks/hook-pkg_resources.py\n--- a/PyInstaller/hooks/hook-pkg_resources.py\n+++ b/PyInstaller/hooks/hook-pkg_resources.py\n@@ -9,7 +9,7 @@\n # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n #-----------------------------------------------------------------------------\n \n-from PyInstaller.utils.hooks import collect_submodules\n+from PyInstaller.utils.hooks import collect_submodules, is_module_satisfies\n \n # pkg_resources keeps vendored modules in its _vendor subpackage, and does sys.meta_path based import magic to expose\n # them as pkg_resources.extern.*\n@@ -28,3 +28,24 @@\n hiddenimports += collect_submodules('packaging')\n \n hiddenimports += ['pkg_resources.markers']\n+\n+# As of v60.7, setuptools vendored jaraco and has pkg_resources use it. Currently, the pkg_resources._vendor.jaraco\n+# namespace package cannot be automatically scanned due to limited support for pure namespace packages in our hook\n+# utilities.\n+#\n+# In setuptools 60.7.0, the vendored jaraco.text package included \"Lorem Ipsum.txt\" data file, which also has to be\n+# collected. However, the presence of the data file (and the resulting directory hierarchy) confuses the importer's\n+# redirection logic; instead of trying to work-around that, tell user to upgrade or downgrade their setuptools.\n+if is_module_satisfies(\"setuptools == 60.7.0\"):\n+ raise SystemExit(\n+ \"ERROR: Setuptools 60.7.0 is incompatible with PyInstaller. \"\n+ \"Downgrade to an earlier version or upgrade to a later version.\"\n+ )\n+# In setuptools 60.7.1, the \"Lorem Ipsum.txt\" data file was dropped from the vendored jaraco.text package, so we can\n+# accommodate it with couple of hidden imports.\n+elif is_module_satisfies(\"setuptools >= 60.7.1\"):\n+ hiddenimports += [\n+ 'pkg_resources._vendor.jaraco.functools',\n+ 'pkg_resources._vendor.jaraco.context',\n+ 'pkg_resources._vendor.jaraco.text',\n+ ]\n", "issue": "Setuptools 60.7.0 breaks the executable\nBuilding with setuptools==60.7.0 I get this error.\r\nThere is no `jaraco` module (seems like this is the author of `setuptools)\r\n```\r\n(simulator_venv) \u279c agent git:(feature/SB-22361-improve-socket-error-handling) \u2717 dist/sbsimulator/sbsimulator\r\nTraceback (most recent call last):\r\n File \"PyInstaller/hooks/rthooks/pyi_rth_pkgres.py\", line 16, in <module>\r\n File \"/Users/arossert/Shared/Bitbucket/agent/simulator_venv/lib/python3.8/site-packages/pkg_resources/__init__.py\", line 74, in <module>\r\n from pkg_resources.extern.jaraco.text import (\r\n File \"/Users/arossert/Shared/Bitbucket/agent/simulator_venv/lib/python3.8/site-packages/pkg_resources/extern/__init__.py\", line 52, in create_module\r\n return self.load_module(spec.name)\r\n File \"/Users/arossert/Shared/Bitbucket/agent/simulator_venv/lib/python3.8/site-packages/pkg_resources/extern/__init__.py\", line 44, in load_module\r\n raise ImportError(\r\nImportError: The 'jaraco' package is required; normally this is bundled with this package so if you get this warning, consult the packager of your distribution.\r\n[23421] Failed to execute script 'pyi_rth_pkgres' due to unhandled exception!\r\n```\r\n\r\nPyInstaller: 4.8\r\nOS: Linux/Mac (not tested on Windows)\r\nPython: 3.8.10\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2021, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\nfrom PyInstaller.utils.hooks import collect_submodules\n\n# pkg_resources keeps vendored modules in its _vendor subpackage, and does sys.meta_path based import magic to expose\n# them as pkg_resources.extern.*\nhiddenimports = collect_submodules('pkg_resources._vendor')\n\n# pkg_resources v45.0 dropped support for Python 2 and added this module printing a warning. We could save some bytes if\n# we would replace this by a fake module.\nhiddenimports.append('pkg_resources.py2_warn')\n\nexcludedimports = ['__main__']\n\n# Some more hidden imports. See:\n# https://github.com/pyinstaller/pyinstaller-hooks-contrib/issues/15#issuecomment-663699288 `packaging` can either be\n# its own package, or embedded in `pkg_resources._vendor.packaging`, or both. Assume the worst and include both if\n# present.\nhiddenimports += collect_submodules('packaging')\n\nhiddenimports += ['pkg_resources.markers']\n", "path": "PyInstaller/hooks/hook-pkg_resources.py"}], "after_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2021, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\nfrom PyInstaller.utils.hooks import collect_submodules, is_module_satisfies\n\n# pkg_resources keeps vendored modules in its _vendor subpackage, and does sys.meta_path based import magic to expose\n# them as pkg_resources.extern.*\nhiddenimports = collect_submodules('pkg_resources._vendor')\n\n# pkg_resources v45.0 dropped support for Python 2 and added this module printing a warning. We could save some bytes if\n# we would replace this by a fake module.\nhiddenimports.append('pkg_resources.py2_warn')\n\nexcludedimports = ['__main__']\n\n# Some more hidden imports. See:\n# https://github.com/pyinstaller/pyinstaller-hooks-contrib/issues/15#issuecomment-663699288 `packaging` can either be\n# its own package, or embedded in `pkg_resources._vendor.packaging`, or both. Assume the worst and include both if\n# present.\nhiddenimports += collect_submodules('packaging')\n\nhiddenimports += ['pkg_resources.markers']\n\n# As of v60.7, setuptools vendored jaraco and has pkg_resources use it. Currently, the pkg_resources._vendor.jaraco\n# namespace package cannot be automatically scanned due to limited support for pure namespace packages in our hook\n# utilities.\n#\n# In setuptools 60.7.0, the vendored jaraco.text package included \"Lorem Ipsum.txt\" data file, which also has to be\n# collected. However, the presence of the data file (and the resulting directory hierarchy) confuses the importer's\n# redirection logic; instead of trying to work-around that, tell user to upgrade or downgrade their setuptools.\nif is_module_satisfies(\"setuptools == 60.7.0\"):\n raise SystemExit(\n \"ERROR: Setuptools 60.7.0 is incompatible with PyInstaller. \"\n \"Downgrade to an earlier version or upgrade to a later version.\"\n )\n# In setuptools 60.7.1, the \"Lorem Ipsum.txt\" data file was dropped from the vendored jaraco.text package, so we can\n# accommodate it with couple of hidden imports.\nelif is_module_satisfies(\"setuptools >= 60.7.1\"):\n hiddenimports += [\n 'pkg_resources._vendor.jaraco.functools',\n 'pkg_resources._vendor.jaraco.context',\n 'pkg_resources._vendor.jaraco.text',\n ]\n", "path": "PyInstaller/hooks/hook-pkg_resources.py"}]}
986
485
gh_patches_debug_24447
rasdani/github-patches
git_diff
bridgecrewio__checkov-4316
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- CKV_AWS_157 False Positive on Aurora **Describe the issue** CKV_AWS_157 fails on Aurora instances even though the `MultiAZ` property is not applicable to Aurora **Examples** ``` RDSinstance: Type: AWS::RDS::DBInstance Properties: DBClusterIdentifier: !Ref DBCluster DBInstanceClass: !Ref DbType DBInstanceIdentifier: !Sub ${AppName}-${EnvironmentName} DBParameterGroupName: !Ref DbParameterGroup DBSubnetGroupName: !Ref DBSubnetGroup Engine: aurora-mysql MonitoringInterval: "60" MonitoringRoleArn: !GetAtt RdsMonitoringRole.Arn PubliclyAccessible: 'false' ``` **Version (please complete the following information):** - Checkov Version 2.2.270 **Additional context** - AWS docs: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbinstance.html#cfn-rds-dbinstance-multiaz <img width="1377" alt="Screenshot 2023-01-19 at 1 25 49 PM" src="https://user-images.githubusercontent.com/1328683/213552592-4736b84a-3926-44b2-9dad-196b68f28daa.png"> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `checkov/cloudformation/checks/resource/aws/RDSMultiAZEnabled.py` Content: ``` 1 from checkov.cloudformation.checks.resource.base_resource_value_check import BaseResourceValueCheck 2 from checkov.common.models.enums import CheckCategories 3 4 5 class RDSMultiAZEnabled(BaseResourceValueCheck): 6 def __init__(self): 7 name = "Ensure that RDS instances have Multi-AZ enabled" 8 id = "CKV_AWS_157" 9 supported_resources = ['AWS::RDS::DBInstance'] 10 categories = [CheckCategories.NETWORKING] 11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) 12 13 def get_inspected_key(self): 14 return 'Properties/MultiAZ' 15 16 17 check = RDSMultiAZEnabled() 18 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/checkov/cloudformation/checks/resource/aws/RDSMultiAZEnabled.py b/checkov/cloudformation/checks/resource/aws/RDSMultiAZEnabled.py --- a/checkov/cloudformation/checks/resource/aws/RDSMultiAZEnabled.py +++ b/checkov/cloudformation/checks/resource/aws/RDSMultiAZEnabled.py @@ -1,5 +1,5 @@ +from checkov.common.models.enums import CheckResult, CheckCategories from checkov.cloudformation.checks.resource.base_resource_value_check import BaseResourceValueCheck -from checkov.common.models.enums import CheckCategories class RDSMultiAZEnabled(BaseResourceValueCheck): @@ -10,6 +10,16 @@ categories = [CheckCategories.NETWORKING] super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) + def scan_resource_conf(self, conf): + # Aurora is replicated across all AZs and doesn't require MultiAZ to be set + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbinstance.html#cfn-rds-dbinstance-multiaz + if 'Properties' in conf.keys(): + if 'Engine' in conf['Properties'].keys(): + if 'aurora' in conf['Properties']['Engine']: + return CheckResult.UNKNOWN + # Database is not Aurora; Use base class implementation + return super().scan_resource_conf(conf) + def get_inspected_key(self): return 'Properties/MultiAZ'
{"golden_diff": "diff --git a/checkov/cloudformation/checks/resource/aws/RDSMultiAZEnabled.py b/checkov/cloudformation/checks/resource/aws/RDSMultiAZEnabled.py\n--- a/checkov/cloudformation/checks/resource/aws/RDSMultiAZEnabled.py\n+++ b/checkov/cloudformation/checks/resource/aws/RDSMultiAZEnabled.py\n@@ -1,5 +1,5 @@\n+from checkov.common.models.enums import CheckResult, CheckCategories\n from checkov.cloudformation.checks.resource.base_resource_value_check import BaseResourceValueCheck\n-from checkov.common.models.enums import CheckCategories\n \n \n class RDSMultiAZEnabled(BaseResourceValueCheck):\n@@ -10,6 +10,16 @@\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n+ def scan_resource_conf(self, conf):\n+ # Aurora is replicated across all AZs and doesn't require MultiAZ to be set\n+ # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbinstance.html#cfn-rds-dbinstance-multiaz\n+ if 'Properties' in conf.keys():\n+ if 'Engine' in conf['Properties'].keys():\n+ if 'aurora' in conf['Properties']['Engine']:\n+ return CheckResult.UNKNOWN\n+ # Database is not Aurora; Use base class implementation\n+ return super().scan_resource_conf(conf)\n+ \n def get_inspected_key(self):\n return 'Properties/MultiAZ'\n", "issue": "CKV_AWS_157 False Positive on Aurora\n**Describe the issue**\r\nCKV_AWS_157 fails on Aurora instances even though the `MultiAZ` property is not applicable to Aurora\r\n\r\n**Examples**\r\n```\r\nRDSinstance:\r\n Type: AWS::RDS::DBInstance\r\n Properties:\r\n DBClusterIdentifier: !Ref DBCluster\r\n DBInstanceClass: !Ref DbType\r\n DBInstanceIdentifier: !Sub ${AppName}-${EnvironmentName}\r\n DBParameterGroupName: !Ref DbParameterGroup\r\n DBSubnetGroupName: !Ref DBSubnetGroup\r\n Engine: aurora-mysql\r\n MonitoringInterval: \"60\"\r\n MonitoringRoleArn: !GetAtt RdsMonitoringRole.Arn\r\n PubliclyAccessible: 'false'\r\n```\r\n\r\n**Version (please complete the following information):**\r\n - Checkov Version 2.2.270\r\n\r\n**Additional context**\r\n- AWS docs: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbinstance.html#cfn-rds-dbinstance-multiaz\r\n\r\n<img width=\"1377\" alt=\"Screenshot 2023-01-19 at 1 25 49 PM\" src=\"https://user-images.githubusercontent.com/1328683/213552592-4736b84a-3926-44b2-9dad-196b68f28daa.png\">\r\n\n", "before_files": [{"content": "from checkov.cloudformation.checks.resource.base_resource_value_check import BaseResourceValueCheck\nfrom checkov.common.models.enums import CheckCategories\n\n\nclass RDSMultiAZEnabled(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure that RDS instances have Multi-AZ enabled\"\n id = \"CKV_AWS_157\"\n supported_resources = ['AWS::RDS::DBInstance']\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return 'Properties/MultiAZ'\n\n\ncheck = RDSMultiAZEnabled()\n", "path": "checkov/cloudformation/checks/resource/aws/RDSMultiAZEnabled.py"}], "after_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.cloudformation.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass RDSMultiAZEnabled(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure that RDS instances have Multi-AZ enabled\"\n id = \"CKV_AWS_157\"\n supported_resources = ['AWS::RDS::DBInstance']\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n # Aurora is replicated across all AZs and doesn't require MultiAZ to be set\n # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbinstance.html#cfn-rds-dbinstance-multiaz\n if 'Properties' in conf.keys():\n if 'Engine' in conf['Properties'].keys():\n if 'aurora' in conf['Properties']['Engine']:\n return CheckResult.UNKNOWN\n # Database is not Aurora; Use base class implementation\n return super().scan_resource_conf(conf)\n \n def get_inspected_key(self):\n return 'Properties/MultiAZ'\n\n\ncheck = RDSMultiAZEnabled()\n", "path": "checkov/cloudformation/checks/resource/aws/RDSMultiAZEnabled.py"}]}
767
324
gh_patches_debug_32293
rasdani/github-patches
git_diff
translate__pootle-5675
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Move session data into session atm there is data like sidebar stuff that is stored in the actual cookie. apart from slowing down normal users (they send all that data in *every* request), bots are acumulating cookie cruft and needlessly posting that at our sites. if the cookie cruft gets too long it gets truncated and errors out --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pootle/core/helpers.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # 3 # Copyright (C) Pootle contributors. 4 # 5 # This file is a part of the Pootle project. It is distributed under the GPL3 6 # or later license. See the LICENSE file for a copy of the license and the 7 # AUTHORS file for copyright and authorship information. 8 9 import json 10 from urllib import quote, unquote 11 12 from django.utils import dateformat 13 14 15 SIDEBAR_COOKIE_NAME = 'pootle-browser-sidebar' 16 17 18 def get_sidebar_announcements_context(request, objects): 19 """Return the announcements context for the browser pages sidebar. 20 21 :param request: a :cls:`django.http.HttpRequest` object. 22 :param objects: a tuple of Project, Language and TranslationProject to 23 retrieve the announcements for. Any of those can be 24 missing, but it is recommended for them to be in that exact 25 order. 26 """ 27 announcements = [] 28 new_cookie_data = {} 29 cookie_data = {} 30 31 if SIDEBAR_COOKIE_NAME in request.COOKIES: 32 json_str = unquote(request.COOKIES[SIDEBAR_COOKIE_NAME]) 33 cookie_data = json.loads(json_str) 34 35 is_sidebar_open = cookie_data.get('isOpen', True) 36 37 for item in objects: 38 announcement = item.get_announcement(request.user) 39 40 if announcement is None: 41 continue 42 43 announcements.append(announcement) 44 # The virtual_path cannot be used as is for JSON. 45 ann_key = announcement.virtual_path.replace('/', '_') 46 ann_mtime = dateformat.format(announcement.modified_on, 'U') 47 stored_mtime = cookie_data.get(ann_key, None) 48 49 if ann_mtime != stored_mtime: 50 new_cookie_data[ann_key] = ann_mtime 51 52 if new_cookie_data: 53 # Some announcement has been changed or was never displayed before, so 54 # display sidebar and save the changed mtimes in the cookie to not 55 # display it next time unless it is necessary. 56 is_sidebar_open = True 57 cookie_data.update(new_cookie_data) 58 new_cookie_data = quote(json.dumps(cookie_data)) 59 60 ctx = { 61 'announcements': announcements, 62 'is_sidebar_open': is_sidebar_open, 63 'has_sidebar': len(announcements) > 0, 64 } 65 66 return ctx, new_cookie_data 67 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pootle/core/helpers.py b/pootle/core/helpers.py --- a/pootle/core/helpers.py +++ b/pootle/core/helpers.py @@ -12,7 +12,7 @@ from django.utils import dateformat -SIDEBAR_COOKIE_NAME = 'pootle-browser-sidebar' +SIDEBAR_COOKIE_NAME = 'pootle-browser-open-sidebar' def get_sidebar_announcements_context(request, objects): @@ -24,6 +24,7 @@ missing, but it is recommended for them to be in that exact order. """ + must_show_announcement = False announcements = [] new_cookie_data = {} cookie_data = {} @@ -41,20 +42,20 @@ continue announcements.append(announcement) - # The virtual_path cannot be used as is for JSON. - ann_key = announcement.virtual_path.replace('/', '_') + ann_mtime = dateformat.format(announcement.modified_on, 'U') - stored_mtime = cookie_data.get(ann_key, None) + stored_mtime = request.session.get(announcement.virtual_path, None) if ann_mtime != stored_mtime: - new_cookie_data[ann_key] = ann_mtime + # Some announcement has been changed or was never displayed before, + # so display sidebar and save the changed mtimes in the session to + # not display it next time unless it is necessary. + must_show_announcement = True + request.session[announcement.virtual_path] = ann_mtime - if new_cookie_data: - # Some announcement has been changed or was never displayed before, so - # display sidebar and save the changed mtimes in the cookie to not - # display it next time unless it is necessary. + if must_show_announcement and not is_sidebar_open: is_sidebar_open = True - cookie_data.update(new_cookie_data) + cookie_data['isOpen'] = is_sidebar_open new_cookie_data = quote(json.dumps(cookie_data)) ctx = {
{"golden_diff": "diff --git a/pootle/core/helpers.py b/pootle/core/helpers.py\n--- a/pootle/core/helpers.py\n+++ b/pootle/core/helpers.py\n@@ -12,7 +12,7 @@\n from django.utils import dateformat\n \n \n-SIDEBAR_COOKIE_NAME = 'pootle-browser-sidebar'\n+SIDEBAR_COOKIE_NAME = 'pootle-browser-open-sidebar'\n \n \n def get_sidebar_announcements_context(request, objects):\n@@ -24,6 +24,7 @@\n missing, but it is recommended for them to be in that exact\n order.\n \"\"\"\n+ must_show_announcement = False\n announcements = []\n new_cookie_data = {}\n cookie_data = {}\n@@ -41,20 +42,20 @@\n continue\n \n announcements.append(announcement)\n- # The virtual_path cannot be used as is for JSON.\n- ann_key = announcement.virtual_path.replace('/', '_')\n+\n ann_mtime = dateformat.format(announcement.modified_on, 'U')\n- stored_mtime = cookie_data.get(ann_key, None)\n+ stored_mtime = request.session.get(announcement.virtual_path, None)\n \n if ann_mtime != stored_mtime:\n- new_cookie_data[ann_key] = ann_mtime\n+ # Some announcement has been changed or was never displayed before,\n+ # so display sidebar and save the changed mtimes in the session to\n+ # not display it next time unless it is necessary.\n+ must_show_announcement = True\n+ request.session[announcement.virtual_path] = ann_mtime\n \n- if new_cookie_data:\n- # Some announcement has been changed or was never displayed before, so\n- # display sidebar and save the changed mtimes in the cookie to not\n- # display it next time unless it is necessary.\n+ if must_show_announcement and not is_sidebar_open:\n is_sidebar_open = True\n- cookie_data.update(new_cookie_data)\n+ cookie_data['isOpen'] = is_sidebar_open\n new_cookie_data = quote(json.dumps(cookie_data))\n \n ctx = {\n", "issue": "Move session data into session\natm there is data like sidebar stuff that is stored in the actual cookie.\r\n\r\napart from slowing down normal users (they send all that data in *every* request), bots are acumulating cookie cruft and needlessly posting that at our sites.\r\n\r\nif the cookie cruft gets too long it gets truncated and errors out\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport json\nfrom urllib import quote, unquote\n\nfrom django.utils import dateformat\n\n\nSIDEBAR_COOKIE_NAME = 'pootle-browser-sidebar'\n\n\ndef get_sidebar_announcements_context(request, objects):\n \"\"\"Return the announcements context for the browser pages sidebar.\n\n :param request: a :cls:`django.http.HttpRequest` object.\n :param objects: a tuple of Project, Language and TranslationProject to\n retrieve the announcements for. Any of those can be\n missing, but it is recommended for them to be in that exact\n order.\n \"\"\"\n announcements = []\n new_cookie_data = {}\n cookie_data = {}\n\n if SIDEBAR_COOKIE_NAME in request.COOKIES:\n json_str = unquote(request.COOKIES[SIDEBAR_COOKIE_NAME])\n cookie_data = json.loads(json_str)\n\n is_sidebar_open = cookie_data.get('isOpen', True)\n\n for item in objects:\n announcement = item.get_announcement(request.user)\n\n if announcement is None:\n continue\n\n announcements.append(announcement)\n # The virtual_path cannot be used as is for JSON.\n ann_key = announcement.virtual_path.replace('/', '_')\n ann_mtime = dateformat.format(announcement.modified_on, 'U')\n stored_mtime = cookie_data.get(ann_key, None)\n\n if ann_mtime != stored_mtime:\n new_cookie_data[ann_key] = ann_mtime\n\n if new_cookie_data:\n # Some announcement has been changed or was never displayed before, so\n # display sidebar and save the changed mtimes in the cookie to not\n # display it next time unless it is necessary.\n is_sidebar_open = True\n cookie_data.update(new_cookie_data)\n new_cookie_data = quote(json.dumps(cookie_data))\n\n ctx = {\n 'announcements': announcements,\n 'is_sidebar_open': is_sidebar_open,\n 'has_sidebar': len(announcements) > 0,\n }\n\n return ctx, new_cookie_data\n", "path": "pootle/core/helpers.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport json\nfrom urllib import quote, unquote\n\nfrom django.utils import dateformat\n\n\nSIDEBAR_COOKIE_NAME = 'pootle-browser-open-sidebar'\n\n\ndef get_sidebar_announcements_context(request, objects):\n \"\"\"Return the announcements context for the browser pages sidebar.\n\n :param request: a :cls:`django.http.HttpRequest` object.\n :param objects: a tuple of Project, Language and TranslationProject to\n retrieve the announcements for. Any of those can be\n missing, but it is recommended for them to be in that exact\n order.\n \"\"\"\n must_show_announcement = False\n announcements = []\n new_cookie_data = {}\n cookie_data = {}\n\n if SIDEBAR_COOKIE_NAME in request.COOKIES:\n json_str = unquote(request.COOKIES[SIDEBAR_COOKIE_NAME])\n cookie_data = json.loads(json_str)\n\n is_sidebar_open = cookie_data.get('isOpen', True)\n\n for item in objects:\n announcement = item.get_announcement(request.user)\n\n if announcement is None:\n continue\n\n announcements.append(announcement)\n\n ann_mtime = dateformat.format(announcement.modified_on, 'U')\n stored_mtime = request.session.get(announcement.virtual_path, None)\n\n if ann_mtime != stored_mtime:\n # Some announcement has been changed or was never displayed before,\n # so display sidebar and save the changed mtimes in the session to\n # not display it next time unless it is necessary.\n must_show_announcement = True\n request.session[announcement.virtual_path] = ann_mtime\n\n if must_show_announcement and not is_sidebar_open:\n is_sidebar_open = True\n cookie_data['isOpen'] = is_sidebar_open\n new_cookie_data = quote(json.dumps(cookie_data))\n\n ctx = {\n 'announcements': announcements,\n 'is_sidebar_open': is_sidebar_open,\n 'has_sidebar': len(announcements) > 0,\n }\n\n return ctx, new_cookie_data\n", "path": "pootle/core/helpers.py"}]}
948
443
gh_patches_debug_30619
rasdani/github-patches
git_diff
ipython__ipython-8859
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Wrong Tooltip with shift-tab @andram opened jupyter/notebook#520 at 2015-10-01 10:09:29 UTC If a notebook cell starts with a multiline string, then shift-tab gets confused and shows the wrong tooltips for following lines. For example consider a cell containing the following 5 lines: ``` """ Docstring """ max() min() ``` now position the cursor inside the brackets of max(). Expected behaviour is that the docstring for `max` is shown. Actual behaviour is that the docstring for min is shown. (see attached screenshot for current github version of jupyter notebook) ![notebook_tooltip_problem](https://cloud.githubusercontent.com/assets/3459178/10218173/b2415676-682c-11e5-8551-708c1dc1ab53.png) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `IPython/utils/tokenutil.py` Content: ``` 1 """Token-related utilities""" 2 3 # Copyright (c) IPython Development Team. 4 # Distributed under the terms of the Modified BSD License. 5 6 from __future__ import absolute_import, print_function 7 8 from collections import namedtuple 9 from io import StringIO 10 from keyword import iskeyword 11 12 from . import tokenize2 13 from .py3compat import cast_unicode_py2 14 15 Token = namedtuple('Token', ['token', 'text', 'start', 'end', 'line']) 16 17 def generate_tokens(readline): 18 """wrap generate_tokens to catch EOF errors""" 19 try: 20 for token in tokenize2.generate_tokens(readline): 21 yield token 22 except tokenize2.TokenError: 23 # catch EOF error 24 return 25 26 def line_at_cursor(cell, cursor_pos=0): 27 """Return the line in a cell at a given cursor position 28 29 Used for calling line-based APIs that don't support multi-line input, yet. 30 31 Parameters 32 ---------- 33 34 cell: text 35 multiline block of text 36 cursor_pos: integer 37 the cursor position 38 39 Returns 40 ------- 41 42 (line, offset): (text, integer) 43 The line with the current cursor, and the character offset of the start of the line. 44 """ 45 offset = 0 46 lines = cell.splitlines(True) 47 for line in lines: 48 next_offset = offset + len(line) 49 if next_offset >= cursor_pos: 50 break 51 offset = next_offset 52 else: 53 line = "" 54 return (line, offset) 55 56 def token_at_cursor(cell, cursor_pos=0): 57 """Get the token at a given cursor 58 59 Used for introspection. 60 61 Function calls are prioritized, so the token for the callable will be returned 62 if the cursor is anywhere inside the call. 63 64 Parameters 65 ---------- 66 67 cell : unicode 68 A block of Python code 69 cursor_pos : int 70 The location of the cursor in the block where the token should be found 71 """ 72 cell = cast_unicode_py2(cell) 73 names = [] 74 tokens = [] 75 offset = 0 76 call_names = [] 77 for tup in generate_tokens(StringIO(cell).readline): 78 79 tok = Token(*tup) 80 81 # token, text, start, end, line = tup 82 start_col = tok.start[1] 83 end_col = tok.end[1] 84 # allow '|foo' to find 'foo' at the beginning of a line 85 boundary = cursor_pos + 1 if start_col == 0 else cursor_pos 86 if offset + start_col >= boundary: 87 # current token starts after the cursor, 88 # don't consume it 89 break 90 91 if tok.token == tokenize2.NAME and not iskeyword(tok.text): 92 if names and tokens and tokens[-1].token == tokenize2.OP and tokens[-1].text == '.': 93 names[-1] = "%s.%s" % (names[-1], tok.text) 94 else: 95 names.append(tok.text) 96 elif tok.token == tokenize2.OP: 97 if tok.text == '=' and names: 98 # don't inspect the lhs of an assignment 99 names.pop(-1) 100 if tok.text == '(' and names: 101 # if we are inside a function call, inspect the function 102 call_names.append(names[-1]) 103 elif tok.text == ')' and call_names: 104 call_names.pop(-1) 105 106 if offset + end_col > cursor_pos: 107 # we found the cursor, stop reading 108 break 109 110 tokens.append(tok) 111 if tok.token in (tokenize2.NEWLINE, tokenize2.NL): 112 offset += len(tok.line) 113 114 if call_names: 115 return call_names[-1] 116 elif names: 117 return names[-1] 118 else: 119 return '' 120 121 122 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/IPython/utils/tokenutil.py b/IPython/utils/tokenutil.py --- a/IPython/utils/tokenutil.py +++ b/IPython/utils/tokenutil.py @@ -72,15 +72,24 @@ cell = cast_unicode_py2(cell) names = [] tokens = [] - offset = 0 call_names = [] + + offsets = {1: 0} # lines start at 1 for tup in generate_tokens(StringIO(cell).readline): tok = Token(*tup) # token, text, start, end, line = tup - start_col = tok.start[1] - end_col = tok.end[1] + start_line, start_col = tok.start + end_line, end_col = tok.end + if end_line + 1 not in offsets: + # keep track of offsets for each line + lines = tok.line.splitlines(True) + for lineno, line in zip(range(start_line + 1, end_line + 2), lines): + if lineno not in offsets: + offsets[lineno] = offsets[lineno-1] + len(line) + + offset = offsets[start_line] # allow '|foo' to find 'foo' at the beginning of a line boundary = cursor_pos + 1 if start_col == 0 else cursor_pos if offset + start_col >= boundary: @@ -103,14 +112,12 @@ elif tok.text == ')' and call_names: call_names.pop(-1) - if offset + end_col > cursor_pos: + tokens.append(tok) + + if offsets[end_line] + end_col > cursor_pos: # we found the cursor, stop reading break - tokens.append(tok) - if tok.token in (tokenize2.NEWLINE, tokenize2.NL): - offset += len(tok.line) - if call_names: return call_names[-1] elif names:
{"golden_diff": "diff --git a/IPython/utils/tokenutil.py b/IPython/utils/tokenutil.py\n--- a/IPython/utils/tokenutil.py\n+++ b/IPython/utils/tokenutil.py\n@@ -72,15 +72,24 @@\n cell = cast_unicode_py2(cell)\n names = []\n tokens = []\n- offset = 0\n call_names = []\n+ \n+ offsets = {1: 0} # lines start at 1\n for tup in generate_tokens(StringIO(cell).readline):\n \n tok = Token(*tup)\n \n # token, text, start, end, line = tup\n- start_col = tok.start[1]\n- end_col = tok.end[1]\n+ start_line, start_col = tok.start\n+ end_line, end_col = tok.end\n+ if end_line + 1 not in offsets:\n+ # keep track of offsets for each line\n+ lines = tok.line.splitlines(True)\n+ for lineno, line in zip(range(start_line + 1, end_line + 2), lines):\n+ if lineno not in offsets:\n+ offsets[lineno] = offsets[lineno-1] + len(line)\n+ \n+ offset = offsets[start_line]\n # allow '|foo' to find 'foo' at the beginning of a line\n boundary = cursor_pos + 1 if start_col == 0 else cursor_pos\n if offset + start_col >= boundary:\n@@ -103,14 +112,12 @@\n elif tok.text == ')' and call_names:\n call_names.pop(-1)\n \n- if offset + end_col > cursor_pos:\n+ tokens.append(tok)\n+ \n+ if offsets[end_line] + end_col > cursor_pos:\n # we found the cursor, stop reading\n break\n \n- tokens.append(tok)\n- if tok.token in (tokenize2.NEWLINE, tokenize2.NL):\n- offset += len(tok.line)\n- \n if call_names:\n return call_names[-1]\n elif names:\n", "issue": "Wrong Tooltip with shift-tab\n@andram opened jupyter/notebook#520 at 2015-10-01 10:09:29 UTC\n\nIf a notebook cell starts with a multiline string, then shift-tab gets confused and shows the wrong tooltips for following lines. For example consider a cell containing the following 5 lines:\n\n```\n\"\"\"\nDocstring\n\"\"\"\nmax()\nmin()\n```\n\nnow position the cursor inside the brackets of max(). Expected behaviour is that the docstring for `max` is shown. Actual behaviour is that the docstring for min is shown. (see attached screenshot for current github version of jupyter notebook)\n\n![notebook_tooltip_problem](https://cloud.githubusercontent.com/assets/3459178/10218173/b2415676-682c-11e5-8551-708c1dc1ab53.png)\n\n", "before_files": [{"content": "\"\"\"Token-related utilities\"\"\"\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfrom __future__ import absolute_import, print_function\n\nfrom collections import namedtuple\nfrom io import StringIO\nfrom keyword import iskeyword\n\nfrom . import tokenize2\nfrom .py3compat import cast_unicode_py2\n\nToken = namedtuple('Token', ['token', 'text', 'start', 'end', 'line'])\n\ndef generate_tokens(readline):\n \"\"\"wrap generate_tokens to catch EOF errors\"\"\"\n try:\n for token in tokenize2.generate_tokens(readline):\n yield token\n except tokenize2.TokenError:\n # catch EOF error\n return\n\ndef line_at_cursor(cell, cursor_pos=0):\n \"\"\"Return the line in a cell at a given cursor position\n \n Used for calling line-based APIs that don't support multi-line input, yet.\n \n Parameters\n ----------\n \n cell: text\n multiline block of text\n cursor_pos: integer\n the cursor position\n \n Returns\n -------\n \n (line, offset): (text, integer)\n The line with the current cursor, and the character offset of the start of the line.\n \"\"\"\n offset = 0\n lines = cell.splitlines(True)\n for line in lines:\n next_offset = offset + len(line)\n if next_offset >= cursor_pos:\n break\n offset = next_offset\n else:\n line = \"\"\n return (line, offset)\n\ndef token_at_cursor(cell, cursor_pos=0):\n \"\"\"Get the token at a given cursor\n \n Used for introspection.\n \n Function calls are prioritized, so the token for the callable will be returned\n if the cursor is anywhere inside the call.\n \n Parameters\n ----------\n \n cell : unicode\n A block of Python code\n cursor_pos : int\n The location of the cursor in the block where the token should be found\n \"\"\"\n cell = cast_unicode_py2(cell)\n names = []\n tokens = []\n offset = 0\n call_names = []\n for tup in generate_tokens(StringIO(cell).readline):\n \n tok = Token(*tup)\n \n # token, text, start, end, line = tup\n start_col = tok.start[1]\n end_col = tok.end[1]\n # allow '|foo' to find 'foo' at the beginning of a line\n boundary = cursor_pos + 1 if start_col == 0 else cursor_pos\n if offset + start_col >= boundary:\n # current token starts after the cursor,\n # don't consume it\n break\n \n if tok.token == tokenize2.NAME and not iskeyword(tok.text):\n if names and tokens and tokens[-1].token == tokenize2.OP and tokens[-1].text == '.':\n names[-1] = \"%s.%s\" % (names[-1], tok.text)\n else:\n names.append(tok.text)\n elif tok.token == tokenize2.OP:\n if tok.text == '=' and names:\n # don't inspect the lhs of an assignment\n names.pop(-1)\n if tok.text == '(' and names:\n # if we are inside a function call, inspect the function\n call_names.append(names[-1])\n elif tok.text == ')' and call_names:\n call_names.pop(-1)\n \n if offset + end_col > cursor_pos:\n # we found the cursor, stop reading\n break\n \n tokens.append(tok)\n if tok.token in (tokenize2.NEWLINE, tokenize2.NL):\n offset += len(tok.line)\n \n if call_names:\n return call_names[-1]\n elif names:\n return names[-1]\n else:\n return ''\n \n\n", "path": "IPython/utils/tokenutil.py"}], "after_files": [{"content": "\"\"\"Token-related utilities\"\"\"\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfrom __future__ import absolute_import, print_function\n\nfrom collections import namedtuple\nfrom io import StringIO\nfrom keyword import iskeyword\n\nfrom . import tokenize2\nfrom .py3compat import cast_unicode_py2\n\nToken = namedtuple('Token', ['token', 'text', 'start', 'end', 'line'])\n\ndef generate_tokens(readline):\n \"\"\"wrap generate_tokens to catch EOF errors\"\"\"\n try:\n for token in tokenize2.generate_tokens(readline):\n yield token\n except tokenize2.TokenError:\n # catch EOF error\n return\n\ndef line_at_cursor(cell, cursor_pos=0):\n \"\"\"Return the line in a cell at a given cursor position\n \n Used for calling line-based APIs that don't support multi-line input, yet.\n \n Parameters\n ----------\n \n cell: text\n multiline block of text\n cursor_pos: integer\n the cursor position\n \n Returns\n -------\n \n (line, offset): (text, integer)\n The line with the current cursor, and the character offset of the start of the line.\n \"\"\"\n offset = 0\n lines = cell.splitlines(True)\n for line in lines:\n next_offset = offset + len(line)\n if next_offset >= cursor_pos:\n break\n offset = next_offset\n else:\n line = \"\"\n return (line, offset)\n\ndef token_at_cursor(cell, cursor_pos=0):\n \"\"\"Get the token at a given cursor\n \n Used for introspection.\n \n Function calls are prioritized, so the token for the callable will be returned\n if the cursor is anywhere inside the call.\n \n Parameters\n ----------\n \n cell : unicode\n A block of Python code\n cursor_pos : int\n The location of the cursor in the block where the token should be found\n \"\"\"\n cell = cast_unicode_py2(cell)\n names = []\n tokens = []\n call_names = []\n \n offsets = {1: 0} # lines start at 1\n for tup in generate_tokens(StringIO(cell).readline):\n \n tok = Token(*tup)\n \n # token, text, start, end, line = tup\n start_line, start_col = tok.start\n end_line, end_col = tok.end\n if end_line + 1 not in offsets:\n # keep track of offsets for each line\n lines = tok.line.splitlines(True)\n for lineno, line in zip(range(start_line + 1, end_line + 2), lines):\n if lineno not in offsets:\n offsets[lineno] = offsets[lineno-1] + len(line)\n \n offset = offsets[start_line]\n # allow '|foo' to find 'foo' at the beginning of a line\n boundary = cursor_pos + 1 if start_col == 0 else cursor_pos\n if offset + start_col >= boundary:\n # current token starts after the cursor,\n # don't consume it\n break\n \n if tok.token == tokenize2.NAME and not iskeyword(tok.text):\n if names and tokens and tokens[-1].token == tokenize2.OP and tokens[-1].text == '.':\n names[-1] = \"%s.%s\" % (names[-1], tok.text)\n else:\n names.append(tok.text)\n elif tok.token == tokenize2.OP:\n if tok.text == '=' and names:\n # don't inspect the lhs of an assignment\n names.pop(-1)\n if tok.text == '(' and names:\n # if we are inside a function call, inspect the function\n call_names.append(names[-1])\n elif tok.text == ')' and call_names:\n call_names.pop(-1)\n \n tokens.append(tok)\n \n if offsets[end_line] + end_col > cursor_pos:\n # we found the cursor, stop reading\n break\n \n if call_names:\n return call_names[-1]\n elif names:\n return names[-1]\n else:\n return ''\n \n\n", "path": "IPython/utils/tokenutil.py"}]}
1,526
444
gh_patches_debug_21233
rasdani/github-patches
git_diff
liqd__a4-meinberlin-4048
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- (react list) proposal-list-item should show created/modified date **URL:** proposal-list **user:** any **expected behaviour:** list items (tiles) in react list should show `created on/modified on` in front of date (see django list) **behaviour:** list items do not indicate created on or modified on, but shows only the date **important screensize:** any **device & browser:** any **Comment/Question:** Screenshot? Note: This will only affect where this particular react list is used (currently only used for budgeting/proposal-list) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `meinberlin/apps/budgeting/serializers.py` Content: ``` 1 from django.utils import translation 2 from rest_framework import serializers 3 4 from adhocracy4.categories.models import Category 5 6 from .models import Proposal 7 8 9 class CategoryField(serializers.Field): 10 11 def to_internal_value(self, category): 12 if category: 13 return Category.objects.get(pk=category) 14 else: 15 return None 16 17 def to_representation(self, category): 18 return {'id': category.pk, 'name': category.name} 19 20 21 class ProposalSerializer(serializers.ModelSerializer): 22 23 creator = serializers.SerializerMethodField() 24 comment_count = serializers.SerializerMethodField() 25 positive_rating_count = serializers.SerializerMethodField() 26 negative_rating_count = serializers.SerializerMethodField() 27 category = CategoryField() 28 url = serializers.SerializerMethodField() 29 locale = serializers.SerializerMethodField() 30 31 class Meta: 32 model = Proposal 33 fields = ('budget', 'category', 'comment_count', 'created', 'creator', 34 'is_archived', 'name', 'negative_rating_count', 35 'positive_rating_count', 'url', 'pk', 'moderator_feedback', 36 'moderator_feedback_choices', 'locale') 37 read_only_fields = ('budget', 'category', 'comment_count', 'created', 38 'creator', 'is_archived', 'name', 39 'negative_rating_count', 'positive_rating_count', 40 'url', 'pk', 'moderator_feedback', 41 'moderator_feedback_choices', 'locale') 42 43 def get_creator(self, proposal): 44 return proposal.creator.username 45 46 def get_comment_count(self, proposal): 47 if hasattr(proposal, 'comment_count'): 48 return proposal.comment_count 49 else: 50 return 0 51 52 def get_positive_rating_count(self, proposal): 53 if hasattr(proposal, 'positive_rating_count'): 54 return proposal.positive_rating_count 55 else: 56 return 0 57 58 def get_negative_rating_count(self, proposal): 59 if hasattr(proposal, 'negative_rating_count'): 60 return proposal.negative_rating_count 61 else: 62 return 0 63 64 def get_url(self, proposal): 65 return proposal.get_absolute_url() 66 67 def get_moderator_feedback(self, proposal): 68 if hasattr(proposal, 'moderator_feedback'): 69 return proposal.moderator_feedback 70 else: 71 return None 72 73 def get_moderator_feedback_choices(self, proposal): 74 if hasattr(proposal, 'moderator_feedback_choices'): 75 return proposal.moderator_feedback_choices 76 else: 77 return None 78 79 def get_locale(self, proposal): 80 return translation.get_language() 81 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/meinberlin/apps/budgeting/serializers.py b/meinberlin/apps/budgeting/serializers.py --- a/meinberlin/apps/budgeting/serializers.py +++ b/meinberlin/apps/budgeting/serializers.py @@ -30,12 +30,12 @@ class Meta: model = Proposal - fields = ('budget', 'category', 'comment_count', 'created', 'creator', - 'is_archived', 'name', 'negative_rating_count', + fields = ('budget', 'category', 'comment_count', 'created', 'modified', + 'creator', 'is_archived', 'name', 'negative_rating_count', 'positive_rating_count', 'url', 'pk', 'moderator_feedback', 'moderator_feedback_choices', 'locale') read_only_fields = ('budget', 'category', 'comment_count', 'created', - 'creator', 'is_archived', 'name', + 'modified', 'creator', 'is_archived', 'name', 'negative_rating_count', 'positive_rating_count', 'url', 'pk', 'moderator_feedback', 'moderator_feedback_choices', 'locale')
{"golden_diff": "diff --git a/meinberlin/apps/budgeting/serializers.py b/meinberlin/apps/budgeting/serializers.py\n--- a/meinberlin/apps/budgeting/serializers.py\n+++ b/meinberlin/apps/budgeting/serializers.py\n@@ -30,12 +30,12 @@\n \n class Meta:\n model = Proposal\n- fields = ('budget', 'category', 'comment_count', 'created', 'creator',\n- 'is_archived', 'name', 'negative_rating_count',\n+ fields = ('budget', 'category', 'comment_count', 'created', 'modified',\n+ 'creator', 'is_archived', 'name', 'negative_rating_count',\n 'positive_rating_count', 'url', 'pk', 'moderator_feedback',\n 'moderator_feedback_choices', 'locale')\n read_only_fields = ('budget', 'category', 'comment_count', 'created',\n- 'creator', 'is_archived', 'name',\n+ 'modified', 'creator', 'is_archived', 'name',\n 'negative_rating_count', 'positive_rating_count',\n 'url', 'pk', 'moderator_feedback',\n 'moderator_feedback_choices', 'locale')\n", "issue": "(react list) proposal-list-item should show created/modified date\n**URL:** proposal-list\r\n**user:** any\r\n**expected behaviour:** list items (tiles) in react list should show `created on/modified on` in front of date (see django list)\r\n**behaviour:** list items do not indicate created on or modified on, but shows only the date\r\n**important screensize:** any\r\n**device & browser:** any\r\n**Comment/Question:** \r\n\r\nScreenshot?\r\n\r\nNote: This will only affect where this particular react list is used (currently only used for budgeting/proposal-list)\n", "before_files": [{"content": "from django.utils import translation\nfrom rest_framework import serializers\n\nfrom adhocracy4.categories.models import Category\n\nfrom .models import Proposal\n\n\nclass CategoryField(serializers.Field):\n\n def to_internal_value(self, category):\n if category:\n return Category.objects.get(pk=category)\n else:\n return None\n\n def to_representation(self, category):\n return {'id': category.pk, 'name': category.name}\n\n\nclass ProposalSerializer(serializers.ModelSerializer):\n\n creator = serializers.SerializerMethodField()\n comment_count = serializers.SerializerMethodField()\n positive_rating_count = serializers.SerializerMethodField()\n negative_rating_count = serializers.SerializerMethodField()\n category = CategoryField()\n url = serializers.SerializerMethodField()\n locale = serializers.SerializerMethodField()\n\n class Meta:\n model = Proposal\n fields = ('budget', 'category', 'comment_count', 'created', 'creator',\n 'is_archived', 'name', 'negative_rating_count',\n 'positive_rating_count', 'url', 'pk', 'moderator_feedback',\n 'moderator_feedback_choices', 'locale')\n read_only_fields = ('budget', 'category', 'comment_count', 'created',\n 'creator', 'is_archived', 'name',\n 'negative_rating_count', 'positive_rating_count',\n 'url', 'pk', 'moderator_feedback',\n 'moderator_feedback_choices', 'locale')\n\n def get_creator(self, proposal):\n return proposal.creator.username\n\n def get_comment_count(self, proposal):\n if hasattr(proposal, 'comment_count'):\n return proposal.comment_count\n else:\n return 0\n\n def get_positive_rating_count(self, proposal):\n if hasattr(proposal, 'positive_rating_count'):\n return proposal.positive_rating_count\n else:\n return 0\n\n def get_negative_rating_count(self, proposal):\n if hasattr(proposal, 'negative_rating_count'):\n return proposal.negative_rating_count\n else:\n return 0\n\n def get_url(self, proposal):\n return proposal.get_absolute_url()\n\n def get_moderator_feedback(self, proposal):\n if hasattr(proposal, 'moderator_feedback'):\n return proposal.moderator_feedback\n else:\n return None\n\n def get_moderator_feedback_choices(self, proposal):\n if hasattr(proposal, 'moderator_feedback_choices'):\n return proposal.moderator_feedback_choices\n else:\n return None\n\n def get_locale(self, proposal):\n return translation.get_language()\n", "path": "meinberlin/apps/budgeting/serializers.py"}], "after_files": [{"content": "from django.utils import translation\nfrom rest_framework import serializers\n\nfrom adhocracy4.categories.models import Category\n\nfrom .models import Proposal\n\n\nclass CategoryField(serializers.Field):\n\n def to_internal_value(self, category):\n if category:\n return Category.objects.get(pk=category)\n else:\n return None\n\n def to_representation(self, category):\n return {'id': category.pk, 'name': category.name}\n\n\nclass ProposalSerializer(serializers.ModelSerializer):\n\n creator = serializers.SerializerMethodField()\n comment_count = serializers.SerializerMethodField()\n positive_rating_count = serializers.SerializerMethodField()\n negative_rating_count = serializers.SerializerMethodField()\n category = CategoryField()\n url = serializers.SerializerMethodField()\n locale = serializers.SerializerMethodField()\n\n class Meta:\n model = Proposal\n fields = ('budget', 'category', 'comment_count', 'created', 'modified',\n 'creator', 'is_archived', 'name', 'negative_rating_count',\n 'positive_rating_count', 'url', 'pk', 'moderator_feedback',\n 'moderator_feedback_choices', 'locale')\n read_only_fields = ('budget', 'category', 'comment_count', 'created',\n 'modified', 'creator', 'is_archived', 'name',\n 'negative_rating_count', 'positive_rating_count',\n 'url', 'pk', 'moderator_feedback',\n 'moderator_feedback_choices', 'locale')\n\n def get_creator(self, proposal):\n return proposal.creator.username\n\n def get_comment_count(self, proposal):\n if hasattr(proposal, 'comment_count'):\n return proposal.comment_count\n else:\n return 0\n\n def get_positive_rating_count(self, proposal):\n if hasattr(proposal, 'positive_rating_count'):\n return proposal.positive_rating_count\n else:\n return 0\n\n def get_negative_rating_count(self, proposal):\n if hasattr(proposal, 'negative_rating_count'):\n return proposal.negative_rating_count\n else:\n return 0\n\n def get_url(self, proposal):\n return proposal.get_absolute_url()\n\n def get_moderator_feedback(self, proposal):\n if hasattr(proposal, 'moderator_feedback'):\n return proposal.moderator_feedback\n else:\n return None\n\n def get_moderator_feedback_choices(self, proposal):\n if hasattr(proposal, 'moderator_feedback_choices'):\n return proposal.moderator_feedback_choices\n else:\n return None\n\n def get_locale(self, proposal):\n return translation.get_language()\n", "path": "meinberlin/apps/budgeting/serializers.py"}]}
1,069
264
gh_patches_debug_5471
rasdani/github-patches
git_diff
archlinux__archinstall-565
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Exit code 256 in networking.py While doing PR #562 I noticed that we check if exit code is 256. I thought for any process it was an 8 bit value and could be 0 through 255, so I'm not sure about this logic. I'd like to figure out why it was written in this manner and it probably should be fixed. Maybe drop the exit code check entirely? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `archinstall/lib/networking.py` Content: ``` 1 import fcntl 2 import logging 3 import os 4 import socket 5 import struct 6 from collections import OrderedDict 7 8 from .exceptions import * 9 from .general import SysCommand 10 from .output import log 11 from .storage import storage 12 13 14 def get_hw_addr(ifname): 15 s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) 16 info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', bytes(ifname, 'utf-8')[:15])) 17 return ':'.join('%02x' % b for b in info[18:24]) 18 19 20 def list_interfaces(skip_loopback=True): 21 interfaces = OrderedDict() 22 for index, iface in socket.if_nameindex(): 23 if skip_loopback and iface == "lo": 24 continue 25 26 mac = get_hw_addr(iface).replace(':', '-').lower() 27 interfaces[mac] = iface 28 return interfaces 29 30 31 def check_mirror_reachable(): 32 if (exit_code := SysCommand("pacman -Sy").exit_code) == 0: 33 return True 34 elif exit_code == 256: 35 if os.geteuid() != 0: 36 log("check_mirror_reachable() uses 'pacman -Sy' which requires root.", level=logging.ERROR, fg="red") 37 38 return False 39 40 41 def enrich_iface_types(interfaces: dict): 42 result = {} 43 for iface in interfaces: 44 if os.path.isdir(f"/sys/class/net/{iface}/bridge/"): 45 result[iface] = 'BRIDGE' 46 elif os.path.isfile(f"/sys/class/net/{iface}/tun_flags"): 47 # ethtool -i {iface} 48 result[iface] = 'TUN/TAP' 49 elif os.path.isdir(f"/sys/class/net/{iface}/device"): 50 if os.path.isdir(f"/sys/class/net/{iface}/wireless/"): 51 result[iface] = 'WIRELESS' 52 else: 53 result[iface] = 'PHYSICAL' 54 else: 55 result[iface] = 'UNKNOWN' 56 return result 57 58 59 def get_interface_from_mac(mac): 60 return list_interfaces().get(mac.lower(), None) 61 62 63 def wireless_scan(interface): 64 interfaces = enrich_iface_types(list_interfaces().values()) 65 if interfaces[interface] != 'WIRELESS': 66 raise HardwareIncompatibilityError(f"Interface {interface} is not a wireless interface: {interfaces}") 67 68 SysCommand(f"iwctl station {interface} scan") 69 70 if '_WIFI' not in storage: 71 storage['_WIFI'] = {} 72 if interface not in storage['_WIFI']: 73 storage['_WIFI'][interface] = {} 74 75 storage['_WIFI'][interface]['scanning'] = True 76 77 78 # TODO: Full WiFi experience might get evolved in the future, pausing for now 2021-01-25 79 def get_wireless_networks(interface): 80 # TODO: Make this oneliner pritter to check if the interface is scanning or not. 81 if '_WIFI' not in storage or interface not in storage['_WIFI'] or storage['_WIFI'][interface].get('scanning', False) is False: 82 import time 83 84 wireless_scan(interface) 85 time.sleep(5) 86 87 for line in SysCommand(f"iwctl station {interface} get-networks"): 88 print(line) 89 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/archinstall/lib/networking.py b/archinstall/lib/networking.py --- a/archinstall/lib/networking.py +++ b/archinstall/lib/networking.py @@ -31,9 +31,8 @@ def check_mirror_reachable(): if (exit_code := SysCommand("pacman -Sy").exit_code) == 0: return True - elif exit_code == 256: - if os.geteuid() != 0: - log("check_mirror_reachable() uses 'pacman -Sy' which requires root.", level=logging.ERROR, fg="red") + elif os.geteuid() != 0: + log("check_mirror_reachable() uses 'pacman -Sy' which requires root.", level=logging.ERROR, fg="red") return False
{"golden_diff": "diff --git a/archinstall/lib/networking.py b/archinstall/lib/networking.py\n--- a/archinstall/lib/networking.py\n+++ b/archinstall/lib/networking.py\n@@ -31,9 +31,8 @@\n def check_mirror_reachable():\n \tif (exit_code := SysCommand(\"pacman -Sy\").exit_code) == 0:\n \t\treturn True\n-\telif exit_code == 256:\n-\t\tif os.geteuid() != 0:\n-\t\t\tlog(\"check_mirror_reachable() uses 'pacman -Sy' which requires root.\", level=logging.ERROR, fg=\"red\")\n+\telif os.geteuid() != 0:\n+\t\tlog(\"check_mirror_reachable() uses 'pacman -Sy' which requires root.\", level=logging.ERROR, fg=\"red\")\n \n \treturn False\n", "issue": "Exit code 256 in networking.py\nWhile doing PR #562 I noticed that we check if exit code is 256. I thought for any process it was an 8 bit value and could be 0 through 255, so I'm not sure about this logic. I'd like to figure out why it was written in this manner and it probably should be fixed. Maybe drop the exit code check entirely? \n", "before_files": [{"content": "import fcntl\nimport logging\nimport os\nimport socket\nimport struct\nfrom collections import OrderedDict\n\nfrom .exceptions import *\nfrom .general import SysCommand\nfrom .output import log\nfrom .storage import storage\n\n\ndef get_hw_addr(ifname):\n\ts = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\tinfo = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', bytes(ifname, 'utf-8')[:15]))\n\treturn ':'.join('%02x' % b for b in info[18:24])\n\n\ndef list_interfaces(skip_loopback=True):\n\tinterfaces = OrderedDict()\n\tfor index, iface in socket.if_nameindex():\n\t\tif skip_loopback and iface == \"lo\":\n\t\t\tcontinue\n\n\t\tmac = get_hw_addr(iface).replace(':', '-').lower()\n\t\tinterfaces[mac] = iface\n\treturn interfaces\n\n\ndef check_mirror_reachable():\n\tif (exit_code := SysCommand(\"pacman -Sy\").exit_code) == 0:\n\t\treturn True\n\telif exit_code == 256:\n\t\tif os.geteuid() != 0:\n\t\t\tlog(\"check_mirror_reachable() uses 'pacman -Sy' which requires root.\", level=logging.ERROR, fg=\"red\")\n\n\treturn False\n\n\ndef enrich_iface_types(interfaces: dict):\n\tresult = {}\n\tfor iface in interfaces:\n\t\tif os.path.isdir(f\"/sys/class/net/{iface}/bridge/\"):\n\t\t\tresult[iface] = 'BRIDGE'\n\t\telif os.path.isfile(f\"/sys/class/net/{iface}/tun_flags\"):\n\t\t\t# ethtool -i {iface}\n\t\t\tresult[iface] = 'TUN/TAP'\n\t\telif os.path.isdir(f\"/sys/class/net/{iface}/device\"):\n\t\t\tif os.path.isdir(f\"/sys/class/net/{iface}/wireless/\"):\n\t\t\t\tresult[iface] = 'WIRELESS'\n\t\t\telse:\n\t\t\t\tresult[iface] = 'PHYSICAL'\n\t\telse:\n\t\t\tresult[iface] = 'UNKNOWN'\n\treturn result\n\n\ndef get_interface_from_mac(mac):\n\treturn list_interfaces().get(mac.lower(), None)\n\n\ndef wireless_scan(interface):\n\tinterfaces = enrich_iface_types(list_interfaces().values())\n\tif interfaces[interface] != 'WIRELESS':\n\t\traise HardwareIncompatibilityError(f\"Interface {interface} is not a wireless interface: {interfaces}\")\n\n\tSysCommand(f\"iwctl station {interface} scan\")\n\n\tif '_WIFI' not in storage:\n\t\tstorage['_WIFI'] = {}\n\tif interface not in storage['_WIFI']:\n\t\tstorage['_WIFI'][interface] = {}\n\n\tstorage['_WIFI'][interface]['scanning'] = True\n\n\n# TODO: Full WiFi experience might get evolved in the future, pausing for now 2021-01-25\ndef get_wireless_networks(interface):\n\t# TODO: Make this oneliner pritter to check if the interface is scanning or not.\n\tif '_WIFI' not in storage or interface not in storage['_WIFI'] or storage['_WIFI'][interface].get('scanning', False) is False:\n\t\timport time\n\n\t\twireless_scan(interface)\n\t\ttime.sleep(5)\n\n\tfor line in SysCommand(f\"iwctl station {interface} get-networks\"):\n\t\tprint(line)\n", "path": "archinstall/lib/networking.py"}], "after_files": [{"content": "import fcntl\nimport logging\nimport os\nimport socket\nimport struct\nfrom collections import OrderedDict\n\nfrom .exceptions import *\nfrom .general import SysCommand\nfrom .output import log\nfrom .storage import storage\n\n\ndef get_hw_addr(ifname):\n\ts = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\tinfo = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', bytes(ifname, 'utf-8')[:15]))\n\treturn ':'.join('%02x' % b for b in info[18:24])\n\n\ndef list_interfaces(skip_loopback=True):\n\tinterfaces = OrderedDict()\n\tfor index, iface in socket.if_nameindex():\n\t\tif skip_loopback and iface == \"lo\":\n\t\t\tcontinue\n\n\t\tmac = get_hw_addr(iface).replace(':', '-').lower()\n\t\tinterfaces[mac] = iface\n\treturn interfaces\n\n\ndef check_mirror_reachable():\n\tif (exit_code := SysCommand(\"pacman -Sy\").exit_code) == 0:\n\t\treturn True\n\telif os.geteuid() != 0:\n\t\tlog(\"check_mirror_reachable() uses 'pacman -Sy' which requires root.\", level=logging.ERROR, fg=\"red\")\n\n\treturn False\n\n\ndef enrich_iface_types(interfaces: dict):\n\tresult = {}\n\tfor iface in interfaces:\n\t\tif os.path.isdir(f\"/sys/class/net/{iface}/bridge/\"):\n\t\t\tresult[iface] = 'BRIDGE'\n\t\telif os.path.isfile(f\"/sys/class/net/{iface}/tun_flags\"):\n\t\t\t# ethtool -i {iface}\n\t\t\tresult[iface] = 'TUN/TAP'\n\t\telif os.path.isdir(f\"/sys/class/net/{iface}/device\"):\n\t\t\tif os.path.isdir(f\"/sys/class/net/{iface}/wireless/\"):\n\t\t\t\tresult[iface] = 'WIRELESS'\n\t\t\telse:\n\t\t\t\tresult[iface] = 'PHYSICAL'\n\t\telse:\n\t\t\tresult[iface] = 'UNKNOWN'\n\treturn result\n\n\ndef get_interface_from_mac(mac):\n\treturn list_interfaces().get(mac.lower(), None)\n\n\ndef wireless_scan(interface):\n\tinterfaces = enrich_iface_types(list_interfaces().values())\n\tif interfaces[interface] != 'WIRELESS':\n\t\traise HardwareIncompatibilityError(f\"Interface {interface} is not a wireless interface: {interfaces}\")\n\n\tSysCommand(f\"iwctl station {interface} scan\")\n\n\tif '_WIFI' not in storage:\n\t\tstorage['_WIFI'] = {}\n\tif interface not in storage['_WIFI']:\n\t\tstorage['_WIFI'][interface] = {}\n\n\tstorage['_WIFI'][interface]['scanning'] = True\n\n\n# TODO: Full WiFi experience might get evolved in the future, pausing for now 2021-01-25\ndef get_wireless_networks(interface):\n\t# TODO: Make this oneliner pritter to check if the interface is scanning or not.\n\tif '_WIFI' not in storage or interface not in storage['_WIFI'] or storage['_WIFI'][interface].get('scanning', False) is False:\n\t\timport time\n\n\t\twireless_scan(interface)\n\t\ttime.sleep(5)\n\n\tfor line in SysCommand(f\"iwctl station {interface} get-networks\"):\n\t\tprint(line)\n", "path": "archinstall/lib/networking.py"}]}
1,252
173
gh_patches_debug_3036
rasdani/github-patches
git_diff
archlinux__archinstall-184
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- gnome-extra provides WAY too much bloatware I can't imagine most people wanting all the packages this installs on a new installation. Most of these applications are things like games and advanced tools like dconf-editor that your average user should not be touching. Some of them are nice to have but can be installed later manually instead of during initial installation. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `profiles/applications/gnome.py` Content: ``` 1 import archinstall 2 3 installation.add_additional_packages("gnome gnome-extra gdm") # We'll create a gnome-minimal later, but for now, we'll avoid issues by giving more than we need. 4 # Note: gdm should be part of the gnome group, but adding it here for clarity ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/profiles/applications/gnome.py b/profiles/applications/gnome.py --- a/profiles/applications/gnome.py +++ b/profiles/applications/gnome.py @@ -1,4 +1,4 @@ import archinstall -installation.add_additional_packages("gnome gnome-extra gdm") # We'll create a gnome-minimal later, but for now, we'll avoid issues by giving more than we need. -# Note: gdm should be part of the gnome group, but adding it here for clarity \ No newline at end of file +installation.add_additional_packages("gnome gnome-tweaks gnome-todo gnome-sound-recorder evolution gdm") +# Note: gdm should be part of the gnome group, but adding it here for clarity
{"golden_diff": "diff --git a/profiles/applications/gnome.py b/profiles/applications/gnome.py\n--- a/profiles/applications/gnome.py\n+++ b/profiles/applications/gnome.py\n@@ -1,4 +1,4 @@\n import archinstall\n \n-installation.add_additional_packages(\"gnome gnome-extra gdm\") # We'll create a gnome-minimal later, but for now, we'll avoid issues by giving more than we need.\n-# Note: gdm should be part of the gnome group, but adding it here for clarity\n\\ No newline at end of file\n+installation.add_additional_packages(\"gnome gnome-tweaks gnome-todo gnome-sound-recorder evolution gdm\")\n+# Note: gdm should be part of the gnome group, but adding it here for clarity\n", "issue": "gnome-extra provides WAY too much bloatware\nI can't imagine most people wanting all the packages this installs on a new installation. Most of these applications are things like games and advanced tools like dconf-editor that your average user should not be touching. Some of them are nice to have but can be installed later manually instead of during initial installation.\n", "before_files": [{"content": "import archinstall\n\ninstallation.add_additional_packages(\"gnome gnome-extra gdm\") # We'll create a gnome-minimal later, but for now, we'll avoid issues by giving more than we need.\n# Note: gdm should be part of the gnome group, but adding it here for clarity", "path": "profiles/applications/gnome.py"}], "after_files": [{"content": "import archinstall\n\ninstallation.add_additional_packages(\"gnome gnome-tweaks gnome-todo gnome-sound-recorder evolution gdm\")\n# Note: gdm should be part of the gnome group, but adding it here for clarity\n", "path": "profiles/applications/gnome.py"}]}
396
165
gh_patches_debug_5503
rasdani/github-patches
git_diff
getsentry__sentry-3604
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- New Auth Tokens cannot pass sudo check The new auth token based API access never passes sudo: ``` python def is_considered_sudo(request): return request.is_sudo() or \ isinstance(request.auth, ApiKey) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/sentry/api/decorators.py` Content: ``` 1 from __future__ import absolute_import 2 3 import json 4 5 from django.http import HttpResponse 6 from functools import wraps 7 8 from sentry.models import ApiKey 9 10 11 def is_considered_sudo(request): 12 return request.is_sudo() or \ 13 isinstance(request.auth, ApiKey) 14 15 16 def sudo_required(func): 17 @wraps(func) 18 def wrapped(self, request, *args, **kwargs): 19 # If we are already authenticated through an API key we do not 20 # care about the sudo flag. 21 if not is_considered_sudo(request): 22 # TODO(dcramer): support some kind of auth flow to allow this 23 # externally 24 data = { 25 "error": "Account verification required.", 26 "sudoRequired": True, 27 "username": request.user.username, 28 } 29 return HttpResponse(json.dumps(data), status=401) 30 return func(self, request, *args, **kwargs) 31 return wrapped 32 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/sentry/api/decorators.py b/src/sentry/api/decorators.py --- a/src/sentry/api/decorators.py +++ b/src/sentry/api/decorators.py @@ -5,12 +5,13 @@ from django.http import HttpResponse from functools import wraps -from sentry.models import ApiKey +from sentry.models import ApiKey, ApiToken def is_considered_sudo(request): return request.is_sudo() or \ - isinstance(request.auth, ApiKey) + isinstance(request.auth, ApiKey) or \ + isinstance(request.auth, ApiToken) def sudo_required(func):
{"golden_diff": "diff --git a/src/sentry/api/decorators.py b/src/sentry/api/decorators.py\n--- a/src/sentry/api/decorators.py\n+++ b/src/sentry/api/decorators.py\n@@ -5,12 +5,13 @@\n from django.http import HttpResponse\n from functools import wraps\n \n-from sentry.models import ApiKey\n+from sentry.models import ApiKey, ApiToken\n \n \n def is_considered_sudo(request):\n return request.is_sudo() or \\\n- isinstance(request.auth, ApiKey)\n+ isinstance(request.auth, ApiKey) or \\\n+ isinstance(request.auth, ApiToken)\n \n \n def sudo_required(func):\n", "issue": "New Auth Tokens cannot pass sudo check\nThe new auth token based API access never passes sudo:\n\n``` python\ndef is_considered_sudo(request):\n return request.is_sudo() or \\\n isinstance(request.auth, ApiKey)\n```\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport json\n\nfrom django.http import HttpResponse\nfrom functools import wraps\n\nfrom sentry.models import ApiKey\n\n\ndef is_considered_sudo(request):\n return request.is_sudo() or \\\n isinstance(request.auth, ApiKey)\n\n\ndef sudo_required(func):\n @wraps(func)\n def wrapped(self, request, *args, **kwargs):\n # If we are already authenticated through an API key we do not\n # care about the sudo flag.\n if not is_considered_sudo(request):\n # TODO(dcramer): support some kind of auth flow to allow this\n # externally\n data = {\n \"error\": \"Account verification required.\",\n \"sudoRequired\": True,\n \"username\": request.user.username,\n }\n return HttpResponse(json.dumps(data), status=401)\n return func(self, request, *args, **kwargs)\n return wrapped\n", "path": "src/sentry/api/decorators.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nimport json\n\nfrom django.http import HttpResponse\nfrom functools import wraps\n\nfrom sentry.models import ApiKey, ApiToken\n\n\ndef is_considered_sudo(request):\n return request.is_sudo() or \\\n isinstance(request.auth, ApiKey) or \\\n isinstance(request.auth, ApiToken)\n\n\ndef sudo_required(func):\n @wraps(func)\n def wrapped(self, request, *args, **kwargs):\n # If we are already authenticated through an API key we do not\n # care about the sudo flag.\n if not is_considered_sudo(request):\n # TODO(dcramer): support some kind of auth flow to allow this\n # externally\n data = {\n \"error\": \"Account verification required.\",\n \"sudoRequired\": True,\n \"username\": request.user.username,\n }\n return HttpResponse(json.dumps(data), status=401)\n return func(self, request, *args, **kwargs)\n return wrapped\n", "path": "src/sentry/api/decorators.py"}]}
568
141
gh_patches_debug_18519
rasdani/github-patches
git_diff
quantumlib__Cirq-3527
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Prevent installing both cirq and cirq-unstable. **Is your feature request related to a use case or problem? Please describe.** If `cirq-unstable` is installed in a virtualenv and you `pip install cirq`, the virtualenv gets into a bad state. This is because they are different packages with separate metadata, but they put the python code in the same location in a `cirq` folder in site-packages. So pip thinks both are installed but in fact only the most-recently installed package is "active". If you try to fix such a situation by uninstalling one package, the code gets removed but pip still thinks the other package is installed. The other package is now in a broken state (can't be imported, etc.). The only way to recover is to uninstall _both_ packages and then install one of them again. **Describe the solution you'd like** I'd like `pip install cirq` to fail if `cirq-unstable` is installed, and vice-versa. I did a quick glance at setuptools docs to see if there is some way to specify packages that _must not_ be present, but I couldn't see anything. It also doesn't seem to work to add a package requirement like `cirq != *`. I'm not sure if there's any other mechanism we can use to accomplish this. **What is the urgency from your perspective for this issue? Is it blocking important work?** P3 - I'm not really blocked by it, it is an idea I'd like to discuss / suggestion based on principle We have internal workarounds for this to try to keep people's virtualenvs in a good state if they accidentally install both versions, but it'd be great if we could prevent the problem from happening at all. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 # Copyright 2018 The Cirq Developers 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # https://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import io 16 import os 17 from setuptools import find_packages, setup 18 19 # This reads the __version__ variable from cirq/_version.py 20 __version__ = '' 21 exec(open('cirq/_version.py').read()) 22 23 name = 'cirq' 24 25 description = ( 26 'A framework for creating, editing, and invoking ' 27 'Noisy Intermediate Scale Quantum (NISQ) circuits.' 28 ) 29 30 # README file as long_description. 31 long_description = io.open('README.rst', encoding='utf-8').read() 32 33 # If CIRQ_UNSTABLE_VERSION is set then we use cirq-unstable as the name of the package 34 # and update the version to this value. 35 if 'CIRQ_UNSTABLE_VERSION' in os.environ: 36 name = 'cirq-unstable' 37 __version__ = os.environ['CIRQ_UNSTABLE_VERSION'] 38 long_description = ( 39 "**This is a development version of Cirq and may be " 40 "unstable.**\n\n**For the latest stable release of Cirq " 41 "see**\n`here <https://pypi.org/project/cirq>`__.\n\n" + long_description 42 ) 43 44 # Read in requirements 45 requirements = open('requirements.txt').readlines() 46 requirements = [r.strip() for r in requirements] 47 contrib_requirements = open('cirq/contrib/contrib-requirements.txt').readlines() 48 contrib_requirements = [r.strip() for r in contrib_requirements] 49 dev_requirements = open('dev_tools/conf/pip-list-dev-tools.txt').readlines() 50 dev_requirements = [r.strip() for r in dev_requirements] 51 52 cirq_packages = ['cirq'] + ['cirq.' + package for package in find_packages(where='cirq')] 53 54 # Sanity check 55 assert __version__, 'Version string cannot be empty' 56 57 setup( 58 name=name, 59 version=__version__, 60 url='http://github.com/quantumlib/cirq', 61 author='The Cirq Developers', 62 author_email='[email protected]', 63 python_requires=('>=3.6.0'), 64 install_requires=requirements, 65 extras_require={ 66 'contrib': contrib_requirements, 67 'dev_env': dev_requirements + contrib_requirements, 68 }, 69 license='Apache 2', 70 description=description, 71 long_description=long_description, 72 packages=cirq_packages, 73 package_data={ 74 'cirq': ['py.typed'], 75 'cirq.google.api.v1': ['*.proto', '*.pyi'], 76 'cirq.google.api.v2': ['*.proto', '*.pyi'], 77 'cirq.protocols.json_test_data': ['*'], 78 }, 79 ) 80 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -30,11 +30,13 @@ # README file as long_description. long_description = io.open('README.rst', encoding='utf-8').read() -# If CIRQ_UNSTABLE_VERSION is set then we use cirq-unstable as the name of the package -# and update the version to this value. -if 'CIRQ_UNSTABLE_VERSION' in os.environ: - name = 'cirq-unstable' - __version__ = os.environ['CIRQ_UNSTABLE_VERSION'] +# If CIRQ_PRE_RELEASE_VERSION is set then we update the version to this value. +# It is assumed that it ends with one of `.devN`, `.aN`, `.bN`, `.rcN` and hence +# it will be a pre-release version on PyPi. See +# https://packaging.python.org/guides/distributing-packages-using-setuptools/#pre-release-versioning +# for more details. +if 'CIRQ_PRE_RELEASE_VERSION' in os.environ: + __version__ = os.environ['CIRQ_PRE_RELEASE_VERSION'] long_description = ( "**This is a development version of Cirq and may be " "unstable.**\n\n**For the latest stable release of Cirq "
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -30,11 +30,13 @@\n # README file as long_description.\n long_description = io.open('README.rst', encoding='utf-8').read()\n \n-# If CIRQ_UNSTABLE_VERSION is set then we use cirq-unstable as the name of the package\n-# and update the version to this value.\n-if 'CIRQ_UNSTABLE_VERSION' in os.environ:\n- name = 'cirq-unstable'\n- __version__ = os.environ['CIRQ_UNSTABLE_VERSION']\n+# If CIRQ_PRE_RELEASE_VERSION is set then we update the version to this value.\n+# It is assumed that it ends with one of `.devN`, `.aN`, `.bN`, `.rcN` and hence\n+# it will be a pre-release version on PyPi. See\n+# https://packaging.python.org/guides/distributing-packages-using-setuptools/#pre-release-versioning\n+# for more details.\n+if 'CIRQ_PRE_RELEASE_VERSION' in os.environ:\n+ __version__ = os.environ['CIRQ_PRE_RELEASE_VERSION']\n long_description = (\n \"**This is a development version of Cirq and may be \"\n \"unstable.**\\n\\n**For the latest stable release of Cirq \"\n", "issue": "Prevent installing both cirq and cirq-unstable.\n**Is your feature request related to a use case or problem? Please describe.**\r\n\r\nIf `cirq-unstable` is installed in a virtualenv and you `pip install cirq`, the virtualenv gets into a bad state. This is because they are different packages with separate metadata, but they put the python code in the same location in a `cirq` folder in site-packages. So pip thinks both are installed but in fact only the most-recently installed package is \"active\".\r\n\r\nIf you try to fix such a situation by uninstalling one package, the code gets removed but pip still thinks the other package is installed. The other package is now in a broken state (can't be imported, etc.). The only way to recover is to uninstall _both_ packages and then install one of them again.\r\n\r\n**Describe the solution you'd like**\r\n\r\nI'd like `pip install cirq` to fail if `cirq-unstable` is installed, and vice-versa. I did a quick glance at setuptools docs to see if there is some way to specify packages that _must not_ be present, but I couldn't see anything. It also doesn't seem to work to add a package requirement like `cirq != *`. I'm not sure if there's any other mechanism we can use to accomplish this.\r\n\r\n**What is the urgency from your perspective for this issue? Is it blocking important work?**\r\n\r\nP3 - I'm not really blocked by it, it is an idea I'd like to discuss / suggestion based on principle \r\n\r\nWe have internal workarounds for this to try to keep people's virtualenvs in a good state if they accidentally install both versions, but it'd be great if we could prevent the problem from happening at all.\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\nfrom setuptools import find_packages, setup\n\n# This reads the __version__ variable from cirq/_version.py\n__version__ = ''\nexec(open('cirq/_version.py').read())\n\nname = 'cirq'\n\ndescription = (\n 'A framework for creating, editing, and invoking '\n 'Noisy Intermediate Scale Quantum (NISQ) circuits.'\n)\n\n# README file as long_description.\nlong_description = io.open('README.rst', encoding='utf-8').read()\n\n# If CIRQ_UNSTABLE_VERSION is set then we use cirq-unstable as the name of the package\n# and update the version to this value.\nif 'CIRQ_UNSTABLE_VERSION' in os.environ:\n name = 'cirq-unstable'\n __version__ = os.environ['CIRQ_UNSTABLE_VERSION']\n long_description = (\n \"**This is a development version of Cirq and may be \"\n \"unstable.**\\n\\n**For the latest stable release of Cirq \"\n \"see**\\n`here <https://pypi.org/project/cirq>`__.\\n\\n\" + long_description\n )\n\n# Read in requirements\nrequirements = open('requirements.txt').readlines()\nrequirements = [r.strip() for r in requirements]\ncontrib_requirements = open('cirq/contrib/contrib-requirements.txt').readlines()\ncontrib_requirements = [r.strip() for r in contrib_requirements]\ndev_requirements = open('dev_tools/conf/pip-list-dev-tools.txt').readlines()\ndev_requirements = [r.strip() for r in dev_requirements]\n\ncirq_packages = ['cirq'] + ['cirq.' + package for package in find_packages(where='cirq')]\n\n# Sanity check\nassert __version__, 'Version string cannot be empty'\n\nsetup(\n name=name,\n version=__version__,\n url='http://github.com/quantumlib/cirq',\n author='The Cirq Developers',\n author_email='[email protected]',\n python_requires=('>=3.6.0'),\n install_requires=requirements,\n extras_require={\n 'contrib': contrib_requirements,\n 'dev_env': dev_requirements + contrib_requirements,\n },\n license='Apache 2',\n description=description,\n long_description=long_description,\n packages=cirq_packages,\n package_data={\n 'cirq': ['py.typed'],\n 'cirq.google.api.v1': ['*.proto', '*.pyi'],\n 'cirq.google.api.v2': ['*.proto', '*.pyi'],\n 'cirq.protocols.json_test_data': ['*'],\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\nfrom setuptools import find_packages, setup\n\n# This reads the __version__ variable from cirq/_version.py\n__version__ = ''\nexec(open('cirq/_version.py').read())\n\nname = 'cirq'\n\ndescription = (\n 'A framework for creating, editing, and invoking '\n 'Noisy Intermediate Scale Quantum (NISQ) circuits.'\n)\n\n# README file as long_description.\nlong_description = io.open('README.rst', encoding='utf-8').read()\n\n# If CIRQ_PRE_RELEASE_VERSION is set then we update the version to this value.\n# It is assumed that it ends with one of `.devN`, `.aN`, `.bN`, `.rcN` and hence\n# it will be a pre-release version on PyPi. See\n# https://packaging.python.org/guides/distributing-packages-using-setuptools/#pre-release-versioning\n# for more details.\nif 'CIRQ_PRE_RELEASE_VERSION' in os.environ:\n __version__ = os.environ['CIRQ_PRE_RELEASE_VERSION']\n long_description = (\n \"**This is a development version of Cirq and may be \"\n \"unstable.**\\n\\n**For the latest stable release of Cirq \"\n \"see**\\n`here <https://pypi.org/project/cirq>`__.\\n\\n\" + long_description\n )\n\n# Read in requirements\nrequirements = open('requirements.txt').readlines()\nrequirements = [r.strip() for r in requirements]\ncontrib_requirements = open('cirq/contrib/contrib-requirements.txt').readlines()\ncontrib_requirements = [r.strip() for r in contrib_requirements]\ndev_requirements = open('dev_tools/conf/pip-list-dev-tools.txt').readlines()\ndev_requirements = [r.strip() for r in dev_requirements]\n\ncirq_packages = ['cirq'] + ['cirq.' + package for package in find_packages(where='cirq')]\n\n# Sanity check\nassert __version__, 'Version string cannot be empty'\n\nsetup(\n name=name,\n version=__version__,\n url='http://github.com/quantumlib/cirq',\n author='The Cirq Developers',\n author_email='[email protected]',\n python_requires=('>=3.6.0'),\n install_requires=requirements,\n extras_require={\n 'contrib': contrib_requirements,\n 'dev_env': dev_requirements + contrib_requirements,\n },\n license='Apache 2',\n description=description,\n long_description=long_description,\n packages=cirq_packages,\n package_data={\n 'cirq': ['py.typed'],\n 'cirq.google.api.v1': ['*.proto', '*.pyi'],\n 'cirq.google.api.v2': ['*.proto', '*.pyi'],\n 'cirq.protocols.json_test_data': ['*'],\n },\n)\n", "path": "setup.py"}]}
1,468
288
gh_patches_debug_9804
rasdani/github-patches
git_diff
bokeh__bokeh-9604
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [FEATURE] BokehJS embed_item should return a reference to the plot. Bokeh.embed.embed_item currently returns nothing. I propose it should return a reference to the plot object so I don't have to go fishing (Bokeh.index[plotData.root_id]) in Bokeh.index to do things like call resize_layout() manually. Honestly any sort of 'official' method for doing this would be super useful. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `examples/embed/json_item.py` Content: ``` 1 import json 2 3 from flask import Flask 4 from jinja2 import Template 5 6 from bokeh.embed import json_item 7 from bokeh.plotting import figure 8 from bokeh.resources import CDN 9 from bokeh.sampledata.iris import flowers 10 11 app = Flask(__name__) 12 13 page = Template(""" 14 <!DOCTYPE html> 15 <html lang="en"> 16 <head> 17 {{ resources }} 18 </head> 19 20 <body> 21 <div id="myplot"></div> 22 <div id="myplot2"></div> 23 <script> 24 fetch('/plot') 25 .then(function(response) { return response.json(); }) 26 .then(function(item) { Bokeh.embed.embed_item(item); }) 27 </script> 28 <script> 29 fetch('/plot2') 30 .then(function(response) { return response.json(); }) 31 .then(function(item) { Bokeh.embed.embed_item(item, "myplot2"); }) 32 </script> 33 </body> 34 """) 35 36 colormap = {'setosa': 'red', 'versicolor': 'green', 'virginica': 'blue'} 37 colors = [colormap[x] for x in flowers['species']] 38 39 def make_plot(x, y): 40 p = figure(title = "Iris Morphology", sizing_mode="fixed", plot_width=400, plot_height=400) 41 p.xaxis.axis_label = x 42 p.yaxis.axis_label = y 43 p.circle(flowers[x], flowers[y], color=colors, fill_alpha=0.2, size=10) 44 return p 45 46 @app.route('/') 47 def root(): 48 return page.render(resources=CDN.render()) 49 50 @app.route('/plot') 51 def plot(): 52 p = make_plot('petal_width', 'petal_length') 53 return json.dumps(json_item(p, "myplot")) 54 55 @app.route('/plot2') 56 def plot2(): 57 p = make_plot('sepal_width', 'sepal_length') 58 return json.dumps(json_item(p)) 59 60 if __name__ == '__main__': 61 app.run() 62 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/examples/embed/json_item.py b/examples/embed/json_item.py --- a/examples/embed/json_item.py +++ b/examples/embed/json_item.py @@ -23,12 +23,12 @@ <script> fetch('/plot') .then(function(response) { return response.json(); }) - .then(function(item) { Bokeh.embed.embed_item(item); }) + .then(function(item) { return Bokeh.embed.embed_item(item); }) </script> <script> fetch('/plot2') .then(function(response) { return response.json(); }) - .then(function(item) { Bokeh.embed.embed_item(item, "myplot2"); }) + .then(function(item) { return Bokeh.embed.embed_item(item, "myplot2"); }) </script> </body> """)
{"golden_diff": "diff --git a/examples/embed/json_item.py b/examples/embed/json_item.py\n--- a/examples/embed/json_item.py\n+++ b/examples/embed/json_item.py\n@@ -23,12 +23,12 @@\n <script>\n fetch('/plot')\n .then(function(response) { return response.json(); })\n- .then(function(item) { Bokeh.embed.embed_item(item); })\n+ .then(function(item) { return Bokeh.embed.embed_item(item); })\n </script>\n <script>\n fetch('/plot2')\n .then(function(response) { return response.json(); })\n- .then(function(item) { Bokeh.embed.embed_item(item, \"myplot2\"); })\n+ .then(function(item) { return Bokeh.embed.embed_item(item, \"myplot2\"); })\n </script>\n </body>\n \"\"\")\n", "issue": "[FEATURE] BokehJS embed_item should return a reference to the plot. \nBokeh.embed.embed_item currently returns nothing. I propose it should return a reference to the plot object so I don't have to go fishing (Bokeh.index[plotData.root_id]) in Bokeh.index to do things like call resize_layout() manually.\r\n\r\nHonestly any sort of 'official' method for doing this would be super useful.\r\n\n", "before_files": [{"content": "import json\n\nfrom flask import Flask\nfrom jinja2 import Template\n\nfrom bokeh.embed import json_item\nfrom bokeh.plotting import figure\nfrom bokeh.resources import CDN\nfrom bokeh.sampledata.iris import flowers\n\napp = Flask(__name__)\n\npage = Template(\"\"\"\n<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n {{ resources }}\n</head>\n\n<body>\n <div id=\"myplot\"></div>\n <div id=\"myplot2\"></div>\n <script>\n fetch('/plot')\n .then(function(response) { return response.json(); })\n .then(function(item) { Bokeh.embed.embed_item(item); })\n </script>\n <script>\n fetch('/plot2')\n .then(function(response) { return response.json(); })\n .then(function(item) { Bokeh.embed.embed_item(item, \"myplot2\"); })\n </script>\n</body>\n\"\"\")\n\ncolormap = {'setosa': 'red', 'versicolor': 'green', 'virginica': 'blue'}\ncolors = [colormap[x] for x in flowers['species']]\n\ndef make_plot(x, y):\n p = figure(title = \"Iris Morphology\", sizing_mode=\"fixed\", plot_width=400, plot_height=400)\n p.xaxis.axis_label = x\n p.yaxis.axis_label = y\n p.circle(flowers[x], flowers[y], color=colors, fill_alpha=0.2, size=10)\n return p\n\[email protected]('/')\ndef root():\n return page.render(resources=CDN.render())\n\[email protected]('/plot')\ndef plot():\n p = make_plot('petal_width', 'petal_length')\n return json.dumps(json_item(p, \"myplot\"))\n\[email protected]('/plot2')\ndef plot2():\n p = make_plot('sepal_width', 'sepal_length')\n return json.dumps(json_item(p))\n\nif __name__ == '__main__':\n app.run()\n", "path": "examples/embed/json_item.py"}], "after_files": [{"content": "import json\n\nfrom flask import Flask\nfrom jinja2 import Template\n\nfrom bokeh.embed import json_item\nfrom bokeh.plotting import figure\nfrom bokeh.resources import CDN\nfrom bokeh.sampledata.iris import flowers\n\napp = Flask(__name__)\n\npage = Template(\"\"\"\n<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n {{ resources }}\n</head>\n\n<body>\n <div id=\"myplot\"></div>\n <div id=\"myplot2\"></div>\n <script>\n fetch('/plot')\n .then(function(response) { return response.json(); })\n .then(function(item) { return Bokeh.embed.embed_item(item); })\n </script>\n <script>\n fetch('/plot2')\n .then(function(response) { return response.json(); })\n .then(function(item) { return Bokeh.embed.embed_item(item, \"myplot2\"); })\n </script>\n</body>\n\"\"\")\n\ncolormap = {'setosa': 'red', 'versicolor': 'green', 'virginica': 'blue'}\ncolors = [colormap[x] for x in flowers['species']]\n\ndef make_plot(x, y):\n p = figure(title = \"Iris Morphology\", sizing_mode=\"fixed\", plot_width=400, plot_height=400)\n p.xaxis.axis_label = x\n p.yaxis.axis_label = y\n p.circle(flowers[x], flowers[y], color=colors, fill_alpha=0.2, size=10)\n return p\n\[email protected]('/')\ndef root():\n return page.render(resources=CDN.render())\n\[email protected]('/plot')\ndef plot():\n p = make_plot('petal_width', 'petal_length')\n return json.dumps(json_item(p, \"myplot\"))\n\[email protected]('/plot2')\ndef plot2():\n p = make_plot('sepal_width', 'sepal_length')\n return json.dumps(json_item(p))\n\nif __name__ == '__main__':\n app.run()\n", "path": "examples/embed/json_item.py"}]}
887
178
gh_patches_debug_60846
rasdani/github-patches
git_diff
uclapi__uclapi-883
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- AMP Doesn't Return STUDENT_TYPE I am an idiot. https://github.com/uclapi/uclapi/blob/69f0d3240d8ec6cf2a3b018897dc247aa58cb1bf/backend/uclapi/timetable/amp.py#L31 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `backend/uclapi/timetable/amp.py` Content: ``` 1 """ 2 UCL Academic Modelling Project 3 Fast Code Processing 4 """ 5 6 STUDENT_TYPES = { 7 'A': "Campus-based, numeric mark scheme", 8 'B': "Campus-based, non-numeric mark scheme", 9 'C': "Distance learner, numeric mark scheme", 10 'D': "Distance learner, non-numeric mark scheme", 11 'E': "MBBS Resit" 12 } 13 14 15 class InvalidAMPCodeException(Exception): 16 pass 17 18 19 class ModuleDelivery: 20 def __init__(self, delivery_code): 21 # Sanity check the code we have 22 if len(delivery_code) != 3: 23 raise InvalidAMPCodeException("Delivery code is too long") 24 if delivery_code[0] in STUDENT_TYPES: 25 self.student_type = STUDENT_TYPES[delivery_code[0]] 26 else: 27 raise InvalidAMPCodeException("Student type is not valid") 28 self.fheq_level = int(delivery_code[1]) 29 self.undergraduate = delivery_code[2] == 'U' 30 31 def get_delivery(self): 32 return { 33 "fheq_level": self.fheq_level, 34 "is_undergraduate": self.undergraduate 35 } 36 37 38 class ModulePeriods: 39 # Default Attributes 40 term_1 = False 41 term_2 = False 42 term_3 = False 43 term_4 = False # Term 1 of the next academic year 44 summer = False # Summer Teaching Period 45 summer_school = False # UCL Summer School 46 summer_school_1 = False # UCL Summer School Session 1 47 summer_school_2 = False # UCL Summer School Session 2 48 lsr = False # Late Summer Resit period 49 year = False # Whole year module 50 51 def __init__(self, periods_code): 52 if periods_code == 'YEAR': 53 self.term_1 = True 54 self.term_2 = True 55 self.term_3 = True 56 self.year = True 57 elif periods_code == 'SUMMER': 58 self.summer = True 59 elif periods_code == 'LSR': 60 self.lsr = True 61 elif periods_code[0] == 'S': 62 # Summer School periods start with an S. 63 # S1, S2, S1+2 64 self.summer_school = True 65 if periods_code == 'S1': 66 self.summer_school_1 = True 67 elif periods_code == 'S2': 68 self.summer_school_2 = True 69 elif periods_code == 'S1+2': 70 self.summer_school_1 = True 71 self.summer_school_2 = True 72 else: 73 raise InvalidAMPCodeException( 74 "An invalid AMP code was found: " + periods_code 75 ) 76 elif periods_code[0] == 'T': 77 # Normal classes start with a T for Term 78 if periods_code == 'T1': 79 self.term_1 = True 80 elif periods_code == 'T1/2': 81 self.term_1 = True 82 self.term_2 = True 83 elif periods_code == 'T1/2/3': 84 self.term_1 = True 85 self.term_2 = True 86 self.term_3 = True 87 elif periods_code == 'T1/3': 88 self.term_1 = True 89 self.term_3 = True 90 elif periods_code == 'T2': 91 self.term_2 = True 92 elif periods_code == 'T2/3': 93 self.term_2 = True 94 self.term_3 = True 95 elif periods_code == 'T2/3/S' or periods_code == 'T2/3/4': 96 self.term_2 = True 97 self.term_3 = True 98 self.summer = True 99 elif periods_code == 'T3': 100 self.term_3 = True 101 elif periods_code == 'T3/1': 102 self.term_3 = True 103 self.term_4 = True 104 elif periods_code == 'T3/S' or periods_code == 'T3/4': 105 self.term_3 = True 106 self.summer = True 107 elif periods_code == 'T4': 108 self.term_4 = True 109 else: 110 raise InvalidAMPCodeException( 111 "AMP Periods Code contained an invalid term element" 112 ) 113 else: 114 raise InvalidAMPCodeException( 115 "An invalid AMP code was found: " + periods_code 116 ) 117 118 def get_periods(self): 119 return { 120 "teaching_periods": { 121 "term_1": self.term_1, 122 "term_2": self.term_2, 123 "term_3": self.term_3, 124 "term_1_next_year": self.term_4, 125 "summer": self.summer 126 }, 127 "year_long": self.year, 128 "lsr": self.lsr, 129 "summer_school": { 130 "is_summer_school": self.summer_school, 131 "sessions": { 132 "session_1": self.summer_school_1, 133 "session_2": self.summer_school_2 134 } 135 } 136 } 137 138 139 class ModuleInstance: 140 def __init__(self, amp_code): 141 """ 142 An AMP Code is stored as the INSTID in CMIS. 143 It looks something like this: A6U-T1/2 144 """ 145 parts = amp_code.split('-') 146 module_delivery_code = parts[0] # A6U 147 periods_code = parts[1] # T1/2 148 149 self.delivery = ModuleDelivery(module_delivery_code) 150 self.periods = ModulePeriods(periods_code) 151 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/backend/uclapi/timetable/amp.py b/backend/uclapi/timetable/amp.py --- a/backend/uclapi/timetable/amp.py +++ b/backend/uclapi/timetable/amp.py @@ -31,7 +31,8 @@ def get_delivery(self): return { "fheq_level": self.fheq_level, - "is_undergraduate": self.undergraduate + "is_undergraduate": self.undergraduate, + "student_type": self.student_type }
{"golden_diff": "diff --git a/backend/uclapi/timetable/amp.py b/backend/uclapi/timetable/amp.py\n--- a/backend/uclapi/timetable/amp.py\n+++ b/backend/uclapi/timetable/amp.py\n@@ -31,7 +31,8 @@\n def get_delivery(self):\r\n return {\r\n \"fheq_level\": self.fheq_level,\r\n- \"is_undergraduate\": self.undergraduate\r\n+ \"is_undergraduate\": self.undergraduate,\r\n+ \"student_type\": self.student_type\r\n }\n", "issue": "AMP Doesn't Return STUDENT_TYPE\nI am an idiot.\r\n\r\nhttps://github.com/uclapi/uclapi/blob/69f0d3240d8ec6cf2a3b018897dc247aa58cb1bf/backend/uclapi/timetable/amp.py#L31\n", "before_files": [{"content": "\"\"\"\r\nUCL Academic Modelling Project\r\nFast Code Processing\r\n\"\"\"\r\n\r\nSTUDENT_TYPES = {\r\n 'A': \"Campus-based, numeric mark scheme\",\r\n 'B': \"Campus-based, non-numeric mark scheme\",\r\n 'C': \"Distance learner, numeric mark scheme\",\r\n 'D': \"Distance learner, non-numeric mark scheme\",\r\n 'E': \"MBBS Resit\"\r\n}\r\n\r\n\r\nclass InvalidAMPCodeException(Exception):\r\n pass\r\n\r\n\r\nclass ModuleDelivery:\r\n def __init__(self, delivery_code):\r\n # Sanity check the code we have\r\n if len(delivery_code) != 3:\r\n raise InvalidAMPCodeException(\"Delivery code is too long\")\r\n if delivery_code[0] in STUDENT_TYPES:\r\n self.student_type = STUDENT_TYPES[delivery_code[0]]\r\n else:\r\n raise InvalidAMPCodeException(\"Student type is not valid\")\r\n self.fheq_level = int(delivery_code[1])\r\n self.undergraduate = delivery_code[2] == 'U'\r\n\r\n def get_delivery(self):\r\n return {\r\n \"fheq_level\": self.fheq_level,\r\n \"is_undergraduate\": self.undergraduate\r\n }\r\n\r\n\r\nclass ModulePeriods:\r\n # Default Attributes\r\n term_1 = False\r\n term_2 = False\r\n term_3 = False\r\n term_4 = False # Term 1 of the next academic year\r\n summer = False # Summer Teaching Period\r\n summer_school = False # UCL Summer School\r\n summer_school_1 = False # UCL Summer School Session 1\r\n summer_school_2 = False # UCL Summer School Session 2\r\n lsr = False # Late Summer Resit period\r\n year = False # Whole year module\r\n\r\n def __init__(self, periods_code):\r\n if periods_code == 'YEAR':\r\n self.term_1 = True\r\n self.term_2 = True\r\n self.term_3 = True\r\n self.year = True\r\n elif periods_code == 'SUMMER':\r\n self.summer = True\r\n elif periods_code == 'LSR':\r\n self.lsr = True\r\n elif periods_code[0] == 'S':\r\n # Summer School periods start with an S.\r\n # S1, S2, S1+2\r\n self.summer_school = True\r\n if periods_code == 'S1':\r\n self.summer_school_1 = True\r\n elif periods_code == 'S2':\r\n self.summer_school_2 = True\r\n elif periods_code == 'S1+2':\r\n self.summer_school_1 = True\r\n self.summer_school_2 = True\r\n else:\r\n raise InvalidAMPCodeException(\r\n \"An invalid AMP code was found: \" + periods_code\r\n )\r\n elif periods_code[0] == 'T':\r\n # Normal classes start with a T for Term\r\n if periods_code == 'T1':\r\n self.term_1 = True\r\n elif periods_code == 'T1/2':\r\n self.term_1 = True\r\n self.term_2 = True\r\n elif periods_code == 'T1/2/3':\r\n self.term_1 = True\r\n self.term_2 = True\r\n self.term_3 = True\r\n elif periods_code == 'T1/3':\r\n self.term_1 = True\r\n self.term_3 = True\r\n elif periods_code == 'T2':\r\n self.term_2 = True\r\n elif periods_code == 'T2/3':\r\n self.term_2 = True\r\n self.term_3 = True\r\n elif periods_code == 'T2/3/S' or periods_code == 'T2/3/4':\r\n self.term_2 = True\r\n self.term_3 = True\r\n self.summer = True\r\n elif periods_code == 'T3':\r\n self.term_3 = True\r\n elif periods_code == 'T3/1':\r\n self.term_3 = True\r\n self.term_4 = True\r\n elif periods_code == 'T3/S' or periods_code == 'T3/4':\r\n self.term_3 = True\r\n self.summer = True\r\n elif periods_code == 'T4':\r\n self.term_4 = True\r\n else:\r\n raise InvalidAMPCodeException(\r\n \"AMP Periods Code contained an invalid term element\"\r\n )\r\n else:\r\n raise InvalidAMPCodeException(\r\n \"An invalid AMP code was found: \" + periods_code\r\n )\r\n\r\n def get_periods(self):\r\n return {\r\n \"teaching_periods\": {\r\n \"term_1\": self.term_1,\r\n \"term_2\": self.term_2,\r\n \"term_3\": self.term_3,\r\n \"term_1_next_year\": self.term_4,\r\n \"summer\": self.summer\r\n },\r\n \"year_long\": self.year,\r\n \"lsr\": self.lsr,\r\n \"summer_school\": {\r\n \"is_summer_school\": self.summer_school,\r\n \"sessions\": {\r\n \"session_1\": self.summer_school_1,\r\n \"session_2\": self.summer_school_2\r\n }\r\n }\r\n }\r\n\r\n\r\nclass ModuleInstance:\r\n def __init__(self, amp_code):\r\n \"\"\"\r\n An AMP Code is stored as the INSTID in CMIS.\r\n It looks something like this: A6U-T1/2\r\n \"\"\"\r\n parts = amp_code.split('-')\r\n module_delivery_code = parts[0] # A6U\r\n periods_code = parts[1] # T1/2\r\n\r\n self.delivery = ModuleDelivery(module_delivery_code)\r\n self.periods = ModulePeriods(periods_code)\r\n", "path": "backend/uclapi/timetable/amp.py"}], "after_files": [{"content": "\"\"\"\r\nUCL Academic Modelling Project\r\nFast Code Processing\r\n\"\"\"\r\n\r\nSTUDENT_TYPES = {\r\n 'A': \"Campus-based, numeric mark scheme\",\r\n 'B': \"Campus-based, non-numeric mark scheme\",\r\n 'C': \"Distance learner, numeric mark scheme\",\r\n 'D': \"Distance learner, non-numeric mark scheme\",\r\n 'E': \"MBBS Resit\"\r\n}\r\n\r\n\r\nclass InvalidAMPCodeException(Exception):\r\n pass\r\n\r\n\r\nclass ModuleDelivery:\r\n def __init__(self, delivery_code):\r\n # Sanity check the code we have\r\n if len(delivery_code) != 3:\r\n raise InvalidAMPCodeException(\"Delivery code is too long\")\r\n if delivery_code[0] in STUDENT_TYPES:\r\n self.student_type = STUDENT_TYPES[delivery_code[0]]\r\n else:\r\n raise InvalidAMPCodeException(\"Student type is not valid\")\r\n self.fheq_level = int(delivery_code[1])\r\n self.undergraduate = delivery_code[2] == 'U'\r\n\r\n def get_delivery(self):\r\n return {\r\n \"fheq_level\": self.fheq_level,\r\n \"is_undergraduate\": self.undergraduate,\r\n \"student_type\": self.student_type\r\n }\r\n\r\n\r\nclass ModulePeriods:\r\n # Default Attributes\r\n term_1 = False\r\n term_2 = False\r\n term_3 = False\r\n term_4 = False # Term 1 of the next academic year\r\n summer = False # Summer Teaching Period\r\n summer_school = False # UCL Summer School\r\n summer_school_1 = False # UCL Summer School Session 1\r\n summer_school_2 = False # UCL Summer School Session 2\r\n lsr = False # Late Summer Resit period\r\n year = False # Whole year module\r\n\r\n def __init__(self, periods_code):\r\n if periods_code == 'YEAR':\r\n self.term_1 = True\r\n self.term_2 = True\r\n self.term_3 = True\r\n self.year = True\r\n elif periods_code == 'SUMMER':\r\n self.summer = True\r\n elif periods_code == 'LSR':\r\n self.lsr = True\r\n elif periods_code[0] == 'S':\r\n # Summer School periods start with an S.\r\n # S1, S2, S1+2\r\n self.summer_school = True\r\n if periods_code == 'S1':\r\n self.summer_school_1 = True\r\n elif periods_code == 'S2':\r\n self.summer_school_2 = True\r\n elif periods_code == 'S1+2':\r\n self.summer_school_1 = True\r\n self.summer_school_2 = True\r\n else:\r\n raise InvalidAMPCodeException(\r\n \"An invalid AMP code was found: \" + periods_code\r\n )\r\n elif periods_code[0] == 'T':\r\n # Normal classes start with a T for Term\r\n if periods_code == 'T1':\r\n self.term_1 = True\r\n elif periods_code == 'T1/2':\r\n self.term_1 = True\r\n self.term_2 = True\r\n elif periods_code == 'T1/2/3':\r\n self.term_1 = True\r\n self.term_2 = True\r\n self.term_3 = True\r\n elif periods_code == 'T1/3':\r\n self.term_1 = True\r\n self.term_3 = True\r\n elif periods_code == 'T2':\r\n self.term_2 = True\r\n elif periods_code == 'T2/3':\r\n self.term_2 = True\r\n self.term_3 = True\r\n elif periods_code == 'T2/3/S' or periods_code == 'T2/3/4':\r\n self.term_2 = True\r\n self.term_3 = True\r\n self.summer = True\r\n elif periods_code == 'T3':\r\n self.term_3 = True\r\n elif periods_code == 'T3/1':\r\n self.term_3 = True\r\n self.term_4 = True\r\n elif periods_code == 'T3/S' or periods_code == 'T3/4':\r\n self.term_3 = True\r\n self.summer = True\r\n elif periods_code == 'T4':\r\n self.term_4 = True\r\n else:\r\n raise InvalidAMPCodeException(\r\n \"AMP Periods Code contained an invalid term element\"\r\n )\r\n else:\r\n raise InvalidAMPCodeException(\r\n \"An invalid AMP code was found: \" + periods_code\r\n )\r\n\r\n def get_periods(self):\r\n return {\r\n \"teaching_periods\": {\r\n \"term_1\": self.term_1,\r\n \"term_2\": self.term_2,\r\n \"term_3\": self.term_3,\r\n \"term_1_next_year\": self.term_4,\r\n \"summer\": self.summer\r\n },\r\n \"year_long\": self.year,\r\n \"lsr\": self.lsr,\r\n \"summer_school\": {\r\n \"is_summer_school\": self.summer_school,\r\n \"sessions\": {\r\n \"session_1\": self.summer_school_1,\r\n \"session_2\": self.summer_school_2\r\n }\r\n }\r\n }\r\n\r\n\r\nclass ModuleInstance:\r\n def __init__(self, amp_code):\r\n \"\"\"\r\n An AMP Code is stored as the INSTID in CMIS.\r\n It looks something like this: A6U-T1/2\r\n \"\"\"\r\n parts = amp_code.split('-')\r\n module_delivery_code = parts[0] # A6U\r\n periods_code = parts[1] # T1/2\r\n\r\n self.delivery = ModuleDelivery(module_delivery_code)\r\n self.periods = ModulePeriods(periods_code)\r\n", "path": "backend/uclapi/timetable/amp.py"}]}
1,904
122
gh_patches_debug_16323
rasdani/github-patches
git_diff
litestar-org__litestar-2602
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- docs: update TODO app w/ sqlalchemy tutorial to use autocommitting before send handler. > @AgarwalPragy You are correct. By default, the plugin session handler does not automatically commit on a successful response. You can easily change it by using the following `before_send` handler: > > ```python > from advanced_alchemy.extensions.litestar.plugins.init.config.asyncio import autocommit_before_send_handler > > db_url = "sqlite+aiosqlite:///:memory:" > app = Litestar( > route_handlers=[hello], > plugins=[ > SQLAlchemyPlugin( > config=SQLAlchemyAsyncConfig( > connection_string=db_url, > session_dependency_key="transaction", > create_all=True, > alembic_config=AlembicAsyncConfig(target_metadata=orm_registry.metadata), > before_send_handler=autocommit_before_send_handler, > ), > ), > ], > ) > ``` I'd say this is a documentation bug on our side now. We should update https://docs.litestar.dev/latest/tutorials/sqlalchemy/3-init-plugin.html to do the same as this (it was written before the autocommit handler was a part of the plugin, IIRC). _Originally posted by @peterschutt in https://github.com/litestar-org/litestar/issues/2556#issuecomment-1786287414_ <!-- POLAR PLEDGE BADGE START --> --- > [!NOTE] > While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and > [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship. > > Check out all issues funded or available for funding [on our Polar.sh Litestar dashboard](https://polar.sh/litestar-org) > * If you would like to see an issue prioritized, make a pledge towards it! > * We receive the pledge once the issue is completed & verified > * This, along with engagement in the community, helps us know which features are a priority to our users. <a href="https://polar.sh/litestar-org/litestar/issues/2569"> <picture> <source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/2569/pledge.svg?darkmode=1"> <img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/2569/pledge.svg"> </picture> </a> <!-- POLAR PLEDGE BADGE END --> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docs/examples/contrib/sqlalchemy/plugins/tutorial/full_app_with_plugin.py` Content: ``` 1 from typing import AsyncGenerator, List, Optional 2 3 from sqlalchemy import select 4 from sqlalchemy.exc import IntegrityError, NoResultFound 5 from sqlalchemy.ext.asyncio import AsyncSession 6 from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column 7 8 from litestar import Litestar, get, post, put 9 from litestar.contrib.sqlalchemy.plugins import SQLAlchemyAsyncConfig, SQLAlchemyPlugin 10 from litestar.exceptions import ClientException, NotFoundException 11 from litestar.status_codes import HTTP_409_CONFLICT 12 13 14 class Base(DeclarativeBase): 15 ... 16 17 18 class TodoItem(Base): 19 __tablename__ = "todo_items" 20 21 title: Mapped[str] = mapped_column(primary_key=True) 22 done: Mapped[bool] 23 24 25 async def provide_transaction(db_session: AsyncSession) -> AsyncGenerator[AsyncSession, None]: 26 try: 27 async with db_session.begin(): 28 yield db_session 29 except IntegrityError as exc: 30 raise ClientException( 31 status_code=HTTP_409_CONFLICT, 32 detail=str(exc), 33 ) from exc 34 35 36 async def get_todo_by_title(todo_name, session: AsyncSession) -> TodoItem: 37 query = select(TodoItem).where(TodoItem.title == todo_name) 38 result = await session.execute(query) 39 try: 40 return result.scalar_one() 41 except NoResultFound as e: 42 raise NotFoundException(detail=f"TODO {todo_name!r} not found") from e 43 44 45 async def get_todo_list(done: Optional[bool], session: AsyncSession) -> List[TodoItem]: 46 query = select(TodoItem) 47 if done is not None: 48 query = query.where(TodoItem.done.is_(done)) 49 50 result = await session.execute(query) 51 return result.scalars().all() 52 53 54 @get("/") 55 async def get_list(transaction: AsyncSession, done: Optional[bool] = None) -> List[TodoItem]: 56 return await get_todo_list(done, transaction) 57 58 59 @post("/") 60 async def add_item(data: TodoItem, transaction: AsyncSession) -> TodoItem: 61 transaction.add(data) 62 return data 63 64 65 @put("/{item_title:str}") 66 async def update_item(item_title: str, data: TodoItem, transaction: AsyncSession) -> TodoItem: 67 todo_item = await get_todo_by_title(item_title, transaction) 68 todo_item.title = data.title 69 todo_item.done = data.done 70 return todo_item 71 72 73 db_config = SQLAlchemyAsyncConfig( 74 connection_string="sqlite+aiosqlite:///todo.sqlite", metadata=Base.metadata, create_all=True 75 ) 76 77 app = Litestar( 78 [get_list, add_item, update_item], 79 dependencies={"transaction": provide_transaction}, 80 plugins=[SQLAlchemyPlugin(db_config)], 81 ) 82 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/examples/contrib/sqlalchemy/plugins/tutorial/full_app_with_plugin.py b/docs/examples/contrib/sqlalchemy/plugins/tutorial/full_app_with_plugin.py --- a/docs/examples/contrib/sqlalchemy/plugins/tutorial/full_app_with_plugin.py +++ b/docs/examples/contrib/sqlalchemy/plugins/tutorial/full_app_with_plugin.py @@ -1,5 +1,6 @@ from typing import AsyncGenerator, List, Optional +from advanced_alchemy.extensions.litestar.plugins.init.config.asyncio import autocommit_before_send_handler from sqlalchemy import select from sqlalchemy.exc import IntegrityError, NoResultFound from sqlalchemy.ext.asyncio import AsyncSession @@ -71,7 +72,10 @@ db_config = SQLAlchemyAsyncConfig( - connection_string="sqlite+aiosqlite:///todo.sqlite", metadata=Base.metadata, create_all=True + connection_string="sqlite+aiosqlite:///todo.sqlite", + metadata=Base.metadata, + create_all=True, + before_send_handler=autocommit_before_send_handler, ) app = Litestar(
{"golden_diff": "diff --git a/docs/examples/contrib/sqlalchemy/plugins/tutorial/full_app_with_plugin.py b/docs/examples/contrib/sqlalchemy/plugins/tutorial/full_app_with_plugin.py\n--- a/docs/examples/contrib/sqlalchemy/plugins/tutorial/full_app_with_plugin.py\n+++ b/docs/examples/contrib/sqlalchemy/plugins/tutorial/full_app_with_plugin.py\n@@ -1,5 +1,6 @@\n from typing import AsyncGenerator, List, Optional\n \n+from advanced_alchemy.extensions.litestar.plugins.init.config.asyncio import autocommit_before_send_handler\n from sqlalchemy import select\n from sqlalchemy.exc import IntegrityError, NoResultFound\n from sqlalchemy.ext.asyncio import AsyncSession\n@@ -71,7 +72,10 @@\n \n \n db_config = SQLAlchemyAsyncConfig(\n- connection_string=\"sqlite+aiosqlite:///todo.sqlite\", metadata=Base.metadata, create_all=True\n+ connection_string=\"sqlite+aiosqlite:///todo.sqlite\",\n+ metadata=Base.metadata,\n+ create_all=True,\n+ before_send_handler=autocommit_before_send_handler,\n )\n \n app = Litestar(\n", "issue": "docs: update TODO app w/ sqlalchemy tutorial to use autocommitting before send handler.\n > @AgarwalPragy You are correct. By default, the plugin session handler does not automatically commit on a successful response. You can easily change it by using the following `before_send` handler:\r\n> \r\n> ```python\r\n> from advanced_alchemy.extensions.litestar.plugins.init.config.asyncio import autocommit_before_send_handler\r\n> \r\n> db_url = \"sqlite+aiosqlite:///:memory:\"\r\n> app = Litestar(\r\n> route_handlers=[hello],\r\n> plugins=[\r\n> SQLAlchemyPlugin(\r\n> config=SQLAlchemyAsyncConfig(\r\n> connection_string=db_url,\r\n> session_dependency_key=\"transaction\",\r\n> create_all=True,\r\n> alembic_config=AlembicAsyncConfig(target_metadata=orm_registry.metadata),\r\n> before_send_handler=autocommit_before_send_handler,\r\n> ),\r\n> ),\r\n> ],\r\n> )\r\n> ```\r\n\r\nI'd say this is a documentation bug on our side now.\r\n\r\nWe should update https://docs.litestar.dev/latest/tutorials/sqlalchemy/3-init-plugin.html to do the same as this (it was written before the autocommit handler was a part of the plugin, IIRC).\r\n\r\n_Originally posted by @peterschutt in https://github.com/litestar-org/litestar/issues/2556#issuecomment-1786287414_\r\n \n\n<!-- POLAR PLEDGE BADGE START -->\n---\n> [!NOTE] \n> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and \n> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.\n>\n> Check out all issues funded or available for funding [on our Polar.sh Litestar dashboard](https://polar.sh/litestar-org)\n> * If you would like to see an issue prioritized, make a pledge towards it!\n> * We receive the pledge once the issue is completed & verified\n> * This, along with engagement in the community, helps us know which features are a priority to our users.\n\n<a href=\"https://polar.sh/litestar-org/litestar/issues/2569\">\n<picture>\n <source media=\"(prefers-color-scheme: dark)\" srcset=\"https://polar.sh/api/github/litestar-org/litestar/issues/2569/pledge.svg?darkmode=1\">\n <img alt=\"Fund with Polar\" src=\"https://polar.sh/api/github/litestar-org/litestar/issues/2569/pledge.svg\">\n</picture>\n</a>\n<!-- POLAR PLEDGE BADGE END -->\n\n", "before_files": [{"content": "from typing import AsyncGenerator, List, Optional\n\nfrom sqlalchemy import select\nfrom sqlalchemy.exc import IntegrityError, NoResultFound\nfrom sqlalchemy.ext.asyncio import AsyncSession\nfrom sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column\n\nfrom litestar import Litestar, get, post, put\nfrom litestar.contrib.sqlalchemy.plugins import SQLAlchemyAsyncConfig, SQLAlchemyPlugin\nfrom litestar.exceptions import ClientException, NotFoundException\nfrom litestar.status_codes import HTTP_409_CONFLICT\n\n\nclass Base(DeclarativeBase):\n ...\n\n\nclass TodoItem(Base):\n __tablename__ = \"todo_items\"\n\n title: Mapped[str] = mapped_column(primary_key=True)\n done: Mapped[bool]\n\n\nasync def provide_transaction(db_session: AsyncSession) -> AsyncGenerator[AsyncSession, None]:\n try:\n async with db_session.begin():\n yield db_session\n except IntegrityError as exc:\n raise ClientException(\n status_code=HTTP_409_CONFLICT,\n detail=str(exc),\n ) from exc\n\n\nasync def get_todo_by_title(todo_name, session: AsyncSession) -> TodoItem:\n query = select(TodoItem).where(TodoItem.title == todo_name)\n result = await session.execute(query)\n try:\n return result.scalar_one()\n except NoResultFound as e:\n raise NotFoundException(detail=f\"TODO {todo_name!r} not found\") from e\n\n\nasync def get_todo_list(done: Optional[bool], session: AsyncSession) -> List[TodoItem]:\n query = select(TodoItem)\n if done is not None:\n query = query.where(TodoItem.done.is_(done))\n\n result = await session.execute(query)\n return result.scalars().all()\n\n\n@get(\"/\")\nasync def get_list(transaction: AsyncSession, done: Optional[bool] = None) -> List[TodoItem]:\n return await get_todo_list(done, transaction)\n\n\n@post(\"/\")\nasync def add_item(data: TodoItem, transaction: AsyncSession) -> TodoItem:\n transaction.add(data)\n return data\n\n\n@put(\"/{item_title:str}\")\nasync def update_item(item_title: str, data: TodoItem, transaction: AsyncSession) -> TodoItem:\n todo_item = await get_todo_by_title(item_title, transaction)\n todo_item.title = data.title\n todo_item.done = data.done\n return todo_item\n\n\ndb_config = SQLAlchemyAsyncConfig(\n connection_string=\"sqlite+aiosqlite:///todo.sqlite\", metadata=Base.metadata, create_all=True\n)\n\napp = Litestar(\n [get_list, add_item, update_item],\n dependencies={\"transaction\": provide_transaction},\n plugins=[SQLAlchemyPlugin(db_config)],\n)\n", "path": "docs/examples/contrib/sqlalchemy/plugins/tutorial/full_app_with_plugin.py"}], "after_files": [{"content": "from typing import AsyncGenerator, List, Optional\n\nfrom advanced_alchemy.extensions.litestar.plugins.init.config.asyncio import autocommit_before_send_handler\nfrom sqlalchemy import select\nfrom sqlalchemy.exc import IntegrityError, NoResultFound\nfrom sqlalchemy.ext.asyncio import AsyncSession\nfrom sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column\n\nfrom litestar import Litestar, get, post, put\nfrom litestar.contrib.sqlalchemy.plugins import SQLAlchemyAsyncConfig, SQLAlchemyPlugin\nfrom litestar.exceptions import ClientException, NotFoundException\nfrom litestar.status_codes import HTTP_409_CONFLICT\n\n\nclass Base(DeclarativeBase):\n ...\n\n\nclass TodoItem(Base):\n __tablename__ = \"todo_items\"\n\n title: Mapped[str] = mapped_column(primary_key=True)\n done: Mapped[bool]\n\n\nasync def provide_transaction(db_session: AsyncSession) -> AsyncGenerator[AsyncSession, None]:\n try:\n async with db_session.begin():\n yield db_session\n except IntegrityError as exc:\n raise ClientException(\n status_code=HTTP_409_CONFLICT,\n detail=str(exc),\n ) from exc\n\n\nasync def get_todo_by_title(todo_name, session: AsyncSession) -> TodoItem:\n query = select(TodoItem).where(TodoItem.title == todo_name)\n result = await session.execute(query)\n try:\n return result.scalar_one()\n except NoResultFound as e:\n raise NotFoundException(detail=f\"TODO {todo_name!r} not found\") from e\n\n\nasync def get_todo_list(done: Optional[bool], session: AsyncSession) -> List[TodoItem]:\n query = select(TodoItem)\n if done is not None:\n query = query.where(TodoItem.done.is_(done))\n\n result = await session.execute(query)\n return result.scalars().all()\n\n\n@get(\"/\")\nasync def get_list(transaction: AsyncSession, done: Optional[bool] = None) -> List[TodoItem]:\n return await get_todo_list(done, transaction)\n\n\n@post(\"/\")\nasync def add_item(data: TodoItem, transaction: AsyncSession) -> TodoItem:\n transaction.add(data)\n return data\n\n\n@put(\"/{item_title:str}\")\nasync def update_item(item_title: str, data: TodoItem, transaction: AsyncSession) -> TodoItem:\n todo_item = await get_todo_by_title(item_title, transaction)\n todo_item.title = data.title\n todo_item.done = data.done\n return todo_item\n\n\ndb_config = SQLAlchemyAsyncConfig(\n connection_string=\"sqlite+aiosqlite:///todo.sqlite\",\n metadata=Base.metadata,\n create_all=True,\n before_send_handler=autocommit_before_send_handler,\n)\n\napp = Litestar(\n [get_list, add_item, update_item],\n dependencies={\"transaction\": provide_transaction},\n plugins=[SQLAlchemyPlugin(db_config)],\n)\n", "path": "docs/examples/contrib/sqlalchemy/plugins/tutorial/full_app_with_plugin.py"}]}
1,603
224
gh_patches_debug_29329
rasdani/github-patches
git_diff
streamlink__streamlink-2326
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Streamlink no longer provides streamlink-script.py As of version 0.14.0 streamlink does not install the streamlink-script.py script, because of this streamlink-twitch-gui does not work as it cannot load the script. This may an issue in streamlink, as there is no mention of its removal in the changelog. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 import codecs 3 from os import environ 4 from os import path 5 from sys import path as sys_path 6 7 from setuptools import setup, find_packages 8 9 import versioneer 10 11 deps = [ 12 # Require backport of concurrent.futures on Python 2 13 'futures;python_version<"3.0"', 14 # Require singledispatch on Python <3.4 15 'singledispatch;python_version<"3.4"', 16 "requests>=2.21.0,<3.0", 17 'urllib3[secure]>=1.23;python_version<"3.0"', 18 "isodate", 19 "websocket-client", 20 # Support for SOCKS proxies 21 "PySocks!=1.5.7,>=1.5.6", 22 # win-inet-pton is missing a dependency in PySocks, this has been fixed but not released yet 23 # Required due to missing socket.inet_ntop & socket.inet_pton method in Windows Python 2.x 24 'win-inet-pton;python_version<"3.0" and platform_system=="Windows"', 25 # shutil.get_terminal_size and which were added in Python 3.3 26 'backports.shutil_which;python_version<"3.3"', 27 'backports.shutil_get_terminal_size;python_version<"3.3"' 28 ] 29 30 # for encrypted streams 31 if environ.get("STREAMLINK_USE_PYCRYPTO"): 32 deps.append("pycrypto") 33 else: 34 # this version of pycryptodome is known to work and has a Windows wheel for py2.7, py3.3-3.6 35 deps.append("pycryptodome>=3.4.3,<4") 36 37 # for localization 38 if environ.get("STREAMLINK_USE_PYCOUNTRY"): 39 deps.append("pycountry") 40 else: 41 deps.append("iso-639") 42 deps.append("iso3166") 43 44 # When we build an egg for the Win32 bootstrap we don"t want dependency 45 # information built into it. 46 if environ.get("NO_DEPS"): 47 deps = [] 48 49 this_directory = path.abspath(path.dirname(__file__)) 50 srcdir = path.join(this_directory, "src/") 51 sys_path.insert(0, srcdir) 52 53 with codecs.open(path.join(this_directory, "README.md"), 'r', "utf8") as f: 54 long_description = f.read() 55 56 setup(name="streamlink", 57 version=versioneer.get_version(), 58 cmdclass=versioneer.get_cmdclass(), 59 description="Streamlink is command-line utility that extracts streams " 60 "from various services and pipes them into a video player of " 61 "choice.", 62 long_description=long_description, 63 long_description_content_type="text/markdown", 64 url="https://github.com/streamlink/streamlink", 65 project_urls={ 66 "Documentation": "https://streamlink.github.io/", 67 "Tracker": "https://github.com/streamlink/streamlink/issues", 68 "Source": "https://github.com/streamlink/streamlink", 69 "Funding": "https://opencollective.com/streamlink" 70 }, 71 author="Streamlink", 72 # temp until we have a mailing list / global email 73 author_email="[email protected]", 74 license="Simplified BSD", 75 packages=find_packages("src"), 76 package_dir={"": "src"}, 77 entry_points={ 78 "console_scripts": ["streamlink=streamlink_cli.main:main"] 79 }, 80 install_requires=deps, 81 test_suite="tests", 82 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4", 83 classifiers=["Development Status :: 5 - Production/Stable", 84 "License :: OSI Approved :: BSD License", 85 "Environment :: Console", 86 "Intended Audience :: End Users/Desktop", 87 "Operating System :: POSIX", 88 "Operating System :: Microsoft :: Windows", 89 "Operating System :: MacOS", 90 "Programming Language :: Python :: 2.7", 91 "Programming Language :: Python :: 3.4", 92 "Programming Language :: Python :: 3.5", 93 "Programming Language :: Python :: 3.6", 94 "Programming Language :: Python :: 3.7", 95 "Topic :: Internet :: WWW/HTTP", 96 "Topic :: Multimedia :: Sound/Audio", 97 "Topic :: Multimedia :: Video", 98 "Topic :: Utilities"]) 99 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -1,8 +1,7 @@ #!/usr/bin/env python import codecs -from os import environ -from os import path -from sys import path as sys_path +from os import environ, path +from sys import argv, path as sys_path from setuptools import setup, find_packages @@ -53,6 +52,27 @@ with codecs.open(path.join(this_directory, "README.md"), 'r', "utf8") as f: long_description = f.read() + +def is_wheel_for_windows(): + if "bdist_wheel" in argv: + names = ["win32", "win-amd64", "cygwin"] + length = len(argv) + for pos in range(argv.index("bdist_wheel") + 1, length): + if argv[pos] == "--plat-name" and pos + 1 < length: + return argv[pos + 1] in names + elif argv[pos][:12] == "--plat-name=": + return argv[pos][12:] in names + return False + + +entry_points = { + "console_scripts": ["streamlink=streamlink_cli.main:main"] +} + +if is_wheel_for_windows(): + entry_points["gui_scripts"] = ["streamlinkw=streamlink_cli.main:main"] + + setup(name="streamlink", version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), @@ -74,9 +94,7 @@ license="Simplified BSD", packages=find_packages("src"), package_dir={"": "src"}, - entry_points={ - "console_scripts": ["streamlink=streamlink_cli.main:main"] - }, + entry_points=entry_points, install_requires=deps, test_suite="tests", python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,8 +1,7 @@\n #!/usr/bin/env python\n import codecs\n-from os import environ\n-from os import path\n-from sys import path as sys_path\n+from os import environ, path\n+from sys import argv, path as sys_path\n \n from setuptools import setup, find_packages\n \n@@ -53,6 +52,27 @@\n with codecs.open(path.join(this_directory, \"README.md\"), 'r', \"utf8\") as f:\n long_description = f.read()\n \n+\n+def is_wheel_for_windows():\n+ if \"bdist_wheel\" in argv:\n+ names = [\"win32\", \"win-amd64\", \"cygwin\"]\n+ length = len(argv)\n+ for pos in range(argv.index(\"bdist_wheel\") + 1, length):\n+ if argv[pos] == \"--plat-name\" and pos + 1 < length:\n+ return argv[pos + 1] in names\n+ elif argv[pos][:12] == \"--plat-name=\":\n+ return argv[pos][12:] in names\n+ return False\n+\n+\n+entry_points = {\n+ \"console_scripts\": [\"streamlink=streamlink_cli.main:main\"]\n+}\n+\n+if is_wheel_for_windows():\n+ entry_points[\"gui_scripts\"] = [\"streamlinkw=streamlink_cli.main:main\"]\n+\n+\n setup(name=\"streamlink\",\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n@@ -74,9 +94,7 @@\n license=\"Simplified BSD\",\n packages=find_packages(\"src\"),\n package_dir={\"\": \"src\"},\n- entry_points={\n- \"console_scripts\": [\"streamlink=streamlink_cli.main:main\"]\n- },\n+ entry_points=entry_points,\n install_requires=deps,\n test_suite=\"tests\",\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4\",\n", "issue": "Streamlink no longer provides streamlink-script.py\nAs of version 0.14.0 streamlink does not install the streamlink-script.py script,\r\nbecause of this streamlink-twitch-gui does not work as it cannot load the script.\r\nThis may an issue in streamlink, as there is no mention of its removal in the changelog.\n", "before_files": [{"content": "#!/usr/bin/env python\nimport codecs\nfrom os import environ\nfrom os import path\nfrom sys import path as sys_path\n\nfrom setuptools import setup, find_packages\n\nimport versioneer\n\ndeps = [\n # Require backport of concurrent.futures on Python 2\n 'futures;python_version<\"3.0\"',\n # Require singledispatch on Python <3.4\n 'singledispatch;python_version<\"3.4\"',\n \"requests>=2.21.0,<3.0\",\n 'urllib3[secure]>=1.23;python_version<\"3.0\"',\n \"isodate\",\n \"websocket-client\",\n # Support for SOCKS proxies\n \"PySocks!=1.5.7,>=1.5.6\",\n # win-inet-pton is missing a dependency in PySocks, this has been fixed but not released yet\n # Required due to missing socket.inet_ntop & socket.inet_pton method in Windows Python 2.x\n 'win-inet-pton;python_version<\"3.0\" and platform_system==\"Windows\"',\n # shutil.get_terminal_size and which were added in Python 3.3\n 'backports.shutil_which;python_version<\"3.3\"',\n 'backports.shutil_get_terminal_size;python_version<\"3.3\"'\n]\n\n# for encrypted streams\nif environ.get(\"STREAMLINK_USE_PYCRYPTO\"):\n deps.append(\"pycrypto\")\nelse:\n # this version of pycryptodome is known to work and has a Windows wheel for py2.7, py3.3-3.6\n deps.append(\"pycryptodome>=3.4.3,<4\")\n\n# for localization\nif environ.get(\"STREAMLINK_USE_PYCOUNTRY\"):\n deps.append(\"pycountry\")\nelse:\n deps.append(\"iso-639\")\n deps.append(\"iso3166\")\n\n# When we build an egg for the Win32 bootstrap we don\"t want dependency\n# information built into it.\nif environ.get(\"NO_DEPS\"):\n deps = []\n\nthis_directory = path.abspath(path.dirname(__file__))\nsrcdir = path.join(this_directory, \"src/\")\nsys_path.insert(0, srcdir)\n\nwith codecs.open(path.join(this_directory, \"README.md\"), 'r', \"utf8\") as f:\n long_description = f.read()\n\nsetup(name=\"streamlink\",\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n description=\"Streamlink is command-line utility that extracts streams \"\n \"from various services and pipes them into a video player of \"\n \"choice.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/streamlink/streamlink\",\n project_urls={\n \"Documentation\": \"https://streamlink.github.io/\",\n \"Tracker\": \"https://github.com/streamlink/streamlink/issues\",\n \"Source\": \"https://github.com/streamlink/streamlink\",\n \"Funding\": \"https://opencollective.com/streamlink\"\n },\n author=\"Streamlink\",\n # temp until we have a mailing list / global email\n author_email=\"[email protected]\",\n license=\"Simplified BSD\",\n packages=find_packages(\"src\"),\n package_dir={\"\": \"src\"},\n entry_points={\n \"console_scripts\": [\"streamlink=streamlink_cli.main:main\"]\n },\n install_requires=deps,\n test_suite=\"tests\",\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4\",\n classifiers=[\"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: BSD License\",\n \"Environment :: Console\",\n \"Intended Audience :: End Users/Desktop\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Multimedia :: Sound/Audio\",\n \"Topic :: Multimedia :: Video\",\n \"Topic :: Utilities\"])\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport codecs\nfrom os import environ, path\nfrom sys import argv, path as sys_path\n\nfrom setuptools import setup, find_packages\n\nimport versioneer\n\ndeps = [\n # Require backport of concurrent.futures on Python 2\n 'futures;python_version<\"3.0\"',\n # Require singledispatch on Python <3.4\n 'singledispatch;python_version<\"3.4\"',\n \"requests>=2.21.0,<3.0\",\n 'urllib3[secure]>=1.23;python_version<\"3.0\"',\n \"isodate\",\n \"websocket-client\",\n # Support for SOCKS proxies\n \"PySocks!=1.5.7,>=1.5.6\",\n # win-inet-pton is missing a dependency in PySocks, this has been fixed but not released yet\n # Required due to missing socket.inet_ntop & socket.inet_pton method in Windows Python 2.x\n 'win-inet-pton;python_version<\"3.0\" and platform_system==\"Windows\"',\n # shutil.get_terminal_size and which were added in Python 3.3\n 'backports.shutil_which;python_version<\"3.3\"',\n 'backports.shutil_get_terminal_size;python_version<\"3.3\"'\n]\n\n# for encrypted streams\nif environ.get(\"STREAMLINK_USE_PYCRYPTO\"):\n deps.append(\"pycrypto\")\nelse:\n # this version of pycryptodome is known to work and has a Windows wheel for py2.7, py3.3-3.6\n deps.append(\"pycryptodome>=3.4.3,<4\")\n\n# for localization\nif environ.get(\"STREAMLINK_USE_PYCOUNTRY\"):\n deps.append(\"pycountry\")\nelse:\n deps.append(\"iso-639\")\n deps.append(\"iso3166\")\n\n# When we build an egg for the Win32 bootstrap we don\"t want dependency\n# information built into it.\nif environ.get(\"NO_DEPS\"):\n deps = []\n\nthis_directory = path.abspath(path.dirname(__file__))\nsrcdir = path.join(this_directory, \"src/\")\nsys_path.insert(0, srcdir)\n\nwith codecs.open(path.join(this_directory, \"README.md\"), 'r', \"utf8\") as f:\n long_description = f.read()\n\n\ndef is_wheel_for_windows():\n if \"bdist_wheel\" in argv:\n names = [\"win32\", \"win-amd64\", \"cygwin\"]\n length = len(argv)\n for pos in range(argv.index(\"bdist_wheel\") + 1, length):\n if argv[pos] == \"--plat-name\" and pos + 1 < length:\n return argv[pos + 1] in names\n elif argv[pos][:12] == \"--plat-name=\":\n return argv[pos][12:] in names\n return False\n\n\nentry_points = {\n \"console_scripts\": [\"streamlink=streamlink_cli.main:main\"]\n}\n\nif is_wheel_for_windows():\n entry_points[\"gui_scripts\"] = [\"streamlinkw=streamlink_cli.main:main\"]\n\n\nsetup(name=\"streamlink\",\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n description=\"Streamlink is command-line utility that extracts streams \"\n \"from various services and pipes them into a video player of \"\n \"choice.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/streamlink/streamlink\",\n project_urls={\n \"Documentation\": \"https://streamlink.github.io/\",\n \"Tracker\": \"https://github.com/streamlink/streamlink/issues\",\n \"Source\": \"https://github.com/streamlink/streamlink\",\n \"Funding\": \"https://opencollective.com/streamlink\"\n },\n author=\"Streamlink\",\n # temp until we have a mailing list / global email\n author_email=\"[email protected]\",\n license=\"Simplified BSD\",\n packages=find_packages(\"src\"),\n package_dir={\"\": \"src\"},\n entry_points=entry_points,\n install_requires=deps,\n test_suite=\"tests\",\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4\",\n classifiers=[\"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: BSD License\",\n \"Environment :: Console\",\n \"Intended Audience :: End Users/Desktop\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Multimedia :: Sound/Audio\",\n \"Topic :: Multimedia :: Video\",\n \"Topic :: Utilities\"])\n", "path": "setup.py"}]}
1,455
449
gh_patches_debug_29106
rasdani/github-patches
git_diff
apache__airflow-34931
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Airflow 2.7.1 can not start Scheduler & trigger ### Apache Airflow version 2.7.1 ### What happened After upgrade from 2.6.0 to 2.7.1 (try pip uninstall apache-airflow, and clear dir airflow - remove airflow.cfg), I can start scheduler & trigger with daemon. I try start with command, it can start, but logout console it killed. I try: airflow scheduler or airflow triggerer :done but kill when logout console airflow scheduler --daemon && airflow triggerer --daemon: fail, can not start scheduler & triggerer (2.6.0 run ok). but start deamon with webserver & celery worker is fine Help me ### What you think should happen instead _No response_ ### How to reproduce 1. run airflow 2.6.0 fine on ubuntu server 22.04.3 lts 2. install airflow 2.7.1 3. can not start daemon triggerer & scheduler ### Operating System ubuntu server 22.04.3 LTS ### Versions of Apache Airflow Providers _No response_ ### Deployment Virtualenv installation ### Deployment details _No response_ ### Anything else _No response_ ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `airflow/cli/commands/triggerer_command.py` Content: ``` 1 # Licensed to the Apache Software Foundation (ASF) under one 2 # or more contributor license agreements. See the NOTICE file 3 # distributed with this work for additional information 4 # regarding copyright ownership. The ASF licenses this file 5 # to you under the Apache License, Version 2.0 (the 6 # "License"); you may not use this file except in compliance 7 # with the License. You may obtain a copy of the License at 8 # 9 # http://www.apache.org/licenses/LICENSE-2.0 10 # 11 # Unless required by applicable law or agreed to in writing, 12 # software distributed under the License is distributed on an 13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 # KIND, either express or implied. See the License for the 15 # specific language governing permissions and limitations 16 # under the License. 17 """Triggerer command.""" 18 from __future__ import annotations 19 20 import signal 21 from contextlib import contextmanager 22 from functools import partial 23 from multiprocessing import Process 24 from typing import Generator 25 26 import daemon 27 from daemon.pidfile import TimeoutPIDLockFile 28 29 from airflow import settings 30 from airflow.configuration import conf 31 from airflow.jobs.job import Job, run_job 32 from airflow.jobs.triggerer_job_runner import TriggererJobRunner 33 from airflow.utils import cli as cli_utils 34 from airflow.utils.cli import setup_locations, setup_logging, sigint_handler, sigquit_handler 35 from airflow.utils.providers_configuration_loader import providers_configuration_loaded 36 from airflow.utils.serve_logs import serve_logs 37 38 39 @contextmanager 40 def _serve_logs(skip_serve_logs: bool = False) -> Generator[None, None, None]: 41 """Start serve_logs sub-process.""" 42 sub_proc = None 43 if skip_serve_logs is False: 44 port = conf.getint("logging", "trigger_log_server_port", fallback=8794) 45 sub_proc = Process(target=partial(serve_logs, port=port)) 46 sub_proc.start() 47 try: 48 yield 49 finally: 50 if sub_proc: 51 sub_proc.terminate() 52 53 54 @cli_utils.action_cli 55 @providers_configuration_loaded 56 def triggerer(args): 57 """Start Airflow Triggerer.""" 58 settings.MASK_SECRETS_IN_LOGS = True 59 print(settings.HEADER) 60 triggerer_heartrate = conf.getfloat("triggerer", "JOB_HEARTBEAT_SEC") 61 triggerer_job_runner = TriggererJobRunner(job=Job(heartrate=triggerer_heartrate), capacity=args.capacity) 62 63 if args.daemon: 64 pid, stdout, stderr, log_file = setup_locations( 65 "triggerer", args.pid, args.stdout, args.stderr, args.log_file 66 ) 67 handle = setup_logging(log_file) 68 with open(stdout, "a") as stdout_handle, open(stderr, "a") as stderr_handle: 69 stdout_handle.truncate(0) 70 stderr_handle.truncate(0) 71 72 daemon_context = daemon.DaemonContext( 73 pidfile=TimeoutPIDLockFile(pid, -1), 74 files_preserve=[handle], 75 stdout=stdout_handle, 76 stderr=stderr_handle, 77 umask=int(settings.DAEMON_UMASK, 8), 78 ) 79 with daemon_context, _serve_logs(args.skip_serve_logs): 80 run_job(job=triggerer_job_runner.job, execute_callable=triggerer_job_runner._execute) 81 else: 82 signal.signal(signal.SIGINT, sigint_handler) 83 signal.signal(signal.SIGTERM, sigint_handler) 84 signal.signal(signal.SIGQUIT, sigquit_handler) 85 with _serve_logs(args.skip_serve_logs): 86 run_job(job=triggerer_job_runner.job, execute_callable=triggerer_job_runner._execute) 87 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/airflow/cli/commands/triggerer_command.py b/airflow/cli/commands/triggerer_command.py --- a/airflow/cli/commands/triggerer_command.py +++ b/airflow/cli/commands/triggerer_command.py @@ -58,7 +58,6 @@ settings.MASK_SECRETS_IN_LOGS = True print(settings.HEADER) triggerer_heartrate = conf.getfloat("triggerer", "JOB_HEARTBEAT_SEC") - triggerer_job_runner = TriggererJobRunner(job=Job(heartrate=triggerer_heartrate), capacity=args.capacity) if args.daemon: pid, stdout, stderr, log_file = setup_locations( @@ -77,10 +76,16 @@ umask=int(settings.DAEMON_UMASK, 8), ) with daemon_context, _serve_logs(args.skip_serve_logs): + triggerer_job_runner = TriggererJobRunner( + job=Job(heartrate=triggerer_heartrate), capacity=args.capacity + ) run_job(job=triggerer_job_runner.job, execute_callable=triggerer_job_runner._execute) else: signal.signal(signal.SIGINT, sigint_handler) signal.signal(signal.SIGTERM, sigint_handler) signal.signal(signal.SIGQUIT, sigquit_handler) with _serve_logs(args.skip_serve_logs): + triggerer_job_runner = TriggererJobRunner( + job=Job(heartrate=triggerer_heartrate), capacity=args.capacity + ) run_job(job=triggerer_job_runner.job, execute_callable=triggerer_job_runner._execute)
{"golden_diff": "diff --git a/airflow/cli/commands/triggerer_command.py b/airflow/cli/commands/triggerer_command.py\n--- a/airflow/cli/commands/triggerer_command.py\n+++ b/airflow/cli/commands/triggerer_command.py\n@@ -58,7 +58,6 @@\n settings.MASK_SECRETS_IN_LOGS = True\n print(settings.HEADER)\n triggerer_heartrate = conf.getfloat(\"triggerer\", \"JOB_HEARTBEAT_SEC\")\n- triggerer_job_runner = TriggererJobRunner(job=Job(heartrate=triggerer_heartrate), capacity=args.capacity)\n \n if args.daemon:\n pid, stdout, stderr, log_file = setup_locations(\n@@ -77,10 +76,16 @@\n umask=int(settings.DAEMON_UMASK, 8),\n )\n with daemon_context, _serve_logs(args.skip_serve_logs):\n+ triggerer_job_runner = TriggererJobRunner(\n+ job=Job(heartrate=triggerer_heartrate), capacity=args.capacity\n+ )\n run_job(job=triggerer_job_runner.job, execute_callable=triggerer_job_runner._execute)\n else:\n signal.signal(signal.SIGINT, sigint_handler)\n signal.signal(signal.SIGTERM, sigint_handler)\n signal.signal(signal.SIGQUIT, sigquit_handler)\n with _serve_logs(args.skip_serve_logs):\n+ triggerer_job_runner = TriggererJobRunner(\n+ job=Job(heartrate=triggerer_heartrate), capacity=args.capacity\n+ )\n run_job(job=triggerer_job_runner.job, execute_callable=triggerer_job_runner._execute)\n", "issue": "Airflow 2.7.1 can not start Scheduler & trigger\n### Apache Airflow version\n\n2.7.1\n\n### What happened\n\nAfter upgrade from 2.6.0 to 2.7.1 (try pip uninstall apache-airflow, and clear dir airflow - remove airflow.cfg), I can start scheduler & trigger with daemon. \r\nI try start with command, it can start, but logout console it killed.\r\nI try: airflow scheduler or airflow triggerer :done but kill when logout console\r\nairflow scheduler --daemon && airflow triggerer --daemon: fail, can not start scheduler & triggerer (2.6.0 run ok). but start deamon with webserver & celery worker is fine\r\n\r\nHelp me\n\n### What you think should happen instead\n\n_No response_\n\n### How to reproduce\n\n1. run airflow 2.6.0 fine on ubuntu server 22.04.3 lts\r\n2. install airflow 2.7.1 \r\n3. can not start daemon triggerer & scheduler\n\n### Operating System\n\nubuntu server 22.04.3 LTS\n\n### Versions of Apache Airflow Providers\n\n_No response_\n\n### Deployment\n\nVirtualenv installation\n\n### Deployment details\n\n_No response_\n\n### Anything else\n\n_No response_\n\n### Are you willing to submit PR?\n\n- [X] Yes I am willing to submit a PR!\n\n### Code of Conduct\n\n- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)\n\n", "before_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Triggerer command.\"\"\"\nfrom __future__ import annotations\n\nimport signal\nfrom contextlib import contextmanager\nfrom functools import partial\nfrom multiprocessing import Process\nfrom typing import Generator\n\nimport daemon\nfrom daemon.pidfile import TimeoutPIDLockFile\n\nfrom airflow import settings\nfrom airflow.configuration import conf\nfrom airflow.jobs.job import Job, run_job\nfrom airflow.jobs.triggerer_job_runner import TriggererJobRunner\nfrom airflow.utils import cli as cli_utils\nfrom airflow.utils.cli import setup_locations, setup_logging, sigint_handler, sigquit_handler\nfrom airflow.utils.providers_configuration_loader import providers_configuration_loaded\nfrom airflow.utils.serve_logs import serve_logs\n\n\n@contextmanager\ndef _serve_logs(skip_serve_logs: bool = False) -> Generator[None, None, None]:\n \"\"\"Start serve_logs sub-process.\"\"\"\n sub_proc = None\n if skip_serve_logs is False:\n port = conf.getint(\"logging\", \"trigger_log_server_port\", fallback=8794)\n sub_proc = Process(target=partial(serve_logs, port=port))\n sub_proc.start()\n try:\n yield\n finally:\n if sub_proc:\n sub_proc.terminate()\n\n\n@cli_utils.action_cli\n@providers_configuration_loaded\ndef triggerer(args):\n \"\"\"Start Airflow Triggerer.\"\"\"\n settings.MASK_SECRETS_IN_LOGS = True\n print(settings.HEADER)\n triggerer_heartrate = conf.getfloat(\"triggerer\", \"JOB_HEARTBEAT_SEC\")\n triggerer_job_runner = TriggererJobRunner(job=Job(heartrate=triggerer_heartrate), capacity=args.capacity)\n\n if args.daemon:\n pid, stdout, stderr, log_file = setup_locations(\n \"triggerer\", args.pid, args.stdout, args.stderr, args.log_file\n )\n handle = setup_logging(log_file)\n with open(stdout, \"a\") as stdout_handle, open(stderr, \"a\") as stderr_handle:\n stdout_handle.truncate(0)\n stderr_handle.truncate(0)\n\n daemon_context = daemon.DaemonContext(\n pidfile=TimeoutPIDLockFile(pid, -1),\n files_preserve=[handle],\n stdout=stdout_handle,\n stderr=stderr_handle,\n umask=int(settings.DAEMON_UMASK, 8),\n )\n with daemon_context, _serve_logs(args.skip_serve_logs):\n run_job(job=triggerer_job_runner.job, execute_callable=triggerer_job_runner._execute)\n else:\n signal.signal(signal.SIGINT, sigint_handler)\n signal.signal(signal.SIGTERM, sigint_handler)\n signal.signal(signal.SIGQUIT, sigquit_handler)\n with _serve_logs(args.skip_serve_logs):\n run_job(job=triggerer_job_runner.job, execute_callable=triggerer_job_runner._execute)\n", "path": "airflow/cli/commands/triggerer_command.py"}], "after_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Triggerer command.\"\"\"\nfrom __future__ import annotations\n\nimport signal\nfrom contextlib import contextmanager\nfrom functools import partial\nfrom multiprocessing import Process\nfrom typing import Generator\n\nimport daemon\nfrom daemon.pidfile import TimeoutPIDLockFile\n\nfrom airflow import settings\nfrom airflow.configuration import conf\nfrom airflow.jobs.job import Job, run_job\nfrom airflow.jobs.triggerer_job_runner import TriggererJobRunner\nfrom airflow.utils import cli as cli_utils\nfrom airflow.utils.cli import setup_locations, setup_logging, sigint_handler, sigquit_handler\nfrom airflow.utils.providers_configuration_loader import providers_configuration_loaded\nfrom airflow.utils.serve_logs import serve_logs\n\n\n@contextmanager\ndef _serve_logs(skip_serve_logs: bool = False) -> Generator[None, None, None]:\n \"\"\"Start serve_logs sub-process.\"\"\"\n sub_proc = None\n if skip_serve_logs is False:\n port = conf.getint(\"logging\", \"trigger_log_server_port\", fallback=8794)\n sub_proc = Process(target=partial(serve_logs, port=port))\n sub_proc.start()\n try:\n yield\n finally:\n if sub_proc:\n sub_proc.terminate()\n\n\n@cli_utils.action_cli\n@providers_configuration_loaded\ndef triggerer(args):\n \"\"\"Start Airflow Triggerer.\"\"\"\n settings.MASK_SECRETS_IN_LOGS = True\n print(settings.HEADER)\n triggerer_heartrate = conf.getfloat(\"triggerer\", \"JOB_HEARTBEAT_SEC\")\n\n if args.daemon:\n pid, stdout, stderr, log_file = setup_locations(\n \"triggerer\", args.pid, args.stdout, args.stderr, args.log_file\n )\n handle = setup_logging(log_file)\n with open(stdout, \"a\") as stdout_handle, open(stderr, \"a\") as stderr_handle:\n stdout_handle.truncate(0)\n stderr_handle.truncate(0)\n\n daemon_context = daemon.DaemonContext(\n pidfile=TimeoutPIDLockFile(pid, -1),\n files_preserve=[handle],\n stdout=stdout_handle,\n stderr=stderr_handle,\n umask=int(settings.DAEMON_UMASK, 8),\n )\n with daemon_context, _serve_logs(args.skip_serve_logs):\n triggerer_job_runner = TriggererJobRunner(\n job=Job(heartrate=triggerer_heartrate), capacity=args.capacity\n )\n run_job(job=triggerer_job_runner.job, execute_callable=triggerer_job_runner._execute)\n else:\n signal.signal(signal.SIGINT, sigint_handler)\n signal.signal(signal.SIGTERM, sigint_handler)\n signal.signal(signal.SIGQUIT, sigquit_handler)\n with _serve_logs(args.skip_serve_logs):\n triggerer_job_runner = TriggererJobRunner(\n job=Job(heartrate=triggerer_heartrate), capacity=args.capacity\n )\n run_job(job=triggerer_job_runner.job, execute_callable=triggerer_job_runner._execute)\n", "path": "airflow/cli/commands/triggerer_command.py"}]}
1,521
366
gh_patches_debug_4006
rasdani/github-patches
git_diff
kserve__kserve-818
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- kubernetes probes using HealthHandler endpoint succeed when models are not ready /kind bug **What steps did you take and what happened:** Used the [HealthHandler endpoint](https://github.com/kubeflow/kfserving/blob/92c5f3c9df662c853beef86bddf7f30ab5d8b996/python/kfserving/kfserving/kfserver.py#L62-L63) to set up a Kubernetes readiness probe. The probe succeeded even though the model was not ready. **What did you expect to happen:** Readiness probe failure when model is not ready. **Anything else you would like to add:** [Kubernetes probe documentation](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) says `Any code greater than or equal to 200 and less than 400 indicates success. Any other code indicates failure`, which seems to imply that the [response contents](https://github.com/kubeflow/kfserving/blob/92c5f3c9df662c853beef86bddf7f30ab5d8b996/python/kfserving/kfserving/kfserver.py#L108-L111) (which include the model's `.ready` property) are ignored. **Environment:** - Istio Version: - Knative Version: - KFServing Version: - Kubeflow version: - Minikube version: - Kubernetes version: (use `kubectl version`): - OS (e.g. from `/etc/os-release`): --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `python/kfserving/kfserving/kfserver.py` Content: ``` 1 # Copyright 2020 kubeflow.org. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import argparse 16 import logging 17 import json 18 from typing import List, Dict 19 import tornado.ioloop 20 import tornado.web 21 import tornado.httpserver 22 import tornado.log 23 from kfserving.handlers.http import PredictHandler, ExplainHandler 24 from kfserving import KFModel 25 26 DEFAULT_HTTP_PORT = 8080 27 DEFAULT_GRPC_PORT = 8081 28 DEFAULT_MAX_BUFFER_SIZE = 104857600 29 30 parser = argparse.ArgumentParser(add_help=False) 31 parser.add_argument('--http_port', default=DEFAULT_HTTP_PORT, type=int, 32 help='The HTTP Port listened to by the model server.') 33 parser.add_argument('--grpc_port', default=DEFAULT_GRPC_PORT, type=int, 34 help='The GRPC Port listened to by the model server.') 35 parser.add_argument('--max_buffer_size', default=DEFAULT_MAX_BUFFER_SIZE, type=int, 36 help='The max buffer size for tornado.') 37 parser.add_argument('--workers', default=0, type=int, 38 help='The number of works to fork') 39 args, _ = parser.parse_known_args() 40 41 tornado.log.enable_pretty_logging() 42 43 class KFServer: 44 def __init__(self, http_port: int = args.http_port, 45 grpc_port: int = args.grpc_port, 46 max_buffer_size: int = args.max_buffer_size, 47 workers: int = args.workers): 48 self.registered_models = {} 49 self.http_port = http_port 50 self.grpc_port = grpc_port 51 self.max_buffer_size = max_buffer_size 52 self.workers = workers 53 self._http_server = None 54 55 def create_application(self): 56 return tornado.web.Application([ 57 # Server Liveness API returns 200 if server is alive. 58 (r"/", LivenessHandler), 59 (r"/v1/models", 60 ListHandler, dict(models=self.registered_models)), 61 # Model Health API returns 200 if model is ready to serve. 62 (r"/v1/models/([a-zA-Z0-9_-]+)", 63 HealthHandler, dict(models=self.registered_models)), 64 (r"/v1/models/([a-zA-Z0-9_-]+):predict", 65 PredictHandler, dict(models=self.registered_models)), 66 (r"/v1/models/([a-zA-Z0-9_-]+):explain", 67 ExplainHandler, dict(models=self.registered_models)), 68 ]) 69 70 def start(self, models: List[KFModel]): 71 for model in models: 72 self.register_model(model) 73 74 self._http_server = tornado.httpserver.HTTPServer( 75 self.create_application(), max_buffer_size=self.max_buffer_size) 76 77 logging.info("Listening on port %s", self.http_port) 78 self._http_server.bind(self.http_port) 79 logging.info("Will fork %d workers", self.workers) 80 self._http_server.start(self.workers) 81 tornado.ioloop.IOLoop.current().start() 82 83 def register_model(self, model: KFModel): 84 if not model.name: 85 raise Exception( 86 "Failed to register model, model.name must be provided.") 87 self.registered_models[model.name] = model 88 logging.info("Registering model: %s", model.name) 89 90 91 class LivenessHandler(tornado.web.RequestHandler): # pylint:disable=too-few-public-methods 92 def get(self): 93 self.write("Alive") 94 95 96 class HealthHandler(tornado.web.RequestHandler): 97 def initialize(self, models: Dict[str, KFModel]): 98 self.models = models # pylint:disable=attribute-defined-outside-init 99 100 def get(self, name: str): 101 if name not in self.models: 102 raise tornado.web.HTTPError( 103 status_code=404, 104 reason="Model with name %s does not exist." % name 105 ) 106 107 model = self.models[name] 108 self.write(json.dumps({ 109 "name": model.name, 110 "ready": model.ready 111 })) 112 113 114 class ListHandler(tornado.web.RequestHandler): 115 def initialize(self, models: Dict[str, KFModel]): 116 self.models = models # pylint:disable=attribute-defined-outside-init 117 118 def get(self): 119 self.write(json.dumps(list(self.models.values()))) 120 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/python/kfserving/kfserving/kfserver.py b/python/kfserving/kfserving/kfserver.py --- a/python/kfserving/kfserving/kfserver.py +++ b/python/kfserving/kfserving/kfserver.py @@ -105,6 +105,12 @@ ) model = self.models[name] + if not model.ready: + raise tornado.web.HTTPError( + status_code=503, + reason="Model with name %s is not ready." % name + ) + self.write(json.dumps({ "name": model.name, "ready": model.ready
{"golden_diff": "diff --git a/python/kfserving/kfserving/kfserver.py b/python/kfserving/kfserving/kfserver.py\n--- a/python/kfserving/kfserving/kfserver.py\n+++ b/python/kfserving/kfserving/kfserver.py\n@@ -105,6 +105,12 @@\n )\n \n model = self.models[name]\n+ if not model.ready:\n+ raise tornado.web.HTTPError(\n+ status_code=503,\n+ reason=\"Model with name %s is not ready.\" % name\n+ )\n+\n self.write(json.dumps({\n \"name\": model.name,\n \"ready\": model.ready\n", "issue": "kubernetes probes using HealthHandler endpoint succeed when models are not ready\n/kind bug\r\n\r\n**What steps did you take and what happened:**\r\nUsed the [HealthHandler endpoint](https://github.com/kubeflow/kfserving/blob/92c5f3c9df662c853beef86bddf7f30ab5d8b996/python/kfserving/kfserving/kfserver.py#L62-L63) to set up a Kubernetes readiness probe. The probe succeeded even though the model was not ready. \r\n\r\n**What did you expect to happen:**\r\nReadiness probe failure when model is not ready. \r\n\r\n**Anything else you would like to add:**\r\n[Kubernetes probe documentation](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) says `Any code greater than or equal to 200 and less than 400 indicates success. Any other code indicates failure`, which seems to imply that the [response contents](https://github.com/kubeflow/kfserving/blob/92c5f3c9df662c853beef86bddf7f30ab5d8b996/python/kfserving/kfserving/kfserver.py#L108-L111) (which include the model's `.ready` property) are ignored. \r\n\r\n\r\n**Environment:**\r\n\r\n- Istio Version:\r\n- Knative Version:\r\n- KFServing Version:\r\n- Kubeflow version:\r\n- Minikube version:\r\n- Kubernetes version: (use `kubectl version`):\r\n- OS (e.g. from `/etc/os-release`):\r\n\n", "before_files": [{"content": "# Copyright 2020 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport logging\nimport json\nfrom typing import List, Dict\nimport tornado.ioloop\nimport tornado.web\nimport tornado.httpserver\nimport tornado.log\nfrom kfserving.handlers.http import PredictHandler, ExplainHandler\nfrom kfserving import KFModel\n\nDEFAULT_HTTP_PORT = 8080\nDEFAULT_GRPC_PORT = 8081\nDEFAULT_MAX_BUFFER_SIZE = 104857600\n\nparser = argparse.ArgumentParser(add_help=False)\nparser.add_argument('--http_port', default=DEFAULT_HTTP_PORT, type=int,\n help='The HTTP Port listened to by the model server.')\nparser.add_argument('--grpc_port', default=DEFAULT_GRPC_PORT, type=int,\n help='The GRPC Port listened to by the model server.')\nparser.add_argument('--max_buffer_size', default=DEFAULT_MAX_BUFFER_SIZE, type=int,\n help='The max buffer size for tornado.')\nparser.add_argument('--workers', default=0, type=int,\n help='The number of works to fork')\nargs, _ = parser.parse_known_args()\n\ntornado.log.enable_pretty_logging()\n\nclass KFServer:\n def __init__(self, http_port: int = args.http_port,\n grpc_port: int = args.grpc_port,\n max_buffer_size: int = args.max_buffer_size,\n workers: int = args.workers):\n self.registered_models = {}\n self.http_port = http_port\n self.grpc_port = grpc_port\n self.max_buffer_size = max_buffer_size\n self.workers = workers\n self._http_server = None\n\n def create_application(self):\n return tornado.web.Application([\n # Server Liveness API returns 200 if server is alive.\n (r\"/\", LivenessHandler),\n (r\"/v1/models\",\n ListHandler, dict(models=self.registered_models)),\n # Model Health API returns 200 if model is ready to serve.\n (r\"/v1/models/([a-zA-Z0-9_-]+)\",\n HealthHandler, dict(models=self.registered_models)),\n (r\"/v1/models/([a-zA-Z0-9_-]+):predict\",\n PredictHandler, dict(models=self.registered_models)),\n (r\"/v1/models/([a-zA-Z0-9_-]+):explain\",\n ExplainHandler, dict(models=self.registered_models)),\n ])\n\n def start(self, models: List[KFModel]):\n for model in models:\n self.register_model(model)\n\n self._http_server = tornado.httpserver.HTTPServer(\n self.create_application(), max_buffer_size=self.max_buffer_size)\n\n logging.info(\"Listening on port %s\", self.http_port)\n self._http_server.bind(self.http_port)\n logging.info(\"Will fork %d workers\", self.workers)\n self._http_server.start(self.workers)\n tornado.ioloop.IOLoop.current().start()\n\n def register_model(self, model: KFModel):\n if not model.name:\n raise Exception(\n \"Failed to register model, model.name must be provided.\")\n self.registered_models[model.name] = model\n logging.info(\"Registering model: %s\", model.name)\n\n\nclass LivenessHandler(tornado.web.RequestHandler): # pylint:disable=too-few-public-methods\n def get(self):\n self.write(\"Alive\")\n\n\nclass HealthHandler(tornado.web.RequestHandler):\n def initialize(self, models: Dict[str, KFModel]):\n self.models = models # pylint:disable=attribute-defined-outside-init\n\n def get(self, name: str):\n if name not in self.models:\n raise tornado.web.HTTPError(\n status_code=404,\n reason=\"Model with name %s does not exist.\" % name\n )\n\n model = self.models[name]\n self.write(json.dumps({\n \"name\": model.name,\n \"ready\": model.ready\n }))\n\n\nclass ListHandler(tornado.web.RequestHandler):\n def initialize(self, models: Dict[str, KFModel]):\n self.models = models # pylint:disable=attribute-defined-outside-init\n\n def get(self):\n self.write(json.dumps(list(self.models.values())))\n", "path": "python/kfserving/kfserving/kfserver.py"}], "after_files": [{"content": "# Copyright 2020 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport logging\nimport json\nfrom typing import List, Dict\nimport tornado.ioloop\nimport tornado.web\nimport tornado.httpserver\nimport tornado.log\nfrom kfserving.handlers.http import PredictHandler, ExplainHandler\nfrom kfserving import KFModel\n\nDEFAULT_HTTP_PORT = 8080\nDEFAULT_GRPC_PORT = 8081\nDEFAULT_MAX_BUFFER_SIZE = 104857600\n\nparser = argparse.ArgumentParser(add_help=False)\nparser.add_argument('--http_port', default=DEFAULT_HTTP_PORT, type=int,\n help='The HTTP Port listened to by the model server.')\nparser.add_argument('--grpc_port', default=DEFAULT_GRPC_PORT, type=int,\n help='The GRPC Port listened to by the model server.')\nparser.add_argument('--max_buffer_size', default=DEFAULT_MAX_BUFFER_SIZE, type=int,\n help='The max buffer size for tornado.')\nparser.add_argument('--workers', default=0, type=int,\n help='The number of works to fork')\nargs, _ = parser.parse_known_args()\n\ntornado.log.enable_pretty_logging()\n\nclass KFServer:\n def __init__(self, http_port: int = args.http_port,\n grpc_port: int = args.grpc_port,\n max_buffer_size: int = args.max_buffer_size,\n workers: int = args.workers):\n self.registered_models = {}\n self.http_port = http_port\n self.grpc_port = grpc_port\n self.max_buffer_size = max_buffer_size\n self.workers = workers\n self._http_server = None\n\n def create_application(self):\n return tornado.web.Application([\n # Server Liveness API returns 200 if server is alive.\n (r\"/\", LivenessHandler),\n (r\"/v1/models\",\n ListHandler, dict(models=self.registered_models)),\n # Model Health API returns 200 if model is ready to serve.\n (r\"/v1/models/([a-zA-Z0-9_-]+)\",\n HealthHandler, dict(models=self.registered_models)),\n (r\"/v1/models/([a-zA-Z0-9_-]+):predict\",\n PredictHandler, dict(models=self.registered_models)),\n (r\"/v1/models/([a-zA-Z0-9_-]+):explain\",\n ExplainHandler, dict(models=self.registered_models)),\n ])\n\n def start(self, models: List[KFModel]):\n for model in models:\n self.register_model(model)\n\n self._http_server = tornado.httpserver.HTTPServer(\n self.create_application(), max_buffer_size=self.max_buffer_size)\n\n logging.info(\"Listening on port %s\", self.http_port)\n self._http_server.bind(self.http_port)\n logging.info(\"Will fork %d workers\", self.workers)\n self._http_server.start(self.workers)\n tornado.ioloop.IOLoop.current().start()\n\n def register_model(self, model: KFModel):\n if not model.name:\n raise Exception(\n \"Failed to register model, model.name must be provided.\")\n self.registered_models[model.name] = model\n logging.info(\"Registering model: %s\", model.name)\n\n\nclass LivenessHandler(tornado.web.RequestHandler): # pylint:disable=too-few-public-methods\n def get(self):\n self.write(\"Alive\")\n\n\nclass HealthHandler(tornado.web.RequestHandler):\n def initialize(self, models: Dict[str, KFModel]):\n self.models = models # pylint:disable=attribute-defined-outside-init\n\n def get(self, name: str):\n if name not in self.models:\n raise tornado.web.HTTPError(\n status_code=404,\n reason=\"Model with name %s does not exist.\" % name\n )\n\n model = self.models[name]\n if not model.ready:\n raise tornado.web.HTTPError(\n status_code=503,\n reason=\"Model with name %s is not ready.\" % name\n )\n\n self.write(json.dumps({\n \"name\": model.name,\n \"ready\": model.ready\n }))\n\n\nclass ListHandler(tornado.web.RequestHandler):\n def initialize(self, models: Dict[str, KFModel]):\n self.models = models # pylint:disable=attribute-defined-outside-init\n\n def get(self):\n self.write(json.dumps(list(self.models.values())))\n", "path": "python/kfserving/kfserving/kfserver.py"}]}
1,898
145
gh_patches_debug_5667
rasdani/github-patches
git_diff
ivy-llc__ivy-16244
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- softshrink --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ivy/functional/frontends/paddle/nn/functional/activation.py` Content: ``` 1 # local 2 import ivy 3 from ivy.func_wrapper import with_supported_dtypes 4 from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back 5 from ivy.functional.frontends.paddle.tensor.math import tanh as paddle_tanh 6 from ivy.functional.frontends.paddle.tensor.math import ( 7 log_softmax as paddle_log_softmax, 8 ) 9 10 11 @with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle") 12 @to_ivy_arrays_and_back 13 def selu( 14 x, 15 /, 16 *, 17 alpha=1.6732632423543772848170429916717, 18 scale=1.0507009873554804934193349852946, 19 name=None, 20 ): 21 if scale <= 1.0: 22 raise ValueError(f"The scale must be greater than 1.0. Received: {scale}.") 23 24 if alpha < 0: 25 raise ValueError(f"The alpha must be no less than zero. Received: {alpha}.") 26 27 ret = ivy.where(x > 0, x, alpha * ivy.expm1(x)) 28 arr = scale * ret 29 return ivy.astype(arr, x.dtype) 30 31 32 tanh = paddle_tanh 33 log_softmax = paddle_log_softmax 34 35 36 @with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle") 37 @to_ivy_arrays_and_back 38 def hardshrink(x, threshold=0.5, name=None): 39 mask = ivy.logical_or(ivy.greater(x, threshold), ivy.less(x, -threshold)) 40 return ivy.where(mask, x, 0.0) 41 42 43 @with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle") 44 @to_ivy_arrays_and_back 45 def hardtanh( 46 x, 47 /, 48 *, 49 min=-1.0, 50 max=1.0, 51 name=None, 52 ): 53 less = ivy.where(ivy.less(x, min), min, x) 54 ret = ivy.where(ivy.greater(x, max), max, less).astype(x.dtype) 55 return ret 56 57 58 @with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle") 59 @to_ivy_arrays_and_back 60 def gelu(x, approximate=False, name=None): 61 return ivy.gelu(x, approximate=approximate) 62 63 64 @with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle") 65 @to_ivy_arrays_and_back 66 def hardsigmoid(x, slope=0.1666667, offset=0.5, name=None): 67 ret = ivy.minimum(ivy.maximum(ivy.add(ivy.multiply(x, slope), offset), 0), 1) 68 return ret 69 70 71 @with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle") 72 @to_ivy_arrays_and_back 73 def relu6(x, name=None): 74 return ivy.relu6(x) 75 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ivy/functional/frontends/paddle/nn/functional/activation.py b/ivy/functional/frontends/paddle/nn/functional/activation.py --- a/ivy/functional/frontends/paddle/nn/functional/activation.py +++ b/ivy/functional/frontends/paddle/nn/functional/activation.py @@ -72,3 +72,18 @@ @to_ivy_arrays_and_back def relu6(x, name=None): return ivy.relu6(x) + + +@with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle") +@to_ivy_arrays_and_back +def softshrink( + x, + /, + *, + threshold=0.5, + name=None, +): + low = ivy.where(ivy.less(x, -threshold), ivy.add(x, threshold), 0) + up = ivy.where(ivy.greater(x, threshold), ivy.subtract(x, threshold), 0) + add = ivy.add(low, up) + return ivy.astype(add, x.dtype)
{"golden_diff": "diff --git a/ivy/functional/frontends/paddle/nn/functional/activation.py b/ivy/functional/frontends/paddle/nn/functional/activation.py\n--- a/ivy/functional/frontends/paddle/nn/functional/activation.py\n+++ b/ivy/functional/frontends/paddle/nn/functional/activation.py\n@@ -72,3 +72,18 @@\n @to_ivy_arrays_and_back\n def relu6(x, name=None):\n return ivy.relu6(x)\n+\n+\n+@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n+@to_ivy_arrays_and_back\n+def softshrink(\n+ x,\n+ /,\n+ *,\n+ threshold=0.5,\n+ name=None,\n+):\n+ low = ivy.where(ivy.less(x, -threshold), ivy.add(x, threshold), 0)\n+ up = ivy.where(ivy.greater(x, threshold), ivy.subtract(x, threshold), 0)\n+ add = ivy.add(low, up)\n+ return ivy.astype(add, x.dtype)\n", "issue": "softshrink\n\n", "before_files": [{"content": "# local\nimport ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back\nfrom ivy.functional.frontends.paddle.tensor.math import tanh as paddle_tanh\nfrom ivy.functional.frontends.paddle.tensor.math import (\n log_softmax as paddle_log_softmax,\n)\n\n\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef selu(\n x,\n /,\n *,\n alpha=1.6732632423543772848170429916717,\n scale=1.0507009873554804934193349852946,\n name=None,\n):\n if scale <= 1.0:\n raise ValueError(f\"The scale must be greater than 1.0. Received: {scale}.\")\n\n if alpha < 0:\n raise ValueError(f\"The alpha must be no less than zero. Received: {alpha}.\")\n\n ret = ivy.where(x > 0, x, alpha * ivy.expm1(x))\n arr = scale * ret\n return ivy.astype(arr, x.dtype)\n\n\ntanh = paddle_tanh\nlog_softmax = paddle_log_softmax\n\n\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef hardshrink(x, threshold=0.5, name=None):\n mask = ivy.logical_or(ivy.greater(x, threshold), ivy.less(x, -threshold))\n return ivy.where(mask, x, 0.0)\n\n\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef hardtanh(\n x,\n /,\n *,\n min=-1.0,\n max=1.0,\n name=None,\n):\n less = ivy.where(ivy.less(x, min), min, x)\n ret = ivy.where(ivy.greater(x, max), max, less).astype(x.dtype)\n return ret\n\n\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef gelu(x, approximate=False, name=None):\n return ivy.gelu(x, approximate=approximate)\n\n\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef hardsigmoid(x, slope=0.1666667, offset=0.5, name=None):\n ret = ivy.minimum(ivy.maximum(ivy.add(ivy.multiply(x, slope), offset), 0), 1)\n return ret\n\n\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef relu6(x, name=None):\n return ivy.relu6(x)\n", "path": "ivy/functional/frontends/paddle/nn/functional/activation.py"}], "after_files": [{"content": "# local\nimport ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back\nfrom ivy.functional.frontends.paddle.tensor.math import tanh as paddle_tanh\nfrom ivy.functional.frontends.paddle.tensor.math import (\n log_softmax as paddle_log_softmax,\n)\n\n\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef selu(\n x,\n /,\n *,\n alpha=1.6732632423543772848170429916717,\n scale=1.0507009873554804934193349852946,\n name=None,\n):\n if scale <= 1.0:\n raise ValueError(f\"The scale must be greater than 1.0. Received: {scale}.\")\n\n if alpha < 0:\n raise ValueError(f\"The alpha must be no less than zero. Received: {alpha}.\")\n\n ret = ivy.where(x > 0, x, alpha * ivy.expm1(x))\n arr = scale * ret\n return ivy.astype(arr, x.dtype)\n\n\ntanh = paddle_tanh\nlog_softmax = paddle_log_softmax\n\n\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef hardshrink(x, threshold=0.5, name=None):\n mask = ivy.logical_or(ivy.greater(x, threshold), ivy.less(x, -threshold))\n return ivy.where(mask, x, 0.0)\n\n\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef hardtanh(\n x,\n /,\n *,\n min=-1.0,\n max=1.0,\n name=None,\n):\n less = ivy.where(ivy.less(x, min), min, x)\n ret = ivy.where(ivy.greater(x, max), max, less).astype(x.dtype)\n return ret\n\n\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef gelu(x, approximate=False, name=None):\n return ivy.gelu(x, approximate=approximate)\n\n\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef hardsigmoid(x, slope=0.1666667, offset=0.5, name=None):\n ret = ivy.minimum(ivy.maximum(ivy.add(ivy.multiply(x, slope), offset), 0), 1)\n return ret\n\n\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef relu6(x, name=None):\n return ivy.relu6(x)\n\n\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef softshrink(\n x,\n /,\n *,\n threshold=0.5,\n name=None,\n):\n low = ivy.where(ivy.less(x, -threshold), ivy.add(x, threshold), 0)\n up = ivy.where(ivy.greater(x, threshold), ivy.subtract(x, threshold), 0)\n add = ivy.add(low, up)\n return ivy.astype(add, x.dtype)\n", "path": "ivy/functional/frontends/paddle/nn/functional/activation.py"}]}
1,151
254
gh_patches_debug_53637
rasdani/github-patches
git_diff
Kinto__kinto-797
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- init fails on osx at the backend selection step I followed the instructions given [here](https://kinto.readthedocs.io/en/stable/tutorials/install.html#from-sources), but when asked: ``` $ Select the backend you would like to use: (1 - postgresql, 2 - redis, default - memory) ``` entering `1` and `2` leads to the following error: ``` Traceback (most recent call last): File ".venv/bin/kinto", line 11, in <module> load_entry_point('kinto', 'console_scripts', 'kinto')() File "/work/git/kinto/kinto/__main__.py", line 108, in main answer = input(prompt).strip() AttributeError: 'int' object has no attribute 'strip' ``` and entering nothing + enter will lead to the following error. ``` Traceback (most recent call last): File ".venv/bin/kinto", line 11, in <module> load_entry_point('kinto', 'console_scripts', 'kinto')() File "/work/git/kinto/kinto/__main__.py", line 108, in main answer = input(prompt).strip() File "<string>", line 0 ^ SyntaxError: unexpected EOF while parsing ``` It appears that the code expects a `string` but getting a number and null, therefore failing on the `.strip()` call [here](https://github.com/Kinto/kinto/blob/master/kinto/__main__.py#L108). --- Entering `""`, `"1"` and `"2"` works. I'm assuming that's not the way it's designed to be? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kinto/__main__.py` Content: ``` 1 from __future__ import print_function 2 import argparse 3 import os 4 import sys 5 import logging 6 import logging.config 7 8 from kinto.core import scripts 9 from pyramid.scripts import pserve 10 from pyramid.paster import bootstrap 11 from kinto import __version__ 12 from kinto.config import init 13 14 DEFAULT_CONFIG_FILE = 'config/kinto.ini' 15 DEFAULT_PORT = 8888 16 DEFAULT_LOG_LEVEL = logging.INFO 17 DEFAULT_LOG_FORMAT = "%(levelname)-5.5s %(message)s" 18 19 20 def main(args=None): 21 """The main routine.""" 22 if args is None: 23 args = sys.argv[1:] 24 25 parser = argparse.ArgumentParser(description="Kinto Command-Line " 26 "Interface") 27 # XXX: deprecate this option, unnatural as first argument. 28 parser.add_argument('--ini', 29 help='Application configuration file', 30 dest='ini_file', 31 required=False, 32 default=DEFAULT_CONFIG_FILE) 33 34 parser.add_argument('-q', '--quiet', action='store_const', 35 const=logging.CRITICAL, dest='verbosity', 36 help='Show only critical errors.') 37 38 parser.add_argument('--debug', action='store_const', 39 const=logging.DEBUG, dest='verbosity', 40 help='Show all messages, including debug messages.') 41 42 commands = ('init', 'start', 'migrate', 'delete-collection', 'version') 43 subparsers = parser.add_subparsers(title='subcommands', 44 description='Main Kinto CLI commands', 45 dest='subcommand', 46 help="Choose and run with --help") 47 subparsers.required = True 48 49 for command in commands: 50 subparser = subparsers.add_parser(command) 51 subparser.set_defaults(which=command) 52 53 if command == 'init': 54 subparser.add_argument('--backend', 55 help='{memory,redis,postgresql}', 56 dest='backend', 57 required=False, 58 default=None) 59 elif command == 'migrate': 60 subparser.add_argument('--dry-run', 61 action='store_true', 62 help='Simulate the migration operations ' 63 'and show information', 64 dest='dry_run', 65 required=False, 66 default=False) 67 elif command == 'delete-collection': 68 subparser.add_argument('--bucket', 69 help='The bucket where the collection ' 70 'belongs to.', 71 required=True) 72 subparser.add_argument('--collection', 73 help='The collection to remove.', 74 required=True) 75 76 elif command == 'start': 77 subparser.add_argument('--reload', 78 action='store_true', 79 help='Restart when code or config changes', 80 required=False, 81 default=False) 82 subparser.add_argument('--port', 83 type=int, 84 help='Listening port number', 85 required=False, 86 default=DEFAULT_PORT) 87 88 # Parse command-line arguments 89 parsed_args = vars(parser.parse_args(args)) 90 91 config_file = parsed_args['ini_file'] 92 which_command = parsed_args['which'] 93 94 # Initialize logging from 95 level = parsed_args.get('verbosity') or DEFAULT_LOG_LEVEL 96 logging.basicConfig(level=level, format=DEFAULT_LOG_FORMAT) 97 98 if which_command == 'init': 99 if os.path.exists(config_file): 100 print("%s already exists." % config_file, file=sys.stderr) 101 return 1 102 103 backend = parsed_args['backend'] 104 if not backend: 105 while True: 106 prompt = ("Select the backend you would like to use: " 107 "(1 - postgresql, 2 - redis, default - memory) ") 108 answer = input(prompt).strip() 109 try: 110 backends = {"1": "postgresql", "2": "redis", "": "memory"} 111 backend = backends[answer] 112 break 113 except KeyError: 114 pass 115 116 init(config_file, backend) 117 118 # Install postgresql libraries if necessary 119 if backend == "postgresql": 120 try: 121 import psycopg2 # NOQA 122 except ImportError: 123 import pip 124 pip.main(['install', "kinto[postgresql]"]) 125 elif backend == "redis": 126 try: 127 import kinto_redis # NOQA 128 except ImportError: 129 import pip 130 pip.main(['install', "kinto[redis]"]) 131 132 elif which_command == 'migrate': 133 dry_run = parsed_args['dry_run'] 134 env = bootstrap(config_file) 135 scripts.migrate(env, dry_run=dry_run) 136 137 elif which_command == 'delete-collection': 138 env = bootstrap(config_file) 139 return scripts.delete_collection(env, 140 parsed_args['bucket'], 141 parsed_args['collection']) 142 143 elif which_command == 'start': 144 pserve_argv = ['pserve', config_file] 145 if parsed_args['reload']: 146 pserve_argv.append('--reload') 147 pserve_argv.append('http_port=%s' % parsed_args['port']) 148 pserve.main(pserve_argv) 149 150 elif which_command == 'version': 151 print(__version__) 152 153 return 0 154 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kinto/__main__.py b/kinto/__main__.py --- a/kinto/__main__.py +++ b/kinto/__main__.py @@ -4,6 +4,7 @@ import sys import logging import logging.config +from six.moves import input from kinto.core import scripts from pyramid.scripts import pserve
{"golden_diff": "diff --git a/kinto/__main__.py b/kinto/__main__.py\n--- a/kinto/__main__.py\n+++ b/kinto/__main__.py\n@@ -4,6 +4,7 @@\n import sys\n import logging\n import logging.config\n+from six.moves import input\n \n from kinto.core import scripts\n from pyramid.scripts import pserve\n", "issue": "init fails on osx at the backend selection step\nI followed the instructions given [here](https://kinto.readthedocs.io/en/stable/tutorials/install.html#from-sources), but when asked:\n\n```\n$ Select the backend you would like to use: (1 - postgresql, 2 - redis, default - memory)\n```\n\nentering `1` and `2` leads to the following error:\n\n```\nTraceback (most recent call last):\n File \".venv/bin/kinto\", line 11, in <module>\n load_entry_point('kinto', 'console_scripts', 'kinto')()\n File \"/work/git/kinto/kinto/__main__.py\", line 108, in main\n answer = input(prompt).strip()\nAttributeError: 'int' object has no attribute 'strip'\n```\n\nand entering nothing + enter will lead to the following error.\n\n```\nTraceback (most recent call last):\n File \".venv/bin/kinto\", line 11, in <module>\n load_entry_point('kinto', 'console_scripts', 'kinto')()\n File \"/work/git/kinto/kinto/__main__.py\", line 108, in main\n answer = input(prompt).strip()\n File \"<string>\", line 0\n\n ^\nSyntaxError: unexpected EOF while parsing\n```\n\nIt appears that the code expects a `string` but getting a number and null, therefore failing on the `.strip()` call [here](https://github.com/Kinto/kinto/blob/master/kinto/__main__.py#L108).\n\n---\n\nEntering `\"\"`, `\"1\"` and `\"2\"` works. I'm assuming that's not the way it's designed to be? \n\n", "before_files": [{"content": "from __future__ import print_function\nimport argparse\nimport os\nimport sys\nimport logging\nimport logging.config\n\nfrom kinto.core import scripts\nfrom pyramid.scripts import pserve\nfrom pyramid.paster import bootstrap\nfrom kinto import __version__\nfrom kinto.config import init\n\nDEFAULT_CONFIG_FILE = 'config/kinto.ini'\nDEFAULT_PORT = 8888\nDEFAULT_LOG_LEVEL = logging.INFO\nDEFAULT_LOG_FORMAT = \"%(levelname)-5.5s %(message)s\"\n\n\ndef main(args=None):\n \"\"\"The main routine.\"\"\"\n if args is None:\n args = sys.argv[1:]\n\n parser = argparse.ArgumentParser(description=\"Kinto Command-Line \"\n \"Interface\")\n # XXX: deprecate this option, unnatural as first argument.\n parser.add_argument('--ini',\n help='Application configuration file',\n dest='ini_file',\n required=False,\n default=DEFAULT_CONFIG_FILE)\n\n parser.add_argument('-q', '--quiet', action='store_const',\n const=logging.CRITICAL, dest='verbosity',\n help='Show only critical errors.')\n\n parser.add_argument('--debug', action='store_const',\n const=logging.DEBUG, dest='verbosity',\n help='Show all messages, including debug messages.')\n\n commands = ('init', 'start', 'migrate', 'delete-collection', 'version')\n subparsers = parser.add_subparsers(title='subcommands',\n description='Main Kinto CLI commands',\n dest='subcommand',\n help=\"Choose and run with --help\")\n subparsers.required = True\n\n for command in commands:\n subparser = subparsers.add_parser(command)\n subparser.set_defaults(which=command)\n\n if command == 'init':\n subparser.add_argument('--backend',\n help='{memory,redis,postgresql}',\n dest='backend',\n required=False,\n default=None)\n elif command == 'migrate':\n subparser.add_argument('--dry-run',\n action='store_true',\n help='Simulate the migration operations '\n 'and show information',\n dest='dry_run',\n required=False,\n default=False)\n elif command == 'delete-collection':\n subparser.add_argument('--bucket',\n help='The bucket where the collection '\n 'belongs to.',\n required=True)\n subparser.add_argument('--collection',\n help='The collection to remove.',\n required=True)\n\n elif command == 'start':\n subparser.add_argument('--reload',\n action='store_true',\n help='Restart when code or config changes',\n required=False,\n default=False)\n subparser.add_argument('--port',\n type=int,\n help='Listening port number',\n required=False,\n default=DEFAULT_PORT)\n\n # Parse command-line arguments\n parsed_args = vars(parser.parse_args(args))\n\n config_file = parsed_args['ini_file']\n which_command = parsed_args['which']\n\n # Initialize logging from\n level = parsed_args.get('verbosity') or DEFAULT_LOG_LEVEL\n logging.basicConfig(level=level, format=DEFAULT_LOG_FORMAT)\n\n if which_command == 'init':\n if os.path.exists(config_file):\n print(\"%s already exists.\" % config_file, file=sys.stderr)\n return 1\n\n backend = parsed_args['backend']\n if not backend:\n while True:\n prompt = (\"Select the backend you would like to use: \"\n \"(1 - postgresql, 2 - redis, default - memory) \")\n answer = input(prompt).strip()\n try:\n backends = {\"1\": \"postgresql\", \"2\": \"redis\", \"\": \"memory\"}\n backend = backends[answer]\n break\n except KeyError:\n pass\n\n init(config_file, backend)\n\n # Install postgresql libraries if necessary\n if backend == \"postgresql\":\n try:\n import psycopg2 # NOQA\n except ImportError:\n import pip\n pip.main(['install', \"kinto[postgresql]\"])\n elif backend == \"redis\":\n try:\n import kinto_redis # NOQA\n except ImportError:\n import pip\n pip.main(['install', \"kinto[redis]\"])\n\n elif which_command == 'migrate':\n dry_run = parsed_args['dry_run']\n env = bootstrap(config_file)\n scripts.migrate(env, dry_run=dry_run)\n\n elif which_command == 'delete-collection':\n env = bootstrap(config_file)\n return scripts.delete_collection(env,\n parsed_args['bucket'],\n parsed_args['collection'])\n\n elif which_command == 'start':\n pserve_argv = ['pserve', config_file]\n if parsed_args['reload']:\n pserve_argv.append('--reload')\n pserve_argv.append('http_port=%s' % parsed_args['port'])\n pserve.main(pserve_argv)\n\n elif which_command == 'version':\n print(__version__)\n\n return 0\n", "path": "kinto/__main__.py"}], "after_files": [{"content": "from __future__ import print_function\nimport argparse\nimport os\nimport sys\nimport logging\nimport logging.config\nfrom six.moves import input\n\nfrom kinto.core import scripts\nfrom pyramid.scripts import pserve\nfrom pyramid.paster import bootstrap\nfrom kinto import __version__\nfrom kinto.config import init\n\nDEFAULT_CONFIG_FILE = 'config/kinto.ini'\nDEFAULT_PORT = 8888\nDEFAULT_LOG_LEVEL = logging.INFO\nDEFAULT_LOG_FORMAT = \"%(levelname)-5.5s %(message)s\"\n\n\ndef main(args=None):\n \"\"\"The main routine.\"\"\"\n if args is None:\n args = sys.argv[1:]\n\n parser = argparse.ArgumentParser(description=\"Kinto Command-Line \"\n \"Interface\")\n # XXX: deprecate this option, unnatural as first argument.\n parser.add_argument('--ini',\n help='Application configuration file',\n dest='ini_file',\n required=False,\n default=DEFAULT_CONFIG_FILE)\n\n parser.add_argument('-q', '--quiet', action='store_const',\n const=logging.CRITICAL, dest='verbosity',\n help='Show only critical errors.')\n\n parser.add_argument('--debug', action='store_const',\n const=logging.DEBUG, dest='verbosity',\n help='Show all messages, including debug messages.')\n\n commands = ('init', 'start', 'migrate', 'delete-collection', 'version')\n subparsers = parser.add_subparsers(title='subcommands',\n description='Main Kinto CLI commands',\n dest='subcommand',\n help=\"Choose and run with --help\")\n subparsers.required = True\n\n for command in commands:\n subparser = subparsers.add_parser(command)\n subparser.set_defaults(which=command)\n\n if command == 'init':\n subparser.add_argument('--backend',\n help='{memory,redis,postgresql}',\n dest='backend',\n required=False,\n default=None)\n elif command == 'migrate':\n subparser.add_argument('--dry-run',\n action='store_true',\n help='Simulate the migration operations '\n 'and show information',\n dest='dry_run',\n required=False,\n default=False)\n elif command == 'delete-collection':\n subparser.add_argument('--bucket',\n help='The bucket where the collection '\n 'belongs to.',\n required=True)\n subparser.add_argument('--collection',\n help='The collection to remove.',\n required=True)\n\n elif command == 'start':\n subparser.add_argument('--reload',\n action='store_true',\n help='Restart when code or config changes',\n required=False,\n default=False)\n subparser.add_argument('--port',\n type=int,\n help='Listening port number',\n required=False,\n default=DEFAULT_PORT)\n\n # Parse command-line arguments\n parsed_args = vars(parser.parse_args(args))\n\n config_file = parsed_args['ini_file']\n which_command = parsed_args['which']\n\n # Initialize logging from\n level = parsed_args.get('verbosity') or DEFAULT_LOG_LEVEL\n logging.basicConfig(level=level, format=DEFAULT_LOG_FORMAT)\n\n if which_command == 'init':\n if os.path.exists(config_file):\n print(\"%s already exists.\" % config_file, file=sys.stderr)\n return 1\n\n backend = parsed_args['backend']\n if not backend:\n while True:\n prompt = (\"Select the backend you would like to use: \"\n \"(1 - postgresql, 2 - redis, default - memory) \")\n answer = input(prompt).strip()\n try:\n backends = {\"1\": \"postgresql\", \"2\": \"redis\", \"\": \"memory\"}\n backend = backends[answer]\n break\n except KeyError:\n pass\n\n init(config_file, backend)\n\n # Install postgresql libraries if necessary\n if backend == \"postgresql\":\n try:\n import psycopg2 # NOQA\n except ImportError:\n import pip\n pip.main(['install', \"kinto[postgresql]\"])\n elif backend == \"redis\":\n try:\n import kinto_redis # NOQA\n except ImportError:\n import pip\n pip.main(['install', \"kinto[redis]\"])\n\n elif which_command == 'migrate':\n dry_run = parsed_args['dry_run']\n env = bootstrap(config_file)\n scripts.migrate(env, dry_run=dry_run)\n\n elif which_command == 'delete-collection':\n env = bootstrap(config_file)\n return scripts.delete_collection(env,\n parsed_args['bucket'],\n parsed_args['collection'])\n\n elif which_command == 'start':\n pserve_argv = ['pserve', config_file]\n if parsed_args['reload']:\n pserve_argv.append('--reload')\n pserve_argv.append('http_port=%s' % parsed_args['port'])\n pserve.main(pserve_argv)\n\n elif which_command == 'version':\n print(__version__)\n\n return 0\n", "path": "kinto/__main__.py"}]}
2,009
78
gh_patches_debug_59317
rasdani/github-patches
git_diff
chainer__chainer-1178
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- HuberLoss's backward() should not ignore gy `HuberLoss`'s `backward()` ignores `gy`, so any computation after `HuberLoss` has no effect for gradients backpropagated by `HuberLoss`. I think such a behavior is not correct. ``` python >>> x = chainer.Variable(np.zeros((1,1), dtype=np.float32)) >>> t = chainer.Variable(np.ones((1,1), dtype=np.float32)) >>> F.huber_loss(x, t, 1.0).backward() >>> x.grad array([[-1.]], dtype=float32) ``` ``` python >>> x = chainer.Variable(np.zeros((1,1), dtype=np.float32)) >>> t = chainer.Variable(np.ones((1,1), dtype=np.float32)) >>> (F.huber_loss(x, t, 1.0) * 0).backward() # Multiply the loss by zero >>> x.grad array([[-1.]], dtype=float32) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `chainer/functions/loss/huber_loss.py` Content: ``` 1 import numpy 2 3 from chainer import cuda 4 from chainer import function 5 from chainer.utils import type_check 6 7 8 class HuberLoss(function.Function): 9 10 def __init__(self, delta): 11 self.delta = delta 12 13 def check_type_forward(self, in_types): 14 type_check.expect(in_types.size() == 2) 15 type_check.expect( 16 in_types[0].dtype == numpy.float32, 17 in_types[1].dtype == numpy.float32, 18 in_types[0].shape == in_types[1].shape 19 ) 20 21 def forward(self, inputs): 22 xp = cuda.get_array_module(*inputs) 23 x0, x1 = inputs 24 self.diff = x0 - x1 25 y = xp.square(self.diff) 26 mask = y > (self.delta ** 2) 27 y -= mask * xp.square(abs(self.diff) - self.delta) 28 y *= 0.5 29 return y.sum(axis=1), 30 31 def backward(self, inputs, gy): 32 xp = cuda.get_array_module(*inputs) 33 mask = xp.abs(self.diff) <= self.delta 34 gx = xp.where(mask, self.diff, self.delta * xp.sign(self.diff)) 35 return gx, -gx 36 37 38 def huber_loss(x, t, delta): 39 """Loss function which is less sensitive to outliers in data than MSE. 40 41 .. math:: 42 a = x - t 43 44 and 45 46 .. math:: 47 L_{\\delta}(a) = \\left \\{ \\begin{array}{cc} 48 \\frac{1}{2} a^2 & {\\rm if~|a| \\leq \\delta} \\\\ 49 \\delta (|a| - \\frac{1}{2} \\delta) & {\\rm otherwise,} 50 \\end{array} \\right. 51 52 Args: 53 x (~chainer.Variable): Input variable. 54 The shape of ``x`` should be (:math:`N`, :math:`K`). 55 t (~chainer.Variable): Target variable for regression. 56 The shape of ``t`` should be (:math:`N`, :math:`K`). 57 delta (float): Constant variable for huber loss function 58 as used in definition. 59 60 Returns: 61 ~chainer.Variable: A variable object holding a scalar array of the 62 huber loss :math:`L_{\\delta}`. 63 64 See: 65 `Huber loss - Wikipedia <https://en.wikipedia.org/wiki/Huber_loss>`_. 66 67 """ 68 return HuberLoss(delta=delta)(x, t) 69 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/chainer/functions/loss/huber_loss.py b/chainer/functions/loss/huber_loss.py --- a/chainer/functions/loss/huber_loss.py +++ b/chainer/functions/loss/huber_loss.py @@ -31,7 +31,8 @@ def backward(self, inputs, gy): xp = cuda.get_array_module(*inputs) mask = xp.abs(self.diff) <= self.delta - gx = xp.where(mask, self.diff, self.delta * xp.sign(self.diff)) + gx = gy[0].reshape(gy[0].shape + (1,) * (self.diff.ndim - 1)) * \ + xp.where(mask, self.diff, self.delta * xp.sign(self.diff)) return gx, -gx
{"golden_diff": "diff --git a/chainer/functions/loss/huber_loss.py b/chainer/functions/loss/huber_loss.py\n--- a/chainer/functions/loss/huber_loss.py\n+++ b/chainer/functions/loss/huber_loss.py\n@@ -31,7 +31,8 @@\n def backward(self, inputs, gy):\n xp = cuda.get_array_module(*inputs)\n mask = xp.abs(self.diff) <= self.delta\n- gx = xp.where(mask, self.diff, self.delta * xp.sign(self.diff))\n+ gx = gy[0].reshape(gy[0].shape + (1,) * (self.diff.ndim - 1)) * \\\n+ xp.where(mask, self.diff, self.delta * xp.sign(self.diff))\n return gx, -gx\n", "issue": "HuberLoss's backward() should not ignore gy\n`HuberLoss`'s `backward()` ignores `gy`, so any computation after `HuberLoss` has no effect for gradients backpropagated by `HuberLoss`. I think such a behavior is not correct.\n\n``` python\n>>> x = chainer.Variable(np.zeros((1,1), dtype=np.float32))\n>>> t = chainer.Variable(np.ones((1,1), dtype=np.float32))\n>>> F.huber_loss(x, t, 1.0).backward()\n>>> x.grad\narray([[-1.]], dtype=float32)\n```\n\n``` python\n>>> x = chainer.Variable(np.zeros((1,1), dtype=np.float32))\n>>> t = chainer.Variable(np.ones((1,1), dtype=np.float32))\n>>> (F.huber_loss(x, t, 1.0) * 0).backward() # Multiply the loss by zero\n>>> x.grad\narray([[-1.]], dtype=float32)\n```\n\n", "before_files": [{"content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\nclass HuberLoss(function.Function):\n\n def __init__(self, delta):\n self.delta = delta\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 2)\n type_check.expect(\n in_types[0].dtype == numpy.float32,\n in_types[1].dtype == numpy.float32,\n in_types[0].shape == in_types[1].shape\n )\n\n def forward(self, inputs):\n xp = cuda.get_array_module(*inputs)\n x0, x1 = inputs\n self.diff = x0 - x1\n y = xp.square(self.diff)\n mask = y > (self.delta ** 2)\n y -= mask * xp.square(abs(self.diff) - self.delta)\n y *= 0.5\n return y.sum(axis=1),\n\n def backward(self, inputs, gy):\n xp = cuda.get_array_module(*inputs)\n mask = xp.abs(self.diff) <= self.delta\n gx = xp.where(mask, self.diff, self.delta * xp.sign(self.diff))\n return gx, -gx\n\n\ndef huber_loss(x, t, delta):\n \"\"\"Loss function which is less sensitive to outliers in data than MSE.\n\n .. math::\n a = x - t\n\n and\n\n .. math::\n L_{\\\\delta}(a) = \\\\left \\\\{ \\\\begin{array}{cc}\n \\\\frac{1}{2} a^2 & {\\\\rm if~|a| \\\\leq \\\\delta} \\\\\\\\\n \\\\delta (|a| - \\\\frac{1}{2} \\\\delta) & {\\\\rm otherwise,}\n \\\\end{array} \\\\right.\n\n Args:\n x (~chainer.Variable): Input variable.\n The shape of ``x`` should be (:math:`N`, :math:`K`).\n t (~chainer.Variable): Target variable for regression.\n The shape of ``t`` should be (:math:`N`, :math:`K`).\n delta (float): Constant variable for huber loss function\n as used in definition.\n\n Returns:\n ~chainer.Variable: A variable object holding a scalar array of the\n huber loss :math:`L_{\\\\delta}`.\n\n See:\n `Huber loss - Wikipedia <https://en.wikipedia.org/wiki/Huber_loss>`_.\n\n \"\"\"\n return HuberLoss(delta=delta)(x, t)\n", "path": "chainer/functions/loss/huber_loss.py"}], "after_files": [{"content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\nclass HuberLoss(function.Function):\n\n def __init__(self, delta):\n self.delta = delta\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 2)\n type_check.expect(\n in_types[0].dtype == numpy.float32,\n in_types[1].dtype == numpy.float32,\n in_types[0].shape == in_types[1].shape\n )\n\n def forward(self, inputs):\n xp = cuda.get_array_module(*inputs)\n x0, x1 = inputs\n self.diff = x0 - x1\n y = xp.square(self.diff)\n mask = y > (self.delta ** 2)\n y -= mask * xp.square(abs(self.diff) - self.delta)\n y *= 0.5\n return y.sum(axis=1),\n\n def backward(self, inputs, gy):\n xp = cuda.get_array_module(*inputs)\n mask = xp.abs(self.diff) <= self.delta\n gx = gy[0].reshape(gy[0].shape + (1,) * (self.diff.ndim - 1)) * \\\n xp.where(mask, self.diff, self.delta * xp.sign(self.diff))\n return gx, -gx\n\n\ndef huber_loss(x, t, delta):\n \"\"\"Loss function which is less sensitive to outliers in data than MSE.\n\n .. math::\n a = x - t\n\n and\n\n .. math::\n L_{\\\\delta}(a) = \\\\left \\\\{ \\\\begin{array}{cc}\n \\\\frac{1}{2} a^2 & {\\\\rm if~|a| \\\\leq \\\\delta} \\\\\\\\\n \\\\delta (|a| - \\\\frac{1}{2} \\\\delta) & {\\\\rm otherwise,}\n \\\\end{array} \\\\right.\n\n Args:\n x (~chainer.Variable): Input variable.\n The shape of ``x`` should be (:math:`N`, :math:`K`).\n t (~chainer.Variable): Target variable for regression.\n The shape of ``t`` should be (:math:`N`, :math:`K`).\n delta (float): Constant variable for huber loss function\n as used in definition.\n\n Returns:\n ~chainer.Variable: A variable object holding a scalar array of the\n huber loss :math:`L_{\\\\delta}`.\n\n See:\n `Huber loss - Wikipedia <https://en.wikipedia.org/wiki/Huber_loss>`_.\n\n \"\"\"\n return HuberLoss(delta=delta)(x, t)\n", "path": "chainer/functions/loss/huber_loss.py"}]}
1,166
164
gh_patches_debug_36731
rasdani/github-patches
git_diff
google__flax-3385
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Improve Early Stopping API See discussion in #2090 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `flax/training/early_stopping.py` Content: ``` 1 # Copyright 2023 The Flax Authors. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Early stopping.""" 16 17 import math 18 from flax import struct 19 20 21 class EarlyStopping(struct.PyTreeNode): 22 """Early stopping to avoid overfitting during training. 23 24 The following example stops training early if the difference between losses 25 recorded in the current epoch and previous epoch is less than 1e-3 26 consecutively for 2 times:: 27 28 early_stop = EarlyStopping(min_delta=1e-3, patience=2) 29 for epoch in range(1, num_epochs+1): 30 rng, input_rng = jax.random.split(rng) 31 optimizer, train_metrics = train_epoch( 32 optimizer, train_ds, config.batch_size, epoch, input_rng) 33 _, early_stop = early_stop.update(train_metrics['loss']) 34 if early_stop.should_stop: 35 print('Met early stopping criteria, breaking...') 36 break 37 38 Attributes: 39 min_delta: Minimum delta between updates to be considered an 40 improvement. 41 patience: Number of steps of no improvement before stopping. 42 best_metric: Current best metric value. 43 patience_count: Number of steps since last improving update. 44 should_stop: Whether the training loop should stop to avoid 45 overfitting. 46 """ 47 48 min_delta: float = 0 49 patience: int = 0 50 best_metric: float = float('inf') 51 patience_count: int = 0 52 should_stop: bool = False 53 54 def reset(self): 55 return self.replace( 56 best_metric=float('inf'), patience_count=0, should_stop=False 57 ) 58 59 def update(self, metric): 60 """Update the state based on metric. 61 62 Returns: 63 A pair (has_improved, early_stop), where `has_improved` is True when there 64 was an improvement greater than `min_delta` from the previous 65 `best_metric` and `early_stop` is the updated `EarlyStop` object. 66 """ 67 68 if ( 69 math.isinf(self.best_metric) 70 or self.best_metric - metric > self.min_delta 71 ): 72 return True, self.replace(best_metric=metric, patience_count=0) 73 else: 74 should_stop = self.patience_count >= self.patience or self.should_stop 75 return False, self.replace( 76 patience_count=self.patience_count + 1, should_stop=should_stop 77 ) 78 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/flax/training/early_stopping.py b/flax/training/early_stopping.py --- a/flax/training/early_stopping.py +++ b/flax/training/early_stopping.py @@ -30,7 +30,7 @@ rng, input_rng = jax.random.split(rng) optimizer, train_metrics = train_epoch( optimizer, train_ds, config.batch_size, epoch, input_rng) - _, early_stop = early_stop.update(train_metrics['loss']) + early_stop = early_stop.update(train_metrics['loss']) if early_stop.should_stop: print('Met early stopping criteria, breaking...') break @@ -43,6 +43,8 @@ patience_count: Number of steps since last improving update. should_stop: Whether the training loop should stop to avoid overfitting. + has_improved: Whether the metric has improved greater or + equal to the min_delta in the last `.update` call. """ min_delta: float = 0 @@ -50,28 +52,29 @@ best_metric: float = float('inf') patience_count: int = 0 should_stop: bool = False + has_improved: bool = False def reset(self): return self.replace( - best_metric=float('inf'), patience_count=0, should_stop=False + best_metric=float('inf'), patience_count=0, should_stop=False, has_improved=False ) def update(self, metric): """Update the state based on metric. Returns: - A pair (has_improved, early_stop), where `has_improved` is True when there - was an improvement greater than `min_delta` from the previous - `best_metric` and `early_stop` is the updated `EarlyStop` object. + The updated EarlyStopping class. The `.has_improved` attribute is True + when there was an improvement greater than `min_delta` from the previous + `best_metric`. """ if ( math.isinf(self.best_metric) or self.best_metric - metric > self.min_delta ): - return True, self.replace(best_metric=metric, patience_count=0) + return self.replace(best_metric=metric, patience_count=0, has_improved=True) else: should_stop = self.patience_count >= self.patience or self.should_stop - return False, self.replace( - patience_count=self.patience_count + 1, should_stop=should_stop + return self.replace( + patience_count=self.patience_count + 1, should_stop=should_stop, has_improved=False )
{"golden_diff": "diff --git a/flax/training/early_stopping.py b/flax/training/early_stopping.py\n--- a/flax/training/early_stopping.py\n+++ b/flax/training/early_stopping.py\n@@ -30,7 +30,7 @@\n rng, input_rng = jax.random.split(rng)\n optimizer, train_metrics = train_epoch(\n optimizer, train_ds, config.batch_size, epoch, input_rng)\n- _, early_stop = early_stop.update(train_metrics['loss'])\n+ early_stop = early_stop.update(train_metrics['loss'])\n if early_stop.should_stop:\n print('Met early stopping criteria, breaking...')\n break\n@@ -43,6 +43,8 @@\n patience_count: Number of steps since last improving update.\n should_stop: Whether the training loop should stop to avoid\n overfitting.\n+ has_improved: Whether the metric has improved greater or\n+ equal to the min_delta in the last `.update` call.\n \"\"\"\n \n min_delta: float = 0\n@@ -50,28 +52,29 @@\n best_metric: float = float('inf')\n patience_count: int = 0\n should_stop: bool = False\n+ has_improved: bool = False\n \n def reset(self):\n return self.replace(\n- best_metric=float('inf'), patience_count=0, should_stop=False\n+ best_metric=float('inf'), patience_count=0, should_stop=False, has_improved=False\n )\n \n def update(self, metric):\n \"\"\"Update the state based on metric.\n \n Returns:\n- A pair (has_improved, early_stop), where `has_improved` is True when there\n- was an improvement greater than `min_delta` from the previous\n- `best_metric` and `early_stop` is the updated `EarlyStop` object.\n+ The updated EarlyStopping class. The `.has_improved` attribute is True\n+ when there was an improvement greater than `min_delta` from the previous\n+ `best_metric`.\n \"\"\"\n \n if (\n math.isinf(self.best_metric)\n or self.best_metric - metric > self.min_delta\n ):\n- return True, self.replace(best_metric=metric, patience_count=0)\n+ return self.replace(best_metric=metric, patience_count=0, has_improved=True)\n else:\n should_stop = self.patience_count >= self.patience or self.should_stop\n- return False, self.replace(\n- patience_count=self.patience_count + 1, should_stop=should_stop\n+ return self.replace(\n+ patience_count=self.patience_count + 1, should_stop=should_stop, has_improved=False\n )\n", "issue": "Improve Early Stopping API\nSee discussion in #2090\n", "before_files": [{"content": "# Copyright 2023 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Early stopping.\"\"\"\n\nimport math\nfrom flax import struct\n\n\nclass EarlyStopping(struct.PyTreeNode):\n \"\"\"Early stopping to avoid overfitting during training.\n\n The following example stops training early if the difference between losses\n recorded in the current epoch and previous epoch is less than 1e-3\n consecutively for 2 times::\n\n early_stop = EarlyStopping(min_delta=1e-3, patience=2)\n for epoch in range(1, num_epochs+1):\n rng, input_rng = jax.random.split(rng)\n optimizer, train_metrics = train_epoch(\n optimizer, train_ds, config.batch_size, epoch, input_rng)\n _, early_stop = early_stop.update(train_metrics['loss'])\n if early_stop.should_stop:\n print('Met early stopping criteria, breaking...')\n break\n\n Attributes:\n min_delta: Minimum delta between updates to be considered an\n improvement.\n patience: Number of steps of no improvement before stopping.\n best_metric: Current best metric value.\n patience_count: Number of steps since last improving update.\n should_stop: Whether the training loop should stop to avoid\n overfitting.\n \"\"\"\n\n min_delta: float = 0\n patience: int = 0\n best_metric: float = float('inf')\n patience_count: int = 0\n should_stop: bool = False\n\n def reset(self):\n return self.replace(\n best_metric=float('inf'), patience_count=0, should_stop=False\n )\n\n def update(self, metric):\n \"\"\"Update the state based on metric.\n\n Returns:\n A pair (has_improved, early_stop), where `has_improved` is True when there\n was an improvement greater than `min_delta` from the previous\n `best_metric` and `early_stop` is the updated `EarlyStop` object.\n \"\"\"\n\n if (\n math.isinf(self.best_metric)\n or self.best_metric - metric > self.min_delta\n ):\n return True, self.replace(best_metric=metric, patience_count=0)\n else:\n should_stop = self.patience_count >= self.patience or self.should_stop\n return False, self.replace(\n patience_count=self.patience_count + 1, should_stop=should_stop\n )\n", "path": "flax/training/early_stopping.py"}], "after_files": [{"content": "# Copyright 2023 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Early stopping.\"\"\"\n\nimport math\nfrom flax import struct\n\n\nclass EarlyStopping(struct.PyTreeNode):\n \"\"\"Early stopping to avoid overfitting during training.\n\n The following example stops training early if the difference between losses\n recorded in the current epoch and previous epoch is less than 1e-3\n consecutively for 2 times::\n\n early_stop = EarlyStopping(min_delta=1e-3, patience=2)\n for epoch in range(1, num_epochs+1):\n rng, input_rng = jax.random.split(rng)\n optimizer, train_metrics = train_epoch(\n optimizer, train_ds, config.batch_size, epoch, input_rng)\n early_stop = early_stop.update(train_metrics['loss'])\n if early_stop.should_stop:\n print('Met early stopping criteria, breaking...')\n break\n\n Attributes:\n min_delta: Minimum delta between updates to be considered an\n improvement.\n patience: Number of steps of no improvement before stopping.\n best_metric: Current best metric value.\n patience_count: Number of steps since last improving update.\n should_stop: Whether the training loop should stop to avoid\n overfitting.\n has_improved: Whether the metric has improved greater or\n equal to the min_delta in the last `.update` call.\n \"\"\"\n\n min_delta: float = 0\n patience: int = 0\n best_metric: float = float('inf')\n patience_count: int = 0\n should_stop: bool = False\n has_improved: bool = False\n\n def reset(self):\n return self.replace(\n best_metric=float('inf'), patience_count=0, should_stop=False, has_improved=False\n )\n\n def update(self, metric):\n \"\"\"Update the state based on metric.\n\n Returns:\n The updated EarlyStopping class. The `.has_improved` attribute is True\n when there was an improvement greater than `min_delta` from the previous\n `best_metric`.\n \"\"\"\n\n if (\n math.isinf(self.best_metric)\n or self.best_metric - metric > self.min_delta\n ):\n return self.replace(best_metric=metric, patience_count=0, has_improved=True)\n else:\n should_stop = self.patience_count >= self.patience or self.should_stop\n return self.replace(\n patience_count=self.patience_count + 1, should_stop=should_stop, has_improved=False\n )\n", "path": "flax/training/early_stopping.py"}]}
1,050
590
gh_patches_debug_30
rasdani/github-patches
git_diff
aws-cloudformation__cfn-lint-2249
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- E3002 Invalid Property Lambda/Properties/EphemeralStorage *cfn-lint version: (`cfn-lint --version`)* `cfn-lint 0.58.4` *Description of issue.* Looks like it does not yet recognize `EphemeralStorage` as a valid property for lambdas ```yml Lambda: Type: AWS::Lambda::Function Properties: Role: !GetAtt Role.Arn Timeout: 600 MemorySize: 2048 EphemeralStorage: Size: 1024 ``` Link to the [**docs**](https://aws.amazon.com/blogs/compute/using-larger-ephemeral-storage-for-aws-lambda/) where it shows the new feature Cfn-lint uses the [CloudFormation Resource Specifications](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-resource-specification.html) as the base to do validation. These files are included as part of the application version. Please update to the latest version of `cfn-lint` or update the spec files manually (`cfn-lint -u`) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/cfnlint/version.py` Content: ``` 1 """ 2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 SPDX-License-Identifier: MIT-0 4 """ 5 6 __version__ = '0.58.4' 7 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/cfnlint/version.py b/src/cfnlint/version.py --- a/src/cfnlint/version.py +++ b/src/cfnlint/version.py @@ -3,4 +3,4 @@ SPDX-License-Identifier: MIT-0 """ -__version__ = '0.58.4' +__version__ = '0.59.0'
{"golden_diff": "diff --git a/src/cfnlint/version.py b/src/cfnlint/version.py\n--- a/src/cfnlint/version.py\n+++ b/src/cfnlint/version.py\n@@ -3,4 +3,4 @@\n SPDX-License-Identifier: MIT-0\n \"\"\"\n \n-__version__ = '0.58.4'\n+__version__ = '0.59.0'\n", "issue": "E3002 Invalid Property Lambda/Properties/EphemeralStorage\n*cfn-lint version: (`cfn-lint --version`)*\r\n\r\n`cfn-lint 0.58.4`\r\n\r\n*Description of issue.*\r\nLooks like it does not yet recognize `EphemeralStorage` as a valid property for lambdas\r\n```yml\r\nLambda:\r\n Type: AWS::Lambda::Function\r\n Properties:\r\n Role: !GetAtt Role.Arn\r\n Timeout: 600\r\n MemorySize: 2048\r\n EphemeralStorage:\r\n Size: 1024\r\n```\r\n\r\nLink to the [**docs**](https://aws.amazon.com/blogs/compute/using-larger-ephemeral-storage-for-aws-lambda/) where it shows the new feature\r\n\r\nCfn-lint uses the [CloudFormation Resource Specifications](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-resource-specification.html) as the base to do validation. These files are included as part of the application version. Please update to the latest version of `cfn-lint` or update the spec files manually (`cfn-lint -u`)\r\n\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\n\n__version__ = '0.58.4'\n", "path": "src/cfnlint/version.py"}], "after_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\n\n__version__ = '0.59.0'\n", "path": "src/cfnlint/version.py"}]}
557
82
gh_patches_debug_19074
rasdani/github-patches
git_diff
getpelican__pelican-280
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Feeds generate malformed urls (with double slash) When setting a site url that ends with a slash (like http://feldboris.alwaysdata.net/blog/), pelican will generate url in feeds with double slash (like http://feldboris.alwaysdata.net/blog//feeds/all.atom.xml) just after site url. W3C feeds validator show a warning with these double slashs. It's maybe the cause of a rss problem (my rss client show old feeds entries as unread) as post fields also contains double slash now. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pelican/settings.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 import os 3 import locale 4 import logging 5 6 from os.path import isabs 7 8 9 logger = logging.getLogger(__name__) 10 11 12 DEFAULT_THEME = os.sep.join([os.path.dirname(os.path.abspath(__file__)), 13 "themes/notmyidea"]) 14 _DEFAULT_CONFIG = {'PATH': '.', 15 'ARTICLE_DIR': '', 16 'ARTICLE_EXCLUDES': ('pages',), 17 'PAGE_DIR': 'pages', 18 'PAGE_EXCLUDES': (), 19 'THEME': DEFAULT_THEME, 20 'OUTPUT_PATH': 'output/', 21 'MARKUP': ('rst', 'md'), 22 'STATIC_PATHS': ['images', ], 23 'THEME_STATIC_PATHS': ['static', ], 24 'FEED': 'feeds/all.atom.xml', 25 'FEED_MAIN_URL': 'feeds/all.atom.xml', 26 'CATEGORY_FEED': 'feeds/%s.atom.xml', 27 'TRANSLATION_FEED': 'feeds/all-%s.atom.xml', 28 'FEED_MAX_ITEMS': '', 29 'SITENAME': 'A Pelican Blog', 30 'DISPLAY_PAGES_ON_MENU': True, 31 'PDF_GENERATOR': False, 32 'DEFAULT_CATEGORY': 'misc', 33 'FALLBACK_ON_FS_DATE': True, 34 'WITH_FUTURE_DATES': True, 35 'CSS_FILE': 'main.css', 36 'REVERSE_ARCHIVE_ORDER': False, 37 'REVERSE_CATEGORY_ORDER': False, 38 'DELETE_OUTPUT_DIRECTORY': False, 39 'ARTICLE_URL': '{slug}.html', 40 'ARTICLE_SAVE_AS': '{slug}.html', 41 'ARTICLE_LANG_URL': '{slug}-{lang}.html', 42 'ARTICLE_LANG_SAVE_AS': '{slug}-{lang}.html', 43 'PAGE_URL': 'pages/{slug}.html', 44 'PAGE_SAVE_AS': 'pages/{slug}.html', 45 'PAGE_LANG_URL': 'pages/{slug}-{lang}.html', 46 'PAGE_LANG_SAVE_AS': 'pages/{slug}-{lang}.html', 47 'CATEGORY_URL': 'category/{name}.html', 48 'CATEGORY_SAVE_AS': 'category/{name}.html', 49 'TAG_URL': 'tag/{slug}.html', 50 'TAG_SAVE_AS': 'tag/{slug}.html', 51 'AUTHOR_URL': u'author/{slug}.html', 52 'AUTHOR_SAVE_AS': u'author/{slug}.html', 53 'RELATIVE_URLS': True, 54 'DEFAULT_LANG': 'en', 55 'TAG_CLOUD_STEPS': 4, 56 'TAG_CLOUD_MAX_ITEMS': 100, 57 'DIRECT_TEMPLATES': ('index', 'tags', 'categories', 'archives'), 58 'PAGINATED_DIRECT_TEMPLATES': ('index', ), 59 'PELICAN_CLASS': 'pelican.Pelican', 60 'DEFAULT_DATE_FORMAT': '%a %d %B %Y', 61 'DATE_FORMATS': {}, 62 'JINJA_EXTENSIONS': [], 63 'LOCALE': '', # default to user locale 64 'DEFAULT_PAGINATION': False, 65 'DEFAULT_ORPHANS': 0, 66 'DEFAULT_METADATA': (), 67 'FILES_TO_COPY': (), 68 'DEFAULT_STATUS': 'published', 69 'ARTICLE_PERMALINK_STRUCTURE': '', 70 'TYPOGRIFY': False, 71 } 72 73 74 def read_settings(filename=None): 75 if filename: 76 local_settings = get_settings_from_file(filename) 77 else: 78 local_settings = _DEFAULT_CONFIG 79 configured_settings = configure_settings(local_settings, None, filename) 80 return configured_settings 81 82 83 def get_settings_from_file(filename, default_settings=None): 84 """Load a Python file into a dictionary. 85 """ 86 if default_settings == None: 87 default_settings = _DEFAULT_CONFIG 88 context = default_settings.copy() 89 if filename: 90 tempdict = {} 91 execfile(filename, tempdict) 92 for key in tempdict: 93 if key.isupper(): 94 context[key] = tempdict[key] 95 return context 96 97 98 def configure_settings(settings, default_settings=None, filename=None): 99 """Provide optimizations, error checking, and warnings for loaded settings""" 100 if default_settings is None: 101 default_settings = _DEFAULT_CONFIG 102 103 # Make the paths relative to the settings file 104 if filename: 105 for path in ['PATH', 'OUTPUT_PATH']: 106 if path in settings: 107 if settings[path] is not None and not isabs(settings[path]): 108 settings[path] = os.path.abspath(os.path.normpath( 109 os.path.join(os.path.dirname(filename), settings[path])) 110 ) 111 112 # if locales is not a list, make it one 113 locales = settings['LOCALE'] 114 115 if isinstance(locales, basestring): 116 locales = [locales] 117 118 # try to set the different locales, fallback on the default. 119 if not locales: 120 locales = _DEFAULT_CONFIG['LOCALE'] 121 122 for locale_ in locales: 123 try: 124 locale.setlocale(locale.LC_ALL, locale_) 125 break # break if it is successfull 126 except locale.Error: 127 pass 128 else: 129 logger.warn("LOCALE option doesn't contain a correct value") 130 131 # If SITEURL is defined but FEED_DOMAIN isn't, set FEED_DOMAIN = SITEURL 132 if ('SITEURL' in settings) and (not 'FEED_DOMAIN' in settings): 133 settings['FEED_DOMAIN'] = settings['SITEURL'] 134 135 # Warn if feeds are generated with both SITEURL & FEED_DOMAIN undefined 136 if (('FEED' in settings) or ('FEED_RSS' in settings)) and (not 'FEED_DOMAIN' in settings): 137 logger.warn("Since feed URLs should always be absolute, you should specify " 138 "FEED_DOMAIN in your settings. (e.g., 'FEED_DOMAIN = " 139 "http://www.example.com')") 140 141 if not 'TIMEZONE' in settings: 142 logger.warn("No timezone information specified in the settings. Assuming" 143 " your timezone is UTC for feed generation. Check " 144 "http://docs.notmyidea.org/alexis/pelican/settings.html#timezone " 145 "for more information") 146 147 return settings 148 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pelican/settings.py b/pelican/settings.py --- a/pelican/settings.py +++ b/pelican/settings.py @@ -128,9 +128,15 @@ else: logger.warn("LOCALE option doesn't contain a correct value") - # If SITEURL is defined but FEED_DOMAIN isn't, set FEED_DOMAIN = SITEURL - if ('SITEURL' in settings) and (not 'FEED_DOMAIN' in settings): - settings['FEED_DOMAIN'] = settings['SITEURL'] + if ('SITEURL' in settings): + # If SITEURL has a trailing slash, remove it and provide a warning + siteurl = settings['SITEURL'] + if (siteurl[len(siteurl) - 1:] == '/'): + settings['SITEURL'] = siteurl[:-1] + logger.warn("Removed extraneous trailing slash from SITEURL.") + # If SITEURL is defined but FEED_DOMAIN isn't, set FEED_DOMAIN = SITEURL + if not 'FEED_DOMAIN' in settings: + settings['FEED_DOMAIN'] = settings['SITEURL'] # Warn if feeds are generated with both SITEURL & FEED_DOMAIN undefined if (('FEED' in settings) or ('FEED_RSS' in settings)) and (not 'FEED_DOMAIN' in settings):
{"golden_diff": "diff --git a/pelican/settings.py b/pelican/settings.py\n--- a/pelican/settings.py\n+++ b/pelican/settings.py\n@@ -128,9 +128,15 @@\n else:\n logger.warn(\"LOCALE option doesn't contain a correct value\")\n \n- # If SITEURL is defined but FEED_DOMAIN isn't, set FEED_DOMAIN = SITEURL\n- if ('SITEURL' in settings) and (not 'FEED_DOMAIN' in settings):\n- settings['FEED_DOMAIN'] = settings['SITEURL']\n+ if ('SITEURL' in settings):\n+ # If SITEURL has a trailing slash, remove it and provide a warning\n+ siteurl = settings['SITEURL']\n+ if (siteurl[len(siteurl) - 1:] == '/'):\n+ settings['SITEURL'] = siteurl[:-1]\n+ logger.warn(\"Removed extraneous trailing slash from SITEURL.\")\n+ # If SITEURL is defined but FEED_DOMAIN isn't, set FEED_DOMAIN = SITEURL\n+ if not 'FEED_DOMAIN' in settings:\n+ settings['FEED_DOMAIN'] = settings['SITEURL']\n \n # Warn if feeds are generated with both SITEURL & FEED_DOMAIN undefined\n if (('FEED' in settings) or ('FEED_RSS' in settings)) and (not 'FEED_DOMAIN' in settings):\n", "issue": "Feeds generate malformed urls (with double slash)\nWhen setting a site url that ends with a slash (like http://feldboris.alwaysdata.net/blog/), pelican will generate url in feeds with double slash (like http://feldboris.alwaysdata.net/blog//feeds/all.atom.xml) just after site url. W3C feeds validator show a warning with these double slashs.\n\nIt's maybe the cause of a rss problem (my rss client show old feeds entries as unread) as post fields also contains double slash now.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport os\nimport locale\nimport logging\n\nfrom os.path import isabs\n\n\nlogger = logging.getLogger(__name__)\n\n\nDEFAULT_THEME = os.sep.join([os.path.dirname(os.path.abspath(__file__)),\n \"themes/notmyidea\"])\n_DEFAULT_CONFIG = {'PATH': '.',\n 'ARTICLE_DIR': '',\n 'ARTICLE_EXCLUDES': ('pages',),\n 'PAGE_DIR': 'pages',\n 'PAGE_EXCLUDES': (),\n 'THEME': DEFAULT_THEME,\n 'OUTPUT_PATH': 'output/',\n 'MARKUP': ('rst', 'md'),\n 'STATIC_PATHS': ['images', ],\n 'THEME_STATIC_PATHS': ['static', ],\n 'FEED': 'feeds/all.atom.xml',\n 'FEED_MAIN_URL': 'feeds/all.atom.xml',\n 'CATEGORY_FEED': 'feeds/%s.atom.xml',\n 'TRANSLATION_FEED': 'feeds/all-%s.atom.xml',\n 'FEED_MAX_ITEMS': '',\n 'SITENAME': 'A Pelican Blog',\n 'DISPLAY_PAGES_ON_MENU': True,\n 'PDF_GENERATOR': False,\n 'DEFAULT_CATEGORY': 'misc',\n 'FALLBACK_ON_FS_DATE': True,\n 'WITH_FUTURE_DATES': True,\n 'CSS_FILE': 'main.css',\n 'REVERSE_ARCHIVE_ORDER': False,\n 'REVERSE_CATEGORY_ORDER': False,\n 'DELETE_OUTPUT_DIRECTORY': False,\n 'ARTICLE_URL': '{slug}.html',\n 'ARTICLE_SAVE_AS': '{slug}.html',\n 'ARTICLE_LANG_URL': '{slug}-{lang}.html',\n 'ARTICLE_LANG_SAVE_AS': '{slug}-{lang}.html',\n 'PAGE_URL': 'pages/{slug}.html',\n 'PAGE_SAVE_AS': 'pages/{slug}.html',\n 'PAGE_LANG_URL': 'pages/{slug}-{lang}.html',\n 'PAGE_LANG_SAVE_AS': 'pages/{slug}-{lang}.html',\n 'CATEGORY_URL': 'category/{name}.html',\n 'CATEGORY_SAVE_AS': 'category/{name}.html',\n 'TAG_URL': 'tag/{slug}.html',\n 'TAG_SAVE_AS': 'tag/{slug}.html',\n 'AUTHOR_URL': u'author/{slug}.html',\n 'AUTHOR_SAVE_AS': u'author/{slug}.html',\n 'RELATIVE_URLS': True,\n 'DEFAULT_LANG': 'en',\n 'TAG_CLOUD_STEPS': 4,\n 'TAG_CLOUD_MAX_ITEMS': 100,\n 'DIRECT_TEMPLATES': ('index', 'tags', 'categories', 'archives'),\n 'PAGINATED_DIRECT_TEMPLATES': ('index', ),\n 'PELICAN_CLASS': 'pelican.Pelican',\n 'DEFAULT_DATE_FORMAT': '%a %d %B %Y',\n 'DATE_FORMATS': {},\n 'JINJA_EXTENSIONS': [],\n 'LOCALE': '', # default to user locale\n 'DEFAULT_PAGINATION': False,\n 'DEFAULT_ORPHANS': 0,\n 'DEFAULT_METADATA': (),\n 'FILES_TO_COPY': (),\n 'DEFAULT_STATUS': 'published',\n 'ARTICLE_PERMALINK_STRUCTURE': '',\n 'TYPOGRIFY': False,\n }\n\n\ndef read_settings(filename=None):\n if filename:\n local_settings = get_settings_from_file(filename)\n else:\n local_settings = _DEFAULT_CONFIG\n configured_settings = configure_settings(local_settings, None, filename)\n return configured_settings\n\n\ndef get_settings_from_file(filename, default_settings=None):\n \"\"\"Load a Python file into a dictionary.\n \"\"\"\n if default_settings == None:\n default_settings = _DEFAULT_CONFIG\n context = default_settings.copy()\n if filename:\n tempdict = {}\n execfile(filename, tempdict)\n for key in tempdict:\n if key.isupper():\n context[key] = tempdict[key]\n return context\n\n\ndef configure_settings(settings, default_settings=None, filename=None):\n \"\"\"Provide optimizations, error checking, and warnings for loaded settings\"\"\"\n if default_settings is None:\n default_settings = _DEFAULT_CONFIG\n\n # Make the paths relative to the settings file\n if filename:\n for path in ['PATH', 'OUTPUT_PATH']:\n if path in settings:\n if settings[path] is not None and not isabs(settings[path]):\n settings[path] = os.path.abspath(os.path.normpath(\n os.path.join(os.path.dirname(filename), settings[path]))\n )\n\n # if locales is not a list, make it one\n locales = settings['LOCALE']\n\n if isinstance(locales, basestring):\n locales = [locales]\n\n # try to set the different locales, fallback on the default.\n if not locales:\n locales = _DEFAULT_CONFIG['LOCALE']\n\n for locale_ in locales:\n try:\n locale.setlocale(locale.LC_ALL, locale_)\n break # break if it is successfull\n except locale.Error:\n pass\n else:\n logger.warn(\"LOCALE option doesn't contain a correct value\")\n\n # If SITEURL is defined but FEED_DOMAIN isn't, set FEED_DOMAIN = SITEURL\n if ('SITEURL' in settings) and (not 'FEED_DOMAIN' in settings):\n settings['FEED_DOMAIN'] = settings['SITEURL']\n\n # Warn if feeds are generated with both SITEURL & FEED_DOMAIN undefined\n if (('FEED' in settings) or ('FEED_RSS' in settings)) and (not 'FEED_DOMAIN' in settings):\n logger.warn(\"Since feed URLs should always be absolute, you should specify \"\n \"FEED_DOMAIN in your settings. (e.g., 'FEED_DOMAIN = \"\n \"http://www.example.com')\")\n\n if not 'TIMEZONE' in settings:\n logger.warn(\"No timezone information specified in the settings. Assuming\"\n \" your timezone is UTC for feed generation. Check \"\n \"http://docs.notmyidea.org/alexis/pelican/settings.html#timezone \"\n \"for more information\")\n\n return settings\n", "path": "pelican/settings.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport os\nimport locale\nimport logging\n\nfrom os.path import isabs\n\n\nlogger = logging.getLogger(__name__)\n\n\nDEFAULT_THEME = os.sep.join([os.path.dirname(os.path.abspath(__file__)),\n \"themes/notmyidea\"])\n_DEFAULT_CONFIG = {'PATH': '.',\n 'ARTICLE_DIR': '',\n 'ARTICLE_EXCLUDES': ('pages',),\n 'PAGE_DIR': 'pages',\n 'PAGE_EXCLUDES': (),\n 'THEME': DEFAULT_THEME,\n 'OUTPUT_PATH': 'output/',\n 'MARKUP': ('rst', 'md'),\n 'STATIC_PATHS': ['images', ],\n 'THEME_STATIC_PATHS': ['static', ],\n 'FEED': 'feeds/all.atom.xml',\n 'FEED_MAIN_URL': 'feeds/all.atom.xml',\n 'CATEGORY_FEED': 'feeds/%s.atom.xml',\n 'TRANSLATION_FEED': 'feeds/all-%s.atom.xml',\n 'FEED_MAX_ITEMS': '',\n 'SITENAME': 'A Pelican Blog',\n 'DISPLAY_PAGES_ON_MENU': True,\n 'PDF_GENERATOR': False,\n 'DEFAULT_CATEGORY': 'misc',\n 'FALLBACK_ON_FS_DATE': True,\n 'WITH_FUTURE_DATES': True,\n 'CSS_FILE': 'main.css',\n 'REVERSE_ARCHIVE_ORDER': False,\n 'REVERSE_CATEGORY_ORDER': False,\n 'DELETE_OUTPUT_DIRECTORY': False,\n 'ARTICLE_URL': '{slug}.html',\n 'ARTICLE_SAVE_AS': '{slug}.html',\n 'ARTICLE_LANG_URL': '{slug}-{lang}.html',\n 'ARTICLE_LANG_SAVE_AS': '{slug}-{lang}.html',\n 'PAGE_URL': 'pages/{slug}.html',\n 'PAGE_SAVE_AS': 'pages/{slug}.html',\n 'PAGE_LANG_URL': 'pages/{slug}-{lang}.html',\n 'PAGE_LANG_SAVE_AS': 'pages/{slug}-{lang}.html',\n 'CATEGORY_URL': 'category/{name}.html',\n 'CATEGORY_SAVE_AS': 'category/{name}.html',\n 'TAG_URL': 'tag/{slug}.html',\n 'TAG_SAVE_AS': 'tag/{slug}.html',\n 'AUTHOR_URL': u'author/{slug}.html',\n 'AUTHOR_SAVE_AS': u'author/{slug}.html',\n 'RELATIVE_URLS': True,\n 'DEFAULT_LANG': 'en',\n 'TAG_CLOUD_STEPS': 4,\n 'TAG_CLOUD_MAX_ITEMS': 100,\n 'DIRECT_TEMPLATES': ('index', 'tags', 'categories', 'archives'),\n 'PAGINATED_DIRECT_TEMPLATES': ('index', ),\n 'PELICAN_CLASS': 'pelican.Pelican',\n 'DEFAULT_DATE_FORMAT': '%a %d %B %Y',\n 'DATE_FORMATS': {},\n 'JINJA_EXTENSIONS': [],\n 'LOCALE': '', # default to user locale\n 'DEFAULT_PAGINATION': False,\n 'DEFAULT_ORPHANS': 0,\n 'DEFAULT_METADATA': (),\n 'FILES_TO_COPY': (),\n 'DEFAULT_STATUS': 'published',\n 'ARTICLE_PERMALINK_STRUCTURE': '',\n 'TYPOGRIFY': False,\n }\n\n\ndef read_settings(filename=None):\n if filename:\n local_settings = get_settings_from_file(filename)\n else:\n local_settings = _DEFAULT_CONFIG\n configured_settings = configure_settings(local_settings, None, filename)\n return configured_settings\n\n\ndef get_settings_from_file(filename, default_settings=None):\n \"\"\"Load a Python file into a dictionary.\n \"\"\"\n if default_settings == None:\n default_settings = _DEFAULT_CONFIG\n context = default_settings.copy()\n if filename:\n tempdict = {}\n execfile(filename, tempdict)\n for key in tempdict:\n if key.isupper():\n context[key] = tempdict[key]\n return context\n\n\ndef configure_settings(settings, default_settings=None, filename=None):\n \"\"\"Provide optimizations, error checking, and warnings for loaded settings\"\"\"\n if default_settings is None:\n default_settings = _DEFAULT_CONFIG\n\n # Make the paths relative to the settings file\n if filename:\n for path in ['PATH', 'OUTPUT_PATH']:\n if path in settings:\n if settings[path] is not None and not isabs(settings[path]):\n settings[path] = os.path.abspath(os.path.normpath(\n os.path.join(os.path.dirname(filename), settings[path]))\n )\n\n # if locales is not a list, make it one\n locales = settings['LOCALE']\n\n if isinstance(locales, basestring):\n locales = [locales]\n\n # try to set the different locales, fallback on the default.\n if not locales:\n locales = _DEFAULT_CONFIG['LOCALE']\n\n for locale_ in locales:\n try:\n locale.setlocale(locale.LC_ALL, locale_)\n break # break if it is successfull\n except locale.Error:\n pass\n else:\n logger.warn(\"LOCALE option doesn't contain a correct value\")\n\n if ('SITEURL' in settings):\n # If SITEURL has a trailing slash, remove it and provide a warning\n siteurl = settings['SITEURL']\n if (siteurl[len(siteurl) - 1:] == '/'):\n settings['SITEURL'] = siteurl[:-1]\n logger.warn(\"Removed extraneous trailing slash from SITEURL.\")\n # If SITEURL is defined but FEED_DOMAIN isn't, set FEED_DOMAIN = SITEURL\n if not 'FEED_DOMAIN' in settings:\n settings['FEED_DOMAIN'] = settings['SITEURL']\n\n # Warn if feeds are generated with both SITEURL & FEED_DOMAIN undefined\n if (('FEED' in settings) or ('FEED_RSS' in settings)) and (not 'FEED_DOMAIN' in settings):\n logger.warn(\"Since feed URLs should always be absolute, you should specify \"\n \"FEED_DOMAIN in your settings. (e.g., 'FEED_DOMAIN = \"\n \"http://www.example.com')\")\n\n if not 'TIMEZONE' in settings:\n logger.warn(\"No timezone information specified in the settings. Assuming\"\n \" your timezone is UTC for feed generation. Check \"\n \"http://docs.notmyidea.org/alexis/pelican/settings.html#timezone \"\n \"for more information\")\n\n return settings\n", "path": "pelican/settings.py"}]}
1,989
302
gh_patches_debug_37629
rasdani/github-patches
git_diff
medtagger__MedTagger-202
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- User personal information cannot be changed ## Expected Behavior User can change its personal information at any time using Settings page. User should only be able to change their first and last name. The E-mail field should be grayed out. ## Actual Behavior Form fields are filled with user data but there is no way to change them. User cannot modify these fields and there is no "Save" button available. ## Steps to Reproduce the Problem 1. Go to Settings page. 2. Open "User data" section. 3. Done. ## Additional comment Consider changing the name of the section from "User data" to something else. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `backend/medtagger/api/users/service.py` Content: ``` 1 """Module responsible for defining endpoints for users administration.""" 2 from typing import Any 3 4 from flask import request 5 from flask_restplus import Resource 6 7 from medtagger.api import api 8 from medtagger.api.users import serializers 9 from medtagger.api.users.business import get_all_users, set_user_role 10 from medtagger.api.utils import get_current_user 11 from medtagger.api.security import login_required, role_required 12 13 users_ns = api.namespace('users', 'Users management') 14 15 16 @users_ns.route('/') 17 class GetUsers(Resource): 18 """Get all users endpoint.""" 19 20 @staticmethod 21 @login_required 22 @role_required('admin') 23 @users_ns.marshal_with(serializers.users_list) 24 @users_ns.doc(security='token') 25 def get() -> Any: 26 """Get all users endpoint.""" 27 users = get_all_users() 28 return {'users': users}, 200 29 30 31 @users_ns.route('/<int:user_id>/role') 32 class SetRole(Resource): 33 """Set user's role.""" 34 35 @staticmethod 36 @login_required 37 @role_required('admin') 38 @users_ns.doc(security='token') 39 def put(user_id: int) -> Any: 40 """Set user's role.""" 41 set_user_role(user_id, request.json['role']) 42 return {}, 204 43 44 45 @users_ns.route('/info') 46 class GetUserInfo(Resource): 47 """Get current user information.""" 48 49 @staticmethod 50 @login_required 51 @users_ns.marshal_with(serializers.user) 52 @users_ns.doc(security='token') 53 @users_ns.doc(responses={200: 'Successfully retrieved data.'}) 54 def get() -> Any: 55 """Get user info.""" 56 user = get_current_user() 57 return user, 200 58 ``` Path: `backend/medtagger/repositories/users.py` Content: ``` 1 """Module responsible for definition of Users' Repository.""" 2 from typing import List, Optional 3 4 from medtagger.database import db_session 5 from medtagger.database.models import User 6 7 8 class UsersRepository(object): 9 """Repository for Users.""" 10 11 @staticmethod 12 def add_new_user(new_user: User) -> int: 13 """Add new user. 14 15 :return: id of the new user 16 """ 17 with db_session() as session: 18 session.add(new_user) 19 return new_user.id 20 21 @staticmethod 22 def get_all_users() -> List[User]: 23 """Return list of all users.""" 24 return User.query.order_by(User.id).all() 25 26 @staticmethod 27 def get_user_by_email(user_email: str) -> Optional[User]: 28 """Get user with given email. 29 30 :return Optional of User 31 """ 32 return User.query.filter(User.email == user_email).first() 33 34 @staticmethod 35 def get_user_by_id(user_id: int) -> User: 36 """Get user with given id.""" 37 return User.query.filter(User.id == user_id).one() 38 ``` Path: `backend/medtagger/api/users/business.py` Content: ``` 1 """Module responsible for business logic for users administration.""" 2 from typing import List 3 4 from medtagger.database.models import User 5 from medtagger.repositories.users import UsersRepository 6 from medtagger.repositories.roles import RolesRepository 7 8 9 def get_all_users() -> List[User]: 10 """Return list of all users.""" 11 return UsersRepository.get_all_users() 12 13 14 def set_user_role(user_id: int, role_name: str) -> None: 15 """Set user's role. Old role is being replaced.""" 16 RolesRepository.set_user_role(user_id, role_name) 17 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/backend/medtagger/api/users/business.py b/backend/medtagger/api/users/business.py --- a/backend/medtagger/api/users/business.py +++ b/backend/medtagger/api/users/business.py @@ -1,6 +1,9 @@ """Module responsible for business logic for users administration.""" from typing import List +from sqlalchemy.orm.exc import NoResultFound + +from medtagger.api import InvalidArgumentsException from medtagger.database.models import User from medtagger.repositories.users import UsersRepository from medtagger.repositories.roles import RolesRepository @@ -14,3 +17,12 @@ def set_user_role(user_id: int, role_name: str) -> None: """Set user's role. Old role is being replaced.""" RolesRepository.set_user_role(user_id, role_name) + + +def set_user_info(user_id: int, firstName: str, lastName: str) -> None: + """Set user's information.""" + try: + user = UsersRepository.get_user_by_id(user_id) + UsersRepository.set_user_info(user, firstName, lastName) + except NoResultFound: + raise InvalidArgumentsException('User with this id does not exist.') diff --git a/backend/medtagger/api/users/service.py b/backend/medtagger/api/users/service.py --- a/backend/medtagger/api/users/service.py +++ b/backend/medtagger/api/users/service.py @@ -6,7 +6,7 @@ from medtagger.api import api from medtagger.api.users import serializers -from medtagger.api.users.business import get_all_users, set_user_role +from medtagger.api.users.business import get_all_users, set_user_role, set_user_info from medtagger.api.utils import get_current_user from medtagger.api.security import login_required, role_required @@ -55,3 +55,18 @@ """Get user info.""" user = get_current_user() return user, 200 + + +@users_ns.route('/<int:user_id>') +class SetUserInfo(Resource): + """Set user's information (first name and last name).""" + + @staticmethod + @login_required + @users_ns.doc(security='token') + def put(user_id: int) -> Any: + """Set user info.""" + if get_current_user().id != user_id: + return {}, 403 + set_user_info(user_id, request.json['firstName'], request.json['lastName']) + return {}, 204 diff --git a/backend/medtagger/repositories/users.py b/backend/medtagger/repositories/users.py --- a/backend/medtagger/repositories/users.py +++ b/backend/medtagger/repositories/users.py @@ -35,3 +35,11 @@ def get_user_by_id(user_id: int) -> User: """Get user with given id.""" return User.query.filter(User.id == user_id).one() + + @staticmethod + def set_user_info(user: User, firstName: str, lastName: str) -> None: + """Set user's info.""" + with db_session() as session: + user.first_name = firstName + user.last_name = lastName + session.add(user)
{"golden_diff": "diff --git a/backend/medtagger/api/users/business.py b/backend/medtagger/api/users/business.py\n--- a/backend/medtagger/api/users/business.py\n+++ b/backend/medtagger/api/users/business.py\n@@ -1,6 +1,9 @@\n \"\"\"Module responsible for business logic for users administration.\"\"\"\n from typing import List\n \n+from sqlalchemy.orm.exc import NoResultFound\n+\n+from medtagger.api import InvalidArgumentsException\n from medtagger.database.models import User\n from medtagger.repositories.users import UsersRepository\n from medtagger.repositories.roles import RolesRepository\n@@ -14,3 +17,12 @@\n def set_user_role(user_id: int, role_name: str) -> None:\n \"\"\"Set user's role. Old role is being replaced.\"\"\"\n RolesRepository.set_user_role(user_id, role_name)\n+\n+\n+def set_user_info(user_id: int, firstName: str, lastName: str) -> None:\n+ \"\"\"Set user's information.\"\"\"\n+ try:\n+ user = UsersRepository.get_user_by_id(user_id)\n+ UsersRepository.set_user_info(user, firstName, lastName)\n+ except NoResultFound:\n+ raise InvalidArgumentsException('User with this id does not exist.')\ndiff --git a/backend/medtagger/api/users/service.py b/backend/medtagger/api/users/service.py\n--- a/backend/medtagger/api/users/service.py\n+++ b/backend/medtagger/api/users/service.py\n@@ -6,7 +6,7 @@\n \n from medtagger.api import api\n from medtagger.api.users import serializers\n-from medtagger.api.users.business import get_all_users, set_user_role\n+from medtagger.api.users.business import get_all_users, set_user_role, set_user_info\n from medtagger.api.utils import get_current_user\n from medtagger.api.security import login_required, role_required\n \n@@ -55,3 +55,18 @@\n \"\"\"Get user info.\"\"\"\n user = get_current_user()\n return user, 200\n+\n+\n+@users_ns.route('/<int:user_id>')\n+class SetUserInfo(Resource):\n+ \"\"\"Set user's information (first name and last name).\"\"\"\n+\n+ @staticmethod\n+ @login_required\n+ @users_ns.doc(security='token')\n+ def put(user_id: int) -> Any:\n+ \"\"\"Set user info.\"\"\"\n+ if get_current_user().id != user_id:\n+ return {}, 403\n+ set_user_info(user_id, request.json['firstName'], request.json['lastName'])\n+ return {}, 204\ndiff --git a/backend/medtagger/repositories/users.py b/backend/medtagger/repositories/users.py\n--- a/backend/medtagger/repositories/users.py\n+++ b/backend/medtagger/repositories/users.py\n@@ -35,3 +35,11 @@\n def get_user_by_id(user_id: int) -> User:\n \"\"\"Get user with given id.\"\"\"\n return User.query.filter(User.id == user_id).one()\n+\n+ @staticmethod\n+ def set_user_info(user: User, firstName: str, lastName: str) -> None:\n+ \"\"\"Set user's info.\"\"\"\n+ with db_session() as session:\n+ user.first_name = firstName\n+ user.last_name = lastName\n+ session.add(user)\n", "issue": "User personal information cannot be changed\n## Expected Behavior\r\n\r\nUser can change its personal information at any time using Settings page.\r\n\r\nUser should only be able to change their first and last name. The E-mail field should be grayed out.\r\n\r\n## Actual Behavior\r\n\r\nForm fields are filled with user data but there is no way to change them. User cannot modify these fields and there is no \"Save\" button available.\r\n\r\n## Steps to Reproduce the Problem\r\n\r\n 1. Go to Settings page.\r\n 2. Open \"User data\" section.\r\n 3. Done.\r\n\r\n## Additional comment\r\n\r\nConsider changing the name of the section from \"User data\" to something else.\r\n\n", "before_files": [{"content": "\"\"\"Module responsible for defining endpoints for users administration.\"\"\"\nfrom typing import Any\n\nfrom flask import request\nfrom flask_restplus import Resource\n\nfrom medtagger.api import api\nfrom medtagger.api.users import serializers\nfrom medtagger.api.users.business import get_all_users, set_user_role\nfrom medtagger.api.utils import get_current_user\nfrom medtagger.api.security import login_required, role_required\n\nusers_ns = api.namespace('users', 'Users management')\n\n\n@users_ns.route('/')\nclass GetUsers(Resource):\n \"\"\"Get all users endpoint.\"\"\"\n\n @staticmethod\n @login_required\n @role_required('admin')\n @users_ns.marshal_with(serializers.users_list)\n @users_ns.doc(security='token')\n def get() -> Any:\n \"\"\"Get all users endpoint.\"\"\"\n users = get_all_users()\n return {'users': users}, 200\n\n\n@users_ns.route('/<int:user_id>/role')\nclass SetRole(Resource):\n \"\"\"Set user's role.\"\"\"\n\n @staticmethod\n @login_required\n @role_required('admin')\n @users_ns.doc(security='token')\n def put(user_id: int) -> Any:\n \"\"\"Set user's role.\"\"\"\n set_user_role(user_id, request.json['role'])\n return {}, 204\n\n\n@users_ns.route('/info')\nclass GetUserInfo(Resource):\n \"\"\"Get current user information.\"\"\"\n\n @staticmethod\n @login_required\n @users_ns.marshal_with(serializers.user)\n @users_ns.doc(security='token')\n @users_ns.doc(responses={200: 'Successfully retrieved data.'})\n def get() -> Any:\n \"\"\"Get user info.\"\"\"\n user = get_current_user()\n return user, 200\n", "path": "backend/medtagger/api/users/service.py"}, {"content": "\"\"\"Module responsible for definition of Users' Repository.\"\"\"\nfrom typing import List, Optional\n\nfrom medtagger.database import db_session\nfrom medtagger.database.models import User\n\n\nclass UsersRepository(object):\n \"\"\"Repository for Users.\"\"\"\n\n @staticmethod\n def add_new_user(new_user: User) -> int:\n \"\"\"Add new user.\n\n :return: id of the new user\n \"\"\"\n with db_session() as session:\n session.add(new_user)\n return new_user.id\n\n @staticmethod\n def get_all_users() -> List[User]:\n \"\"\"Return list of all users.\"\"\"\n return User.query.order_by(User.id).all()\n\n @staticmethod\n def get_user_by_email(user_email: str) -> Optional[User]:\n \"\"\"Get user with given email.\n\n :return Optional of User\n \"\"\"\n return User.query.filter(User.email == user_email).first()\n\n @staticmethod\n def get_user_by_id(user_id: int) -> User:\n \"\"\"Get user with given id.\"\"\"\n return User.query.filter(User.id == user_id).one()\n", "path": "backend/medtagger/repositories/users.py"}, {"content": "\"\"\"Module responsible for business logic for users administration.\"\"\"\nfrom typing import List\n\nfrom medtagger.database.models import User\nfrom medtagger.repositories.users import UsersRepository\nfrom medtagger.repositories.roles import RolesRepository\n\n\ndef get_all_users() -> List[User]:\n \"\"\"Return list of all users.\"\"\"\n return UsersRepository.get_all_users()\n\n\ndef set_user_role(user_id: int, role_name: str) -> None:\n \"\"\"Set user's role. Old role is being replaced.\"\"\"\n RolesRepository.set_user_role(user_id, role_name)\n", "path": "backend/medtagger/api/users/business.py"}], "after_files": [{"content": "\"\"\"Module responsible for defining endpoints for users administration.\"\"\"\nfrom typing import Any\n\nfrom flask import request\nfrom flask_restplus import Resource\n\nfrom medtagger.api import api\nfrom medtagger.api.users import serializers\nfrom medtagger.api.users.business import get_all_users, set_user_role, set_user_info\nfrom medtagger.api.utils import get_current_user\nfrom medtagger.api.security import login_required, role_required\n\nusers_ns = api.namespace('users', 'Users management')\n\n\n@users_ns.route('/')\nclass GetUsers(Resource):\n \"\"\"Get all users endpoint.\"\"\"\n\n @staticmethod\n @login_required\n @role_required('admin')\n @users_ns.marshal_with(serializers.users_list)\n @users_ns.doc(security='token')\n def get() -> Any:\n \"\"\"Get all users endpoint.\"\"\"\n users = get_all_users()\n return {'users': users}, 200\n\n\n@users_ns.route('/<int:user_id>/role')\nclass SetRole(Resource):\n \"\"\"Set user's role.\"\"\"\n\n @staticmethod\n @login_required\n @role_required('admin')\n @users_ns.doc(security='token')\n def put(user_id: int) -> Any:\n \"\"\"Set user's role.\"\"\"\n set_user_role(user_id, request.json['role'])\n return {}, 204\n\n\n@users_ns.route('/info')\nclass GetUserInfo(Resource):\n \"\"\"Get current user information.\"\"\"\n\n @staticmethod\n @login_required\n @users_ns.marshal_with(serializers.user)\n @users_ns.doc(security='token')\n @users_ns.doc(responses={200: 'Successfully retrieved data.'})\n def get() -> Any:\n \"\"\"Get user info.\"\"\"\n user = get_current_user()\n return user, 200\n\n\n@users_ns.route('/<int:user_id>')\nclass SetUserInfo(Resource):\n \"\"\"Set user's information (first name and last name).\"\"\"\n\n @staticmethod\n @login_required\n @users_ns.doc(security='token')\n def put(user_id: int) -> Any:\n \"\"\"Set user info.\"\"\"\n if get_current_user().id != user_id:\n return {}, 403\n set_user_info(user_id, request.json['firstName'], request.json['lastName'])\n return {}, 204\n", "path": "backend/medtagger/api/users/service.py"}, {"content": "\"\"\"Module responsible for definition of Users' Repository.\"\"\"\nfrom typing import List, Optional\n\nfrom medtagger.database import db_session\nfrom medtagger.database.models import User\n\n\nclass UsersRepository(object):\n \"\"\"Repository for Users.\"\"\"\n\n @staticmethod\n def add_new_user(new_user: User) -> int:\n \"\"\"Add new user.\n\n :return: id of the new user\n \"\"\"\n with db_session() as session:\n session.add(new_user)\n return new_user.id\n\n @staticmethod\n def get_all_users() -> List[User]:\n \"\"\"Return list of all users.\"\"\"\n return User.query.order_by(User.id).all()\n\n @staticmethod\n def get_user_by_email(user_email: str) -> Optional[User]:\n \"\"\"Get user with given email.\n\n :return Optional of User\n \"\"\"\n return User.query.filter(User.email == user_email).first()\n\n @staticmethod\n def get_user_by_id(user_id: int) -> User:\n \"\"\"Get user with given id.\"\"\"\n return User.query.filter(User.id == user_id).one()\n\n @staticmethod\n def set_user_info(user: User, firstName: str, lastName: str) -> None:\n \"\"\"Set user's info.\"\"\"\n with db_session() as session:\n user.first_name = firstName\n user.last_name = lastName\n session.add(user)\n", "path": "backend/medtagger/repositories/users.py"}, {"content": "\"\"\"Module responsible for business logic for users administration.\"\"\"\nfrom typing import List\n\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom medtagger.api import InvalidArgumentsException\nfrom medtagger.database.models import User\nfrom medtagger.repositories.users import UsersRepository\nfrom medtagger.repositories.roles import RolesRepository\n\n\ndef get_all_users() -> List[User]:\n \"\"\"Return list of all users.\"\"\"\n return UsersRepository.get_all_users()\n\n\ndef set_user_role(user_id: int, role_name: str) -> None:\n \"\"\"Set user's role. Old role is being replaced.\"\"\"\n RolesRepository.set_user_role(user_id, role_name)\n\n\ndef set_user_info(user_id: int, firstName: str, lastName: str) -> None:\n \"\"\"Set user's information.\"\"\"\n try:\n user = UsersRepository.get_user_by_id(user_id)\n UsersRepository.set_user_info(user, firstName, lastName)\n except NoResultFound:\n raise InvalidArgumentsException('User with this id does not exist.')\n", "path": "backend/medtagger/api/users/business.py"}]}
1,368
720
gh_patches_debug_43873
rasdani/github-patches
git_diff
Flexget__Flexget-2224
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Issue 2.7.2: Form plugin ### Expected behaviour: No error while using 2.5.2 version ### Actual behaviour: Got an error while running task that using form plugin ### Steps to reproduce: Running task with form plugin #### Config: ```yaml avistaz_subs: form: url: https://avistaz.to/auth/login username: "{{secrets.avistaz.usr}}" password: "{{secrets.avistaz.pwd}}" userfield: email_username passfield: password html: url: "https://avistaz.to/subtitles?type=2&search=720p&language=0&subtitle=45&page={{i}}" title_from: url links_re: - '\.(rar|r0+[01](/01)|zip|srt)' increment: from: 0 to: 1 #dump: result.html regexp: accept: - someregex #disable: builtins download: yes decompress: true template: [notify_me] ``` #### Log: Generated new crash log ### Additional information: - Flexget Version: 2.7.2 - Python Version: 2.7.9 - Installation method: easy_install upgrade flexget - OS and version: osmc raspberry pi - Link to crash log: http://pastebin.com/XHL3dzQH --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `flexget/plugins/operate/formlogin.py` Content: ``` 1 from __future__ import unicode_literals, division, absolute_import 2 from builtins import * # noqa pylint: disable=unused-import, redefined-builtin 3 4 import logging 5 import io 6 import os 7 import socket 8 9 try: 10 import mechanize 11 except ImportError: 12 mechanize = None 13 14 from flexget import plugin 15 from flexget.event import event 16 from flexget.utils.soup import get_soup 17 18 log = logging.getLogger('formlogin') 19 20 21 if mechanize: 22 class SanitizeHandler(mechanize.BaseHandler): 23 def http_response(self, request, response): 24 if not hasattr(response, "seek"): 25 response = mechanize.response_seek_wrapper(response) 26 # Run HTML through BeautifulSoup for sanitizing 27 if 'html' in response.info().get('content-type', ''): 28 soup = get_soup(response.get_data()) 29 response.set_data(soup.prettify(encoding=soup.original_encoding)) 30 return response 31 32 33 class FormLogin(object): 34 """ 35 Login on form 36 """ 37 38 schema = { 39 'type': 'object', 40 'properties': { 41 'url': {'type': 'string', 'format': 'url'}, 42 'username': {'type': 'string'}, 43 'password': {'type': 'string'}, 44 'userfield': {'type': 'string'}, 45 'passfield': {'type': 'string'} 46 }, 47 'required': ['url', 'username', 'password'], 48 'additionalProperties': False 49 } 50 51 def on_task_start(self, task, config): 52 if not mechanize: 53 raise plugin.PluginError('mechanize required (python module), please install it.', log) 54 55 userfield = config.get('userfield', 'username') 56 passfield = config.get('passfield', 'password') 57 58 url = config['url'] 59 username = config['username'] 60 password = config['password'] 61 62 br = mechanize.Browser() 63 br.add_handler(SanitizeHandler()) 64 br.set_handle_robots(False) 65 try: 66 br.open(url) 67 except Exception: 68 # TODO: improve error handling 69 log.debug('Exception posting login form.', exc_info=True) 70 raise plugin.PluginError('Unable to post login form', log) 71 72 # br.set_debug_redirects(True) 73 # br.set_debug_responses(True) 74 # br.set_debug_http(True) 75 76 try: 77 for form in br.forms(): 78 loginform = form 79 80 try: 81 loginform[userfield] = username 82 loginform[passfield] = password 83 break 84 except Exception: 85 pass 86 else: 87 received = os.path.join(task.manager.config_base, 'received') 88 if not os.path.isdir(received): 89 os.mkdir(received) 90 filename = os.path.join(received, '%s.formlogin.html' % task.name) 91 with io.open(filename, 'wb') as f: 92 f.write(br.response().get_data()) 93 log.critical('I have saved the login page content to %s for you to view', filename) 94 raise plugin.PluginError('Unable to find login fields', log) 95 except socket.timeout: 96 raise plugin.PluginError('Timed out on url %s' % url) 97 98 br.form = loginform 99 100 br.submit() 101 102 cookiejar = br._ua_handlers["_cookies"].cookiejar 103 104 # Add cookiejar to our requests session 105 task.requests.add_cookiejar(cookiejar) 106 107 108 @event('plugin.register') 109 def register_plugin(): 110 plugin.register(FormLogin, 'form', api_ver=2) 111 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/flexget/plugins/operate/formlogin.py b/flexget/plugins/operate/formlogin.py --- a/flexget/plugins/operate/formlogin.py +++ b/flexget/plugins/operate/formlogin.py @@ -7,29 +7,17 @@ import socket try: - import mechanize + import mechanicalsoup except ImportError: - mechanize = None + mechanicalsoup = None +import requests from flexget import plugin from flexget.event import event -from flexget.utils.soup import get_soup log = logging.getLogger('formlogin') -if mechanize: - class SanitizeHandler(mechanize.BaseHandler): - def http_response(self, request, response): - if not hasattr(response, "seek"): - response = mechanize.response_seek_wrapper(response) - # Run HTML through BeautifulSoup for sanitizing - if 'html' in response.info().get('content-type', ''): - soup = get_soup(response.get_data()) - response.set_data(soup.prettify(encoding=soup.original_encoding)) - return response - - class FormLogin(object): """ Login on form @@ -49,8 +37,8 @@ } def on_task_start(self, task, config): - if not mechanize: - raise plugin.PluginError('mechanize required (python module), please install it.', log) + if not mechanicalsoup: + raise plugin.PluginError('mechanicalsoup required (python module), please install it.', log) userfield = config.get('userfield', 'username') passfield = config.get('passfield', 'password') @@ -59,29 +47,28 @@ username = config['username'] password = config['password'] - br = mechanize.Browser() - br.add_handler(SanitizeHandler()) - br.set_handle_robots(False) + br = mechanicalsoup.StatefulBrowser(session=task.requests) + try: - br.open(url) - except Exception: + response = br.open(url) + except requests.RequestException: # TODO: improve error handling - log.debug('Exception posting login form.', exc_info=True) - raise plugin.PluginError('Unable to post login form', log) + log.debug('Exception getting login page.', exc_info=True) + raise plugin.PluginError('Unable to get login page', log) - # br.set_debug_redirects(True) - # br.set_debug_responses(True) - # br.set_debug_http(True) + # br.set_debug(True) + num_forms = len(br.get_current_page().find_all('form')) + if not num_forms: + raise plugin.PluginError('Unable to find any forms on {}'.format(url), log) try: - for form in br.forms(): - loginform = form - + for form_num in range(num_forms): + br.select_form(nr=form_num) try: - loginform[userfield] = username - loginform[passfield] = password + br[userfield] = username + br[passfield] = password break - except Exception: + except mechanicalsoup.LinkNotFoundError: pass else: received = os.path.join(task.manager.config_base, 'received') @@ -89,20 +76,17 @@ os.mkdir(received) filename = os.path.join(received, '%s.formlogin.html' % task.name) with io.open(filename, 'wb') as f: - f.write(br.response().get_data()) + f.write(response.content) log.critical('I have saved the login page content to %s for you to view', filename) raise plugin.PluginError('Unable to find login fields', log) except socket.timeout: raise plugin.PluginError('Timed out on url %s' % url) - br.form = loginform - - br.submit() - - cookiejar = br._ua_handlers["_cookies"].cookiejar - - # Add cookiejar to our requests session - task.requests.add_cookiejar(cookiejar) + try: + br.submit_selected() + except requests.RequestException: + log.debug('Exception submitting login form.', exc_info=True) + raise plugin.PluginError('Unable to post login form', log) @event('plugin.register')
{"golden_diff": "diff --git a/flexget/plugins/operate/formlogin.py b/flexget/plugins/operate/formlogin.py\n--- a/flexget/plugins/operate/formlogin.py\n+++ b/flexget/plugins/operate/formlogin.py\n@@ -7,29 +7,17 @@\n import socket\n \n try:\n- import mechanize\n+ import mechanicalsoup\n except ImportError:\n- mechanize = None\n+ mechanicalsoup = None\n+import requests\n \n from flexget import plugin\n from flexget.event import event\n-from flexget.utils.soup import get_soup\n \n log = logging.getLogger('formlogin')\n \n \n-if mechanize:\n- class SanitizeHandler(mechanize.BaseHandler):\n- def http_response(self, request, response):\n- if not hasattr(response, \"seek\"):\n- response = mechanize.response_seek_wrapper(response)\n- # Run HTML through BeautifulSoup for sanitizing\n- if 'html' in response.info().get('content-type', ''):\n- soup = get_soup(response.get_data())\n- response.set_data(soup.prettify(encoding=soup.original_encoding))\n- return response\n-\n-\n class FormLogin(object):\n \"\"\"\n Login on form\n@@ -49,8 +37,8 @@\n }\n \n def on_task_start(self, task, config):\n- if not mechanize:\n- raise plugin.PluginError('mechanize required (python module), please install it.', log)\n+ if not mechanicalsoup:\n+ raise plugin.PluginError('mechanicalsoup required (python module), please install it.', log)\n \n userfield = config.get('userfield', 'username')\n passfield = config.get('passfield', 'password')\n@@ -59,29 +47,28 @@\n username = config['username']\n password = config['password']\n \n- br = mechanize.Browser()\n- br.add_handler(SanitizeHandler())\n- br.set_handle_robots(False)\n+ br = mechanicalsoup.StatefulBrowser(session=task.requests)\n+\n try:\n- br.open(url)\n- except Exception:\n+ response = br.open(url)\n+ except requests.RequestException:\n # TODO: improve error handling\n- log.debug('Exception posting login form.', exc_info=True)\n- raise plugin.PluginError('Unable to post login form', log)\n+ log.debug('Exception getting login page.', exc_info=True)\n+ raise plugin.PluginError('Unable to get login page', log)\n \n- # br.set_debug_redirects(True)\n- # br.set_debug_responses(True)\n- # br.set_debug_http(True)\n+ # br.set_debug(True)\n \n+ num_forms = len(br.get_current_page().find_all('form'))\n+ if not num_forms:\n+ raise plugin.PluginError('Unable to find any forms on {}'.format(url), log)\n try:\n- for form in br.forms():\n- loginform = form\n-\n+ for form_num in range(num_forms):\n+ br.select_form(nr=form_num)\n try:\n- loginform[userfield] = username\n- loginform[passfield] = password\n+ br[userfield] = username\n+ br[passfield] = password\n break\n- except Exception:\n+ except mechanicalsoup.LinkNotFoundError:\n pass\n else:\n received = os.path.join(task.manager.config_base, 'received')\n@@ -89,20 +76,17 @@\n os.mkdir(received)\n filename = os.path.join(received, '%s.formlogin.html' % task.name)\n with io.open(filename, 'wb') as f:\n- f.write(br.response().get_data())\n+ f.write(response.content)\n log.critical('I have saved the login page content to %s for you to view', filename)\n raise plugin.PluginError('Unable to find login fields', log)\n except socket.timeout:\n raise plugin.PluginError('Timed out on url %s' % url)\n \n- br.form = loginform\n-\n- br.submit()\n-\n- cookiejar = br._ua_handlers[\"_cookies\"].cookiejar\n-\n- # Add cookiejar to our requests session\n- task.requests.add_cookiejar(cookiejar)\n+ try:\n+ br.submit_selected()\n+ except requests.RequestException:\n+ log.debug('Exception submitting login form.', exc_info=True)\n+ raise plugin.PluginError('Unable to post login form', log)\n \n \n @event('plugin.register')\n", "issue": "Issue 2.7.2: Form plugin\n### Expected behaviour:\r\nNo error while using 2.5.2 version\r\n### Actual behaviour:\r\nGot an error while running task that using form plugin\r\n### Steps to reproduce:\r\nRunning task with form plugin\r\n\r\n#### Config:\r\n```yaml\r\n avistaz_subs:\r\n form:\r\n url: https://avistaz.to/auth/login\r\n username: \"{{secrets.avistaz.usr}}\"\r\n password: \"{{secrets.avistaz.pwd}}\"\r\n userfield: email_username\r\n passfield: password\r\n html:\r\n url: \"https://avistaz.to/subtitles?type=2&search=720p&language=0&subtitle=45&page={{i}}\"\r\n title_from: url\r\n links_re:\r\n - '\\.(rar|r0+[01](/01)|zip|srt)'\r\n increment:\r\n from: 0\r\n to: 1\r\n #dump: result.html\r\n regexp:\r\n accept:\r\n - someregex\r\n #disable: builtins\r\n download: yes\r\n decompress: true\r\n template: [notify_me]\r\n```\r\n#### Log:\r\nGenerated new crash log\r\n\r\n### Additional information:\r\n\r\n- Flexget Version: 2.7.2\r\n- Python Version: 2.7.9\r\n- Installation method: easy_install upgrade flexget\r\n- OS and version: osmc raspberry pi\r\n- Link to crash log: http://pastebin.com/XHL3dzQH\n", "before_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\n\nimport logging\nimport io\nimport os\nimport socket\n\ntry:\n import mechanize\nexcept ImportError:\n mechanize = None\n\nfrom flexget import plugin\nfrom flexget.event import event\nfrom flexget.utils.soup import get_soup\n\nlog = logging.getLogger('formlogin')\n\n\nif mechanize:\n class SanitizeHandler(mechanize.BaseHandler):\n def http_response(self, request, response):\n if not hasattr(response, \"seek\"):\n response = mechanize.response_seek_wrapper(response)\n # Run HTML through BeautifulSoup for sanitizing\n if 'html' in response.info().get('content-type', ''):\n soup = get_soup(response.get_data())\n response.set_data(soup.prettify(encoding=soup.original_encoding))\n return response\n\n\nclass FormLogin(object):\n \"\"\"\n Login on form\n \"\"\"\n\n schema = {\n 'type': 'object',\n 'properties': {\n 'url': {'type': 'string', 'format': 'url'},\n 'username': {'type': 'string'},\n 'password': {'type': 'string'},\n 'userfield': {'type': 'string'},\n 'passfield': {'type': 'string'}\n },\n 'required': ['url', 'username', 'password'],\n 'additionalProperties': False\n }\n\n def on_task_start(self, task, config):\n if not mechanize:\n raise plugin.PluginError('mechanize required (python module), please install it.', log)\n\n userfield = config.get('userfield', 'username')\n passfield = config.get('passfield', 'password')\n\n url = config['url']\n username = config['username']\n password = config['password']\n\n br = mechanize.Browser()\n br.add_handler(SanitizeHandler())\n br.set_handle_robots(False)\n try:\n br.open(url)\n except Exception:\n # TODO: improve error handling\n log.debug('Exception posting login form.', exc_info=True)\n raise plugin.PluginError('Unable to post login form', log)\n\n # br.set_debug_redirects(True)\n # br.set_debug_responses(True)\n # br.set_debug_http(True)\n\n try:\n for form in br.forms():\n loginform = form\n\n try:\n loginform[userfield] = username\n loginform[passfield] = password\n break\n except Exception:\n pass\n else:\n received = os.path.join(task.manager.config_base, 'received')\n if not os.path.isdir(received):\n os.mkdir(received)\n filename = os.path.join(received, '%s.formlogin.html' % task.name)\n with io.open(filename, 'wb') as f:\n f.write(br.response().get_data())\n log.critical('I have saved the login page content to %s for you to view', filename)\n raise plugin.PluginError('Unable to find login fields', log)\n except socket.timeout:\n raise plugin.PluginError('Timed out on url %s' % url)\n\n br.form = loginform\n\n br.submit()\n\n cookiejar = br._ua_handlers[\"_cookies\"].cookiejar\n\n # Add cookiejar to our requests session\n task.requests.add_cookiejar(cookiejar)\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(FormLogin, 'form', api_ver=2)\n", "path": "flexget/plugins/operate/formlogin.py"}], "after_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\n\nimport logging\nimport io\nimport os\nimport socket\n\ntry:\n import mechanicalsoup\nexcept ImportError:\n mechanicalsoup = None\nimport requests\n\nfrom flexget import plugin\nfrom flexget.event import event\n\nlog = logging.getLogger('formlogin')\n\n\nclass FormLogin(object):\n \"\"\"\n Login on form\n \"\"\"\n\n schema = {\n 'type': 'object',\n 'properties': {\n 'url': {'type': 'string', 'format': 'url'},\n 'username': {'type': 'string'},\n 'password': {'type': 'string'},\n 'userfield': {'type': 'string'},\n 'passfield': {'type': 'string'}\n },\n 'required': ['url', 'username', 'password'],\n 'additionalProperties': False\n }\n\n def on_task_start(self, task, config):\n if not mechanicalsoup:\n raise plugin.PluginError('mechanicalsoup required (python module), please install it.', log)\n\n userfield = config.get('userfield', 'username')\n passfield = config.get('passfield', 'password')\n\n url = config['url']\n username = config['username']\n password = config['password']\n\n br = mechanicalsoup.StatefulBrowser(session=task.requests)\n\n try:\n response = br.open(url)\n except requests.RequestException:\n # TODO: improve error handling\n log.debug('Exception getting login page.', exc_info=True)\n raise plugin.PluginError('Unable to get login page', log)\n\n # br.set_debug(True)\n\n num_forms = len(br.get_current_page().find_all('form'))\n if not num_forms:\n raise plugin.PluginError('Unable to find any forms on {}'.format(url), log)\n try:\n for form_num in range(num_forms):\n br.select_form(nr=form_num)\n try:\n br[userfield] = username\n br[passfield] = password\n break\n except mechanicalsoup.LinkNotFoundError:\n pass\n else:\n received = os.path.join(task.manager.config_base, 'received')\n if not os.path.isdir(received):\n os.mkdir(received)\n filename = os.path.join(received, '%s.formlogin.html' % task.name)\n with io.open(filename, 'wb') as f:\n f.write(response.content)\n log.critical('I have saved the login page content to %s for you to view', filename)\n raise plugin.PluginError('Unable to find login fields', log)\n except socket.timeout:\n raise plugin.PluginError('Timed out on url %s' % url)\n\n try:\n br.submit_selected()\n except requests.RequestException:\n log.debug('Exception submitting login form.', exc_info=True)\n raise plugin.PluginError('Unable to post login form', log)\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(FormLogin, 'form', api_ver=2)\n", "path": "flexget/plugins/operate/formlogin.py"}]}
1,560
957
gh_patches_debug_3597
rasdani/github-patches
git_diff
bridgecrewio__checkov-489
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Checkov crashes when evaluating a Terraform dynamic block in NSGRulePortAccessRestricted.py **Describe the bug** When checking azure_security_group_rule, azurerm_network_security_rule or azurerm_network_security_group Terraform resource types, NSGRulePortAccessRestricted.py throws a "TypeError: string indices must be integers" error whenever there's a dynamic block. **To Reproduce** Steps to reproduce the behavior: 1. Create a resource in terraform, containing a dynamic security rule - ``` resource "azurerm_network_security_group" "snet_nsgs" { count = "${length(local.subnets)}" name = "${local.root}-snet-${lookup(local.subnets[count.index], "name")}-nsg" location = "${azurerm_resource_group.net_rg.location}" resource_group_name = "${azurerm_resource_group.net_rg.name}" tags = "${local.tags}" dynamic "security_rule" { for_each = [for s in local.subnets[count.index].nsg_rules : { name = s.name priority = s.priority direction = s.direction access = s.access protocol = s.protocol source_port_range = s.source_port_range destination_port_range = s.destination_port_range source_address_prefix = s.source_address_prefix destination_address_prefix = s.destination_address_prefix description = s.description }] content { name = security_rule.value.name priority = security_rule.value.priority direction = security_rule.value.direction access = security_rule.value.access protocol = security_rule.value.protocol source_port_range = security_rule.value.source_port_range destination_port_range = security_rule.value.destination_port_range source_address_prefix = security_rule.value.source_address_prefix destination_address_prefix = security_rule.value.destination_address_prefix description = security_rule.value.description } } } ``` 2. Run checkov 3. Error! **Expected behavior** As checkov cannot evaluate the dynamic block, I expect the check to be skipped without throwing an error. **Desktop (please complete the following information):** - OS: Ubuntu - Checkov Version 1.0.479 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `checkov/terraform/checks/resource/azure/NSGRulePortAccessRestricted.py` Content: ``` 1 from checkov.common.models.enums import CheckResult, CheckCategories 2 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceCheck 3 from checkov.common.util.type_forcers import force_list 4 import re 5 6 INTERNET_ADDRESSES = ["*", "0.0.0.0", "<nw>/0", "/0", "internet", "any"] 7 PORT_RANGE = re.compile('\d+-\d+') 8 9 10 class NSGRulePortAccessRestricted(BaseResourceCheck): 11 def __init__(self, name, check_id, port): 12 supported_resources = ['azure_security_group_rule', 'azurerm_network_security_rule', 'azurerm_network_security_group'] 13 categories = [CheckCategories.NETWORKING] 14 super().__init__(name=name, id=check_id, categories=categories, supported_resources=supported_resources) 15 self.port = port 16 17 def is_port_in_range(self, conf): 18 ports = force_list(conf['destination_port_range'][0]) 19 for range in ports: 20 if re.match(PORT_RANGE, range): 21 start, end = int(range.split('-')[0]), int(range.split('-')[1]) 22 if start <= self.port <= end: 23 return True 24 if range in [str(self.port), '*']: 25 return True 26 return False 27 28 def scan_resource_conf(self, conf): 29 rule_confs = [conf] 30 if 'security_rule' in conf: 31 rule_confs = conf['security_rule'] 32 33 for rule_conf in rule_confs: 34 if 'access' in rule_conf and rule_conf['access'][0] == "Allow": 35 if 'direction' in rule_conf and rule_conf['direction'][0] == "Inbound": 36 if 'protocol' in rule_conf and rule_conf['protocol'][0] == 'TCP': 37 if 'destination_port_range' in rule_conf and self.is_port_in_range(rule_conf): 38 if 'source_address_prefix' in rule_conf and rule_conf['source_address_prefix'][0] in INTERNET_ADDRESSES: 39 return CheckResult.FAILED 40 return CheckResult.PASSED 41 42 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/checkov/terraform/checks/resource/azure/NSGRulePortAccessRestricted.py b/checkov/terraform/checks/resource/azure/NSGRulePortAccessRestricted.py --- a/checkov/terraform/checks/resource/azure/NSGRulePortAccessRestricted.py +++ b/checkov/terraform/checks/resource/azure/NSGRulePortAccessRestricted.py @@ -26,6 +26,9 @@ return False def scan_resource_conf(self, conf): + if "dynamic" in conf: + return CheckResult.UNKNOWN + rule_confs = [conf] if 'security_rule' in conf: rule_confs = conf['security_rule']
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/azure/NSGRulePortAccessRestricted.py b/checkov/terraform/checks/resource/azure/NSGRulePortAccessRestricted.py\n--- a/checkov/terraform/checks/resource/azure/NSGRulePortAccessRestricted.py\n+++ b/checkov/terraform/checks/resource/azure/NSGRulePortAccessRestricted.py\n@@ -26,6 +26,9 @@\n return False\n \n def scan_resource_conf(self, conf):\n+ if \"dynamic\" in conf:\n+ return CheckResult.UNKNOWN\n+ \n rule_confs = [conf]\n if 'security_rule' in conf:\n rule_confs = conf['security_rule']\n", "issue": "Checkov crashes when evaluating a Terraform dynamic block in NSGRulePortAccessRestricted.py\n**Describe the bug**\r\nWhen checking azure_security_group_rule, azurerm_network_security_rule or azurerm_network_security_group Terraform resource types, NSGRulePortAccessRestricted.py throws a \"TypeError: string indices must be integers\" error whenever there's a dynamic block.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Create a resource in terraform, containing a dynamic security rule -\r\n```\r\nresource \"azurerm_network_security_group\" \"snet_nsgs\" {\r\n count = \"${length(local.subnets)}\"\r\n name = \"${local.root}-snet-${lookup(local.subnets[count.index], \"name\")}-nsg\"\r\n location = \"${azurerm_resource_group.net_rg.location}\"\r\n resource_group_name = \"${azurerm_resource_group.net_rg.name}\"\r\n tags = \"${local.tags}\"\r\n\r\n\r\n dynamic \"security_rule\" {\r\n for_each = [for s in local.subnets[count.index].nsg_rules : {\r\n name = s.name\r\n priority = s.priority\r\n direction = s.direction\r\n access = s.access\r\n protocol = s.protocol\r\n source_port_range = s.source_port_range\r\n destination_port_range = s.destination_port_range\r\n source_address_prefix = s.source_address_prefix\r\n destination_address_prefix = s.destination_address_prefix\r\n description = s.description\r\n }]\r\n content {\r\n name = security_rule.value.name\r\n priority = security_rule.value.priority\r\n direction = security_rule.value.direction\r\n access = security_rule.value.access\r\n protocol = security_rule.value.protocol\r\n source_port_range = security_rule.value.source_port_range\r\n destination_port_range = security_rule.value.destination_port_range\r\n source_address_prefix = security_rule.value.source_address_prefix\r\n destination_address_prefix = security_rule.value.destination_address_prefix\r\n description = security_rule.value.description\r\n }\r\n }\r\n}\r\n```\r\n2. Run checkov\r\n3. Error!\r\n\r\n**Expected behavior**\r\nAs checkov cannot evaluate the dynamic block, I expect the check to be skipped without throwing an error.\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Ubuntu\r\n - Checkov Version 1.0.479\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceCheck\nfrom checkov.common.util.type_forcers import force_list\nimport re\n\nINTERNET_ADDRESSES = [\"*\", \"0.0.0.0\", \"<nw>/0\", \"/0\", \"internet\", \"any\"]\nPORT_RANGE = re.compile('\\d+-\\d+')\n\n\nclass NSGRulePortAccessRestricted(BaseResourceCheck):\n def __init__(self, name, check_id, port):\n supported_resources = ['azure_security_group_rule', 'azurerm_network_security_rule', 'azurerm_network_security_group']\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=check_id, categories=categories, supported_resources=supported_resources)\n self.port = port\n\n def is_port_in_range(self, conf):\n ports = force_list(conf['destination_port_range'][0])\n for range in ports:\n if re.match(PORT_RANGE, range):\n start, end = int(range.split('-')[0]), int(range.split('-')[1])\n if start <= self.port <= end:\n return True\n if range in [str(self.port), '*']:\n return True\n return False\n\n def scan_resource_conf(self, conf):\n rule_confs = [conf]\n if 'security_rule' in conf:\n rule_confs = conf['security_rule']\n\n for rule_conf in rule_confs:\n if 'access' in rule_conf and rule_conf['access'][0] == \"Allow\":\n if 'direction' in rule_conf and rule_conf['direction'][0] == \"Inbound\":\n if 'protocol' in rule_conf and rule_conf['protocol'][0] == 'TCP':\n if 'destination_port_range' in rule_conf and self.is_port_in_range(rule_conf):\n if 'source_address_prefix' in rule_conf and rule_conf['source_address_prefix'][0] in INTERNET_ADDRESSES:\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n", "path": "checkov/terraform/checks/resource/azure/NSGRulePortAccessRestricted.py"}], "after_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceCheck\nfrom checkov.common.util.type_forcers import force_list\nimport re\n\nINTERNET_ADDRESSES = [\"*\", \"0.0.0.0\", \"<nw>/0\", \"/0\", \"internet\", \"any\"]\nPORT_RANGE = re.compile('\\d+-\\d+')\n\n\nclass NSGRulePortAccessRestricted(BaseResourceCheck):\n def __init__(self, name, check_id, port):\n supported_resources = ['azure_security_group_rule', 'azurerm_network_security_rule', 'azurerm_network_security_group']\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=check_id, categories=categories, supported_resources=supported_resources)\n self.port = port\n\n def is_port_in_range(self, conf):\n ports = force_list(conf['destination_port_range'][0])\n for range in ports:\n if re.match(PORT_RANGE, range):\n start, end = int(range.split('-')[0]), int(range.split('-')[1])\n if start <= self.port <= end:\n return True\n if range in [str(self.port), '*']:\n return True\n return False\n\n def scan_resource_conf(self, conf):\n if \"dynamic\" in conf:\n return CheckResult.UNKNOWN\n \n rule_confs = [conf]\n if 'security_rule' in conf:\n rule_confs = conf['security_rule']\n\n for rule_conf in rule_confs:\n if 'access' in rule_conf and rule_conf['access'][0] == \"Allow\":\n if 'direction' in rule_conf and rule_conf['direction'][0] == \"Inbound\":\n if 'protocol' in rule_conf and rule_conf['protocol'][0] == 'TCP':\n if 'destination_port_range' in rule_conf and self.is_port_in_range(rule_conf):\n if 'source_address_prefix' in rule_conf and rule_conf['source_address_prefix'][0] in INTERNET_ADDRESSES:\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n", "path": "checkov/terraform/checks/resource/azure/NSGRulePortAccessRestricted.py"}]}
1,279
150
gh_patches_debug_3022
rasdani/github-patches
git_diff
conan-io__conan-2763
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [bug] Linter "Unable to import" warning when importing a shared Python Conan package in the build() step - [x] I've read the [CONTRIBUTING guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md). - [x] I've specified the Conan version, operating system version and any tool that can be relevant. - [x] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion. --- I followed the instructions on http://docs.conan.io/en/latest/howtos/python_code_reuse.html. When I get to the "Requiring a python conan package" step, the linter gives me a warning about importing the shared package: $ git clone https://github.com/smokris/conan-test-library $ cd conan-test-library $ conan export . me/testing $ cd .. $ git clone https://github.com/smokris/conan-test-consumer $ cd conan-test-consumer $ conan create . me/testing HelloPyReuse/1.0@me/testing: Exporting package recipe Linter warnings WARN: Linter. Line 9: Unable to import 'hello' … HelloPyReuse/1.0@me/testing: Calling build() Hello World from Python! … (The imported package works fine; the problem is just that the linter is emitting a warning. I'd prefer that the linter not show this false-positive warning, to improve the linter's signal-to-noise ratio.) I'm able to reproduce this using: - Conan 1.1.1 on my local macOS 10.13.3 system - Conan 1.1.1 on Travis CI's Mac OS 10.10.5 image - Conan 1.1.1 on Travis CI's Ubuntu 14.04.5 image - Conan 1.2.0 on CentOS 7.4 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `conans/client/cmd/export_linter.py` Content: ``` 1 import json 2 import os 3 import sys 4 5 import platform 6 7 from conans.client.output import Color 8 from conans.errors import ConanException 9 from subprocess import PIPE, Popen 10 from conans import __path__ as root_path 11 12 13 def conan_linter(conanfile_path, out): 14 if getattr(sys, 'frozen', False): 15 out.info("No linter available. Use a pip installed conan for recipe linting") 16 return 17 apply_lint = os.environ.get("CONAN_RECIPE_LINTER", True) 18 if not apply_lint or apply_lint == "False": 19 return 20 21 dir_path = os.path.dirname(root_path[0]).replace("\\", "/") 22 dirname = os.path.dirname(conanfile_path).replace("\\", "/") 23 hook = '--init-hook="import sys;sys.path.extend([\'%s\', \'%s\'])"' % (dirname, dir_path) 24 25 try: 26 py3_msgs = None 27 msgs, py3_msgs = _normal_linter(conanfile_path, hook) 28 except Exception as e: 29 out.warn("Failed pylint: %s" % e) 30 else: 31 if py3_msgs: 32 out.writeln("Python 3 incompatibilities\n ERROR: %s" 33 % "\n ERROR: ".join(py3_msgs), 34 front=Color.BRIGHT_MAGENTA) 35 if msgs: 36 out.writeln("Linter warnings\n WARN: %s" % "\n WARN: ".join(msgs), 37 front=Color.MAGENTA) 38 pylint_werr = os.environ.get("CONAN_PYLINT_WERR", None) 39 if pylint_werr and (py3_msgs or msgs): 40 raise ConanException("Package recipe has linter errors. Please fix them.") 41 42 43 def _runner(args): 44 command = ["pylint", "--output-format=json"] + args 45 command = " ".join(command) 46 shell = True if platform.system() != "Windows" else False 47 proc = Popen(command, shell=shell, bufsize=10, stdout=PIPE, stderr=PIPE) 48 stdout, _ = proc.communicate() 49 return json.loads(stdout.decode("utf-8")) if stdout else {} 50 51 52 def _normal_linter(conanfile_path, hook): 53 args = ['--py3k', "--enable=all", "--reports=no", "--disable=no-absolute-import", "--persistent=no", 54 hook, '"%s"' % conanfile_path] 55 pylintrc = os.environ.get("CONAN_PYLINTRC", None) 56 if pylintrc: 57 if not os.path.exists(pylintrc): 58 raise ConanException("File %s defined by PYLINTRC doesn't exist" % pylintrc) 59 args.append('--rcfile="%s"' % pylintrc) 60 61 output_json = _runner(args) 62 dynamic_fields = ("source_folder", "build_folder", "package_folder", "info_build", 63 "build_requires", "info") 64 65 def _accept_message(msg): 66 symbol = msg.get("symbol") 67 text = msg.get("message") 68 69 if symbol == "no-member": 70 for field in dynamic_fields: 71 if field in text: 72 return False 73 if symbol == "not-callable" and "self.copy is not callable" == text: 74 return False 75 if symbol == "not-callable" and "self.copy_deps is not callable" == text: 76 return False 77 if symbol in ("bare-except", "broad-except"): # No exception type(s) specified 78 return False 79 80 return True 81 82 result = [] 83 py3msgs = [] 84 for msg in output_json: 85 if msg.get("type") in ("warning", "error"): 86 message_id = msg.get("symbol") 87 if message_id in ("print-statement", "dict-iter-method"): 88 py3msgs.append("Py3 incompatibility. Line %s: %s" 89 % (msg.get("line"), msg.get("message"))) 90 elif _accept_message(msg): 91 result.append("Linter. Line %s: %s" % (msg.get("line"), msg.get("message"))) 92 93 return result, py3msgs 94 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/conans/client/cmd/export_linter.py b/conans/client/cmd/export_linter.py --- a/conans/client/cmd/export_linter.py +++ b/conans/client/cmd/export_linter.py @@ -76,6 +76,8 @@ return False if symbol in ("bare-except", "broad-except"): # No exception type(s) specified return False + if symbol == "import-error" and msg.get("column") > 3: # Import of a conan python package + return False return True
{"golden_diff": "diff --git a/conans/client/cmd/export_linter.py b/conans/client/cmd/export_linter.py\n--- a/conans/client/cmd/export_linter.py\n+++ b/conans/client/cmd/export_linter.py\n@@ -76,6 +76,8 @@\n return False\n if symbol in (\"bare-except\", \"broad-except\"): # No exception type(s) specified\n return False\n+ if symbol == \"import-error\" and msg.get(\"column\") > 3: # Import of a conan python package\n+ return False\n \n return True\n", "issue": "[bug] Linter \"Unable to import\" warning when importing a shared Python Conan package in the build() step\n- [x] I've read the [CONTRIBUTING guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md).\r\n- [x] I've specified the Conan version, operating system version and any tool that can be relevant.\r\n- [x] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion.\r\n\r\n---\r\n\r\nI followed the instructions on http://docs.conan.io/en/latest/howtos/python_code_reuse.html. When I get to the \"Requiring a python conan package\" step, the linter gives me a warning about importing the shared package:\r\n\r\n $ git clone https://github.com/smokris/conan-test-library\r\n $ cd conan-test-library\r\n $ conan export . me/testing\r\n $ cd ..\r\n\r\n $ git clone https://github.com/smokris/conan-test-consumer\r\n $ cd conan-test-consumer\r\n $ conan create . me/testing\r\n HelloPyReuse/1.0@me/testing: Exporting package recipe\r\n Linter warnings\r\n WARN: Linter. Line 9: Unable to import 'hello'\r\n \u2026\r\n HelloPyReuse/1.0@me/testing: Calling build()\r\n Hello World from Python!\r\n \u2026\r\n\r\n\r\n(The imported package works fine; the problem is just that the linter is emitting a warning. I'd prefer that the linter not show this false-positive warning, to improve the linter's signal-to-noise ratio.)\r\n\r\nI'm able to reproduce this using:\r\n\r\n- Conan 1.1.1 on my local macOS 10.13.3 system\r\n- Conan 1.1.1 on Travis CI's Mac OS 10.10.5 image\r\n- Conan 1.1.1 on Travis CI's Ubuntu 14.04.5 image\r\n- Conan 1.2.0 on CentOS 7.4\r\n\n", "before_files": [{"content": "import json\nimport os\nimport sys\n\nimport platform\n\nfrom conans.client.output import Color\nfrom conans.errors import ConanException\nfrom subprocess import PIPE, Popen\nfrom conans import __path__ as root_path\n\n\ndef conan_linter(conanfile_path, out):\n if getattr(sys, 'frozen', False):\n out.info(\"No linter available. Use a pip installed conan for recipe linting\")\n return\n apply_lint = os.environ.get(\"CONAN_RECIPE_LINTER\", True)\n if not apply_lint or apply_lint == \"False\":\n return\n\n dir_path = os.path.dirname(root_path[0]).replace(\"\\\\\", \"/\")\n dirname = os.path.dirname(conanfile_path).replace(\"\\\\\", \"/\")\n hook = '--init-hook=\"import sys;sys.path.extend([\\'%s\\', \\'%s\\'])\"' % (dirname, dir_path)\n\n try:\n py3_msgs = None\n msgs, py3_msgs = _normal_linter(conanfile_path, hook)\n except Exception as e:\n out.warn(\"Failed pylint: %s\" % e)\n else:\n if py3_msgs:\n out.writeln(\"Python 3 incompatibilities\\n ERROR: %s\"\n % \"\\n ERROR: \".join(py3_msgs),\n front=Color.BRIGHT_MAGENTA)\n if msgs:\n out.writeln(\"Linter warnings\\n WARN: %s\" % \"\\n WARN: \".join(msgs),\n front=Color.MAGENTA)\n pylint_werr = os.environ.get(\"CONAN_PYLINT_WERR\", None)\n if pylint_werr and (py3_msgs or msgs):\n raise ConanException(\"Package recipe has linter errors. Please fix them.\")\n\n\ndef _runner(args):\n command = [\"pylint\", \"--output-format=json\"] + args\n command = \" \".join(command)\n shell = True if platform.system() != \"Windows\" else False\n proc = Popen(command, shell=shell, bufsize=10, stdout=PIPE, stderr=PIPE)\n stdout, _ = proc.communicate()\n return json.loads(stdout.decode(\"utf-8\")) if stdout else {}\n\n\ndef _normal_linter(conanfile_path, hook):\n args = ['--py3k', \"--enable=all\", \"--reports=no\", \"--disable=no-absolute-import\", \"--persistent=no\",\n hook, '\"%s\"' % conanfile_path]\n pylintrc = os.environ.get(\"CONAN_PYLINTRC\", None)\n if pylintrc:\n if not os.path.exists(pylintrc):\n raise ConanException(\"File %s defined by PYLINTRC doesn't exist\" % pylintrc)\n args.append('--rcfile=\"%s\"' % pylintrc)\n\n output_json = _runner(args)\n dynamic_fields = (\"source_folder\", \"build_folder\", \"package_folder\", \"info_build\",\n \"build_requires\", \"info\")\n\n def _accept_message(msg):\n symbol = msg.get(\"symbol\")\n text = msg.get(\"message\")\n\n if symbol == \"no-member\":\n for field in dynamic_fields:\n if field in text:\n return False\n if symbol == \"not-callable\" and \"self.copy is not callable\" == text:\n return False\n if symbol == \"not-callable\" and \"self.copy_deps is not callable\" == text:\n return False\n if symbol in (\"bare-except\", \"broad-except\"): # No exception type(s) specified\n return False\n\n return True\n\n result = []\n py3msgs = []\n for msg in output_json:\n if msg.get(\"type\") in (\"warning\", \"error\"):\n message_id = msg.get(\"symbol\")\n if message_id in (\"print-statement\", \"dict-iter-method\"):\n py3msgs.append(\"Py3 incompatibility. Line %s: %s\"\n % (msg.get(\"line\"), msg.get(\"message\")))\n elif _accept_message(msg):\n result.append(\"Linter. Line %s: %s\" % (msg.get(\"line\"), msg.get(\"message\")))\n\n return result, py3msgs\n", "path": "conans/client/cmd/export_linter.py"}], "after_files": [{"content": "import json\nimport os\nimport sys\n\nimport platform\n\nfrom conans.client.output import Color\nfrom conans.errors import ConanException\nfrom subprocess import PIPE, Popen\nfrom conans import __path__ as root_path\n\n\ndef conan_linter(conanfile_path, out):\n if getattr(sys, 'frozen', False):\n out.info(\"No linter available. Use a pip installed conan for recipe linting\")\n return\n apply_lint = os.environ.get(\"CONAN_RECIPE_LINTER\", True)\n if not apply_lint or apply_lint == \"False\":\n return\n\n dir_path = os.path.dirname(root_path[0]).replace(\"\\\\\", \"/\")\n dirname = os.path.dirname(conanfile_path).replace(\"\\\\\", \"/\")\n hook = '--init-hook=\"import sys;sys.path.extend([\\'%s\\', \\'%s\\'])\"' % (dirname, dir_path)\n\n try:\n py3_msgs = None\n msgs, py3_msgs = _normal_linter(conanfile_path, hook)\n except Exception as e:\n out.warn(\"Failed pylint: %s\" % e)\n else:\n if py3_msgs:\n out.writeln(\"Python 3 incompatibilities\\n ERROR: %s\"\n % \"\\n ERROR: \".join(py3_msgs),\n front=Color.BRIGHT_MAGENTA)\n if msgs:\n out.writeln(\"Linter warnings\\n WARN: %s\" % \"\\n WARN: \".join(msgs),\n front=Color.MAGENTA)\n pylint_werr = os.environ.get(\"CONAN_PYLINT_WERR\", None)\n if pylint_werr and (py3_msgs or msgs):\n raise ConanException(\"Package recipe has linter errors. Please fix them.\")\n\n\ndef _runner(args):\n command = [\"pylint\", \"--output-format=json\"] + args\n command = \" \".join(command)\n shell = True if platform.system() != \"Windows\" else False\n proc = Popen(command, shell=shell, bufsize=10, stdout=PIPE, stderr=PIPE)\n stdout, _ = proc.communicate()\n return json.loads(stdout.decode(\"utf-8\")) if stdout else {}\n\n\ndef _normal_linter(conanfile_path, hook):\n args = ['--py3k', \"--enable=all\", \"--reports=no\", \"--disable=no-absolute-import\", \"--persistent=no\",\n hook, '\"%s\"' % conanfile_path]\n pylintrc = os.environ.get(\"CONAN_PYLINTRC\", None)\n if pylintrc:\n if not os.path.exists(pylintrc):\n raise ConanException(\"File %s defined by PYLINTRC doesn't exist\" % pylintrc)\n args.append('--rcfile=\"%s\"' % pylintrc)\n\n output_json = _runner(args)\n dynamic_fields = (\"source_folder\", \"build_folder\", \"package_folder\", \"info_build\",\n \"build_requires\", \"info\")\n\n def _accept_message(msg):\n symbol = msg.get(\"symbol\")\n text = msg.get(\"message\")\n\n if symbol == \"no-member\":\n for field in dynamic_fields:\n if field in text:\n return False\n if symbol == \"not-callable\" and \"self.copy is not callable\" == text:\n return False\n if symbol == \"not-callable\" and \"self.copy_deps is not callable\" == text:\n return False\n if symbol in (\"bare-except\", \"broad-except\"): # No exception type(s) specified\n return False\n if symbol == \"import-error\" and msg.get(\"column\") > 3: # Import of a conan python package\n return False\n\n return True\n\n result = []\n py3msgs = []\n for msg in output_json:\n if msg.get(\"type\") in (\"warning\", \"error\"):\n message_id = msg.get(\"symbol\")\n if message_id in (\"print-statement\", \"dict-iter-method\"):\n py3msgs.append(\"Py3 incompatibility. Line %s: %s\"\n % (msg.get(\"line\"), msg.get(\"message\")))\n elif _accept_message(msg):\n result.append(\"Linter. Line %s: %s\" % (msg.get(\"line\"), msg.get(\"message\")))\n\n return result, py3msgs\n", "path": "conans/client/cmd/export_linter.py"}]}
1,782
126
gh_patches_debug_37404
rasdani/github-patches
git_diff
svthalia__concrexit-1135
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Fix "identical-code" issue in website/education/admin.py Identical blocks of code found in 2 locations. Consider refactoring. https://codeclimate.com/github/svthalia/concrexit/website/education/admin.py#issue_5ecfbb80e8d0fa00010003f6 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `website/education/admin.py` Content: ``` 1 """ 2 This module registers admin pages for the models 3 """ 4 import csv 5 6 from django.contrib import admin 7 from django.http import HttpResponse 8 from django.utils.translation import gettext_lazy as _ 9 10 from utils.translation import TranslatedModelAdmin 11 from . import models 12 from .forms import SummaryAdminForm 13 14 admin.site.register(models.Category) 15 16 17 @admin.register(models.Course) 18 class CourseAdmin(TranslatedModelAdmin): 19 fields = ( 20 "name", 21 "course_code", 22 "ec", 23 "since", 24 "until", 25 "categories", 26 "old_courses", 27 ) 28 list_filter = ("categories", "ec") 29 search_fields = ("name", "course_code") 30 31 32 @admin.register(models.Exam) 33 class ExamAdmin(TranslatedModelAdmin): 34 list_display = ( 35 "type", 36 "course", 37 "exam_date", 38 "uploader", 39 "accepted", 40 "language", 41 "download_count", 42 ) 43 readonly_fields = ("download_count",) 44 list_filter = ("accepted", "exam_date", "type", "language") 45 search_fields = ( 46 "name", 47 "uploader__first_name", 48 "uploader__last_name", 49 "course__name_nl", 50 "course__name_en", 51 ) 52 actions = ["accept", "reject", "reset_download_count", "download_csv"] 53 54 def accept(self, request, queryset): 55 queryset.update(accepted=True) 56 57 accept.short_description = _("Mark exams as accepted") 58 59 def reject(self, request, queryset): 60 queryset.update(accepted=False) 61 62 reject.short_description = _("Mark exams as rejected") 63 64 def reset_download_count(self, request, queryset): 65 queryset.update(download_count=0) 66 67 reset_download_count.short_description = _("Reset the marked exams download count") 68 69 def download_csv(self, request, queryset): 70 opts = queryset.model._meta 71 response = HttpResponse(content_type="text/csv") 72 # force download. 73 response["Content-Disposition"] = "attachment;filename=export.csv" 74 # the csv writer 75 writer = csv.writer(response) 76 field_names = [field.name for field in opts.fields] 77 # Write a first row with header information 78 writer.writerow(field_names) 79 # Write data rows 80 for obj in queryset: 81 writer.writerow([getattr(obj, field) for field in field_names]) 82 return response 83 84 download_csv.short_description = _("Download marked as csv") 85 86 87 @admin.register(models.Summary) 88 class SummaryAdmin(TranslatedModelAdmin): 89 list_display = ( 90 "name", 91 "course", 92 "uploader", 93 "accepted", 94 "language", 95 "download_count", 96 ) 97 readonly_fields = ("download_count",) 98 list_filter = ("accepted", "language") 99 search_fields = ( 100 "name", 101 "uploader__first_name", 102 "uploader__last_name", 103 "course__name_nl", 104 "course__name_en", 105 ) 106 actions = ["accept", "reject", "reset_download_count", "download_csv"] 107 form = SummaryAdminForm 108 109 def accept(self, request, queryset): 110 queryset.update(accepted=True) 111 112 accept.short_description = _("Mark summaries as accepted") 113 114 def reject(self, request, queryset): 115 queryset.update(accepted=False) 116 117 reject.short_description = _("Mark summaries as rejected") 118 119 def reset_download_count(self, request, queryset): 120 queryset.update(download_count=0) 121 122 reset_download_count.short_description = _( 123 "Reset the marked summaries download count" 124 ) 125 126 def download_csv(self, request, queryset): 127 opts = queryset.model._meta 128 response = HttpResponse(content_type="text/csv") 129 # force download. 130 response["Content-Disposition"] = "attachment;filename=export.csv" 131 # the csv writer 132 writer = csv.writer(response) 133 field_names = [field.name for field in opts.fields] 134 # Write a first row with header information 135 writer.writerow(field_names) 136 # Write data rows 137 for obj in queryset: 138 writer.writerow([getattr(obj, field) for field in field_names]) 139 return response 140 141 download_csv.short_description = _("Download marked as csv") 142 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/website/education/admin.py b/website/education/admin.py --- a/website/education/admin.py +++ b/website/education/admin.py @@ -29,8 +29,27 @@ search_fields = ("name", "course_code") +class WithDownloadCsv: + def download_csv(self, request, queryset): + opts = queryset.model._meta + response = HttpResponse(content_type="text/csv") + # force download. + response["Content-Disposition"] = "attachment;filename=export.csv" + # the csv writer + writer = csv.writer(response) + field_names = [field.name for field in opts.fields] + # Write a first row with header information + writer.writerow(field_names) + # Write data rows + for obj in queryset: + writer.writerow([getattr(obj, field) for field in field_names]) + return response + + download_csv.short_description = _("Download marked as csv") + + @admin.register(models.Exam) -class ExamAdmin(TranslatedModelAdmin): +class ExamAdmin(TranslatedModelAdmin, WithDownloadCsv): list_display = ( "type", "course", @@ -66,26 +85,9 @@ reset_download_count.short_description = _("Reset the marked exams download count") - def download_csv(self, request, queryset): - opts = queryset.model._meta - response = HttpResponse(content_type="text/csv") - # force download. - response["Content-Disposition"] = "attachment;filename=export.csv" - # the csv writer - writer = csv.writer(response) - field_names = [field.name for field in opts.fields] - # Write a first row with header information - writer.writerow(field_names) - # Write data rows - for obj in queryset: - writer.writerow([getattr(obj, field) for field in field_names]) - return response - - download_csv.short_description = _("Download marked as csv") - @admin.register(models.Summary) -class SummaryAdmin(TranslatedModelAdmin): +class SummaryAdmin(TranslatedModelAdmin, WithDownloadCsv): list_display = ( "name", "course", @@ -122,20 +124,3 @@ reset_download_count.short_description = _( "Reset the marked summaries download count" ) - - def download_csv(self, request, queryset): - opts = queryset.model._meta - response = HttpResponse(content_type="text/csv") - # force download. - response["Content-Disposition"] = "attachment;filename=export.csv" - # the csv writer - writer = csv.writer(response) - field_names = [field.name for field in opts.fields] - # Write a first row with header information - writer.writerow(field_names) - # Write data rows - for obj in queryset: - writer.writerow([getattr(obj, field) for field in field_names]) - return response - - download_csv.short_description = _("Download marked as csv")
{"golden_diff": "diff --git a/website/education/admin.py b/website/education/admin.py\n--- a/website/education/admin.py\n+++ b/website/education/admin.py\n@@ -29,8 +29,27 @@\n search_fields = (\"name\", \"course_code\")\n \n \n+class WithDownloadCsv:\n+ def download_csv(self, request, queryset):\n+ opts = queryset.model._meta\n+ response = HttpResponse(content_type=\"text/csv\")\n+ # force download.\n+ response[\"Content-Disposition\"] = \"attachment;filename=export.csv\"\n+ # the csv writer\n+ writer = csv.writer(response)\n+ field_names = [field.name for field in opts.fields]\n+ # Write a first row with header information\n+ writer.writerow(field_names)\n+ # Write data rows\n+ for obj in queryset:\n+ writer.writerow([getattr(obj, field) for field in field_names])\n+ return response\n+\n+ download_csv.short_description = _(\"Download marked as csv\")\n+\n+\n @admin.register(models.Exam)\n-class ExamAdmin(TranslatedModelAdmin):\n+class ExamAdmin(TranslatedModelAdmin, WithDownloadCsv):\n list_display = (\n \"type\",\n \"course\",\n@@ -66,26 +85,9 @@\n \n reset_download_count.short_description = _(\"Reset the marked exams download count\")\n \n- def download_csv(self, request, queryset):\n- opts = queryset.model._meta\n- response = HttpResponse(content_type=\"text/csv\")\n- # force download.\n- response[\"Content-Disposition\"] = \"attachment;filename=export.csv\"\n- # the csv writer\n- writer = csv.writer(response)\n- field_names = [field.name for field in opts.fields]\n- # Write a first row with header information\n- writer.writerow(field_names)\n- # Write data rows\n- for obj in queryset:\n- writer.writerow([getattr(obj, field) for field in field_names])\n- return response\n-\n- download_csv.short_description = _(\"Download marked as csv\")\n-\n \n @admin.register(models.Summary)\n-class SummaryAdmin(TranslatedModelAdmin):\n+class SummaryAdmin(TranslatedModelAdmin, WithDownloadCsv):\n list_display = (\n \"name\",\n \"course\",\n@@ -122,20 +124,3 @@\n reset_download_count.short_description = _(\n \"Reset the marked summaries download count\"\n )\n-\n- def download_csv(self, request, queryset):\n- opts = queryset.model._meta\n- response = HttpResponse(content_type=\"text/csv\")\n- # force download.\n- response[\"Content-Disposition\"] = \"attachment;filename=export.csv\"\n- # the csv writer\n- writer = csv.writer(response)\n- field_names = [field.name for field in opts.fields]\n- # Write a first row with header information\n- writer.writerow(field_names)\n- # Write data rows\n- for obj in queryset:\n- writer.writerow([getattr(obj, field) for field in field_names])\n- return response\n-\n- download_csv.short_description = _(\"Download marked as csv\")\n", "issue": "Fix \"identical-code\" issue in website/education/admin.py\nIdentical blocks of code found in 2 locations. Consider refactoring.\n\nhttps://codeclimate.com/github/svthalia/concrexit/website/education/admin.py#issue_5ecfbb80e8d0fa00010003f6\n", "before_files": [{"content": "\"\"\"\nThis module registers admin pages for the models\n\"\"\"\nimport csv\n\nfrom django.contrib import admin\nfrom django.http import HttpResponse\nfrom django.utils.translation import gettext_lazy as _\n\nfrom utils.translation import TranslatedModelAdmin\nfrom . import models\nfrom .forms import SummaryAdminForm\n\nadmin.site.register(models.Category)\n\n\[email protected](models.Course)\nclass CourseAdmin(TranslatedModelAdmin):\n fields = (\n \"name\",\n \"course_code\",\n \"ec\",\n \"since\",\n \"until\",\n \"categories\",\n \"old_courses\",\n )\n list_filter = (\"categories\", \"ec\")\n search_fields = (\"name\", \"course_code\")\n\n\[email protected](models.Exam)\nclass ExamAdmin(TranslatedModelAdmin):\n list_display = (\n \"type\",\n \"course\",\n \"exam_date\",\n \"uploader\",\n \"accepted\",\n \"language\",\n \"download_count\",\n )\n readonly_fields = (\"download_count\",)\n list_filter = (\"accepted\", \"exam_date\", \"type\", \"language\")\n search_fields = (\n \"name\",\n \"uploader__first_name\",\n \"uploader__last_name\",\n \"course__name_nl\",\n \"course__name_en\",\n )\n actions = [\"accept\", \"reject\", \"reset_download_count\", \"download_csv\"]\n\n def accept(self, request, queryset):\n queryset.update(accepted=True)\n\n accept.short_description = _(\"Mark exams as accepted\")\n\n def reject(self, request, queryset):\n queryset.update(accepted=False)\n\n reject.short_description = _(\"Mark exams as rejected\")\n\n def reset_download_count(self, request, queryset):\n queryset.update(download_count=0)\n\n reset_download_count.short_description = _(\"Reset the marked exams download count\")\n\n def download_csv(self, request, queryset):\n opts = queryset.model._meta\n response = HttpResponse(content_type=\"text/csv\")\n # force download.\n response[\"Content-Disposition\"] = \"attachment;filename=export.csv\"\n # the csv writer\n writer = csv.writer(response)\n field_names = [field.name for field in opts.fields]\n # Write a first row with header information\n writer.writerow(field_names)\n # Write data rows\n for obj in queryset:\n writer.writerow([getattr(obj, field) for field in field_names])\n return response\n\n download_csv.short_description = _(\"Download marked as csv\")\n\n\[email protected](models.Summary)\nclass SummaryAdmin(TranslatedModelAdmin):\n list_display = (\n \"name\",\n \"course\",\n \"uploader\",\n \"accepted\",\n \"language\",\n \"download_count\",\n )\n readonly_fields = (\"download_count\",)\n list_filter = (\"accepted\", \"language\")\n search_fields = (\n \"name\",\n \"uploader__first_name\",\n \"uploader__last_name\",\n \"course__name_nl\",\n \"course__name_en\",\n )\n actions = [\"accept\", \"reject\", \"reset_download_count\", \"download_csv\"]\n form = SummaryAdminForm\n\n def accept(self, request, queryset):\n queryset.update(accepted=True)\n\n accept.short_description = _(\"Mark summaries as accepted\")\n\n def reject(self, request, queryset):\n queryset.update(accepted=False)\n\n reject.short_description = _(\"Mark summaries as rejected\")\n\n def reset_download_count(self, request, queryset):\n queryset.update(download_count=0)\n\n reset_download_count.short_description = _(\n \"Reset the marked summaries download count\"\n )\n\n def download_csv(self, request, queryset):\n opts = queryset.model._meta\n response = HttpResponse(content_type=\"text/csv\")\n # force download.\n response[\"Content-Disposition\"] = \"attachment;filename=export.csv\"\n # the csv writer\n writer = csv.writer(response)\n field_names = [field.name for field in opts.fields]\n # Write a first row with header information\n writer.writerow(field_names)\n # Write data rows\n for obj in queryset:\n writer.writerow([getattr(obj, field) for field in field_names])\n return response\n\n download_csv.short_description = _(\"Download marked as csv\")\n", "path": "website/education/admin.py"}], "after_files": [{"content": "\"\"\"\nThis module registers admin pages for the models\n\"\"\"\nimport csv\n\nfrom django.contrib import admin\nfrom django.http import HttpResponse\nfrom django.utils.translation import gettext_lazy as _\n\nfrom utils.translation import TranslatedModelAdmin\nfrom . import models\nfrom .forms import SummaryAdminForm\n\nadmin.site.register(models.Category)\n\n\[email protected](models.Course)\nclass CourseAdmin(TranslatedModelAdmin):\n fields = (\n \"name\",\n \"course_code\",\n \"ec\",\n \"since\",\n \"until\",\n \"categories\",\n \"old_courses\",\n )\n list_filter = (\"categories\", \"ec\")\n search_fields = (\"name\", \"course_code\")\n\n\nclass WithDownloadCsv:\n def download_csv(self, request, queryset):\n opts = queryset.model._meta\n response = HttpResponse(content_type=\"text/csv\")\n # force download.\n response[\"Content-Disposition\"] = \"attachment;filename=export.csv\"\n # the csv writer\n writer = csv.writer(response)\n field_names = [field.name for field in opts.fields]\n # Write a first row with header information\n writer.writerow(field_names)\n # Write data rows\n for obj in queryset:\n writer.writerow([getattr(obj, field) for field in field_names])\n return response\n\n download_csv.short_description = _(\"Download marked as csv\")\n\n\[email protected](models.Exam)\nclass ExamAdmin(TranslatedModelAdmin, WithDownloadCsv):\n list_display = (\n \"type\",\n \"course\",\n \"exam_date\",\n \"uploader\",\n \"accepted\",\n \"language\",\n \"download_count\",\n )\n readonly_fields = (\"download_count\",)\n list_filter = (\"accepted\", \"exam_date\", \"type\", \"language\")\n search_fields = (\n \"name\",\n \"uploader__first_name\",\n \"uploader__last_name\",\n \"course__name_nl\",\n \"course__name_en\",\n )\n actions = [\"accept\", \"reject\", \"reset_download_count\", \"download_csv\"]\n\n def accept(self, request, queryset):\n queryset.update(accepted=True)\n\n accept.short_description = _(\"Mark exams as accepted\")\n\n def reject(self, request, queryset):\n queryset.update(accepted=False)\n\n reject.short_description = _(\"Mark exams as rejected\")\n\n def reset_download_count(self, request, queryset):\n queryset.update(download_count=0)\n\n reset_download_count.short_description = _(\"Reset the marked exams download count\")\n\n\[email protected](models.Summary)\nclass SummaryAdmin(TranslatedModelAdmin, WithDownloadCsv):\n list_display = (\n \"name\",\n \"course\",\n \"uploader\",\n \"accepted\",\n \"language\",\n \"download_count\",\n )\n readonly_fields = (\"download_count\",)\n list_filter = (\"accepted\", \"language\")\n search_fields = (\n \"name\",\n \"uploader__first_name\",\n \"uploader__last_name\",\n \"course__name_nl\",\n \"course__name_en\",\n )\n actions = [\"accept\", \"reject\", \"reset_download_count\", \"download_csv\"]\n form = SummaryAdminForm\n\n def accept(self, request, queryset):\n queryset.update(accepted=True)\n\n accept.short_description = _(\"Mark summaries as accepted\")\n\n def reject(self, request, queryset):\n queryset.update(accepted=False)\n\n reject.short_description = _(\"Mark summaries as rejected\")\n\n def reset_download_count(self, request, queryset):\n queryset.update(download_count=0)\n\n reset_download_count.short_description = _(\n \"Reset the marked summaries download count\"\n )\n", "path": "website/education/admin.py"}]}
1,520
665
gh_patches_debug_5654
rasdani/github-patches
git_diff
Nitrate__Nitrate-360
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Ensure to work with Python 3.7 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 import sys 4 5 from setuptools import setup, find_packages 6 7 8 with open('VERSION.txt', 'r') as f: 9 pkg_version = f.read().strip() 10 11 12 def get_long_description(): 13 with open('README.rst', 'r') as f: 14 return f.read() 15 16 17 install_requires = [ 18 'PyMySQL == 0.7.11', 19 'beautifulsoup4 >= 4.1.1', 20 'django >= 1.11,<2.0', 21 'django-contrib-comments == 1.8.0', 22 'django-tinymce == 2.7.0', 23 'django-uuslug == 1.1.8', 24 'html2text', 25 'kobo == 0.7.0', 26 'odfpy >= 0.9.6', 27 'python-bugzilla', 28 'six', 29 'xmltodict', 30 ] 31 32 if sys.version_info.major < 3: 33 install_requires += [ 34 'enum34', 35 ] 36 37 extras_require = { 38 # Required for tcms.core.contrib.auth.backends.KerberosBackend 39 'krbauth': [ 40 'kerberos == 1.2.5' 41 ], 42 43 # Packages for building documentation 44 'docs': [ 45 'Sphinx >= 1.1.2', 46 'sphinx_rtd_theme', 47 ], 48 49 # Necessary packages for running tests 50 'tests': [ 51 'coverage', 52 'factory_boy', 53 'flake8', 54 'mock', 55 'pytest', 56 'pytest-cov', 57 'pytest-django', 58 ], 59 60 # Contain tools that assists the development 61 'devtools': [ 62 'django-debug-toolbar == 1.7', 63 'tox', 64 'django-extensions', 65 'pygraphviz', 66 'future-breakpoint', 67 ], 68 69 # Required packages required to run async tasks 70 'async': [ 71 'celery == 4.1.0', 72 ] 73 } 74 75 76 setup( 77 name='Nitrate', 78 version=pkg_version, 79 description='Test Case Management System', 80 long_description=get_long_description(), 81 author='Nitrate Team', 82 maintainer='Chenxiong Qi', 83 maintainer_email='[email protected]', 84 url='https://github.com/Nitrate/Nitrate/', 85 license='GPLv2+', 86 keywords='test case', 87 install_requires=install_requires, 88 extras_require=extras_require, 89 packages=find_packages(), 90 include_package_data=True, 91 classifiers=[ 92 'Framework :: Django', 93 'Framework :: Django :: 1.11', 94 'Intended Audience :: Developers', 95 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)', 96 'Programming Language :: Python :: 2', 97 'Programming Language :: Python :: 2.7', 98 'Programming Language :: Python :: 3', 99 'Programming Language :: Python :: 3.6', 100 'Topic :: Software Development :: Quality Assurance', 101 'Topic :: Software Development :: Testing', 102 ], 103 ) 104 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -97,6 +97,7 @@ 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', 'Topic :: Software Development :: Quality Assurance', 'Topic :: Software Development :: Testing', ],
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -97,6 +97,7 @@\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n+ 'Programming Language :: Python :: 3.7',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n", "issue": "Ensure to work with Python 3.7\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nwith open('VERSION.txt', 'r') as f:\n pkg_version = f.read().strip()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\ninstall_requires = [\n 'PyMySQL == 0.7.11',\n 'beautifulsoup4 >= 4.1.1',\n 'django >= 1.11,<2.0',\n 'django-contrib-comments == 1.8.0',\n 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n 'html2text',\n 'kobo == 0.7.0',\n 'odfpy >= 0.9.6',\n 'python-bugzilla',\n 'six',\n 'xmltodict',\n]\n\nif sys.version_info.major < 3:\n install_requires += [\n 'enum34',\n ]\n\nextras_require = {\n # Required for tcms.core.contrib.auth.backends.KerberosBackend\n 'krbauth': [\n 'kerberos == 1.2.5'\n ],\n\n # Packages for building documentation\n 'docs': [\n 'Sphinx >= 1.1.2',\n 'sphinx_rtd_theme',\n ],\n\n # Necessary packages for running tests\n 'tests': [\n 'coverage',\n 'factory_boy',\n 'flake8',\n 'mock',\n 'pytest',\n 'pytest-cov',\n 'pytest-django',\n ],\n\n # Contain tools that assists the development\n 'devtools': [\n 'django-debug-toolbar == 1.7',\n 'tox',\n 'django-extensions',\n 'pygraphviz',\n 'future-breakpoint',\n ],\n\n # Required packages required to run async tasks\n 'async': [\n 'celery == 4.1.0',\n ]\n}\n\n\nsetup(\n name='Nitrate',\n version=pkg_version,\n description='Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='[email protected]',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n install_requires=install_requires,\n extras_require=extras_require,\n packages=find_packages(),\n include_package_data=True,\n classifiers=[\n 'Framework :: Django',\n 'Framework :: Django :: 1.11',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nwith open('VERSION.txt', 'r') as f:\n pkg_version = f.read().strip()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\ninstall_requires = [\n 'PyMySQL == 0.7.11',\n 'beautifulsoup4 >= 4.1.1',\n 'django >= 1.11,<2.0',\n 'django-contrib-comments == 1.8.0',\n 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n 'html2text',\n 'kobo == 0.7.0',\n 'odfpy >= 0.9.6',\n 'python-bugzilla',\n 'six',\n 'xmltodict',\n]\n\nif sys.version_info.major < 3:\n install_requires += [\n 'enum34',\n ]\n\nextras_require = {\n # Required for tcms.core.contrib.auth.backends.KerberosBackend\n 'krbauth': [\n 'kerberos == 1.2.5'\n ],\n\n # Packages for building documentation\n 'docs': [\n 'Sphinx >= 1.1.2',\n 'sphinx_rtd_theme',\n ],\n\n # Necessary packages for running tests\n 'tests': [\n 'coverage',\n 'factory_boy',\n 'flake8',\n 'mock',\n 'pytest',\n 'pytest-cov',\n 'pytest-django',\n ],\n\n # Contain tools that assists the development\n 'devtools': [\n 'django-debug-toolbar == 1.7',\n 'tox',\n 'django-extensions',\n 'pygraphviz',\n 'future-breakpoint',\n ],\n\n # Required packages required to run async tasks\n 'async': [\n 'celery == 4.1.0',\n ]\n}\n\n\nsetup(\n name='Nitrate',\n version=pkg_version,\n description='Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='[email protected]',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n install_requires=install_requires,\n extras_require=extras_require,\n packages=find_packages(),\n include_package_data=True,\n classifiers=[\n 'Framework :: Django',\n 'Framework :: Django :: 1.11',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n)\n", "path": "setup.py"}]}
1,139
100
gh_patches_debug_37911
rasdani/github-patches
git_diff
comic__grand-challenge.org-2466
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Notification and newsletter emails should only go to active users At the moment these emails are sent to all users, deactivated or not. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `app/grandchallenge/notifications/tasks.py` Content: ``` 1 from celery import shared_task 2 from django.core.paginator import Paginator 3 from django.utils.timezone import now 4 5 from grandchallenge.notifications.emails import send_unread_notifications_email 6 from grandchallenge.profiles.models import UserProfile 7 8 9 @shared_task 10 def send_unread_notification_emails(): 11 profiles = ( 12 UserProfile.objects.filter( 13 receive_notification_emails=True, user__notification__read=False 14 ) 15 .distinct() 16 .prefetch_related("user__notification_set") 17 .order_by("pk") 18 ) 19 paginator = Paginator(profiles, 1000) 20 21 for page_nr in paginator.page_range: 22 current_page_profiles = paginator.page(page_nr).object_list 23 current_time = now() 24 recipients = {} 25 for profile in current_page_profiles: 26 unread_notifications = [ 27 n 28 for n in profile.user.notification_set.all() 29 if not n.read 30 and ( 31 profile.notification_email_last_sent_at is None 32 or n.created > profile.notification_email_last_sent_at 33 ) 34 ] 35 if unread_notifications: 36 recipients[profile] = len(unread_notifications) 37 profile.notification_email_last_sent_at = current_time 38 39 UserProfile.objects.bulk_update( 40 current_page_profiles, ["notification_email_last_sent_at"] 41 ) 42 send_unread_notifications_email(recipients) 43 ``` Path: `app/grandchallenge/emails/tasks.py` Content: ``` 1 from celery import shared_task 2 from django.conf import settings 3 from django.contrib.auth import get_user_model 4 from django.contrib.sites.models import Site 5 from django.core.exceptions import ObjectDoesNotExist 6 from django.core.mail import EmailMultiAlternatives, get_connection 7 from django.core.paginator import Paginator 8 from django.template.loader import render_to_string 9 from django.utils.html import strip_tags 10 from django.utils.timezone import now 11 12 from grandchallenge.core.templatetags.bleach import md2html 13 from grandchallenge.emails.models import Email 14 from grandchallenge.emails.utils import SendActionChoices 15 from grandchallenge.subdomains.utils import reverse 16 17 18 def get_receivers(action): 19 if action == SendActionChoices.MAILING_LIST: 20 receivers = ( 21 get_user_model() 22 .objects.filter(user_profile__receive_newsletter=True) 23 .order_by("pk") 24 ) 25 elif action == SendActionChoices.STAFF: 26 receivers = ( 27 get_user_model().objects.filter(is_staff=True).order_by("pk") 28 ) 29 elif action == SendActionChoices.CHALLENGE_ADMINS: 30 receivers = ( 31 get_user_model() 32 .objects.filter( 33 groups__admins_of_challenge__isnull=False, 34 user_profile__receive_newsletter=True, 35 ) 36 .distinct() 37 .order_by("pk") 38 ) 39 elif action == SendActionChoices.READER_STUDY_EDITORS: 40 receivers = ( 41 get_user_model() 42 .objects.filter( 43 groups__editors_of_readerstudy__isnull=False, 44 user_profile__receive_newsletter=True, 45 ) 46 .distinct() 47 .order_by("pk") 48 ) 49 elif action == SendActionChoices.ALGORITHM_EDITORS: 50 receivers = ( 51 get_user_model() 52 .objects.filter( 53 groups__editors_of_algorithm__isnull=False, 54 user_profile__receive_newsletter=True, 55 ) 56 .distinct() 57 .order_by("pk") 58 ) 59 60 return receivers 61 62 63 def send_mass_html_email(datatuple): 64 connection = get_connection() 65 messages = [] 66 for subject, message, sender, recipient, html in datatuple: 67 email = EmailMultiAlternatives( 68 subject, message, sender, recipient, connection=connection 69 ) 70 email.attach_alternative(html, "text/html") 71 messages.append(email) 72 return connection.send_messages(messages) 73 74 75 @shared_task(**settings.CELERY_TASK_DECORATOR_KWARGS["acks-late-micro-short"]) 76 def send_bulk_email(action, email_pk): 77 try: 78 email = Email.objects.filter(sent=False).get(pk=email_pk) 79 except ObjectDoesNotExist: 80 return 81 subject = email.subject 82 body = email.body 83 html_body = md2html(body) 84 receivers = get_receivers(action=action) 85 paginator = Paginator(receivers, 100) 86 site = Site.objects.get_current() 87 if email.status_report: 88 start_page = email.status_report["last_processed_batch"] 89 else: 90 start_page = 0 91 for page_nr in paginator.page_range[start_page:]: 92 messages = [] 93 for recipient in paginator.page(page_nr).object_list: 94 user = get_user_model().objects.get(pk=recipient.pk) 95 link = reverse( 96 "profile-update", kwargs={"username": user.username} 97 ) 98 html_content = render_to_string( 99 "vendor/mailgun_transactional_emails/action.html", 100 { 101 "title": subject, 102 "username": user.username, 103 "content": html_body, 104 "link": link, 105 }, 106 ) 107 html_content_without_linebreaks = html_content.replace("\n", "") 108 text_content = strip_tags(html_content_without_linebreaks) 109 messages.append( 110 ( 111 f"[{site.domain.lower()}] {subject}", 112 text_content, 113 settings.DEFAULT_FROM_EMAIL, 114 [user.email], 115 html_content_without_linebreaks, 116 ) 117 ) 118 send_mass_html_email(messages) 119 email.status_report = {"last_processed_batch": page_nr} 120 email.save() 121 122 email.sent = True 123 email.sent_at = now() 124 email.status_report = None 125 email.save() 126 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/app/grandchallenge/emails/tasks.py b/app/grandchallenge/emails/tasks.py --- a/app/grandchallenge/emails/tasks.py +++ b/app/grandchallenge/emails/tasks.py @@ -19,12 +19,16 @@ if action == SendActionChoices.MAILING_LIST: receivers = ( get_user_model() - .objects.filter(user_profile__receive_newsletter=True) + .objects.filter( + user_profile__receive_newsletter=True, is_active=True + ) .order_by("pk") ) elif action == SendActionChoices.STAFF: receivers = ( - get_user_model().objects.filter(is_staff=True).order_by("pk") + get_user_model() + .objects.filter(is_staff=True, is_active=True) + .order_by("pk") ) elif action == SendActionChoices.CHALLENGE_ADMINS: receivers = ( @@ -32,6 +36,7 @@ .objects.filter( groups__admins_of_challenge__isnull=False, user_profile__receive_newsletter=True, + is_active=True, ) .distinct() .order_by("pk") @@ -42,6 +47,7 @@ .objects.filter( groups__editors_of_readerstudy__isnull=False, user_profile__receive_newsletter=True, + is_active=True, ) .distinct() .order_by("pk") @@ -52,6 +58,7 @@ .objects.filter( groups__editors_of_algorithm__isnull=False, user_profile__receive_newsletter=True, + is_active=True, ) .distinct() .order_by("pk") diff --git a/app/grandchallenge/notifications/tasks.py b/app/grandchallenge/notifications/tasks.py --- a/app/grandchallenge/notifications/tasks.py +++ b/app/grandchallenge/notifications/tasks.py @@ -10,7 +10,9 @@ def send_unread_notification_emails(): profiles = ( UserProfile.objects.filter( - receive_notification_emails=True, user__notification__read=False + receive_notification_emails=True, + user__notification__read=False, + user__is_active=True, ) .distinct() .prefetch_related("user__notification_set")
{"golden_diff": "diff --git a/app/grandchallenge/emails/tasks.py b/app/grandchallenge/emails/tasks.py\n--- a/app/grandchallenge/emails/tasks.py\n+++ b/app/grandchallenge/emails/tasks.py\n@@ -19,12 +19,16 @@\n if action == SendActionChoices.MAILING_LIST:\r\n receivers = (\r\n get_user_model()\r\n- .objects.filter(user_profile__receive_newsletter=True)\r\n+ .objects.filter(\r\n+ user_profile__receive_newsletter=True, is_active=True\r\n+ )\r\n .order_by(\"pk\")\r\n )\r\n elif action == SendActionChoices.STAFF:\r\n receivers = (\r\n- get_user_model().objects.filter(is_staff=True).order_by(\"pk\")\r\n+ get_user_model()\r\n+ .objects.filter(is_staff=True, is_active=True)\r\n+ .order_by(\"pk\")\r\n )\r\n elif action == SendActionChoices.CHALLENGE_ADMINS:\r\n receivers = (\r\n@@ -32,6 +36,7 @@\n .objects.filter(\r\n groups__admins_of_challenge__isnull=False,\r\n user_profile__receive_newsletter=True,\r\n+ is_active=True,\r\n )\r\n .distinct()\r\n .order_by(\"pk\")\r\n@@ -42,6 +47,7 @@\n .objects.filter(\r\n groups__editors_of_readerstudy__isnull=False,\r\n user_profile__receive_newsletter=True,\r\n+ is_active=True,\r\n )\r\n .distinct()\r\n .order_by(\"pk\")\r\n@@ -52,6 +58,7 @@\n .objects.filter(\r\n groups__editors_of_algorithm__isnull=False,\r\n user_profile__receive_newsletter=True,\r\n+ is_active=True,\r\n )\r\n .distinct()\r\n .order_by(\"pk\")\r\ndiff --git a/app/grandchallenge/notifications/tasks.py b/app/grandchallenge/notifications/tasks.py\n--- a/app/grandchallenge/notifications/tasks.py\n+++ b/app/grandchallenge/notifications/tasks.py\n@@ -10,7 +10,9 @@\n def send_unread_notification_emails():\r\n profiles = (\r\n UserProfile.objects.filter(\r\n- receive_notification_emails=True, user__notification__read=False\r\n+ receive_notification_emails=True,\r\n+ user__notification__read=False,\r\n+ user__is_active=True,\r\n )\r\n .distinct()\r\n .prefetch_related(\"user__notification_set\")\n", "issue": "Notification and newsletter emails should only go to active users\nAt the moment these emails are sent to all users, deactivated or not.\n", "before_files": [{"content": "from celery import shared_task\r\nfrom django.core.paginator import Paginator\r\nfrom django.utils.timezone import now\r\n\r\nfrom grandchallenge.notifications.emails import send_unread_notifications_email\r\nfrom grandchallenge.profiles.models import UserProfile\r\n\r\n\r\n@shared_task\r\ndef send_unread_notification_emails():\r\n profiles = (\r\n UserProfile.objects.filter(\r\n receive_notification_emails=True, user__notification__read=False\r\n )\r\n .distinct()\r\n .prefetch_related(\"user__notification_set\")\r\n .order_by(\"pk\")\r\n )\r\n paginator = Paginator(profiles, 1000)\r\n\r\n for page_nr in paginator.page_range:\r\n current_page_profiles = paginator.page(page_nr).object_list\r\n current_time = now()\r\n recipients = {}\r\n for profile in current_page_profiles:\r\n unread_notifications = [\r\n n\r\n for n in profile.user.notification_set.all()\r\n if not n.read\r\n and (\r\n profile.notification_email_last_sent_at is None\r\n or n.created > profile.notification_email_last_sent_at\r\n )\r\n ]\r\n if unread_notifications:\r\n recipients[profile] = len(unread_notifications)\r\n profile.notification_email_last_sent_at = current_time\r\n\r\n UserProfile.objects.bulk_update(\r\n current_page_profiles, [\"notification_email_last_sent_at\"]\r\n )\r\n send_unread_notifications_email(recipients)\r\n", "path": "app/grandchallenge/notifications/tasks.py"}, {"content": "from celery import shared_task\r\nfrom django.conf import settings\r\nfrom django.contrib.auth import get_user_model\r\nfrom django.contrib.sites.models import Site\r\nfrom django.core.exceptions import ObjectDoesNotExist\r\nfrom django.core.mail import EmailMultiAlternatives, get_connection\r\nfrom django.core.paginator import Paginator\r\nfrom django.template.loader import render_to_string\r\nfrom django.utils.html import strip_tags\r\nfrom django.utils.timezone import now\r\n\r\nfrom grandchallenge.core.templatetags.bleach import md2html\r\nfrom grandchallenge.emails.models import Email\r\nfrom grandchallenge.emails.utils import SendActionChoices\r\nfrom grandchallenge.subdomains.utils import reverse\r\n\r\n\r\ndef get_receivers(action):\r\n if action == SendActionChoices.MAILING_LIST:\r\n receivers = (\r\n get_user_model()\r\n .objects.filter(user_profile__receive_newsletter=True)\r\n .order_by(\"pk\")\r\n )\r\n elif action == SendActionChoices.STAFF:\r\n receivers = (\r\n get_user_model().objects.filter(is_staff=True).order_by(\"pk\")\r\n )\r\n elif action == SendActionChoices.CHALLENGE_ADMINS:\r\n receivers = (\r\n get_user_model()\r\n .objects.filter(\r\n groups__admins_of_challenge__isnull=False,\r\n user_profile__receive_newsletter=True,\r\n )\r\n .distinct()\r\n .order_by(\"pk\")\r\n )\r\n elif action == SendActionChoices.READER_STUDY_EDITORS:\r\n receivers = (\r\n get_user_model()\r\n .objects.filter(\r\n groups__editors_of_readerstudy__isnull=False,\r\n user_profile__receive_newsletter=True,\r\n )\r\n .distinct()\r\n .order_by(\"pk\")\r\n )\r\n elif action == SendActionChoices.ALGORITHM_EDITORS:\r\n receivers = (\r\n get_user_model()\r\n .objects.filter(\r\n groups__editors_of_algorithm__isnull=False,\r\n user_profile__receive_newsletter=True,\r\n )\r\n .distinct()\r\n .order_by(\"pk\")\r\n )\r\n\r\n return receivers\r\n\r\n\r\ndef send_mass_html_email(datatuple):\r\n connection = get_connection()\r\n messages = []\r\n for subject, message, sender, recipient, html in datatuple:\r\n email = EmailMultiAlternatives(\r\n subject, message, sender, recipient, connection=connection\r\n )\r\n email.attach_alternative(html, \"text/html\")\r\n messages.append(email)\r\n return connection.send_messages(messages)\r\n\r\n\r\n@shared_task(**settings.CELERY_TASK_DECORATOR_KWARGS[\"acks-late-micro-short\"])\r\ndef send_bulk_email(action, email_pk):\r\n try:\r\n email = Email.objects.filter(sent=False).get(pk=email_pk)\r\n except ObjectDoesNotExist:\r\n return\r\n subject = email.subject\r\n body = email.body\r\n html_body = md2html(body)\r\n receivers = get_receivers(action=action)\r\n paginator = Paginator(receivers, 100)\r\n site = Site.objects.get_current()\r\n if email.status_report:\r\n start_page = email.status_report[\"last_processed_batch\"]\r\n else:\r\n start_page = 0\r\n for page_nr in paginator.page_range[start_page:]:\r\n messages = []\r\n for recipient in paginator.page(page_nr).object_list:\r\n user = get_user_model().objects.get(pk=recipient.pk)\r\n link = reverse(\r\n \"profile-update\", kwargs={\"username\": user.username}\r\n )\r\n html_content = render_to_string(\r\n \"vendor/mailgun_transactional_emails/action.html\",\r\n {\r\n \"title\": subject,\r\n \"username\": user.username,\r\n \"content\": html_body,\r\n \"link\": link,\r\n },\r\n )\r\n html_content_without_linebreaks = html_content.replace(\"\\n\", \"\")\r\n text_content = strip_tags(html_content_without_linebreaks)\r\n messages.append(\r\n (\r\n f\"[{site.domain.lower()}] {subject}\",\r\n text_content,\r\n settings.DEFAULT_FROM_EMAIL,\r\n [user.email],\r\n html_content_without_linebreaks,\r\n )\r\n )\r\n send_mass_html_email(messages)\r\n email.status_report = {\"last_processed_batch\": page_nr}\r\n email.save()\r\n\r\n email.sent = True\r\n email.sent_at = now()\r\n email.status_report = None\r\n email.save()\r\n", "path": "app/grandchallenge/emails/tasks.py"}], "after_files": [{"content": "from celery import shared_task\r\nfrom django.core.paginator import Paginator\r\nfrom django.utils.timezone import now\r\n\r\nfrom grandchallenge.notifications.emails import send_unread_notifications_email\r\nfrom grandchallenge.profiles.models import UserProfile\r\n\r\n\r\n@shared_task\r\ndef send_unread_notification_emails():\r\n profiles = (\r\n UserProfile.objects.filter(\r\n receive_notification_emails=True,\r\n user__notification__read=False,\r\n user__is_active=True,\r\n )\r\n .distinct()\r\n .prefetch_related(\"user__notification_set\")\r\n .order_by(\"pk\")\r\n )\r\n paginator = Paginator(profiles, 1000)\r\n\r\n for page_nr in paginator.page_range:\r\n current_page_profiles = paginator.page(page_nr).object_list\r\n current_time = now()\r\n recipients = {}\r\n for profile in current_page_profiles:\r\n unread_notifications = [\r\n n\r\n for n in profile.user.notification_set.all()\r\n if not n.read\r\n and (\r\n profile.notification_email_last_sent_at is None\r\n or n.created > profile.notification_email_last_sent_at\r\n )\r\n ]\r\n if unread_notifications:\r\n recipients[profile] = len(unread_notifications)\r\n profile.notification_email_last_sent_at = current_time\r\n\r\n UserProfile.objects.bulk_update(\r\n current_page_profiles, [\"notification_email_last_sent_at\"]\r\n )\r\n send_unread_notifications_email(recipients)\r\n", "path": "app/grandchallenge/notifications/tasks.py"}, {"content": "from celery import shared_task\r\nfrom django.conf import settings\r\nfrom django.contrib.auth import get_user_model\r\nfrom django.contrib.sites.models import Site\r\nfrom django.core.exceptions import ObjectDoesNotExist\r\nfrom django.core.mail import EmailMultiAlternatives, get_connection\r\nfrom django.core.paginator import Paginator\r\nfrom django.template.loader import render_to_string\r\nfrom django.utils.html import strip_tags\r\nfrom django.utils.timezone import now\r\n\r\nfrom grandchallenge.core.templatetags.bleach import md2html\r\nfrom grandchallenge.emails.models import Email\r\nfrom grandchallenge.emails.utils import SendActionChoices\r\nfrom grandchallenge.subdomains.utils import reverse\r\n\r\n\r\ndef get_receivers(action):\r\n if action == SendActionChoices.MAILING_LIST:\r\n receivers = (\r\n get_user_model()\r\n .objects.filter(\r\n user_profile__receive_newsletter=True, is_active=True\r\n )\r\n .order_by(\"pk\")\r\n )\r\n elif action == SendActionChoices.STAFF:\r\n receivers = (\r\n get_user_model()\r\n .objects.filter(is_staff=True, is_active=True)\r\n .order_by(\"pk\")\r\n )\r\n elif action == SendActionChoices.CHALLENGE_ADMINS:\r\n receivers = (\r\n get_user_model()\r\n .objects.filter(\r\n groups__admins_of_challenge__isnull=False,\r\n user_profile__receive_newsletter=True,\r\n is_active=True,\r\n )\r\n .distinct()\r\n .order_by(\"pk\")\r\n )\r\n elif action == SendActionChoices.READER_STUDY_EDITORS:\r\n receivers = (\r\n get_user_model()\r\n .objects.filter(\r\n groups__editors_of_readerstudy__isnull=False,\r\n user_profile__receive_newsletter=True,\r\n is_active=True,\r\n )\r\n .distinct()\r\n .order_by(\"pk\")\r\n )\r\n elif action == SendActionChoices.ALGORITHM_EDITORS:\r\n receivers = (\r\n get_user_model()\r\n .objects.filter(\r\n groups__editors_of_algorithm__isnull=False,\r\n user_profile__receive_newsletter=True,\r\n is_active=True,\r\n )\r\n .distinct()\r\n .order_by(\"pk\")\r\n )\r\n\r\n return receivers\r\n\r\n\r\ndef send_mass_html_email(datatuple):\r\n connection = get_connection()\r\n messages = []\r\n for subject, message, sender, recipient, html in datatuple:\r\n email = EmailMultiAlternatives(\r\n subject, message, sender, recipient, connection=connection\r\n )\r\n email.attach_alternative(html, \"text/html\")\r\n messages.append(email)\r\n return connection.send_messages(messages)\r\n\r\n\r\n@shared_task(**settings.CELERY_TASK_DECORATOR_KWARGS[\"acks-late-micro-short\"])\r\ndef send_bulk_email(action, email_pk):\r\n try:\r\n email = Email.objects.filter(sent=False).get(pk=email_pk)\r\n except ObjectDoesNotExist:\r\n return\r\n subject = email.subject\r\n body = email.body\r\n html_body = md2html(body)\r\n receivers = get_receivers(action=action)\r\n paginator = Paginator(receivers, 100)\r\n site = Site.objects.get_current()\r\n if email.status_report:\r\n start_page = email.status_report[\"last_processed_batch\"]\r\n else:\r\n start_page = 0\r\n for page_nr in paginator.page_range[start_page:]:\r\n messages = []\r\n for recipient in paginator.page(page_nr).object_list:\r\n user = get_user_model().objects.get(pk=recipient.pk)\r\n link = reverse(\r\n \"profile-update\", kwargs={\"username\": user.username}\r\n )\r\n html_content = render_to_string(\r\n \"vendor/mailgun_transactional_emails/action.html\",\r\n {\r\n \"title\": subject,\r\n \"username\": user.username,\r\n \"content\": html_body,\r\n \"link\": link,\r\n },\r\n )\r\n html_content_without_linebreaks = html_content.replace(\"\\n\", \"\")\r\n text_content = strip_tags(html_content_without_linebreaks)\r\n messages.append(\r\n (\r\n f\"[{site.domain.lower()}] {subject}\",\r\n text_content,\r\n settings.DEFAULT_FROM_EMAIL,\r\n [user.email],\r\n html_content_without_linebreaks,\r\n )\r\n )\r\n send_mass_html_email(messages)\r\n email.status_report = {\"last_processed_batch\": page_nr}\r\n email.save()\r\n\r\n email.sent = True\r\n email.sent_at = now()\r\n email.status_report = None\r\n email.save()\r\n", "path": "app/grandchallenge/emails/tasks.py"}]}
1,792
502
gh_patches_debug_7678
rasdani/github-patches
git_diff
xonsh__xonsh-490
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add 'edit and execute command` function bash and zsh (and I'm sure others) have an edit and execute command that is invoked with Ctrl-e + Ctrl-x which opens the current contents of the command prompt in the system editor. Edits made to the command are then run on save and quit. It could be this functionality is already baked in and I'm missing it? In any case, it's especially handy for longer commands -- and given xonsh's multi-line input capabilities this seems like a good fit. Thoughts? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `xonsh/prompt_toolkit_shell.py` Content: ``` 1 """The prompt_toolkit based xonsh shell""" 2 import os 3 import builtins 4 from warnings import warn 5 6 from prompt_toolkit.shortcuts import get_input 7 from prompt_toolkit.key_binding.manager import KeyBindingManager 8 from prompt_toolkit.auto_suggest import AutoSuggestFromHistory 9 from pygments.token import Token 10 from pygments.style import Style 11 12 from xonsh.base_shell import BaseShell 13 from xonsh.tools import format_prompt_for_prompt_toolkit 14 from xonsh.prompt_toolkit_completer import PromptToolkitCompleter 15 from xonsh.prompt_toolkit_history import LimitedFileHistory 16 from xonsh.prompt_toolkit_key_bindings import load_xonsh_bindings 17 18 19 def setup_history(): 20 """Creates history object.""" 21 env = builtins.__xonsh_env__ 22 hfile = env.get('XONSH_HISTORY_FILE') 23 history = LimitedFileHistory() 24 try: 25 history.read_history_file(hfile) 26 except PermissionError: 27 warn('do not have read permissions for ' + hfile, RuntimeWarning) 28 return history 29 30 31 def teardown_history(history): 32 """Tears down the history object.""" 33 env = builtins.__xonsh_env__ 34 hsize = env.get('XONSH_HISTORY_SIZE')[0] 35 hfile = env.get('XONSH_HISTORY_FILE') 36 try: 37 history.save_history_to_file(hfile, hsize) 38 except PermissionError: 39 warn('do not have write permissions for ' + hfile, RuntimeWarning) 40 41 42 class PromptToolkitShell(BaseShell): 43 """The xonsh shell.""" 44 45 def __init__(self, **kwargs): 46 super().__init__(**kwargs) 47 self.history = setup_history() 48 self.pt_completer = PromptToolkitCompleter(self.completer, self.ctx) 49 self.key_bindings_manager = KeyBindingManager( 50 enable_auto_suggest_bindings=True, 51 enable_search=True, enable_abort_and_exit_bindings=True) 52 load_xonsh_bindings(self.key_bindings_manager) 53 54 def __del__(self): 55 if self.history is not None: 56 teardown_history(self.history) 57 58 def cmdloop(self, intro=None): 59 """Enters a loop that reads and execute input from user.""" 60 if intro: 61 print(intro) 62 _auto_suggest = AutoSuggestFromHistory() 63 while not builtins.__xonsh_exit__: 64 try: 65 token_func, style_cls = self._get_prompt_tokens_and_style() 66 mouse_support = builtins.__xonsh_env__.get('MOUSE_SUPPORT') 67 if builtins.__xonsh_env__.get('AUTO_SUGGEST'): 68 auto_suggest = _auto_suggest 69 else: 70 auto_suggest = None 71 completions_display = builtins.__xonsh_env__.get('COMPLETIONS_DISPLAY') 72 multicolumn = (completions_display == 'multi') 73 completer = None if completions_display == 'none' else self.pt_completer 74 line = get_input( 75 mouse_support=mouse_support, 76 auto_suggest=auto_suggest, 77 get_prompt_tokens=token_func, 78 style=style_cls, 79 completer=completer, 80 history=self.history, 81 key_bindings_registry=self.key_bindings_manager.registry, 82 display_completions_in_columns=multicolumn) 83 if not line: 84 self.emptyline() 85 else: 86 line = self.precmd(line) 87 self.default(line) 88 except KeyboardInterrupt: 89 self.reset_buffer() 90 except EOFError: 91 break 92 93 def _get_prompt_tokens_and_style(self): 94 """Returns function to pass as prompt to prompt_toolkit.""" 95 token_names, cstyles, strings = format_prompt_for_prompt_toolkit(self.prompt) 96 tokens = [getattr(Token, n) for n in token_names] 97 98 def get_tokens(cli): 99 return list(zip(tokens, strings)) 100 101 class CustomStyle(Style): 102 styles = { 103 Token.Menu.Completions.Completion.Current: 'bg:#00aaaa #000000', 104 Token.Menu.Completions.Completion: 'bg:#008888 #ffffff', 105 Token.Menu.Completions.ProgressButton: 'bg:#003333', 106 Token.Menu.Completions.ProgressBar: 'bg:#00aaaa', 107 Token.AutoSuggestion: '#666666', 108 Token.Aborted: '#888888', 109 } 110 # update with the prompt styles 111 styles.update({t: s for (t, s) in zip(tokens, cstyles)}) 112 # Update with with any user styles 113 userstyle = builtins.__xonsh_env__.get('PROMPT_TOOLKIT_STYLES') 114 if userstyle is not None: 115 styles.update(userstyle) 116 117 return get_tokens, CustomStyle 118 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/xonsh/prompt_toolkit_shell.py b/xonsh/prompt_toolkit_shell.py --- a/xonsh/prompt_toolkit_shell.py +++ b/xonsh/prompt_toolkit_shell.py @@ -48,6 +48,7 @@ self.pt_completer = PromptToolkitCompleter(self.completer, self.ctx) self.key_bindings_manager = KeyBindingManager( enable_auto_suggest_bindings=True, + enable_open_in_editor=True, enable_search=True, enable_abort_and_exit_bindings=True) load_xonsh_bindings(self.key_bindings_manager)
{"golden_diff": "diff --git a/xonsh/prompt_toolkit_shell.py b/xonsh/prompt_toolkit_shell.py\n--- a/xonsh/prompt_toolkit_shell.py\n+++ b/xonsh/prompt_toolkit_shell.py\n@@ -48,6 +48,7 @@\n self.pt_completer = PromptToolkitCompleter(self.completer, self.ctx)\n self.key_bindings_manager = KeyBindingManager(\n enable_auto_suggest_bindings=True,\n+ enable_open_in_editor=True,\n enable_search=True, enable_abort_and_exit_bindings=True)\n load_xonsh_bindings(self.key_bindings_manager)\n", "issue": "Add 'edit and execute command` function\nbash and zsh (and I'm sure others) have an edit and execute command that is invoked with Ctrl-e + Ctrl-x which opens the current contents of the command prompt in the system editor. Edits made to the command are then run on save and quit. \n\nIt could be this functionality is already baked in and I'm missing it? In any case, it's especially handy for longer commands -- and given xonsh's multi-line input capabilities this seems like a good fit.\n\nThoughts?\n\n", "before_files": [{"content": "\"\"\"The prompt_toolkit based xonsh shell\"\"\"\nimport os\nimport builtins\nfrom warnings import warn\n\nfrom prompt_toolkit.shortcuts import get_input\nfrom prompt_toolkit.key_binding.manager import KeyBindingManager\nfrom prompt_toolkit.auto_suggest import AutoSuggestFromHistory\nfrom pygments.token import Token\nfrom pygments.style import Style\n\nfrom xonsh.base_shell import BaseShell\nfrom xonsh.tools import format_prompt_for_prompt_toolkit\nfrom xonsh.prompt_toolkit_completer import PromptToolkitCompleter\nfrom xonsh.prompt_toolkit_history import LimitedFileHistory\nfrom xonsh.prompt_toolkit_key_bindings import load_xonsh_bindings\n\n\ndef setup_history():\n \"\"\"Creates history object.\"\"\"\n env = builtins.__xonsh_env__\n hfile = env.get('XONSH_HISTORY_FILE')\n history = LimitedFileHistory()\n try:\n history.read_history_file(hfile)\n except PermissionError:\n warn('do not have read permissions for ' + hfile, RuntimeWarning)\n return history\n\n\ndef teardown_history(history):\n \"\"\"Tears down the history object.\"\"\"\n env = builtins.__xonsh_env__\n hsize = env.get('XONSH_HISTORY_SIZE')[0]\n hfile = env.get('XONSH_HISTORY_FILE')\n try:\n history.save_history_to_file(hfile, hsize)\n except PermissionError:\n warn('do not have write permissions for ' + hfile, RuntimeWarning)\n\n\nclass PromptToolkitShell(BaseShell):\n \"\"\"The xonsh shell.\"\"\"\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.history = setup_history()\n self.pt_completer = PromptToolkitCompleter(self.completer, self.ctx)\n self.key_bindings_manager = KeyBindingManager(\n enable_auto_suggest_bindings=True,\n enable_search=True, enable_abort_and_exit_bindings=True)\n load_xonsh_bindings(self.key_bindings_manager)\n\n def __del__(self):\n if self.history is not None:\n teardown_history(self.history)\n\n def cmdloop(self, intro=None):\n \"\"\"Enters a loop that reads and execute input from user.\"\"\"\n if intro:\n print(intro)\n _auto_suggest = AutoSuggestFromHistory()\n while not builtins.__xonsh_exit__:\n try:\n token_func, style_cls = self._get_prompt_tokens_and_style()\n mouse_support = builtins.__xonsh_env__.get('MOUSE_SUPPORT')\n if builtins.__xonsh_env__.get('AUTO_SUGGEST'):\n auto_suggest = _auto_suggest\n else:\n auto_suggest = None\n completions_display = builtins.__xonsh_env__.get('COMPLETIONS_DISPLAY')\n multicolumn = (completions_display == 'multi')\n completer = None if completions_display == 'none' else self.pt_completer\n line = get_input(\n mouse_support=mouse_support,\n auto_suggest=auto_suggest,\n get_prompt_tokens=token_func,\n style=style_cls,\n completer=completer,\n history=self.history,\n key_bindings_registry=self.key_bindings_manager.registry,\n display_completions_in_columns=multicolumn)\n if not line:\n self.emptyline()\n else:\n line = self.precmd(line)\n self.default(line)\n except KeyboardInterrupt:\n self.reset_buffer()\n except EOFError:\n break\n\n def _get_prompt_tokens_and_style(self):\n \"\"\"Returns function to pass as prompt to prompt_toolkit.\"\"\"\n token_names, cstyles, strings = format_prompt_for_prompt_toolkit(self.prompt)\n tokens = [getattr(Token, n) for n in token_names]\n\n def get_tokens(cli):\n return list(zip(tokens, strings))\n\n class CustomStyle(Style):\n styles = {\n Token.Menu.Completions.Completion.Current: 'bg:#00aaaa #000000',\n Token.Menu.Completions.Completion: 'bg:#008888 #ffffff',\n Token.Menu.Completions.ProgressButton: 'bg:#003333',\n Token.Menu.Completions.ProgressBar: 'bg:#00aaaa',\n Token.AutoSuggestion: '#666666',\n Token.Aborted: '#888888',\n }\n # update with the prompt styles\n styles.update({t: s for (t, s) in zip(tokens, cstyles)})\n # Update with with any user styles\n userstyle = builtins.__xonsh_env__.get('PROMPT_TOOLKIT_STYLES')\n if userstyle is not None:\n styles.update(userstyle)\n\n return get_tokens, CustomStyle\n", "path": "xonsh/prompt_toolkit_shell.py"}], "after_files": [{"content": "\"\"\"The prompt_toolkit based xonsh shell\"\"\"\nimport os\nimport builtins\nfrom warnings import warn\n\nfrom prompt_toolkit.shortcuts import get_input\nfrom prompt_toolkit.key_binding.manager import KeyBindingManager\nfrom prompt_toolkit.auto_suggest import AutoSuggestFromHistory\nfrom pygments.token import Token\nfrom pygments.style import Style\n\nfrom xonsh.base_shell import BaseShell\nfrom xonsh.tools import format_prompt_for_prompt_toolkit\nfrom xonsh.prompt_toolkit_completer import PromptToolkitCompleter\nfrom xonsh.prompt_toolkit_history import LimitedFileHistory\nfrom xonsh.prompt_toolkit_key_bindings import load_xonsh_bindings\n\n\ndef setup_history():\n \"\"\"Creates history object.\"\"\"\n env = builtins.__xonsh_env__\n hfile = env.get('XONSH_HISTORY_FILE')\n history = LimitedFileHistory()\n try:\n history.read_history_file(hfile)\n except PermissionError:\n warn('do not have read permissions for ' + hfile, RuntimeWarning)\n return history\n\n\ndef teardown_history(history):\n \"\"\"Tears down the history object.\"\"\"\n env = builtins.__xonsh_env__\n hsize = env.get('XONSH_HISTORY_SIZE')[0]\n hfile = env.get('XONSH_HISTORY_FILE')\n try:\n history.save_history_to_file(hfile, hsize)\n except PermissionError:\n warn('do not have write permissions for ' + hfile, RuntimeWarning)\n\n\nclass PromptToolkitShell(BaseShell):\n \"\"\"The xonsh shell.\"\"\"\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.history = setup_history()\n self.pt_completer = PromptToolkitCompleter(self.completer, self.ctx)\n self.key_bindings_manager = KeyBindingManager(\n enable_auto_suggest_bindings=True,\n enable_open_in_editor=True,\n enable_search=True, enable_abort_and_exit_bindings=True)\n load_xonsh_bindings(self.key_bindings_manager)\n\n def __del__(self):\n if self.history is not None:\n teardown_history(self.history)\n\n def cmdloop(self, intro=None):\n \"\"\"Enters a loop that reads and execute input from user.\"\"\"\n if intro:\n print(intro)\n _auto_suggest = AutoSuggestFromHistory()\n while not builtins.__xonsh_exit__:\n try:\n token_func, style_cls = self._get_prompt_tokens_and_style()\n mouse_support = builtins.__xonsh_env__.get('MOUSE_SUPPORT')\n if builtins.__xonsh_env__.get('AUTO_SUGGEST'):\n auto_suggest = _auto_suggest\n else:\n auto_suggest = None\n completions_display = builtins.__xonsh_env__.get('COMPLETIONS_DISPLAY')\n multicolumn = (completions_display == 'multi')\n completer = None if completions_display == 'none' else self.pt_completer\n line = get_input(\n mouse_support=mouse_support,\n auto_suggest=auto_suggest,\n get_prompt_tokens=token_func,\n style=style_cls,\n completer=completer,\n history=self.history,\n key_bindings_registry=self.key_bindings_manager.registry,\n display_completions_in_columns=multicolumn)\n if not line:\n self.emptyline()\n else:\n line = self.precmd(line)\n self.default(line)\n except KeyboardInterrupt:\n self.reset_buffer()\n except EOFError:\n break\n\n def _get_prompt_tokens_and_style(self):\n \"\"\"Returns function to pass as prompt to prompt_toolkit.\"\"\"\n token_names, cstyles, strings = format_prompt_for_prompt_toolkit(self.prompt)\n tokens = [getattr(Token, n) for n in token_names]\n\n def get_tokens(cli):\n return list(zip(tokens, strings))\n\n class CustomStyle(Style):\n styles = {\n Token.Menu.Completions.Completion.Current: 'bg:#00aaaa #000000',\n Token.Menu.Completions.Completion: 'bg:#008888 #ffffff',\n Token.Menu.Completions.ProgressButton: 'bg:#003333',\n Token.Menu.Completions.ProgressBar: 'bg:#00aaaa',\n Token.AutoSuggestion: '#666666',\n Token.Aborted: '#888888',\n }\n # update with the prompt styles\n styles.update({t: s for (t, s) in zip(tokens, cstyles)})\n # Update with with any user styles\n userstyle = builtins.__xonsh_env__.get('PROMPT_TOOLKIT_STYLES')\n if userstyle is not None:\n styles.update(userstyle)\n\n return get_tokens, CustomStyle\n", "path": "xonsh/prompt_toolkit_shell.py"}]}
1,625
128
gh_patches_debug_6573
rasdani/github-patches
git_diff
OCA__bank-payment-44
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Code error in account_payment_sale I'm reporting a bug in my own code : in the 7.0 branch, in account_payment_sale/model/sale.py line 41, we have the following code: partner.customer_payment_mode.id or False, We should NOT have a coma at the end of this line. When we run the module in Odoo 8.0, it will crash when creating a sale order via a call to create because addons/sale/sale.py in odoo 8.0 silently plays the on_change and then it will try to create {"payment_mode_id: (False,)}, when leads to a crash --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `account_payment_sale/model/sale.py` Content: ``` 1 # -*- encoding: utf-8 -*- 2 ############################################################################## 3 # 4 # Account Payment Sale module for OpenERP 5 # Copyright (C) 2014 Akretion (http://www.akretion.com) 6 # @author Alexis de Lattre <[email protected]> 7 # 8 # This program is free software: you can redistribute it and/or modify 9 # it under the terms of the GNU Affero General Public License as 10 # published by the Free Software Foundation, either version 3 of the 11 # License, or (at your option) any later version. 12 # 13 # This program is distributed in the hope that it will be useful, 14 # but WITHOUT ANY WARRANTY; without even the implied warranty of 15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 # GNU Affero General Public License for more details. 17 # 18 # You should have received a copy of the GNU Affero General Public License 19 # along with this program. If not, see <http://www.gnu.org/licenses/>. 20 # 21 ############################################################################## 22 23 from openerp.osv import orm, fields 24 25 26 class sale_order(orm.Model): 27 _inherit = "sale.order" 28 29 _columns = { 30 'payment_mode_id': fields.many2one( 31 'payment.mode', 'Payment Mode'), 32 } 33 34 def onchange_partner_id(self, cr, uid, ids, part, context=None): 35 res = super(sale_order, self).onchange_partner_id( 36 cr, uid, ids, part, context=context) 37 if part: 38 partner = self.pool['res.partner'].browse( 39 cr, uid, part, context=context) 40 res['value']['payment_mode_id'] = \ 41 partner.customer_payment_mode.id or False, 42 else: 43 res['value']['payment_mode_id'] = False 44 return res 45 46 def _prepare_invoice(self, cr, uid, order, lines, context=None): 47 """Copy bank partner from sale order to invoice""" 48 invoice_vals = super(sale_order, self)._prepare_invoice( 49 cr, uid, order, lines, context=context) 50 invoice_vals.update({ 51 'payment_mode_id': order.payment_mode_id.id or False, 52 'partner_bank_id': order.payment_mode_id and 53 order.payment_mode_id.bank_id.id or False, 54 }) 55 return invoice_vals 56 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/account_payment_sale/model/sale.py b/account_payment_sale/model/sale.py --- a/account_payment_sale/model/sale.py +++ b/account_payment_sale/model/sale.py @@ -38,7 +38,7 @@ partner = self.pool['res.partner'].browse( cr, uid, part, context=context) res['value']['payment_mode_id'] = \ - partner.customer_payment_mode.id or False, + partner.customer_payment_mode.id or False else: res['value']['payment_mode_id'] = False return res
{"golden_diff": "diff --git a/account_payment_sale/model/sale.py b/account_payment_sale/model/sale.py\n--- a/account_payment_sale/model/sale.py\n+++ b/account_payment_sale/model/sale.py\n@@ -38,7 +38,7 @@\n partner = self.pool['res.partner'].browse(\n cr, uid, part, context=context)\n res['value']['payment_mode_id'] = \\\n- partner.customer_payment_mode.id or False,\n+ partner.customer_payment_mode.id or False\n else:\n res['value']['payment_mode_id'] = False\n return res\n", "issue": "Code error in account_payment_sale\nI'm reporting a bug in my own code : in the 7.0 branch, in account_payment_sale/model/sale.py line 41, we have the following code:\n\n partner.customer_payment_mode.id or False,\n\nWe should NOT have a coma at the end of this line. When we run the module in Odoo 8.0, it will crash when creating a sale order via a call to create because addons/sale/sale.py in odoo 8.0 silently plays the on_change and then it will try to create {\"payment_mode_id: (False,)}, when leads to a crash\n\n", "before_files": [{"content": "# -*- encoding: utf-8 -*-\n##############################################################################\n#\n# Account Payment Sale module for OpenERP\n# Copyright (C) 2014 Akretion (http://www.akretion.com)\n# @author Alexis de Lattre <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\n\nfrom openerp.osv import orm, fields\n\n\nclass sale_order(orm.Model):\n _inherit = \"sale.order\"\n\n _columns = {\n 'payment_mode_id': fields.many2one(\n 'payment.mode', 'Payment Mode'),\n }\n\n def onchange_partner_id(self, cr, uid, ids, part, context=None):\n res = super(sale_order, self).onchange_partner_id(\n cr, uid, ids, part, context=context)\n if part:\n partner = self.pool['res.partner'].browse(\n cr, uid, part, context=context)\n res['value']['payment_mode_id'] = \\\n partner.customer_payment_mode.id or False,\n else:\n res['value']['payment_mode_id'] = False\n return res\n\n def _prepare_invoice(self, cr, uid, order, lines, context=None):\n \"\"\"Copy bank partner from sale order to invoice\"\"\"\n invoice_vals = super(sale_order, self)._prepare_invoice(\n cr, uid, order, lines, context=context)\n invoice_vals.update({\n 'payment_mode_id': order.payment_mode_id.id or False,\n 'partner_bank_id': order.payment_mode_id and\n order.payment_mode_id.bank_id.id or False,\n })\n return invoice_vals\n", "path": "account_payment_sale/model/sale.py"}], "after_files": [{"content": "# -*- encoding: utf-8 -*-\n##############################################################################\n#\n# Account Payment Sale module for OpenERP\n# Copyright (C) 2014 Akretion (http://www.akretion.com)\n# @author Alexis de Lattre <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\n\nfrom openerp.osv import orm, fields\n\n\nclass sale_order(orm.Model):\n _inherit = \"sale.order\"\n\n _columns = {\n 'payment_mode_id': fields.many2one(\n 'payment.mode', 'Payment Mode'),\n }\n\n def onchange_partner_id(self, cr, uid, ids, part, context=None):\n res = super(sale_order, self).onchange_partner_id(\n cr, uid, ids, part, context=context)\n if part:\n partner = self.pool['res.partner'].browse(\n cr, uid, part, context=context)\n res['value']['payment_mode_id'] = \\\n partner.customer_payment_mode.id or False\n else:\n res['value']['payment_mode_id'] = False\n return res\n\n def _prepare_invoice(self, cr, uid, order, lines, context=None):\n \"\"\"Copy bank partner from sale order to invoice\"\"\"\n invoice_vals = super(sale_order, self)._prepare_invoice(\n cr, uid, order, lines, context=context)\n invoice_vals.update({\n 'payment_mode_id': order.payment_mode_id.id or False,\n 'partner_bank_id': order.payment_mode_id and\n order.payment_mode_id.bank_id.id or False,\n })\n return invoice_vals\n", "path": "account_payment_sale/model/sale.py"}]}
990
123
gh_patches_debug_5448
rasdani/github-patches
git_diff
kserve__kserve-704
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- storage-initialiser fails to copy model from S3 Upon using a Secret and Service account that look like this: ``` apiVersion: v1 metadata: name: kfserving-aws-secret namespace: {namespace} annotations: serving.kubeflow.org/s3-endpoint: s3.eu-east-1.amazonaws.com serving.kubeflow.org/s3-usehttps: "1" serving.kubeflow.org/s3-verifyssl: "1" serving.kubeflow.org/s3-region: eu-east-1 data: AWS_ACCESS_KEY_ID: {access_key_base64} AWS_SECRET_ACCESS_KEY: {secret_key_base64} kind: Secret type: Opaque --- apiVersion: v1 kind: ServiceAccount metadata: name: kfserving-sa namespace: {namespace} secrets: - name: kfserving-aws-secret ``` ...and an InferenceService like this: ``` apiVersion: "serving.kubeflow.org/v1alpha2" kind: "InferenceService" metadata: name: {model_name} namespace: {namespace} spec: default: predictor: serviceAccountName: kfserving-sa tensorflow: storageUri: {model_base_path} ``` ...the model fails to serve, with the `storage-initialiser` pod throwing the following error: ``` [I 200226 14:09:42 initializer-entrypoint:13] Initializing, args: src_uri [s3://at-kubeflow-training/tf-job/ksc-ebce76a1] dest_path[ [/mnt/models] [I 200226 14:09:42 storage:35] Copying contents of s3://at-kubeflow-training/tf-job/ksc-ebce76a1 to local Traceback (most recent call last): File "/storage-initializer/scripts/initializer-entrypoint", line 14, in <module> kfserving.Storage.download(src_uri, dest_path) File "/usr/local/lib/python3.7/site-packages/kfserving/storage.py", line 50, in download Storage._download_s3(uri, out_dir) File "/usr/local/lib/python3.7/site-packages/kfserving/storage.py", line 65, in _download_s3 client = Storage._create_minio_client() File "/usr/local/lib/python3.7/site-packages/kfserving/storage.py", line 217, in _create_minio_client secure=use_ssl) File "/usr/local/lib/python3.7/site-packages/minio/api.py", line 150, in __init__ is_valid_endpoint(endpoint) File "/usr/local/lib/python3.7/site-packages/minio/helpers.py", line 301, in is_valid_endpoint if hostname[-1] == '.': IndexError: string index out of range ``` This was using Kubeflow 1.0.0RC4, KFServing 0.2.2 and KNative 0.11.1 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `python/kfserving/kfserving/constants/constants.py` Content: ``` 1 # Copyright 2020 kubeflow.org. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import os 16 17 # KFServing K8S constants 18 KFSERVING_GROUP = 'serving.kubeflow.org' 19 KFSERVING_KIND = 'InferenceService' 20 KFSERVING_PLURAL = 'inferenceservices' 21 KFSERVING_VERSION = os.environ.get('KFSERVING_VERSION', 'v1alpha2') 22 KFSERVING_API_VERSION = KFSERVING_GROUP + '/' + KFSERVING_VERSION 23 24 KFSERVING_LOGLEVEL = os.environ.get('KFSERVING_LOGLEVEL', 'INFO').upper() 25 26 # INFERENCESERVICE credentials common constants 27 INFERENCESERVICE_CONFIG_MAP_NAME = 'inferenceservice-config' 28 INFERENCESERVICE_SYSTEM_NAMESPACE = 'kfserving-system' 29 DEFAULT_SECRET_NAME = "kfserving-secret-" 30 DEFAULT_SA_NAME = "kfserving-service-credentials" 31 32 # S3 credentials constants 33 S3_ACCESS_KEY_ID_DEFAULT_NAME = "awsAccessKeyID" 34 S3_SECRET_ACCESS_KEY_DEFAULT_NAME = "awsSecretAccessKey" 35 S3_DEFAULT_CREDS_FILE = '~/.aws/credentials' 36 37 # GCS credentials constants 38 GCS_CREDS_FILE_DEFAULT_NAME = 'gcloud-application-credentials.json' 39 GCS_DEFAULT_CREDS_FILE = '~/.config/gcloud/application_default_credentials.json' 40 41 # Azure credentials constants 42 AZ_DEFAULT_CREDS_FILE = '~/.azure/azure_credentials.json' 43 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/python/kfserving/kfserving/constants/constants.py b/python/kfserving/kfserving/constants/constants.py --- a/python/kfserving/kfserving/constants/constants.py +++ b/python/kfserving/kfserving/constants/constants.py @@ -30,8 +30,8 @@ DEFAULT_SA_NAME = "kfserving-service-credentials" # S3 credentials constants -S3_ACCESS_KEY_ID_DEFAULT_NAME = "awsAccessKeyID" -S3_SECRET_ACCESS_KEY_DEFAULT_NAME = "awsSecretAccessKey" +S3_ACCESS_KEY_ID_DEFAULT_NAME = "AWS_ACCESS_KEY_ID" +S3_SECRET_ACCESS_KEY_DEFAULT_NAME = "AWS_SECRET_ACCESS_KEY" S3_DEFAULT_CREDS_FILE = '~/.aws/credentials' # GCS credentials constants
{"golden_diff": "diff --git a/python/kfserving/kfserving/constants/constants.py b/python/kfserving/kfserving/constants/constants.py\n--- a/python/kfserving/kfserving/constants/constants.py\n+++ b/python/kfserving/kfserving/constants/constants.py\n@@ -30,8 +30,8 @@\n DEFAULT_SA_NAME = \"kfserving-service-credentials\"\n \n # S3 credentials constants\n-S3_ACCESS_KEY_ID_DEFAULT_NAME = \"awsAccessKeyID\"\n-S3_SECRET_ACCESS_KEY_DEFAULT_NAME = \"awsSecretAccessKey\"\n+S3_ACCESS_KEY_ID_DEFAULT_NAME = \"AWS_ACCESS_KEY_ID\"\n+S3_SECRET_ACCESS_KEY_DEFAULT_NAME = \"AWS_SECRET_ACCESS_KEY\"\n S3_DEFAULT_CREDS_FILE = '~/.aws/credentials'\n \n # GCS credentials constants\n", "issue": "storage-initialiser fails to copy model from S3\nUpon using a Secret and Service account that look like this:\r\n\r\n```\r\napiVersion: v1\r\nmetadata:\r\n name: kfserving-aws-secret\r\n namespace: {namespace}\r\n annotations:\r\n serving.kubeflow.org/s3-endpoint: s3.eu-east-1.amazonaws.com\r\n serving.kubeflow.org/s3-usehttps: \"1\"\r\n serving.kubeflow.org/s3-verifyssl: \"1\"\r\n serving.kubeflow.org/s3-region: eu-east-1\r\ndata:\r\n AWS_ACCESS_KEY_ID: {access_key_base64}\r\n AWS_SECRET_ACCESS_KEY: {secret_key_base64}\r\nkind: Secret\r\ntype: Opaque\r\n---\r\napiVersion: v1\r\nkind: ServiceAccount\r\nmetadata:\r\n name: kfserving-sa\r\n namespace: {namespace}\r\nsecrets:\r\n - name: kfserving-aws-secret \r\n```\r\n...and an InferenceService like this:\r\n\r\n```\r\napiVersion: \"serving.kubeflow.org/v1alpha2\"\r\nkind: \"InferenceService\"\r\nmetadata:\r\n name: {model_name}\r\n namespace: {namespace}\r\nspec:\r\n default:\r\n predictor:\r\n serviceAccountName: kfserving-sa\r\n tensorflow:\r\n storageUri: {model_base_path} \r\n```\r\n\r\n...the model fails to serve, with the `storage-initialiser` pod throwing the following error:\r\n\r\n```\r\n[I 200226 14:09:42 initializer-entrypoint:13] Initializing, args: src_uri [s3://at-kubeflow-training/tf-job/ksc-ebce76a1] dest_path[ [/mnt/models]\r\n[I 200226 14:09:42 storage:35] Copying contents of s3://at-kubeflow-training/tf-job/ksc-ebce76a1 to local\r\nTraceback (most recent call last):\r\n File \"/storage-initializer/scripts/initializer-entrypoint\", line 14, in <module>\r\n kfserving.Storage.download(src_uri, dest_path)\r\n File \"/usr/local/lib/python3.7/site-packages/kfserving/storage.py\", line 50, in download\r\n Storage._download_s3(uri, out_dir)\r\n File \"/usr/local/lib/python3.7/site-packages/kfserving/storage.py\", line 65, in _download_s3\r\n client = Storage._create_minio_client()\r\n File \"/usr/local/lib/python3.7/site-packages/kfserving/storage.py\", line 217, in _create_minio_client\r\n secure=use_ssl)\r\n File \"/usr/local/lib/python3.7/site-packages/minio/api.py\", line 150, in __init__\r\n is_valid_endpoint(endpoint)\r\n File \"/usr/local/lib/python3.7/site-packages/minio/helpers.py\", line 301, in is_valid_endpoint\r\n if hostname[-1] == '.':\r\nIndexError: string index out of range\r\n```\r\n\r\nThis was using Kubeflow 1.0.0RC4, KFServing 0.2.2 and KNative 0.11.1\n", "before_files": [{"content": "# Copyright 2020 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\n# KFServing K8S constants\nKFSERVING_GROUP = 'serving.kubeflow.org'\nKFSERVING_KIND = 'InferenceService'\nKFSERVING_PLURAL = 'inferenceservices'\nKFSERVING_VERSION = os.environ.get('KFSERVING_VERSION', 'v1alpha2')\nKFSERVING_API_VERSION = KFSERVING_GROUP + '/' + KFSERVING_VERSION\n\nKFSERVING_LOGLEVEL = os.environ.get('KFSERVING_LOGLEVEL', 'INFO').upper()\n\n# INFERENCESERVICE credentials common constants\nINFERENCESERVICE_CONFIG_MAP_NAME = 'inferenceservice-config'\nINFERENCESERVICE_SYSTEM_NAMESPACE = 'kfserving-system'\nDEFAULT_SECRET_NAME = \"kfserving-secret-\"\nDEFAULT_SA_NAME = \"kfserving-service-credentials\"\n\n# S3 credentials constants\nS3_ACCESS_KEY_ID_DEFAULT_NAME = \"awsAccessKeyID\"\nS3_SECRET_ACCESS_KEY_DEFAULT_NAME = \"awsSecretAccessKey\"\nS3_DEFAULT_CREDS_FILE = '~/.aws/credentials'\n\n# GCS credentials constants\nGCS_CREDS_FILE_DEFAULT_NAME = 'gcloud-application-credentials.json'\nGCS_DEFAULT_CREDS_FILE = '~/.config/gcloud/application_default_credentials.json'\n\n# Azure credentials constants\nAZ_DEFAULT_CREDS_FILE = '~/.azure/azure_credentials.json'\n", "path": "python/kfserving/kfserving/constants/constants.py"}], "after_files": [{"content": "# Copyright 2020 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\n# KFServing K8S constants\nKFSERVING_GROUP = 'serving.kubeflow.org'\nKFSERVING_KIND = 'InferenceService'\nKFSERVING_PLURAL = 'inferenceservices'\nKFSERVING_VERSION = os.environ.get('KFSERVING_VERSION', 'v1alpha2')\nKFSERVING_API_VERSION = KFSERVING_GROUP + '/' + KFSERVING_VERSION\n\nKFSERVING_LOGLEVEL = os.environ.get('KFSERVING_LOGLEVEL', 'INFO').upper()\n\n# INFERENCESERVICE credentials common constants\nINFERENCESERVICE_CONFIG_MAP_NAME = 'inferenceservice-config'\nINFERENCESERVICE_SYSTEM_NAMESPACE = 'kfserving-system'\nDEFAULT_SECRET_NAME = \"kfserving-secret-\"\nDEFAULT_SA_NAME = \"kfserving-service-credentials\"\n\n# S3 credentials constants\nS3_ACCESS_KEY_ID_DEFAULT_NAME = \"AWS_ACCESS_KEY_ID\"\nS3_SECRET_ACCESS_KEY_DEFAULT_NAME = \"AWS_SECRET_ACCESS_KEY\"\nS3_DEFAULT_CREDS_FILE = '~/.aws/credentials'\n\n# GCS credentials constants\nGCS_CREDS_FILE_DEFAULT_NAME = 'gcloud-application-credentials.json'\nGCS_DEFAULT_CREDS_FILE = '~/.config/gcloud/application_default_credentials.json'\n\n# Azure credentials constants\nAZ_DEFAULT_CREDS_FILE = '~/.azure/azure_credentials.json'\n", "path": "python/kfserving/kfserving/constants/constants.py"}]}
1,448
159
gh_patches_debug_11237
rasdani/github-patches
git_diff
Mailu__Mailu-1183
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Mailu 1.7 : Issue when moving mail to the Junk folder When I move a mail to the Junk Folder, i see the following in the logs: imap_1 | Sep 22 16:51:26 imap: Error: cannot stat file mailu imap_1 | Sep 22 16:51:26 imap: Error: cannot stat file mailu imap_1 | Sep 22 16:51:26 imap: Error: tee: /dev/fd/63: I/O error imap_1 | Sep 22 16:51:26 imap([email protected])<27629><hn93GCeTsresEgAC>: Info: program exec:/conf/bin/spam (27655): Terminated with non-zero exit code 1 imap_1 | Sep 22 16:51:26 imap([email protected])<27629><hn93GCeTsresEgAC>: Info: sieve: left message in mailbox 'Junk' --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `core/dovecot/start.py` Content: ``` 1 #!/usr/bin/python3 2 3 import os 4 import glob 5 import multiprocessing 6 import logging as log 7 import sys 8 9 from podop import run_server 10 from socrate import system, conf 11 12 log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING")) 13 14 def start_podop(): 15 os.setuid(8) 16 url = "http://" + os.environ["ADMIN_ADDRESS"] + "/internal/dovecot/§" 17 run_server(0, "dovecot", "/tmp/podop.socket", [ 18 ("quota", "url", url ), 19 ("auth", "url", url), 20 ("sieve", "url", url), 21 ]) 22 23 # Actual startup script 24 25 os.environ["FRONT_ADDRESS"] = system.get_host_address_from_environment("FRONT", "front") 26 os.environ["REDIS_ADDRESS"] = system.get_host_address_from_environment("REDIS", "redis") 27 os.environ["ADMIN_ADDRESS"] = system.get_host_address_from_environment("ADMIN", "admin") 28 os.environ["ANTISPAM_ADDRESS"] = system.get_host_address_from_environment("ANTISPAM", "antispam:11334") 29 if os.environ["WEBMAIL"] != "none": 30 os.environ["WEBMAIL_ADDRESS"] = system.get_host_address_from_environment("WEBMAIL", "webmail") 31 32 for dovecot_file in glob.glob("/conf/*.conf"): 33 conf.jinja(dovecot_file, os.environ, os.path.join("/etc/dovecot", os.path.basename(dovecot_file))) 34 35 # Run Podop, then postfix 36 multiprocessing.Process(target=start_podop).start() 37 os.system("chown mail:mail /mail") 38 os.system("chown -R mail:mail /var/lib/dovecot /conf") 39 os.execv("/usr/sbin/dovecot", ["dovecot", "-c", "/etc/dovecot/dovecot.conf", "-F"]) 40 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/core/dovecot/start.py b/core/dovecot/start.py --- a/core/dovecot/start.py +++ b/core/dovecot/start.py @@ -32,6 +32,12 @@ for dovecot_file in glob.glob("/conf/*.conf"): conf.jinja(dovecot_file, os.environ, os.path.join("/etc/dovecot", os.path.basename(dovecot_file))) +os.makedirs("/conf/bin", exist_ok=True) +for script_file in glob.glob("/conf/*.script"): + out_file = os.path.join("/conf/bin/", os.path.basename(script_file).replace('.script','')) + conf.jinja(script_file, os.environ, out_file) + os.chmod(out_file, 0o555) + # Run Podop, then postfix multiprocessing.Process(target=start_podop).start() os.system("chown mail:mail /mail")
{"golden_diff": "diff --git a/core/dovecot/start.py b/core/dovecot/start.py\n--- a/core/dovecot/start.py\n+++ b/core/dovecot/start.py\n@@ -32,6 +32,12 @@\n for dovecot_file in glob.glob(\"/conf/*.conf\"):\n conf.jinja(dovecot_file, os.environ, os.path.join(\"/etc/dovecot\", os.path.basename(dovecot_file)))\n \n+os.makedirs(\"/conf/bin\", exist_ok=True)\n+for script_file in glob.glob(\"/conf/*.script\"):\n+ out_file = os.path.join(\"/conf/bin/\", os.path.basename(script_file).replace('.script',''))\n+ conf.jinja(script_file, os.environ, out_file)\n+ os.chmod(out_file, 0o555)\n+\n # Run Podop, then postfix\n multiprocessing.Process(target=start_podop).start()\n os.system(\"chown mail:mail /mail\")\n", "issue": "Mailu 1.7 : Issue when moving mail to the Junk folder\nWhen I move a mail to the Junk Folder, i see the following in the logs:\r\nimap_1 | Sep 22 16:51:26 imap: Error: cannot stat file mailu\r\nimap_1 | Sep 22 16:51:26 imap: Error: cannot stat file mailu\r\nimap_1 | Sep 22 16:51:26 imap: Error: tee: /dev/fd/63: I/O error\r\nimap_1 | Sep 22 16:51:26 imap([email protected])<27629><hn93GCeTsresEgAC>: Info: program exec:/conf/bin/spam (27655): Terminated with non-zero exit code 1\r\nimap_1 | Sep 22 16:51:26 imap([email protected])<27629><hn93GCeTsresEgAC>: Info: sieve: left message in mailbox 'Junk'\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport os\nimport glob\nimport multiprocessing\nimport logging as log\nimport sys\n\nfrom podop import run_server\nfrom socrate import system, conf\n\nlog.basicConfig(stream=sys.stderr, level=os.environ.get(\"LOG_LEVEL\", \"WARNING\"))\n\ndef start_podop():\n os.setuid(8)\n url = \"http://\" + os.environ[\"ADMIN_ADDRESS\"] + \"/internal/dovecot/\u00a7\"\n run_server(0, \"dovecot\", \"/tmp/podop.socket\", [\n\t\t(\"quota\", \"url\", url ),\n\t\t(\"auth\", \"url\", url),\n\t\t(\"sieve\", \"url\", url),\n ])\n\n# Actual startup script\n\nos.environ[\"FRONT_ADDRESS\"] = system.get_host_address_from_environment(\"FRONT\", \"front\")\nos.environ[\"REDIS_ADDRESS\"] = system.get_host_address_from_environment(\"REDIS\", \"redis\")\nos.environ[\"ADMIN_ADDRESS\"] = system.get_host_address_from_environment(\"ADMIN\", \"admin\")\nos.environ[\"ANTISPAM_ADDRESS\"] = system.get_host_address_from_environment(\"ANTISPAM\", \"antispam:11334\")\nif os.environ[\"WEBMAIL\"] != \"none\":\n os.environ[\"WEBMAIL_ADDRESS\"] = system.get_host_address_from_environment(\"WEBMAIL\", \"webmail\")\n\nfor dovecot_file in glob.glob(\"/conf/*.conf\"):\n conf.jinja(dovecot_file, os.environ, os.path.join(\"/etc/dovecot\", os.path.basename(dovecot_file)))\n\n# Run Podop, then postfix\nmultiprocessing.Process(target=start_podop).start()\nos.system(\"chown mail:mail /mail\")\nos.system(\"chown -R mail:mail /var/lib/dovecot /conf\")\nos.execv(\"/usr/sbin/dovecot\", [\"dovecot\", \"-c\", \"/etc/dovecot/dovecot.conf\", \"-F\"])\n", "path": "core/dovecot/start.py"}], "after_files": [{"content": "#!/usr/bin/python3\n\nimport os\nimport glob\nimport multiprocessing\nimport logging as log\nimport sys\n\nfrom podop import run_server\nfrom socrate import system, conf\n\nlog.basicConfig(stream=sys.stderr, level=os.environ.get(\"LOG_LEVEL\", \"WARNING\"))\n\ndef start_podop():\n os.setuid(8)\n url = \"http://\" + os.environ[\"ADMIN_ADDRESS\"] + \"/internal/dovecot/\u00a7\"\n run_server(0, \"dovecot\", \"/tmp/podop.socket\", [\n\t\t(\"quota\", \"url\", url ),\n\t\t(\"auth\", \"url\", url),\n\t\t(\"sieve\", \"url\", url),\n ])\n\n# Actual startup script\n\nos.environ[\"FRONT_ADDRESS\"] = system.get_host_address_from_environment(\"FRONT\", \"front\")\nos.environ[\"REDIS_ADDRESS\"] = system.get_host_address_from_environment(\"REDIS\", \"redis\")\nos.environ[\"ADMIN_ADDRESS\"] = system.get_host_address_from_environment(\"ADMIN\", \"admin\")\nos.environ[\"ANTISPAM_ADDRESS\"] = system.get_host_address_from_environment(\"ANTISPAM\", \"antispam:11334\")\nif os.environ[\"WEBMAIL\"] != \"none\":\n os.environ[\"WEBMAIL_ADDRESS\"] = system.get_host_address_from_environment(\"WEBMAIL\", \"webmail\")\n\nfor dovecot_file in glob.glob(\"/conf/*.conf\"):\n conf.jinja(dovecot_file, os.environ, os.path.join(\"/etc/dovecot\", os.path.basename(dovecot_file)))\n\nos.makedirs(\"/conf/bin\", exist_ok=True)\nfor script_file in glob.glob(\"/conf/*.script\"):\n out_file = os.path.join(\"/conf/bin/\", os.path.basename(script_file).replace('.script',''))\n conf.jinja(script_file, os.environ, out_file)\n os.chmod(out_file, 0o555)\n\n# Run Podop, then postfix\nmultiprocessing.Process(target=start_podop).start()\nos.system(\"chown mail:mail /mail\")\nos.system(\"chown -R mail:mail /var/lib/dovecot /conf\")\nos.execv(\"/usr/sbin/dovecot\", [\"dovecot\", \"-c\", \"/etc/dovecot/dovecot.conf\", \"-F\"])\n", "path": "core/dovecot/start.py"}]}
980
196
gh_patches_debug_36986
rasdani/github-patches
git_diff
encode__httpx-167
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- non-ASCII characters filename in multipart/form-data I tried to send a file with russian letters in the file name, and `httpx` escaped all non-ASCII characters (as I understood it the way specified in [RFC 7578](https://tools.ietf.org/html/rfc7578)). But this is different from `requests` [behavior](https://github.com/psf/requests/blob/589a82256759018a7e5e289302898dae32544949/requests/models.py#L110), where the file name is simply written as is (using [`RequestField.make_multipart`](https://github.com/urllib3/urllib3/blob/f0d9ebc41e51c4c4c9990b1eed02d297fd1b20d8/src/urllib3/fields.py#L248) from `urllib3`). Maybe `httpx` should just change [`FileField.render_headers`](https://github.com/encode/httpx/blob/3ba2e8c328f05b07ee06ae40f99dce7fe1e8292c/httpx/multipart.py#L57), so as not to escape the file name and instead just put it as bytes? ```diff def render_headers(self) -> bytes: - name = quote(self.name, encoding="utf-8").encode("ascii") - filename = quote(self.filename, encoding="utf-8").encode("ascii") - content_type = self.content_type.encode("ascii") return b"".join( [ b'Content-Disposition: form-data; name="', - name, + self.name.encode(), b'"; filename="', - filename, + self.filename.encode(), b'"\r\n', b"Content-Type: ", - content_type, + self.content_type.encode(), b"\r\n", ] ) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `httpx/multipart.py` Content: ``` 1 import binascii 2 import mimetypes 3 import os 4 import typing 5 from io import BytesIO 6 from urllib.parse import quote 7 8 9 class Field: 10 def render_headers(self) -> bytes: 11 raise NotImplementedError() # pragma: nocover 12 13 def render_data(self) -> bytes: 14 raise NotImplementedError() # pragma: nocover 15 16 17 class DataField(Field): 18 def __init__(self, name: str, value: typing.Union[str, bytes]) -> None: 19 if not isinstance(name, str): 20 raise TypeError("Invalid type for name. Expected str.") 21 if not isinstance(value, (str, bytes)): 22 raise TypeError("Invalid type for value. Expected str or bytes.") 23 self.name = name 24 self.value = value 25 26 def render_headers(self) -> bytes: 27 name = quote(self.name, encoding="utf-8").encode("ascii") 28 return b"".join( 29 [b'Content-Disposition: form-data; name="', name, b'"\r\n' b"\r\n"] 30 ) 31 32 def render_data(self) -> bytes: 33 return ( 34 self.value if isinstance(self.value, bytes) else self.value.encode("utf-8") 35 ) 36 37 38 class FileField(Field): 39 def __init__( 40 self, name: str, value: typing.Union[typing.IO[typing.AnyStr], tuple] 41 ) -> None: 42 self.name = name 43 if not isinstance(value, tuple): 44 self.filename = os.path.basename(getattr(value, "name", "upload")) 45 self.file = value # type: typing.Union[typing.IO[str], typing.IO[bytes]] 46 self.content_type = self.guess_content_type() 47 else: 48 self.filename = value[0] 49 self.file = value[1] 50 self.content_type = ( 51 value[2] if len(value) > 2 else self.guess_content_type() 52 ) 53 54 def guess_content_type(self) -> str: 55 return mimetypes.guess_type(self.filename)[0] or "application/octet-stream" 56 57 def render_headers(self) -> bytes: 58 name = quote(self.name, encoding="utf-8").encode("ascii") 59 filename = quote(self.filename, encoding="utf-8").encode("ascii") 60 content_type = self.content_type.encode("ascii") 61 return b"".join( 62 [ 63 b'Content-Disposition: form-data; name="', 64 name, 65 b'"; filename="', 66 filename, 67 b'"\r\n', 68 b"Content-Type: ", 69 content_type, 70 b"\r\n", 71 b"\r\n", 72 ] 73 ) 74 75 def render_data(self) -> bytes: 76 content = self.file.read() 77 return content.encode("utf-8") if isinstance(content, str) else content 78 79 80 def iter_fields(data: dict, files: dict) -> typing.Iterator[Field]: 81 for name, value in data.items(): 82 if isinstance(value, (list, dict)): 83 for item in value: 84 yield DataField(name=name, value=item) 85 else: 86 yield DataField(name=name, value=value) 87 88 for name, value in files.items(): 89 yield FileField(name=name, value=value) 90 91 92 def multipart_encode(data: dict, files: dict) -> typing.Tuple[bytes, str]: 93 body = BytesIO() 94 boundary = binascii.hexlify(os.urandom(16)) 95 96 for field in iter_fields(data, files): 97 body.write(b"--%s\r\n" % boundary) 98 body.write(field.render_headers()) 99 body.write(field.render_data()) 100 body.write(b"\r\n") 101 102 body.write(b"--%s--\r\n" % boundary) 103 104 content_type = "multipart/form-data; boundary=%s" % boundary.decode("ascii") 105 106 return body.getvalue(), content_type 107 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/httpx/multipart.py b/httpx/multipart.py --- a/httpx/multipart.py +++ b/httpx/multipart.py @@ -1,9 +1,17 @@ import binascii import mimetypes import os +import re import typing from io import BytesIO -from urllib.parse import quote + +_HTML5_FORM_ENCODING_REPLACEMENTS = {'"': "%22", "\\": "\\\\"} +_HTML5_FORM_ENCODING_REPLACEMENTS.update( + {chr(c): "%{:02X}".format(c) for c in range(0x00, 0x1F + 1) if c != 0x1B} +) +_HTML5_FORM_ENCODING_RE = re.compile( + r"|".join([re.escape(c) for c in _HTML5_FORM_ENCODING_REPLACEMENTS.keys()]) +) class Field: @@ -24,10 +32,8 @@ self.value = value def render_headers(self) -> bytes: - name = quote(self.name, encoding="utf-8").encode("ascii") - return b"".join( - [b'Content-Disposition: form-data; name="', name, b'"\r\n' b"\r\n"] - ) + name = _format_param("name", self.name) + return b"".join([b"Content-Disposition: form-data; ", name, b"\r\n\r\n"]) def render_data(self) -> bytes: return ( @@ -55,20 +61,18 @@ return mimetypes.guess_type(self.filename)[0] or "application/octet-stream" def render_headers(self) -> bytes: - name = quote(self.name, encoding="utf-8").encode("ascii") - filename = quote(self.filename, encoding="utf-8").encode("ascii") - content_type = self.content_type.encode("ascii") + name = _format_param("name", self.name) + filename = _format_param("filename", self.filename) + content_type = self.content_type.encode() return b"".join( [ - b'Content-Disposition: form-data; name="', + b"Content-Disposition: form-data; ", name, - b'"; filename="', + b"; ", filename, - b'"\r\n', - b"Content-Type: ", + b"\r\nContent-Type: ", content_type, - b"\r\n", - b"\r\n", + b"\r\n\r\n", ] ) @@ -104,3 +108,14 @@ content_type = "multipart/form-data; boundary=%s" % boundary.decode("ascii") return body.getvalue(), content_type + + +def _format_param(name: str, value: typing.Union[str, bytes]) -> bytes: + if isinstance(value, bytes): + value = value.decode() + + def replacer(match: typing.Match[str]) -> str: + return _HTML5_FORM_ENCODING_REPLACEMENTS[match.group(0)] + + value = _HTML5_FORM_ENCODING_RE.sub(replacer, value) + return f'{name}="{value}"'.encode()
{"golden_diff": "diff --git a/httpx/multipart.py b/httpx/multipart.py\n--- a/httpx/multipart.py\n+++ b/httpx/multipart.py\n@@ -1,9 +1,17 @@\n import binascii\n import mimetypes\n import os\n+import re\n import typing\n from io import BytesIO\n-from urllib.parse import quote\n+\n+_HTML5_FORM_ENCODING_REPLACEMENTS = {'\"': \"%22\", \"\\\\\": \"\\\\\\\\\"}\n+_HTML5_FORM_ENCODING_REPLACEMENTS.update(\n+ {chr(c): \"%{:02X}\".format(c) for c in range(0x00, 0x1F + 1) if c != 0x1B}\n+)\n+_HTML5_FORM_ENCODING_RE = re.compile(\n+ r\"|\".join([re.escape(c) for c in _HTML5_FORM_ENCODING_REPLACEMENTS.keys()])\n+)\n \n \n class Field:\n@@ -24,10 +32,8 @@\n self.value = value\n \n def render_headers(self) -> bytes:\n- name = quote(self.name, encoding=\"utf-8\").encode(\"ascii\")\n- return b\"\".join(\n- [b'Content-Disposition: form-data; name=\"', name, b'\"\\r\\n' b\"\\r\\n\"]\n- )\n+ name = _format_param(\"name\", self.name)\n+ return b\"\".join([b\"Content-Disposition: form-data; \", name, b\"\\r\\n\\r\\n\"])\n \n def render_data(self) -> bytes:\n return (\n@@ -55,20 +61,18 @@\n return mimetypes.guess_type(self.filename)[0] or \"application/octet-stream\"\n \n def render_headers(self) -> bytes:\n- name = quote(self.name, encoding=\"utf-8\").encode(\"ascii\")\n- filename = quote(self.filename, encoding=\"utf-8\").encode(\"ascii\")\n- content_type = self.content_type.encode(\"ascii\")\n+ name = _format_param(\"name\", self.name)\n+ filename = _format_param(\"filename\", self.filename)\n+ content_type = self.content_type.encode()\n return b\"\".join(\n [\n- b'Content-Disposition: form-data; name=\"',\n+ b\"Content-Disposition: form-data; \",\n name,\n- b'\"; filename=\"',\n+ b\"; \",\n filename,\n- b'\"\\r\\n',\n- b\"Content-Type: \",\n+ b\"\\r\\nContent-Type: \",\n content_type,\n- b\"\\r\\n\",\n- b\"\\r\\n\",\n+ b\"\\r\\n\\r\\n\",\n ]\n )\n \n@@ -104,3 +108,14 @@\n content_type = \"multipart/form-data; boundary=%s\" % boundary.decode(\"ascii\")\n \n return body.getvalue(), content_type\n+\n+\n+def _format_param(name: str, value: typing.Union[str, bytes]) -> bytes:\n+ if isinstance(value, bytes):\n+ value = value.decode()\n+ \n+ def replacer(match: typing.Match[str]) -> str:\n+ return _HTML5_FORM_ENCODING_REPLACEMENTS[match.group(0)]\n+\n+ value = _HTML5_FORM_ENCODING_RE.sub(replacer, value)\n+ return f'{name}=\"{value}\"'.encode()\n", "issue": "non-ASCII characters filename in multipart/form-data\nI tried to send a file with russian letters in the file name, and `httpx` escaped all non-ASCII characters (as I understood it the way specified in [RFC 7578](https://tools.ietf.org/html/rfc7578)). But this is different from `requests` [behavior](https://github.com/psf/requests/blob/589a82256759018a7e5e289302898dae32544949/requests/models.py#L110), where the file name is simply written as is (using [`RequestField.make_multipart`](https://github.com/urllib3/urllib3/blob/f0d9ebc41e51c4c4c9990b1eed02d297fd1b20d8/src/urllib3/fields.py#L248) from `urllib3`). Maybe `httpx` should just change [`FileField.render_headers`](https://github.com/encode/httpx/blob/3ba2e8c328f05b07ee06ae40f99dce7fe1e8292c/httpx/multipart.py#L57), so as not to escape the file name and instead just put it as bytes?\r\n\r\n```diff\r\ndef render_headers(self) -> bytes:\r\n- name = quote(self.name, encoding=\"utf-8\").encode(\"ascii\")\r\n- filename = quote(self.filename, encoding=\"utf-8\").encode(\"ascii\")\r\n- content_type = self.content_type.encode(\"ascii\")\r\n return b\"\".join(\r\n [\r\n b'Content-Disposition: form-data; name=\"',\r\n- name,\r\n+ self.name.encode(),\r\n b'\"; filename=\"',\r\n- filename,\r\n+ self.filename.encode(),\r\n b'\"\\r\\n',\r\n b\"Content-Type: \",\r\n- content_type,\r\n+ self.content_type.encode(),\r\n b\"\\r\\n\",\r\n ]\r\n )\r\n```\n", "before_files": [{"content": "import binascii\nimport mimetypes\nimport os\nimport typing\nfrom io import BytesIO\nfrom urllib.parse import quote\n\n\nclass Field:\n def render_headers(self) -> bytes:\n raise NotImplementedError() # pragma: nocover\n\n def render_data(self) -> bytes:\n raise NotImplementedError() # pragma: nocover\n\n\nclass DataField(Field):\n def __init__(self, name: str, value: typing.Union[str, bytes]) -> None:\n if not isinstance(name, str):\n raise TypeError(\"Invalid type for name. Expected str.\")\n if not isinstance(value, (str, bytes)):\n raise TypeError(\"Invalid type for value. Expected str or bytes.\")\n self.name = name\n self.value = value\n\n def render_headers(self) -> bytes:\n name = quote(self.name, encoding=\"utf-8\").encode(\"ascii\")\n return b\"\".join(\n [b'Content-Disposition: form-data; name=\"', name, b'\"\\r\\n' b\"\\r\\n\"]\n )\n\n def render_data(self) -> bytes:\n return (\n self.value if isinstance(self.value, bytes) else self.value.encode(\"utf-8\")\n )\n\n\nclass FileField(Field):\n def __init__(\n self, name: str, value: typing.Union[typing.IO[typing.AnyStr], tuple]\n ) -> None:\n self.name = name\n if not isinstance(value, tuple):\n self.filename = os.path.basename(getattr(value, \"name\", \"upload\"))\n self.file = value # type: typing.Union[typing.IO[str], typing.IO[bytes]]\n self.content_type = self.guess_content_type()\n else:\n self.filename = value[0]\n self.file = value[1]\n self.content_type = (\n value[2] if len(value) > 2 else self.guess_content_type()\n )\n\n def guess_content_type(self) -> str:\n return mimetypes.guess_type(self.filename)[0] or \"application/octet-stream\"\n\n def render_headers(self) -> bytes:\n name = quote(self.name, encoding=\"utf-8\").encode(\"ascii\")\n filename = quote(self.filename, encoding=\"utf-8\").encode(\"ascii\")\n content_type = self.content_type.encode(\"ascii\")\n return b\"\".join(\n [\n b'Content-Disposition: form-data; name=\"',\n name,\n b'\"; filename=\"',\n filename,\n b'\"\\r\\n',\n b\"Content-Type: \",\n content_type,\n b\"\\r\\n\",\n b\"\\r\\n\",\n ]\n )\n\n def render_data(self) -> bytes:\n content = self.file.read()\n return content.encode(\"utf-8\") if isinstance(content, str) else content\n\n\ndef iter_fields(data: dict, files: dict) -> typing.Iterator[Field]:\n for name, value in data.items():\n if isinstance(value, (list, dict)):\n for item in value:\n yield DataField(name=name, value=item)\n else:\n yield DataField(name=name, value=value)\n\n for name, value in files.items():\n yield FileField(name=name, value=value)\n\n\ndef multipart_encode(data: dict, files: dict) -> typing.Tuple[bytes, str]:\n body = BytesIO()\n boundary = binascii.hexlify(os.urandom(16))\n\n for field in iter_fields(data, files):\n body.write(b\"--%s\\r\\n\" % boundary)\n body.write(field.render_headers())\n body.write(field.render_data())\n body.write(b\"\\r\\n\")\n\n body.write(b\"--%s--\\r\\n\" % boundary)\n\n content_type = \"multipart/form-data; boundary=%s\" % boundary.decode(\"ascii\")\n\n return body.getvalue(), content_type\n", "path": "httpx/multipart.py"}], "after_files": [{"content": "import binascii\nimport mimetypes\nimport os\nimport re\nimport typing\nfrom io import BytesIO\n\n_HTML5_FORM_ENCODING_REPLACEMENTS = {'\"': \"%22\", \"\\\\\": \"\\\\\\\\\"}\n_HTML5_FORM_ENCODING_REPLACEMENTS.update(\n {chr(c): \"%{:02X}\".format(c) for c in range(0x00, 0x1F + 1) if c != 0x1B}\n)\n_HTML5_FORM_ENCODING_RE = re.compile(\n r\"|\".join([re.escape(c) for c in _HTML5_FORM_ENCODING_REPLACEMENTS.keys()])\n)\n\n\nclass Field:\n def render_headers(self) -> bytes:\n raise NotImplementedError() # pragma: nocover\n\n def render_data(self) -> bytes:\n raise NotImplementedError() # pragma: nocover\n\n\nclass DataField(Field):\n def __init__(self, name: str, value: typing.Union[str, bytes]) -> None:\n if not isinstance(name, str):\n raise TypeError(\"Invalid type for name. Expected str.\")\n if not isinstance(value, (str, bytes)):\n raise TypeError(\"Invalid type for value. Expected str or bytes.\")\n self.name = name\n self.value = value\n\n def render_headers(self) -> bytes:\n name = _format_param(\"name\", self.name)\n return b\"\".join([b\"Content-Disposition: form-data; \", name, b\"\\r\\n\\r\\n\"])\n\n def render_data(self) -> bytes:\n return (\n self.value if isinstance(self.value, bytes) else self.value.encode(\"utf-8\")\n )\n\n\nclass FileField(Field):\n def __init__(\n self, name: str, value: typing.Union[typing.IO[typing.AnyStr], tuple]\n ) -> None:\n self.name = name\n if not isinstance(value, tuple):\n self.filename = os.path.basename(getattr(value, \"name\", \"upload\"))\n self.file = value # type: typing.Union[typing.IO[str], typing.IO[bytes]]\n self.content_type = self.guess_content_type()\n else:\n self.filename = value[0]\n self.file = value[1]\n self.content_type = (\n value[2] if len(value) > 2 else self.guess_content_type()\n )\n\n def guess_content_type(self) -> str:\n return mimetypes.guess_type(self.filename)[0] or \"application/octet-stream\"\n\n def render_headers(self) -> bytes:\n name = _format_param(\"name\", self.name)\n filename = _format_param(\"filename\", self.filename)\n content_type = self.content_type.encode()\n return b\"\".join(\n [\n b\"Content-Disposition: form-data; \",\n name,\n b\"; \",\n filename,\n b\"\\r\\nContent-Type: \",\n content_type,\n b\"\\r\\n\\r\\n\",\n ]\n )\n\n def render_data(self) -> bytes:\n content = self.file.read()\n return content.encode(\"utf-8\") if isinstance(content, str) else content\n\n\ndef iter_fields(data: dict, files: dict) -> typing.Iterator[Field]:\n for name, value in data.items():\n if isinstance(value, (list, dict)):\n for item in value:\n yield DataField(name=name, value=item)\n else:\n yield DataField(name=name, value=value)\n\n for name, value in files.items():\n yield FileField(name=name, value=value)\n\n\ndef multipart_encode(data: dict, files: dict) -> typing.Tuple[bytes, str]:\n body = BytesIO()\n boundary = binascii.hexlify(os.urandom(16))\n\n for field in iter_fields(data, files):\n body.write(b\"--%s\\r\\n\" % boundary)\n body.write(field.render_headers())\n body.write(field.render_data())\n body.write(b\"\\r\\n\")\n\n body.write(b\"--%s--\\r\\n\" % boundary)\n\n content_type = \"multipart/form-data; boundary=%s\" % boundary.decode(\"ascii\")\n\n return body.getvalue(), content_type\n\n\ndef _format_param(name: str, value: typing.Union[str, bytes]) -> bytes:\n if isinstance(value, bytes):\n value = value.decode()\n \n def replacer(match: typing.Match[str]) -> str:\n return _HTML5_FORM_ENCODING_REPLACEMENTS[match.group(0)]\n\n value = _HTML5_FORM_ENCODING_RE.sub(replacer, value)\n return f'{name}=\"{value}\"'.encode()\n", "path": "httpx/multipart.py"}]}
1,733
708
gh_patches_debug_20601
rasdani/github-patches
git_diff
LibraryOfCongress__concordia-396
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Refine Contact Us page **What behavior did you observe? Please describe the bug** - The language is very direct. Need to make it more user friendly. - Add content to get to History Hub - Begin with options to self-service in Help Center or History Hub **What is the expected behavior?** As a user, I want to contact a community manager with a pressing question via the Contact Us form so that I can get a swift reply. It should also include: - Friendlier language for the different fields - Contact Us form would autopopulate my email and tell CM what page they are referring to - I should be able to categorize my question in the Contact Us form to best route the question. - Some information telling me about History Hub **Got screenshots? This helps us identify the issue** ![contact](https://user-images.githubusercontent.com/7362915/46110939-4114f780-c1b3-11e8-9504-3e9358f6cfcb.PNG) **Additional context** Add any other context about the problem here. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `concordia/forms.py` Content: ``` 1 from logging import getLogger 2 3 from captcha.fields import CaptchaField 4 from django import forms 5 from django.contrib.auth import get_user_model 6 from django_registration.forms import RegistrationForm 7 8 from .models import TranscriptionStatus 9 10 User = get_user_model() 11 logger = getLogger(__name__) 12 13 14 class UserRegistrationForm(RegistrationForm): 15 newsletterOptIn = forms.BooleanField( 16 label="Newsletter", 17 required=False, 18 help_text="Email me about campaign updates, upcoming events, and new features.", 19 ) 20 21 22 class UserProfileForm(forms.Form): 23 email = forms.CharField( 24 label="Email address", required=True, widget=forms.EmailInput() 25 ) 26 27 def __init__(self, *, request, **kwargs): 28 self.request = request 29 return super().__init__(**kwargs) 30 31 def clean_email(self): 32 data = self.cleaned_data["email"] 33 if ( 34 User.objects.exclude(pk=self.request.user.pk) 35 .filter(email__iexact=data) 36 .exists() 37 ): 38 raise forms.ValidationError("That email address is not available") 39 return data 40 41 42 class ContactUsForm(forms.Form): 43 referrer = forms.CharField(label="Referring Page", widget=forms.HiddenInput()) 44 45 email = forms.EmailField(label="Your email", required=True) 46 subject = forms.CharField(label="Subject", required=False) 47 48 category = forms.CharField( 49 label="Category", 50 required=True, 51 widget=forms.Select( 52 choices=( 53 ("General", "General"), 54 ("Campaign", "Question about campaign"), 55 ("Problem", "Something is not working"), 56 ) 57 ), 58 ) 59 60 link = forms.URLField( 61 label="Link to the page you need support with", required=False 62 ) 63 64 story = forms.CharField( 65 label="Why are you contacting us", required=True, widget=forms.Textarea 66 ) 67 68 69 class CaptchaEmbedForm(forms.Form): 70 captcha = CaptchaField() 71 72 73 class AssetFilteringForm(forms.Form): 74 transcription_status = forms.ChoiceField( 75 choices=TranscriptionStatus.CHOICES, 76 required=False, 77 label="Image Status", 78 widget=forms.Select(attrs={"class": "form-control"}), 79 ) 80 81 def __init__(self, status_counts, *args, **kwargs): 82 super().__init__(*args, **kwargs) 83 84 asset_statuses = { 85 status: "%s (%d)" % (TranscriptionStatus.CHOICE_MAP[status], count) 86 for status, count in status_counts.items() 87 } 88 89 filtered_choices = [("", f"All Images ({sum(status_counts.values())})")] 90 for val, label in self.fields["transcription_status"].choices: 91 if val in asset_statuses: 92 filtered_choices.append((val, asset_statuses[val])) 93 94 self.fields["transcription_status"].choices = filtered_choices 95 96 97 class AdminItemImportForm(forms.Form): 98 import_url = forms.URLField( 99 required=True, label="URL of the item/collection/search page to import" 100 ) 101 102 103 class AdminProjectBulkImportForm(forms.Form): 104 spreadsheet_file = forms.FileField( 105 required=True, 106 label="Spreadsheet containing the campaigns, projects, and items to import", 107 ) 108 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/concordia/forms.py b/concordia/forms.py --- a/concordia/forms.py +++ b/concordia/forms.py @@ -42,27 +42,16 @@ class ContactUsForm(forms.Form): referrer = forms.CharField(label="Referring Page", widget=forms.HiddenInput()) - email = forms.EmailField(label="Your email", required=True) - subject = forms.CharField(label="Subject", required=False) + email = forms.EmailField(label="Your email:", required=True) + subject = forms.CharField(label="Subject:", required=False) - category = forms.CharField( - label="Category", - required=True, - widget=forms.Select( - choices=( - ("General", "General"), - ("Campaign", "Question about campaign"), - ("Problem", "Something is not working"), - ) - ), - ) link = forms.URLField( - label="Link to the page you need support with", required=False + label="Have a specific page you need help with? Add the link below:", required=False ) story = forms.CharField( - label="Why are you contacting us", required=True, widget=forms.Textarea + label="Let us know how we can help:", required=True, widget=forms.Textarea )
{"golden_diff": "diff --git a/concordia/forms.py b/concordia/forms.py\n--- a/concordia/forms.py\n+++ b/concordia/forms.py\n@@ -42,27 +42,16 @@\n class ContactUsForm(forms.Form):\n referrer = forms.CharField(label=\"Referring Page\", widget=forms.HiddenInput())\n \n- email = forms.EmailField(label=\"Your email\", required=True)\n- subject = forms.CharField(label=\"Subject\", required=False)\n+ email = forms.EmailField(label=\"Your email:\", required=True)\n+ subject = forms.CharField(label=\"Subject:\", required=False)\n \n- category = forms.CharField(\n- label=\"Category\",\n- required=True,\n- widget=forms.Select(\n- choices=(\n- (\"General\", \"General\"),\n- (\"Campaign\", \"Question about campaign\"),\n- (\"Problem\", \"Something is not working\"),\n- )\n- ),\n- )\n \n link = forms.URLField(\n- label=\"Link to the page you need support with\", required=False\n+ label=\"Have a specific page you need help with? Add the link below:\", required=False\n )\n \n story = forms.CharField(\n- label=\"Why are you contacting us\", required=True, widget=forms.Textarea\n+ label=\"Let us know how we can help:\", required=True, widget=forms.Textarea\n )\n", "issue": "Refine Contact Us page\n**What behavior did you observe? Please describe the bug**\r\n- The language is very direct. Need to make it more user friendly. \r\n- Add content to get to History Hub\r\n- Begin with options to self-service in Help Center or History Hub\r\n\r\n**What is the expected behavior?**\r\nAs a user, I want to contact a community manager with a pressing question via the Contact Us form so that I can get a swift reply. It should also include: \r\n- Friendlier language for the different fields\r\n- Contact Us form would autopopulate my email and tell CM what page they are referring to\r\n- I should be able to categorize my question in the Contact Us form to best route the question.\r\n- Some information telling me about History Hub\r\n\r\n**Got screenshots? This helps us identify the issue**\r\n\r\n![contact](https://user-images.githubusercontent.com/7362915/46110939-4114f780-c1b3-11e8-9504-3e9358f6cfcb.PNG)\r\n\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "from logging import getLogger\n\nfrom captcha.fields import CaptchaField\nfrom django import forms\nfrom django.contrib.auth import get_user_model\nfrom django_registration.forms import RegistrationForm\n\nfrom .models import TranscriptionStatus\n\nUser = get_user_model()\nlogger = getLogger(__name__)\n\n\nclass UserRegistrationForm(RegistrationForm):\n newsletterOptIn = forms.BooleanField(\n label=\"Newsletter\",\n required=False,\n help_text=\"Email me about campaign updates, upcoming events, and new features.\",\n )\n\n\nclass UserProfileForm(forms.Form):\n email = forms.CharField(\n label=\"Email address\", required=True, widget=forms.EmailInput()\n )\n\n def __init__(self, *, request, **kwargs):\n self.request = request\n return super().__init__(**kwargs)\n\n def clean_email(self):\n data = self.cleaned_data[\"email\"]\n if (\n User.objects.exclude(pk=self.request.user.pk)\n .filter(email__iexact=data)\n .exists()\n ):\n raise forms.ValidationError(\"That email address is not available\")\n return data\n\n\nclass ContactUsForm(forms.Form):\n referrer = forms.CharField(label=\"Referring Page\", widget=forms.HiddenInput())\n\n email = forms.EmailField(label=\"Your email\", required=True)\n subject = forms.CharField(label=\"Subject\", required=False)\n\n category = forms.CharField(\n label=\"Category\",\n required=True,\n widget=forms.Select(\n choices=(\n (\"General\", \"General\"),\n (\"Campaign\", \"Question about campaign\"),\n (\"Problem\", \"Something is not working\"),\n )\n ),\n )\n\n link = forms.URLField(\n label=\"Link to the page you need support with\", required=False\n )\n\n story = forms.CharField(\n label=\"Why are you contacting us\", required=True, widget=forms.Textarea\n )\n\n\nclass CaptchaEmbedForm(forms.Form):\n captcha = CaptchaField()\n\n\nclass AssetFilteringForm(forms.Form):\n transcription_status = forms.ChoiceField(\n choices=TranscriptionStatus.CHOICES,\n required=False,\n label=\"Image Status\",\n widget=forms.Select(attrs={\"class\": \"form-control\"}),\n )\n\n def __init__(self, status_counts, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n asset_statuses = {\n status: \"%s (%d)\" % (TranscriptionStatus.CHOICE_MAP[status], count)\n for status, count in status_counts.items()\n }\n\n filtered_choices = [(\"\", f\"All Images ({sum(status_counts.values())})\")]\n for val, label in self.fields[\"transcription_status\"].choices:\n if val in asset_statuses:\n filtered_choices.append((val, asset_statuses[val]))\n\n self.fields[\"transcription_status\"].choices = filtered_choices\n\n\nclass AdminItemImportForm(forms.Form):\n import_url = forms.URLField(\n required=True, label=\"URL of the item/collection/search page to import\"\n )\n\n\nclass AdminProjectBulkImportForm(forms.Form):\n spreadsheet_file = forms.FileField(\n required=True,\n label=\"Spreadsheet containing the campaigns, projects, and items to import\",\n )\n", "path": "concordia/forms.py"}], "after_files": [{"content": "from logging import getLogger\n\nfrom captcha.fields import CaptchaField\nfrom django import forms\nfrom django.contrib.auth import get_user_model\nfrom django_registration.forms import RegistrationForm\n\nfrom .models import TranscriptionStatus\n\nUser = get_user_model()\nlogger = getLogger(__name__)\n\n\nclass UserRegistrationForm(RegistrationForm):\n newsletterOptIn = forms.BooleanField(\n label=\"Newsletter\",\n required=False,\n help_text=\"Email me about campaign updates, upcoming events, and new features.\",\n )\n\n\nclass UserProfileForm(forms.Form):\n email = forms.CharField(\n label=\"Email address\", required=True, widget=forms.EmailInput()\n )\n\n def __init__(self, *, request, **kwargs):\n self.request = request\n return super().__init__(**kwargs)\n\n def clean_email(self):\n data = self.cleaned_data[\"email\"]\n if (\n User.objects.exclude(pk=self.request.user.pk)\n .filter(email__iexact=data)\n .exists()\n ):\n raise forms.ValidationError(\"That email address is not available\")\n return data\n\n\nclass ContactUsForm(forms.Form):\n referrer = forms.CharField(label=\"Referring Page\", widget=forms.HiddenInput())\n\n email = forms.EmailField(label=\"Your email:\", required=True)\n subject = forms.CharField(label=\"Subject:\", required=False)\n\n\n link = forms.URLField(\n label=\"Have a specific page you need help with? Add the link below:\", required=False\n )\n\n story = forms.CharField(\n label=\"Let us know how we can help:\", required=True, widget=forms.Textarea\n )\n\n\nclass CaptchaEmbedForm(forms.Form):\n captcha = CaptchaField()\n\n\nclass AssetFilteringForm(forms.Form):\n transcription_status = forms.ChoiceField(\n choices=TranscriptionStatus.CHOICES,\n required=False,\n label=\"Image Status\",\n widget=forms.Select(attrs={\"class\": \"form-control\"}),\n )\n\n def __init__(self, status_counts, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n asset_statuses = {\n status: \"%s (%d)\" % (TranscriptionStatus.CHOICE_MAP[status], count)\n for status, count in status_counts.items()\n }\n\n filtered_choices = [(\"\", f\"All Images ({sum(status_counts.values())})\")]\n for val, label in self.fields[\"transcription_status\"].choices:\n if val in asset_statuses:\n filtered_choices.append((val, asset_statuses[val]))\n\n self.fields[\"transcription_status\"].choices = filtered_choices\n\n\nclass AdminItemImportForm(forms.Form):\n import_url = forms.URLField(\n required=True, label=\"URL of the item/collection/search page to import\"\n )\n\n\nclass AdminProjectBulkImportForm(forms.Form):\n spreadsheet_file = forms.FileField(\n required=True,\n label=\"Spreadsheet containing the campaigns, projects, and items to import\",\n )\n", "path": "concordia/forms.py"}]}
1,386
288
gh_patches_debug_24217
rasdani/github-patches
git_diff
docker__docker-py-288
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Please don't require exact versions in requirements It's a good idea to pin requirements in applications. It's a bad idea to pin requirements in libraries. Doing so makes reusing libraries difficult. If you have minimum requirements, that's fine. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 import os 3 import sys 4 from setuptools import setup 5 6 ROOT_DIR = os.path.dirname(__file__) 7 SOURCE_DIR = os.path.join(ROOT_DIR) 8 9 if sys.version_info[0] == 3: 10 requirements_file = './requirements3.txt' 11 else: 12 requirements_file = './requirements.txt' 13 14 exec(open('docker/version.py').read()) 15 16 with open('./test-requirements.txt') as test_reqs_txt: 17 test_requirements = [line for line in test_reqs_txt] 18 with open(requirements_file) as requirements_txt: 19 requirements = [line for line in requirements_txt] 20 21 setup( 22 name="docker-py", 23 version=version, 24 description="Python client for Docker.", 25 packages=['docker', 'docker.auth', 'docker.unixconn', 'docker.utils', 26 'docker.ssladapter'], 27 install_requires=requirements + test_requirements, 28 zip_safe=False, 29 test_suite='tests', 30 classifiers=[ 31 'Development Status :: 4 - Beta', 32 'Environment :: Other Environment', 33 'Intended Audience :: Developers', 34 'Operating System :: OS Independent', 35 'Programming Language :: Python', 36 'Programming Language :: Python :: 2.6', 37 'Programming Language :: Python :: 2.7', 38 'Programming Language :: Python :: 3.2', 39 'Programming Language :: Python :: 3.3', 40 'Programming Language :: Python :: 3.4', 41 'Topic :: Utilities', 42 'License :: OSI Approved :: Apache Software License', 43 ], 44 ) 45 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -6,17 +6,19 @@ ROOT_DIR = os.path.dirname(__file__) SOURCE_DIR = os.path.join(ROOT_DIR) -if sys.version_info[0] == 3: - requirements_file = './requirements3.txt' -else: - requirements_file = './requirements.txt' +requirements = [ + 'requests >= 2.2.1', + 'six >= 1.3.0', +] + +if sys.version_info[0] < 3: + requirements.append('websocket-client >= 0.11.0') exec(open('docker/version.py').read()) with open('./test-requirements.txt') as test_reqs_txt: test_requirements = [line for line in test_reqs_txt] -with open(requirements_file) as requirements_txt: - requirements = [line for line in requirements_txt] + setup( name="docker-py", @@ -24,7 +26,8 @@ description="Python client for Docker.", packages=['docker', 'docker.auth', 'docker.unixconn', 'docker.utils', 'docker.ssladapter'], - install_requires=requirements + test_requirements, + install_requires=requirements, + tests_require=test_requirements, zip_safe=False, test_suite='tests', classifiers=[
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -6,17 +6,19 @@\n ROOT_DIR = os.path.dirname(__file__)\n SOURCE_DIR = os.path.join(ROOT_DIR)\n \n-if sys.version_info[0] == 3:\n- requirements_file = './requirements3.txt'\n-else:\n- requirements_file = './requirements.txt'\n+requirements = [\n+ 'requests >= 2.2.1',\n+ 'six >= 1.3.0',\n+]\n+\n+if sys.version_info[0] < 3:\n+ requirements.append('websocket-client >= 0.11.0')\n \n exec(open('docker/version.py').read())\n \n with open('./test-requirements.txt') as test_reqs_txt:\n test_requirements = [line for line in test_reqs_txt]\n-with open(requirements_file) as requirements_txt:\n- requirements = [line for line in requirements_txt]\n+\n \n setup(\n name=\"docker-py\",\n@@ -24,7 +26,8 @@\n description=\"Python client for Docker.\",\n packages=['docker', 'docker.auth', 'docker.unixconn', 'docker.utils',\n 'docker.ssladapter'],\n- install_requires=requirements + test_requirements,\n+ install_requires=requirements,\n+ tests_require=test_requirements,\n zip_safe=False,\n test_suite='tests',\n classifiers=[\n", "issue": "Please don't require exact versions in requirements\nIt's a good idea to pin requirements in applications.\n\nIt's a bad idea to pin requirements in libraries. Doing so makes reusing libraries difficult. If you have minimum requirements, that's fine.\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\nfrom setuptools import setup\n\nROOT_DIR = os.path.dirname(__file__)\nSOURCE_DIR = os.path.join(ROOT_DIR)\n\nif sys.version_info[0] == 3:\n requirements_file = './requirements3.txt'\nelse:\n requirements_file = './requirements.txt'\n\nexec(open('docker/version.py').read())\n\nwith open('./test-requirements.txt') as test_reqs_txt:\n test_requirements = [line for line in test_reqs_txt]\nwith open(requirements_file) as requirements_txt:\n requirements = [line for line in requirements_txt]\n\nsetup(\n name=\"docker-py\",\n version=version,\n description=\"Python client for Docker.\",\n packages=['docker', 'docker.auth', 'docker.unixconn', 'docker.utils',\n 'docker.ssladapter'],\n install_requires=requirements + test_requirements,\n zip_safe=False,\n test_suite='tests',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Other Environment',\n 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: Apache Software License',\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\nfrom setuptools import setup\n\nROOT_DIR = os.path.dirname(__file__)\nSOURCE_DIR = os.path.join(ROOT_DIR)\n\nrequirements = [\n 'requests >= 2.2.1',\n 'six >= 1.3.0',\n]\n\nif sys.version_info[0] < 3:\n requirements.append('websocket-client >= 0.11.0')\n\nexec(open('docker/version.py').read())\n\nwith open('./test-requirements.txt') as test_reqs_txt:\n test_requirements = [line for line in test_reqs_txt]\n\n\nsetup(\n name=\"docker-py\",\n version=version,\n description=\"Python client for Docker.\",\n packages=['docker', 'docker.auth', 'docker.unixconn', 'docker.utils',\n 'docker.ssladapter'],\n install_requires=requirements,\n tests_require=test_requirements,\n zip_safe=False,\n test_suite='tests',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Other Environment',\n 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: Apache Software License',\n ],\n)\n", "path": "setup.py"}]}
707
298
gh_patches_debug_20950
rasdani/github-patches
git_diff
bridgecrewio__checkov-648
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Cloudformation NLB Listener Rules are incorrectly detected as ALB rules and fail the HTTPS expectation **Describe the bug** NLB listeners are evaluated as ALB listeners, and fail because they don't use HTTPS **To Reproduce** 1. Create a Cloudformation template with NLB listeners (e.g. TCP) 2. Run default tests 3. Tests will fail with `"AWS::ElasticLoadBalancingV2::Listener.<listenername>\" failed in check \"Ensure ALB protocol is HTTPS\" **Expected behavior** NLB rules should pass. **Screenshots** **Desktop (please complete the following information):** - OS: Ubuntu 20.04 - Checkov Version: 1.0.423 **Additional context** This resource: ``` "ConsulDnsListener": { "Type": "AWS::ElasticLoadBalancingV2::Listener", "Properties": { "DefaultActions": [ { "TargetGroupArn": { "Ref": "ConsulDnsTargetGroup" }, "Type": "forward" } ], "LoadBalancerArn": { "Ref": "LoadBalancerArn" }, "Port": 53, "Protocol": "TCP_UDP" } }, ``` Produces this error: ``` { "type": "failure", "message": "Resource \"AWS::ElasticLoadBalancingV2::Listener.ConsulDnsListener\" failed in check \"Ensure ALB protocol is HTTPS\"" }``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `checkov/terraform/checks/resource/aws/ALBListenerHTTPS.py` Content: ``` 1 from checkov.common.models.enums import CheckResult, CheckCategories 2 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck 3 4 class ALBListenerHTTPS(BaseResourceCheck): 5 6 def __init__(self): 7 name = "Ensure ALB protocol is HTTPS" 8 id = "CKV_AWS_2" 9 supported_resources = ['aws_lb_listener'] 10 categories = [CheckCategories.ENCRYPTION] 11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) 12 13 def scan_resource_conf(self, conf): 14 """ 15 validates kms rotation 16 https://www.terraform.io/docs/providers/aws/r/lb_listener.html 17 :param conf: aws_kms_key configuration 18 :return: <CheckResult> 19 """ 20 key = 'protocol' 21 if key in conf.keys(): 22 if ( 23 conf[key] == ["HTTPS"] 24 or 25 conf[key] == ["TLS"] 26 ): 27 return CheckResult.PASSED 28 elif conf[key] == ["HTTP"]: 29 if 'default_action' in conf.keys(): 30 default_action = conf['default_action'][0] 31 action_type = default_action['type'] 32 if action_type == ['redirect']: 33 if default_action['redirect'][0]['protocol'] == ['HTTPS']: 34 return CheckResult.PASSED 35 return CheckResult.FAILED 36 37 38 check = ALBListenerHTTPS() 39 ``` Path: `checkov/cloudformation/checks/resource/aws/ALBListenerHTTPS.py` Content: ``` 1 from checkov.common.models.enums import CheckResult, CheckCategories 2 from checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck 3 4 class ALBListenerHTTPS(BaseResourceCheck): 5 6 def __init__(self): 7 name = "Ensure ALB protocol is HTTPS" 8 id = "CKV_AWS_2" 9 supported_resources = ['AWS::ElasticLoadBalancingV2::Listener'] 10 categories = [CheckCategories.ENCRYPTION] 11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) 12 13 def scan_resource_conf(self, conf): 14 """ 15 validates ALB protocol is HTTPS 16 https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticloadbalancingv2-listener.html 17 :param conf: aws_alb_listener configuration 18 :return: <CheckResult> 19 """ 20 21 if 'Properties' in conf.keys(): 22 if 'Protocol' in conf['Properties'].keys(): 23 if conf['Properties']['Protocol'] in ('HTTPS', 'TLS'): 24 return CheckResult.PASSED 25 else: 26 if ( 27 'DefaultActions' in conf['Properties'].keys() 28 and 29 conf['Properties']['DefaultActions'][0]['Type'] == 'redirect' 30 and 31 conf['Properties']['DefaultActions'][0]['RedirectConfig']['Protocol'] == "HTTPS" 32 ): 33 return CheckResult.PASSED 34 return CheckResult.FAILED 35 36 check = ALBListenerHTTPS() 37 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/checkov/cloudformation/checks/resource/aws/ALBListenerHTTPS.py b/checkov/cloudformation/checks/resource/aws/ALBListenerHTTPS.py --- a/checkov/cloudformation/checks/resource/aws/ALBListenerHTTPS.py +++ b/checkov/cloudformation/checks/resource/aws/ALBListenerHTTPS.py @@ -20,7 +20,7 @@ if 'Properties' in conf.keys(): if 'Protocol' in conf['Properties'].keys(): - if conf['Properties']['Protocol'] in ('HTTPS', 'TLS'): + if conf['Properties']['Protocol'] in ('HTTPS', 'TLS', 'TCP', 'UDP', 'TCP_UDP'): return CheckResult.PASSED else: if ( diff --git a/checkov/terraform/checks/resource/aws/ALBListenerHTTPS.py b/checkov/terraform/checks/resource/aws/ALBListenerHTTPS.py --- a/checkov/terraform/checks/resource/aws/ALBListenerHTTPS.py +++ b/checkov/terraform/checks/resource/aws/ALBListenerHTTPS.py @@ -19,11 +19,7 @@ """ key = 'protocol' if key in conf.keys(): - if ( - conf[key] == ["HTTPS"] - or - conf[key] == ["TLS"] - ): + if conf[key] in (["HTTPS"], ["TLS"], ["TCP"], ["UDP"], ["TCP_UDP"]): return CheckResult.PASSED elif conf[key] == ["HTTP"]: if 'default_action' in conf.keys():
{"golden_diff": "diff --git a/checkov/cloudformation/checks/resource/aws/ALBListenerHTTPS.py b/checkov/cloudformation/checks/resource/aws/ALBListenerHTTPS.py\n--- a/checkov/cloudformation/checks/resource/aws/ALBListenerHTTPS.py\n+++ b/checkov/cloudformation/checks/resource/aws/ALBListenerHTTPS.py\n@@ -20,7 +20,7 @@\n \n if 'Properties' in conf.keys():\n if 'Protocol' in conf['Properties'].keys():\n- if conf['Properties']['Protocol'] in ('HTTPS', 'TLS'):\n+ if conf['Properties']['Protocol'] in ('HTTPS', 'TLS', 'TCP', 'UDP', 'TCP_UDP'):\n return CheckResult.PASSED\n else:\n if (\ndiff --git a/checkov/terraform/checks/resource/aws/ALBListenerHTTPS.py b/checkov/terraform/checks/resource/aws/ALBListenerHTTPS.py\n--- a/checkov/terraform/checks/resource/aws/ALBListenerHTTPS.py\n+++ b/checkov/terraform/checks/resource/aws/ALBListenerHTTPS.py\n@@ -19,11 +19,7 @@\n \"\"\"\n key = 'protocol'\n if key in conf.keys():\n- if (\n- conf[key] == [\"HTTPS\"]\n- or\n- conf[key] == [\"TLS\"]\n- ):\n+ if conf[key] in ([\"HTTPS\"], [\"TLS\"], [\"TCP\"], [\"UDP\"], [\"TCP_UDP\"]):\n return CheckResult.PASSED\n elif conf[key] == [\"HTTP\"]:\n if 'default_action' in conf.keys():\n", "issue": "Cloudformation NLB Listener Rules are incorrectly detected as ALB rules and fail the HTTPS expectation\n**Describe the bug**\r\nNLB listeners are evaluated as ALB listeners, and fail because they don't use HTTPS\r\n\r\n**To Reproduce**\r\n1. Create a Cloudformation template with NLB listeners (e.g. TCP)\r\n2. Run default tests\r\n3. Tests will fail with `\"AWS::ElasticLoadBalancingV2::Listener.<listenername>\\\" failed in check \\\"Ensure ALB protocol is HTTPS\\\"\r\n\r\n**Expected behavior**\r\nNLB rules should pass. \r\n\r\n**Screenshots**\r\n\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Ubuntu 20.04\r\n - Checkov Version: 1.0.423\r\n\r\n**Additional context**\r\nThis resource:\r\n```\r\n \"ConsulDnsListener\": { \r\n \"Type\": \"AWS::ElasticLoadBalancingV2::Listener\", \r\n \"Properties\": { \r\n \"DefaultActions\": [ \r\n { \r\n \"TargetGroupArn\": { \r\n \"Ref\": \"ConsulDnsTargetGroup\" \r\n }, \r\n \"Type\": \"forward\" \r\n } \r\n ], \r\n \"LoadBalancerArn\": { \r\n \"Ref\": \"LoadBalancerArn\" \r\n }, \r\n \"Port\": 53, \r\n \"Protocol\": \"TCP_UDP\" \r\n } \r\n }, \r\n```\r\nProduces this error:\r\n```\r\n{\r\n \"type\": \"failure\",\r\n \"message\": \"Resource \\\"AWS::ElasticLoadBalancingV2::Listener.ConsulDnsListener\\\" failed in check \\\"Ensure ALB protocol is HTTPS\\\"\"\r\n}```\n", "before_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\nclass ALBListenerHTTPS(BaseResourceCheck):\n\n def __init__(self):\n name = \"Ensure ALB protocol is HTTPS\"\n id = \"CKV_AWS_2\"\n supported_resources = ['aws_lb_listener']\n categories = [CheckCategories.ENCRYPTION]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n \"\"\"\n validates kms rotation\n https://www.terraform.io/docs/providers/aws/r/lb_listener.html\n :param conf: aws_kms_key configuration\n :return: <CheckResult>\n \"\"\"\n key = 'protocol'\n if key in conf.keys():\n if (\n conf[key] == [\"HTTPS\"]\n or\n conf[key] == [\"TLS\"]\n ):\n return CheckResult.PASSED\n elif conf[key] == [\"HTTP\"]:\n if 'default_action' in conf.keys():\n default_action = conf['default_action'][0]\n action_type = default_action['type']\n if action_type == ['redirect']:\n if default_action['redirect'][0]['protocol'] == ['HTTPS']:\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n\ncheck = ALBListenerHTTPS()\n", "path": "checkov/terraform/checks/resource/aws/ALBListenerHTTPS.py"}, {"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck\n\nclass ALBListenerHTTPS(BaseResourceCheck):\n\n def __init__(self):\n name = \"Ensure ALB protocol is HTTPS\"\n id = \"CKV_AWS_2\"\n supported_resources = ['AWS::ElasticLoadBalancingV2::Listener']\n categories = [CheckCategories.ENCRYPTION]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n \"\"\"\n validates ALB protocol is HTTPS\n https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticloadbalancingv2-listener.html\n :param conf: aws_alb_listener configuration\n :return: <CheckResult>\n \"\"\"\n\n if 'Properties' in conf.keys():\n if 'Protocol' in conf['Properties'].keys():\n if conf['Properties']['Protocol'] in ('HTTPS', 'TLS'):\n return CheckResult.PASSED\n else:\n if (\n 'DefaultActions' in conf['Properties'].keys()\n and\n conf['Properties']['DefaultActions'][0]['Type'] == 'redirect'\n and\n conf['Properties']['DefaultActions'][0]['RedirectConfig']['Protocol'] == \"HTTPS\"\n ):\n return CheckResult.PASSED\n return CheckResult.FAILED\n\ncheck = ALBListenerHTTPS()\n", "path": "checkov/cloudformation/checks/resource/aws/ALBListenerHTTPS.py"}], "after_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\nclass ALBListenerHTTPS(BaseResourceCheck):\n\n def __init__(self):\n name = \"Ensure ALB protocol is HTTPS\"\n id = \"CKV_AWS_2\"\n supported_resources = ['aws_lb_listener']\n categories = [CheckCategories.ENCRYPTION]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n \"\"\"\n validates kms rotation\n https://www.terraform.io/docs/providers/aws/r/lb_listener.html\n :param conf: aws_kms_key configuration\n :return: <CheckResult>\n \"\"\"\n key = 'protocol'\n if key in conf.keys():\n if conf[key] in ([\"HTTPS\"], [\"TLS\"], [\"TCP\"], [\"UDP\"], [\"TCP_UDP\"]):\n return CheckResult.PASSED\n elif conf[key] == [\"HTTP\"]:\n if 'default_action' in conf.keys():\n default_action = conf['default_action'][0]\n action_type = default_action['type']\n if action_type == ['redirect']:\n if default_action['redirect'][0]['protocol'] == ['HTTPS']:\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n\ncheck = ALBListenerHTTPS()\n", "path": "checkov/terraform/checks/resource/aws/ALBListenerHTTPS.py"}, {"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck\n\nclass ALBListenerHTTPS(BaseResourceCheck):\n\n def __init__(self):\n name = \"Ensure ALB protocol is HTTPS\"\n id = \"CKV_AWS_2\"\n supported_resources = ['AWS::ElasticLoadBalancingV2::Listener']\n categories = [CheckCategories.ENCRYPTION]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n \"\"\"\n validates ALB protocol is HTTPS\n https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticloadbalancingv2-listener.html\n :param conf: aws_alb_listener configuration\n :return: <CheckResult>\n \"\"\"\n\n if 'Properties' in conf.keys():\n if 'Protocol' in conf['Properties'].keys():\n if conf['Properties']['Protocol'] in ('HTTPS', 'TLS', 'TCP', 'UDP', 'TCP_UDP'):\n return CheckResult.PASSED\n else:\n if (\n 'DefaultActions' in conf['Properties'].keys()\n and\n conf['Properties']['DefaultActions'][0]['Type'] == 'redirect'\n and\n conf['Properties']['DefaultActions'][0]['RedirectConfig']['Protocol'] == \"HTTPS\"\n ):\n return CheckResult.PASSED\n return CheckResult.FAILED\n\ncheck = ALBListenerHTTPS()\n", "path": "checkov/cloudformation/checks/resource/aws/ALBListenerHTTPS.py"}]}
1,401
335
gh_patches_debug_43728
rasdani/github-patches
git_diff
sopel-irc__sopel-1257
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Anonymous gists are going away. Help module will break. GitHub [announced](https://blog.github.com/2018-02-18-deprecation-notice-removing-anonymous-gist-creation/) last month that anonymous gist creation will be disabled on March 19, 2018 (UTC). The `help` module was modified in fa89eb7 to essentially paste its output to a gist and simply return the link, and this functionality will break (as of writing) tomorrow. `.help` (or `.commands`) will return an error until (and unless) the module is updated to paste its contents elsewhere and/or create a static HTML file instead as I originally proposed in #1080. (As an aside, I've been meaning to implement that HTML mode for ages. Maybe this is the kick I needed to finally get off my arse and write it. We'll see.) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sopel/modules/help.py` Content: ``` 1 # coding=utf-8 2 """ 3 help.py - Sopel Help Module 4 Copyright 2008, Sean B. Palmer, inamidst.com 5 Copyright © 2013, Elad Alfassa, <[email protected]> 6 Licensed under the Eiffel Forum License 2. 7 8 http://sopel.chat 9 """ 10 from __future__ import unicode_literals, absolute_import, print_function, division 11 12 import textwrap 13 import collections 14 import json 15 16 import requests 17 18 from sopel.logger import get_logger 19 from sopel.module import commands, rule, example, priority 20 21 logger = get_logger(__name__) 22 23 24 @rule('$nick' '(?i)(help|doc) +([A-Za-z]+)(?:\?+)?$') 25 @example('.help tell') 26 @commands('help', 'commands') 27 @priority('low') 28 def help(bot, trigger): 29 """Shows a command's documentation, and possibly an example.""" 30 if trigger.group(2): 31 name = trigger.group(2) 32 name = name.lower() 33 34 # number of lines of help to show 35 threshold = 3 36 37 if name in bot.doc: 38 if len(bot.doc[name][0]) + (1 if bot.doc[name][1] else 0) > threshold: 39 if trigger.nick != trigger.sender: # don't say that if asked in private 40 bot.reply('The documentation for this command is too long; I\'m sending it to you in a private message.') 41 msgfun = lambda l: bot.msg(trigger.nick, l) 42 else: 43 msgfun = bot.reply 44 45 for line in bot.doc[name][0]: 46 msgfun(line) 47 if bot.doc[name][1]: 48 msgfun('e.g. ' + bot.doc[name][1]) 49 else: 50 # This'll probably catch most cases, without having to spend the time 51 # actually creating the list first. Maybe worth storing the link and a 52 # heuristic in config, too, so it persists across restarts. Would need a 53 # command to regenerate, too... 54 if 'command-gist' in bot.memory and bot.memory['command-gist'][0] == len(bot.command_groups): 55 url = bot.memory['command-gist'][1] 56 else: 57 bot.say("Hang on, I'm creating a list.") 58 msgs = [] 59 60 name_length = max(6, max(len(k) for k in bot.command_groups.keys())) 61 for category, cmds in collections.OrderedDict(sorted(bot.command_groups.items())).items(): 62 category = category.upper().ljust(name_length) 63 cmds = set(cmds) # remove duplicates 64 cmds = ' '.join(cmds) 65 msg = category + ' ' + cmds 66 indent = ' ' * (name_length + 2) 67 # Honestly not sure why this is a list here 68 msgs.append('\n'.join(textwrap.wrap(msg, subsequent_indent=indent))) 69 70 url = create_gist(bot, '\n\n'.join(msgs)) 71 if not url: 72 return 73 bot.memory['command-gist'] = (len(bot.command_groups), url) 74 bot.say("I've posted a list of my commands at {} - You can see " 75 "more info about any of these commands by doing .help " 76 "<command> (e.g. .help time)".format(url)) 77 78 79 def create_gist(bot, msg): 80 payload = { 81 'description': 'Command listing for {}@{}'.format(bot.nick, bot.config.core.host), 82 'public': 'true', 83 'files': { 84 'commands.txt': { 85 "content": msg, 86 }, 87 }, 88 } 89 try: 90 result = requests.post('https://api.github.com/gists', 91 data=json.dumps(payload)) 92 except requests.RequestException: 93 bot.say("Sorry! Something went wrong.") 94 logger.exception("Error posting commands gist") 95 return 96 if not result.status_code != '201': 97 bot.say("Sorry! Something went wrong.") 98 logger.error("Error %s posting commands gist: %s", 99 result.status_code, result.text) 100 return 101 result = result.json() 102 if 'html_url' not in result: 103 bot.say("Sorry! Something went wrong.") 104 logger.error("Invalid result %s", result) 105 return 106 return result['html_url'] 107 108 109 @rule('$nick' r'(?i)help(?:[?!]+)?$') 110 @priority('low') 111 def help2(bot, trigger): 112 response = ( 113 'Hi, I\'m a bot. Say ".commands" to me in private for a list ' + 114 'of my commands, or see http://sopel.chat for more ' + 115 'general details. My owner is %s.' 116 ) % bot.config.core.owner 117 bot.reply(response) 118 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sopel/modules/help.py b/sopel/modules/help.py --- a/sopel/modules/help.py +++ b/sopel/modules/help.py @@ -3,6 +3,7 @@ help.py - Sopel Help Module Copyright 2008, Sean B. Palmer, inamidst.com Copyright © 2013, Elad Alfassa, <[email protected]> +Copyright © 2018, Adam Erdman, pandorah.org Licensed under the Eiffel Forum License 2. http://sopel.chat @@ -11,8 +12,6 @@ import textwrap import collections -import json - import requests from sopel.logger import get_logger @@ -51,8 +50,8 @@ # actually creating the list first. Maybe worth storing the link and a # heuristic in config, too, so it persists across restarts. Would need a # command to regenerate, too... - if 'command-gist' in bot.memory and bot.memory['command-gist'][0] == len(bot.command_groups): - url = bot.memory['command-gist'][1] + if 'command-list' in bot.memory and bot.memory['command-list'][0] == len(bot.command_groups): + url = bot.memory['command-list'][1] else: bot.say("Hang on, I'm creating a list.") msgs = [] @@ -60,49 +59,39 @@ name_length = max(6, max(len(k) for k in bot.command_groups.keys())) for category, cmds in collections.OrderedDict(sorted(bot.command_groups.items())).items(): category = category.upper().ljust(name_length) + cmds = set(cmds) # remove duplicates cmds = ' '.join(cmds) msg = category + ' ' + cmds indent = ' ' * (name_length + 2) # Honestly not sure why this is a list here msgs.append('\n'.join(textwrap.wrap(msg, subsequent_indent=indent))) - url = create_gist(bot, '\n\n'.join(msgs)) + url = create_list(bot, '\n\n'.join(msgs)) if not url: return - bot.memory['command-gist'] = (len(bot.command_groups), url) + bot.memory['command-list'] = (len(bot.command_groups), url) bot.say("I've posted a list of my commands at {} - You can see " "more info about any of these commands by doing .help " "<command> (e.g. .help time)".format(url)) -def create_gist(bot, msg): - payload = { - 'description': 'Command listing for {}@{}'.format(bot.nick, bot.config.core.host), - 'public': 'true', - 'files': { - 'commands.txt': { - "content": msg, - }, - }, - } +def create_list(bot, msg): + msg = 'Command listing for {}@{}\n\n'.format(bot.nick, bot.config.core.host) + msg + payload = { "content": msg } + headers = {'Content-type': 'application/json', 'Accept': 'application/json'} + try: - result = requests.post('https://api.github.com/gists', - data=json.dumps(payload)) + result = requests.post('https://ptpb.pw/', json=payload, headers=headers) except requests.RequestException: bot.say("Sorry! Something went wrong.") - logger.exception("Error posting commands gist") - return - if not result.status_code != '201': - bot.say("Sorry! Something went wrong.") - logger.error("Error %s posting commands gist: %s", - result.status_code, result.text) + logger.exception("Error posting commands") return result = result.json() - if 'html_url' not in result: + if 'url' not in result: bot.say("Sorry! Something went wrong.") logger.error("Invalid result %s", result) return - return result['html_url'] + return result['url'] @rule('$nick' r'(?i)help(?:[?!]+)?$')
{"golden_diff": "diff --git a/sopel/modules/help.py b/sopel/modules/help.py\n--- a/sopel/modules/help.py\n+++ b/sopel/modules/help.py\n@@ -3,6 +3,7 @@\n help.py - Sopel Help Module\n Copyright 2008, Sean B. Palmer, inamidst.com\n Copyright \u00a9 2013, Elad Alfassa, <[email protected]>\n+Copyright \u00a9 2018, Adam Erdman, pandorah.org\n Licensed under the Eiffel Forum License 2.\n \n http://sopel.chat\n@@ -11,8 +12,6 @@\n \n import textwrap\n import collections\n-import json\n-\n import requests\n \n from sopel.logger import get_logger\n@@ -51,8 +50,8 @@\n # actually creating the list first. Maybe worth storing the link and a\n # heuristic in config, too, so it persists across restarts. Would need a\n # command to regenerate, too...\n- if 'command-gist' in bot.memory and bot.memory['command-gist'][0] == len(bot.command_groups):\n- url = bot.memory['command-gist'][1]\n+ if 'command-list' in bot.memory and bot.memory['command-list'][0] == len(bot.command_groups):\n+ url = bot.memory['command-list'][1]\n else:\n bot.say(\"Hang on, I'm creating a list.\")\n msgs = []\n@@ -60,49 +59,39 @@\n name_length = max(6, max(len(k) for k in bot.command_groups.keys()))\n for category, cmds in collections.OrderedDict(sorted(bot.command_groups.items())).items():\n category = category.upper().ljust(name_length)\n+ cmds = set(cmds) # remove duplicates\n cmds = ' '.join(cmds)\n msg = category + ' ' + cmds\n indent = ' ' * (name_length + 2)\n # Honestly not sure why this is a list here\n msgs.append('\\n'.join(textwrap.wrap(msg, subsequent_indent=indent)))\n \n- url = create_gist(bot, '\\n\\n'.join(msgs))\n+ url = create_list(bot, '\\n\\n'.join(msgs))\n if not url:\n return\n- bot.memory['command-gist'] = (len(bot.command_groups), url)\n+ bot.memory['command-list'] = (len(bot.command_groups), url)\n bot.say(\"I've posted a list of my commands at {} - You can see \"\n \"more info about any of these commands by doing .help \"\n \"<command> (e.g. .help time)\".format(url))\n \n \n-def create_gist(bot, msg):\n- payload = {\n- 'description': 'Command listing for {}@{}'.format(bot.nick, bot.config.core.host),\n- 'public': 'true',\n- 'files': {\n- 'commands.txt': {\n- \"content\": msg,\n- },\n- },\n- }\n+def create_list(bot, msg):\n+ msg = 'Command listing for {}@{}\\n\\n'.format(bot.nick, bot.config.core.host) + msg\n+ payload = { \"content\": msg }\n+ headers = {'Content-type': 'application/json', 'Accept': 'application/json'}\n+ \n try:\n- result = requests.post('https://api.github.com/gists',\n- data=json.dumps(payload))\n+ result = requests.post('https://ptpb.pw/', json=payload, headers=headers)\n except requests.RequestException:\n bot.say(\"Sorry! Something went wrong.\")\n- logger.exception(\"Error posting commands gist\")\n- return\n- if not result.status_code != '201':\n- bot.say(\"Sorry! Something went wrong.\")\n- logger.error(\"Error %s posting commands gist: %s\",\n- result.status_code, result.text)\n+ logger.exception(\"Error posting commands\")\n return\n result = result.json()\n- if 'html_url' not in result:\n+ if 'url' not in result:\n bot.say(\"Sorry! Something went wrong.\")\n logger.error(\"Invalid result %s\", result)\n return\n- return result['html_url']\n+ return result['url']\n \n \n @rule('$nick' r'(?i)help(?:[?!]+)?$')\n", "issue": "Anonymous gists are going away. Help module will break.\nGitHub [announced](https://blog.github.com/2018-02-18-deprecation-notice-removing-anonymous-gist-creation/) last month that anonymous gist creation will be disabled on March 19, 2018 (UTC).\r\n\r\nThe `help` module was modified in fa89eb7 to essentially paste its output to a gist and simply return the link, and this functionality will break (as of writing) tomorrow. `.help` (or `.commands`) will return an error until (and unless) the module is updated to paste its contents elsewhere and/or create a static HTML file instead as I originally proposed in #1080.\r\n\r\n(As an aside, I've been meaning to implement that HTML mode for ages. Maybe this is the kick I needed to finally get off my arse and write it. We'll see.)\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"\nhelp.py - Sopel Help Module\nCopyright 2008, Sean B. Palmer, inamidst.com\nCopyright \u00a9 2013, Elad Alfassa, <[email protected]>\nLicensed under the Eiffel Forum License 2.\n\nhttp://sopel.chat\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport textwrap\nimport collections\nimport json\n\nimport requests\n\nfrom sopel.logger import get_logger\nfrom sopel.module import commands, rule, example, priority\n\nlogger = get_logger(__name__)\n\n\n@rule('$nick' '(?i)(help|doc) +([A-Za-z]+)(?:\\?+)?$')\n@example('.help tell')\n@commands('help', 'commands')\n@priority('low')\ndef help(bot, trigger):\n \"\"\"Shows a command's documentation, and possibly an example.\"\"\"\n if trigger.group(2):\n name = trigger.group(2)\n name = name.lower()\n\n # number of lines of help to show\n threshold = 3\n\n if name in bot.doc:\n if len(bot.doc[name][0]) + (1 if bot.doc[name][1] else 0) > threshold:\n if trigger.nick != trigger.sender: # don't say that if asked in private\n bot.reply('The documentation for this command is too long; I\\'m sending it to you in a private message.')\n msgfun = lambda l: bot.msg(trigger.nick, l)\n else:\n msgfun = bot.reply\n\n for line in bot.doc[name][0]:\n msgfun(line)\n if bot.doc[name][1]:\n msgfun('e.g. ' + bot.doc[name][1])\n else:\n # This'll probably catch most cases, without having to spend the time\n # actually creating the list first. Maybe worth storing the link and a\n # heuristic in config, too, so it persists across restarts. Would need a\n # command to regenerate, too...\n if 'command-gist' in bot.memory and bot.memory['command-gist'][0] == len(bot.command_groups):\n url = bot.memory['command-gist'][1]\n else:\n bot.say(\"Hang on, I'm creating a list.\")\n msgs = []\n\n name_length = max(6, max(len(k) for k in bot.command_groups.keys()))\n for category, cmds in collections.OrderedDict(sorted(bot.command_groups.items())).items():\n category = category.upper().ljust(name_length)\n cmds = set(cmds) # remove duplicates\n cmds = ' '.join(cmds)\n msg = category + ' ' + cmds\n indent = ' ' * (name_length + 2)\n # Honestly not sure why this is a list here\n msgs.append('\\n'.join(textwrap.wrap(msg, subsequent_indent=indent)))\n\n url = create_gist(bot, '\\n\\n'.join(msgs))\n if not url:\n return\n bot.memory['command-gist'] = (len(bot.command_groups), url)\n bot.say(\"I've posted a list of my commands at {} - You can see \"\n \"more info about any of these commands by doing .help \"\n \"<command> (e.g. .help time)\".format(url))\n\n\ndef create_gist(bot, msg):\n payload = {\n 'description': 'Command listing for {}@{}'.format(bot.nick, bot.config.core.host),\n 'public': 'true',\n 'files': {\n 'commands.txt': {\n \"content\": msg,\n },\n },\n }\n try:\n result = requests.post('https://api.github.com/gists',\n data=json.dumps(payload))\n except requests.RequestException:\n bot.say(\"Sorry! Something went wrong.\")\n logger.exception(\"Error posting commands gist\")\n return\n if not result.status_code != '201':\n bot.say(\"Sorry! Something went wrong.\")\n logger.error(\"Error %s posting commands gist: %s\",\n result.status_code, result.text)\n return\n result = result.json()\n if 'html_url' not in result:\n bot.say(\"Sorry! Something went wrong.\")\n logger.error(\"Invalid result %s\", result)\n return\n return result['html_url']\n\n\n@rule('$nick' r'(?i)help(?:[?!]+)?$')\n@priority('low')\ndef help2(bot, trigger):\n response = (\n 'Hi, I\\'m a bot. Say \".commands\" to me in private for a list ' +\n 'of my commands, or see http://sopel.chat for more ' +\n 'general details. My owner is %s.'\n ) % bot.config.core.owner\n bot.reply(response)\n", "path": "sopel/modules/help.py"}], "after_files": [{"content": "# coding=utf-8\n\"\"\"\nhelp.py - Sopel Help Module\nCopyright 2008, Sean B. Palmer, inamidst.com\nCopyright \u00a9 2013, Elad Alfassa, <[email protected]>\nCopyright \u00a9 2018, Adam Erdman, pandorah.org\nLicensed under the Eiffel Forum License 2.\n\nhttp://sopel.chat\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport textwrap\nimport collections\nimport requests\n\nfrom sopel.logger import get_logger\nfrom sopel.module import commands, rule, example, priority\n\nlogger = get_logger(__name__)\n\n\n@rule('$nick' '(?i)(help|doc) +([A-Za-z]+)(?:\\?+)?$')\n@example('.help tell')\n@commands('help', 'commands')\n@priority('low')\ndef help(bot, trigger):\n \"\"\"Shows a command's documentation, and possibly an example.\"\"\"\n if trigger.group(2):\n name = trigger.group(2)\n name = name.lower()\n\n # number of lines of help to show\n threshold = 3\n\n if name in bot.doc:\n if len(bot.doc[name][0]) + (1 if bot.doc[name][1] else 0) > threshold:\n if trigger.nick != trigger.sender: # don't say that if asked in private\n bot.reply('The documentation for this command is too long; I\\'m sending it to you in a private message.')\n msgfun = lambda l: bot.msg(trigger.nick, l)\n else:\n msgfun = bot.reply\n\n for line in bot.doc[name][0]:\n msgfun(line)\n if bot.doc[name][1]:\n msgfun('e.g. ' + bot.doc[name][1])\n else:\n # This'll probably catch most cases, without having to spend the time\n # actually creating the list first. Maybe worth storing the link and a\n # heuristic in config, too, so it persists across restarts. Would need a\n # command to regenerate, too...\n if 'command-list' in bot.memory and bot.memory['command-list'][0] == len(bot.command_groups):\n url = bot.memory['command-list'][1]\n else:\n bot.say(\"Hang on, I'm creating a list.\")\n msgs = []\n\n name_length = max(6, max(len(k) for k in bot.command_groups.keys()))\n for category, cmds in collections.OrderedDict(sorted(bot.command_groups.items())).items():\n category = category.upper().ljust(name_length)\n cmds = set(cmds) # remove duplicates\n cmds = ' '.join(cmds)\n msg = category + ' ' + cmds\n indent = ' ' * (name_length + 2)\n # Honestly not sure why this is a list here\n msgs.append('\\n'.join(textwrap.wrap(msg, subsequent_indent=indent)))\n\n url = create_list(bot, '\\n\\n'.join(msgs))\n if not url:\n return\n bot.memory['command-list'] = (len(bot.command_groups), url)\n bot.say(\"I've posted a list of my commands at {} - You can see \"\n \"more info about any of these commands by doing .help \"\n \"<command> (e.g. .help time)\".format(url))\n\n\ndef create_list(bot, msg):\n msg = 'Command listing for {}@{}\\n\\n'.format(bot.nick, bot.config.core.host) + msg\n payload = { \"content\": msg }\n headers = {'Content-type': 'application/json', 'Accept': 'application/json'}\n \n try:\n result = requests.post('https://ptpb.pw/', json=payload, headers=headers)\n except requests.RequestException:\n bot.say(\"Sorry! Something went wrong.\")\n logger.exception(\"Error posting commands\")\n return\n result = result.json()\n if 'url' not in result:\n bot.say(\"Sorry! Something went wrong.\")\n logger.error(\"Invalid result %s\", result)\n return\n return result['url']\n\n\n@rule('$nick' r'(?i)help(?:[?!]+)?$')\n@priority('low')\ndef help2(bot, trigger):\n response = (\n 'Hi, I\\'m a bot. Say \".commands\" to me in private for a list ' +\n 'of my commands, or see http://sopel.chat for more ' +\n 'general details. My owner is %s.'\n ) % bot.config.core.owner\n bot.reply(response)\n", "path": "sopel/modules/help.py"}]}
1,727
941
gh_patches_debug_2545
rasdani/github-patches
git_diff
liqd__a4-meinberlin-5153
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Wording: no formal adress regarding notice when comment is too long (missing String on Weblate) **URL:** https://meinberlin-demo.liqd.net/budgeting/2023-00049/ **user:** any **expected behaviour:** as a user on mein Berlin I want to be adressed in a formal way (Sie) **behaviour:** When I write a comment which is too long, I get a notice with an informal adress that the textfield should not contain more than 4000 characters. **important screensize:** - **device & browser:** - **Comment/Question:** there is no string on weblate yet, so I cannot translate it correctly Screenshot? <img width="725" alt="Bildschirmfoto 2023-02-13 um 10 27 18" src="https://user-images.githubusercontent.com/113608720/219613075-f384b1ad-4227-4ee8-b4fc-c166d9ba3fe4.png"> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `meinberlin/apps/contrib/django_standard_messages.py` Content: ``` 1 def _(s): 2 return s 3 4 5 django_standard_messages_to_override = [ 6 _("You have signed out."), 7 _("Verify Your E-mail Address"), 8 _("You must type the same password each time."), 9 _("You have confirmed %(email)s."), 10 _("You cannot remove your primary e-mail address (%(email)s)."), 11 _( 12 "We have sent you an e-mail. Please contact us if " 13 "you do not receive it within a few minutes." 14 ), 15 _( 16 "We have sent an e-mail to you for verification. " 17 "Follow the link provided to finalize the signup process. " 18 "If you do not see the verification e-mail in your main inbox, " 19 "check your spam folder. " 20 "Please contact us if you do not receive the verification e-mail " 21 "within a few minutes." 22 ), 23 _( 24 "We have sent you an e-mail. If you have not received it " 25 "please check your spam folder. Otherwise contact us if you " 26 "do not receive it in a few minutes." 27 ), 28 _("You must select a minimum of %(limit_value)d choices."), 29 _("You must select a maximum of %(limit_value)d choices."), 30 _("Enter a valid email address."), 31 ] 32 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/meinberlin/apps/contrib/django_standard_messages.py b/meinberlin/apps/contrib/django_standard_messages.py --- a/meinberlin/apps/contrib/django_standard_messages.py +++ b/meinberlin/apps/contrib/django_standard_messages.py @@ -28,4 +28,5 @@ _("You must select a minimum of %(limit_value)d choices."), _("You must select a maximum of %(limit_value)d choices."), _("Enter a valid email address."), + _("Ensure this field has no more than {max_length} characters."), ]
{"golden_diff": "diff --git a/meinberlin/apps/contrib/django_standard_messages.py b/meinberlin/apps/contrib/django_standard_messages.py\n--- a/meinberlin/apps/contrib/django_standard_messages.py\n+++ b/meinberlin/apps/contrib/django_standard_messages.py\n@@ -28,4 +28,5 @@\n _(\"You must select a minimum of %(limit_value)d choices.\"),\n _(\"You must select a maximum of %(limit_value)d choices.\"),\n _(\"Enter a valid email address.\"),\n+ _(\"Ensure this field has no more than {max_length} characters.\"),\n ]\n", "issue": "Wording: no formal adress regarding notice when comment is too long (missing String on Weblate)\n**URL:** https://meinberlin-demo.liqd.net/budgeting/2023-00049/\r\n**user:** any\r\n**expected behaviour:** as a user on mein Berlin I want to be adressed in a formal way (Sie) \r\n**behaviour:** When I write a comment which is too long, I get a notice with an informal adress that the textfield should not contain more than 4000 characters.\r\n**important screensize:** - \r\n**device & browser:** - \r\n**Comment/Question:** there is no string on weblate yet, so I cannot translate it correctly\r\n\r\nScreenshot?\r\n<img width=\"725\" alt=\"Bildschirmfoto 2023-02-13 um 10 27 18\" src=\"https://user-images.githubusercontent.com/113608720/219613075-f384b1ad-4227-4ee8-b4fc-c166d9ba3fe4.png\">\n", "before_files": [{"content": "def _(s):\n return s\n\n\ndjango_standard_messages_to_override = [\n _(\"You have signed out.\"),\n _(\"Verify Your E-mail Address\"),\n _(\"You must type the same password each time.\"),\n _(\"You have confirmed %(email)s.\"),\n _(\"You cannot remove your primary e-mail address (%(email)s).\"),\n _(\n \"We have sent you an e-mail. Please contact us if \"\n \"you do not receive it within a few minutes.\"\n ),\n _(\n \"We have sent an e-mail to you for verification. \"\n \"Follow the link provided to finalize the signup process. \"\n \"If you do not see the verification e-mail in your main inbox, \"\n \"check your spam folder. \"\n \"Please contact us if you do not receive the verification e-mail \"\n \"within a few minutes.\"\n ),\n _(\n \"We have sent you an e-mail. If you have not received it \"\n \"please check your spam folder. Otherwise contact us if you \"\n \"do not receive it in a few minutes.\"\n ),\n _(\"You must select a minimum of %(limit_value)d choices.\"),\n _(\"You must select a maximum of %(limit_value)d choices.\"),\n _(\"Enter a valid email address.\"),\n]\n", "path": "meinberlin/apps/contrib/django_standard_messages.py"}], "after_files": [{"content": "def _(s):\n return s\n\n\ndjango_standard_messages_to_override = [\n _(\"You have signed out.\"),\n _(\"Verify Your E-mail Address\"),\n _(\"You must type the same password each time.\"),\n _(\"You have confirmed %(email)s.\"),\n _(\"You cannot remove your primary e-mail address (%(email)s).\"),\n _(\n \"We have sent you an e-mail. Please contact us if \"\n \"you do not receive it within a few minutes.\"\n ),\n _(\n \"We have sent an e-mail to you for verification. \"\n \"Follow the link provided to finalize the signup process. \"\n \"If you do not see the verification e-mail in your main inbox, \"\n \"check your spam folder. \"\n \"Please contact us if you do not receive the verification e-mail \"\n \"within a few minutes.\"\n ),\n _(\n \"We have sent you an e-mail. If you have not received it \"\n \"please check your spam folder. Otherwise contact us if you \"\n \"do not receive it in a few minutes.\"\n ),\n _(\"You must select a minimum of %(limit_value)d choices.\"),\n _(\"You must select a maximum of %(limit_value)d choices.\"),\n _(\"Enter a valid email address.\"),\n _(\"Ensure this field has no more than {max_length} characters.\"),\n]\n", "path": "meinberlin/apps/contrib/django_standard_messages.py"}]}
835
127
gh_patches_debug_256
rasdani/github-patches
git_diff
jazzband__pip-tools-28
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- pip-review should compare version, not test equality ``` $ pip-review pelican==3.0.1 is available (you have 3.1) ``` I'm locally testing this package, and `pip-review` will just test if current installed version is the same as the latest version in `pip`. Which causes problem as shown above. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 """ 2 pip-tools keeps your pinned dependencies fresh. 3 """ 4 import sys 5 from setuptools import setup 6 7 8 def get_dependencies(): 9 deps = [] 10 if sys.version_info < (2, 7): 11 deps += ['argparse'] 12 return deps 13 14 15 setup( 16 name='pip-tools', 17 version='0.2.1', 18 url='https://github.com/nvie/pip-tools/', 19 license='BSD', 20 author='Vincent Driessen', 21 author_email='[email protected]', 22 description=__doc__.strip('\n'), 23 #packages=[], 24 scripts=['bin/pip-review', 'bin/pip-dump'], 25 #include_package_data=True, 26 zip_safe=False, 27 platforms='any', 28 install_requires=get_dependencies(), 29 classifiers=[ 30 # As from http://pypi.python.org/pypi?%3Aaction=list_classifiers 31 #'Development Status :: 1 - Planning', 32 #'Development Status :: 2 - Pre-Alpha', 33 #'Development Status :: 3 - Alpha', 34 'Development Status :: 4 - Beta', 35 #'Development Status :: 5 - Production/Stable', 36 #'Development Status :: 6 - Mature', 37 #'Development Status :: 7 - Inactive', 38 'Programming Language :: Python', 39 'Programming Language :: Python :: 2', 40 #'Programming Language :: Python :: 2.3', 41 #'Programming Language :: Python :: 2.4', 42 #'Programming Language :: Python :: 2.5', 43 'Programming Language :: Python :: 2.6', 44 'Programming Language :: Python :: 2.7', 45 #'Programming Language :: Python :: 3', 46 #'Programming Language :: Python :: 3.0', 47 #'Programming Language :: Python :: 3.1', 48 #'Programming Language :: Python :: 3.2', 49 #'Programming Language :: Python :: 3.3', 50 'Intended Audience :: Developers', 51 'Intended Audience :: System Administrators', 52 'License :: OSI Approved :: BSD License', 53 'Operating System :: OS Independent', 54 'Topic :: System :: Systems Administration', 55 ] 56 ) 57 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -6,7 +6,7 @@ def get_dependencies(): - deps = [] + deps = ['verlib'] if sys.version_info < (2, 7): deps += ['argparse'] return deps
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -6,7 +6,7 @@\n \n \n def get_dependencies():\n- deps = []\n+ deps = ['verlib']\n if sys.version_info < (2, 7):\n deps += ['argparse']\n return deps\n", "issue": "pip-review should compare version, not test equality\n```\n$ pip-review\npelican==3.0.1 is available (you have 3.1)\n```\n\nI'm locally testing this package, and `pip-review` will just test if current installed version is the same as the latest version in `pip`. Which causes problem as shown above.\n\n", "before_files": [{"content": "\"\"\"\npip-tools keeps your pinned dependencies fresh.\n\"\"\"\nimport sys\nfrom setuptools import setup\n\n\ndef get_dependencies():\n deps = []\n if sys.version_info < (2, 7):\n deps += ['argparse']\n return deps\n\n\nsetup(\n name='pip-tools',\n version='0.2.1',\n url='https://github.com/nvie/pip-tools/',\n license='BSD',\n author='Vincent Driessen',\n author_email='[email protected]',\n description=__doc__.strip('\\n'),\n #packages=[],\n scripts=['bin/pip-review', 'bin/pip-dump'],\n #include_package_data=True,\n zip_safe=False,\n platforms='any',\n install_requires=get_dependencies(),\n classifiers=[\n # As from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n #'Development Status :: 1 - Planning',\n #'Development Status :: 2 - Pre-Alpha',\n #'Development Status :: 3 - Alpha',\n 'Development Status :: 4 - Beta',\n #'Development Status :: 5 - Production/Stable',\n #'Development Status :: 6 - Mature',\n #'Development Status :: 7 - Inactive',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n #'Programming Language :: Python :: 2.3',\n #'Programming Language :: Python :: 2.4',\n #'Programming Language :: Python :: 2.5',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n #'Programming Language :: Python :: 3',\n #'Programming Language :: Python :: 3.0',\n #'Programming Language :: Python :: 3.1',\n #'Programming Language :: Python :: 3.2',\n #'Programming Language :: Python :: 3.3',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Topic :: System :: Systems Administration',\n ]\n)\n", "path": "setup.py"}], "after_files": [{"content": "\"\"\"\npip-tools keeps your pinned dependencies fresh.\n\"\"\"\nimport sys\nfrom setuptools import setup\n\n\ndef get_dependencies():\n deps = ['verlib']\n if sys.version_info < (2, 7):\n deps += ['argparse']\n return deps\n\n\nsetup(\n name='pip-tools',\n version='0.2.1',\n url='https://github.com/nvie/pip-tools/',\n license='BSD',\n author='Vincent Driessen',\n author_email='[email protected]',\n description=__doc__.strip('\\n'),\n #packages=[],\n scripts=['bin/pip-review', 'bin/pip-dump'],\n #include_package_data=True,\n zip_safe=False,\n platforms='any',\n install_requires=get_dependencies(),\n classifiers=[\n # As from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n #'Development Status :: 1 - Planning',\n #'Development Status :: 2 - Pre-Alpha',\n #'Development Status :: 3 - Alpha',\n 'Development Status :: 4 - Beta',\n #'Development Status :: 5 - Production/Stable',\n #'Development Status :: 6 - Mature',\n #'Development Status :: 7 - Inactive',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n #'Programming Language :: Python :: 2.3',\n #'Programming Language :: Python :: 2.4',\n #'Programming Language :: Python :: 2.5',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n #'Programming Language :: Python :: 3',\n #'Programming Language :: Python :: 3.0',\n #'Programming Language :: Python :: 3.1',\n #'Programming Language :: Python :: 3.2',\n #'Programming Language :: Python :: 3.3',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Topic :: System :: Systems Administration',\n ]\n)\n", "path": "setup.py"}]}
877
71
gh_patches_debug_22569
rasdani/github-patches
git_diff
wemake-services__wemake-python-styleguide-1649
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add Exceptions for String Constant Overuse # Rule request I feel it would be good to be able to declare exceptions for WPS226 ## Thesis / Reasoning In a file on a current project, I tend to use `" ".join(iterable)` a lot. It feels unnatural to declare a CONSTANT for that. Also, I don't think it is good in this case to disable WPS226 for the entire file as there could be other common string constants. Alternative: Excluse `" "`, `""`, "`\n"` & friends from this by default. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `wemake_python_styleguide/visitors/ast/complexity/overuses.py` Content: ``` 1 import ast 2 from collections import defaultdict 3 from typing import Callable, ClassVar, DefaultDict, List, Tuple 4 5 from typing_extensions import final 6 7 from wemake_python_styleguide.compat.aliases import FunctionNodes 8 from wemake_python_styleguide.logic import source, walk 9 from wemake_python_styleguide.logic.complexity import overuses 10 from wemake_python_styleguide.types import AnyNodes, AnyText, AnyTextPrimitive 11 from wemake_python_styleguide.violations import complexity 12 from wemake_python_styleguide.visitors import base, decorators 13 14 #: We use these types to store the number of nodes usage in different contexts. 15 _Expressions = DefaultDict[str, List[ast.AST]] 16 _FunctionExpressions = DefaultDict[ast.AST, _Expressions] 17 18 19 @final 20 @decorators.alias('visit_any_string', ( 21 'visit_Str', 22 'visit_Bytes', 23 )) 24 class StringOveruseVisitor(base.BaseNodeVisitor): 25 """Restricts several string usages.""" 26 27 def __init__(self, *args, **kwargs) -> None: 28 """Inits the counter for constants.""" 29 super().__init__(*args, **kwargs) 30 self._string_constants: DefaultDict[ 31 AnyTextPrimitive, int, 32 ] = defaultdict(int) 33 34 def visit_any_string(self, node: AnyText) -> None: 35 """ 36 Restricts to over-use string constants. 37 38 Raises: 39 OverusedStringViolation 40 41 """ 42 self._check_string_constant(node) 43 self.generic_visit(node) 44 45 def _check_string_constant(self, node: AnyText) -> None: 46 if overuses.is_annotation(node): 47 return 48 49 self._string_constants[node.s] += 1 50 51 def _post_visit(self) -> None: 52 for string, usage_count in self._string_constants.items(): 53 if usage_count > self.options.max_string_usages: 54 self.add_violation( 55 complexity.OverusedStringViolation( 56 text=source.render_string(string) or "''", 57 baseline=self.options.max_string_usages, 58 ), 59 ) 60 61 62 @final 63 class ExpressionOveruseVisitor(base.BaseNodeVisitor): 64 """Finds overused expressions.""" 65 66 _expressions: ClassVar[AnyNodes] = ( 67 # We do not treat `ast.Attribute`s as expressions 68 # because they are too widely used. That's a compromise. 69 ast.Assert, 70 ast.BoolOp, 71 ast.BinOp, 72 ast.Call, 73 ast.Compare, 74 ast.Subscript, 75 ast.UnaryOp, 76 ast.Lambda, 77 78 ast.DictComp, 79 ast.Dict, 80 ast.List, 81 ast.ListComp, 82 ast.Tuple, 83 ast.GeneratorExp, 84 ast.Set, 85 ast.SetComp, 86 ) 87 88 _ignore_predicates: Tuple[Callable[[ast.AST], bool], ...] = ( 89 overuses.is_decorator, 90 overuses.is_self, 91 overuses.is_annotation, 92 overuses.is_class_context, 93 overuses.is_super_call, 94 overuses.is_primitive, 95 ) 96 97 _msg: ClassVar[str] = '{0}; used {1}' 98 99 def __init__(self, *args, **kwargs) -> None: 100 """We need to track expression usage in functions and modules.""" 101 super().__init__(*args, **kwargs) 102 self._module_expressions: _Expressions = defaultdict(list) 103 self._function_expressions: _FunctionExpressions = defaultdict( 104 lambda: defaultdict(list), 105 ) 106 107 def visit(self, node: ast.AST) -> None: 108 """ 109 Visits all nodes in a module to find overused values. 110 111 Raises: 112 OverusedExpressionViolation 113 114 """ 115 if isinstance(node, self._expressions): 116 self._add_expression(node) 117 self.generic_visit(node) 118 119 def _add_expression(self, node: ast.AST) -> None: 120 if any(ignore(node) for ignore in self._ignore_predicates): 121 return 122 123 source_code = source.node_to_string(node) 124 self._module_expressions[source_code].append(node) 125 126 maybe_function = walk.get_closest_parent(node, FunctionNodes) 127 if maybe_function is not None: 128 self._function_expressions[maybe_function][source_code].append( 129 node, 130 ) 131 132 def _post_visit(self) -> None: 133 for mod_source, module_nodes in self._module_expressions.items(): 134 if len(module_nodes) > self.options.max_module_expressions: 135 self.add_violation( 136 complexity.OverusedExpressionViolation( 137 module_nodes[0], 138 text=self._msg.format(mod_source, len(module_nodes)), 139 baseline=self.options.max_module_expressions, 140 ), 141 ) 142 143 for function_contexts in self._function_expressions.values(): 144 for src, function_nodes in function_contexts.items(): 145 if len(function_nodes) > self.options.max_function_expressions: 146 self.add_violation( 147 complexity.OverusedExpressionViolation( 148 function_nodes[0], 149 text=self._msg.format(src, len(function_nodes)), 150 baseline=self.options.max_function_expressions, 151 ), 152 ) 153 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/wemake_python_styleguide/visitors/ast/complexity/overuses.py b/wemake_python_styleguide/visitors/ast/complexity/overuses.py --- a/wemake_python_styleguide/visitors/ast/complexity/overuses.py +++ b/wemake_python_styleguide/visitors/ast/complexity/overuses.py @@ -22,7 +22,25 @@ 'visit_Bytes', )) class StringOveruseVisitor(base.BaseNodeVisitor): - """Restricts several string usages.""" + """ + Restricts repeated usage of the same string constant. + + NB: Some short strings are ignored, as their use is very common and + forcing assignment would not make much sense (i.e. newlines or ""). + """ + + _ignored_string_constants = frozenset(( + ' ', + '', + '\n', + '\r\n', + '\t', + b' ', + b'', + b'\n', + b'\r\n', + b'\t', + )) def __init__(self, *args, **kwargs) -> None: """Inits the counter for constants.""" @@ -46,6 +64,11 @@ if overuses.is_annotation(node): return + # Some strings are so common, that it makes no sense to check if + # they are overused. + if node.s in self._ignored_string_constants: + return + self._string_constants[node.s] += 1 def _post_visit(self) -> None:
{"golden_diff": "diff --git a/wemake_python_styleguide/visitors/ast/complexity/overuses.py b/wemake_python_styleguide/visitors/ast/complexity/overuses.py\n--- a/wemake_python_styleguide/visitors/ast/complexity/overuses.py\n+++ b/wemake_python_styleguide/visitors/ast/complexity/overuses.py\n@@ -22,7 +22,25 @@\n 'visit_Bytes',\n ))\n class StringOveruseVisitor(base.BaseNodeVisitor):\n- \"\"\"Restricts several string usages.\"\"\"\n+ \"\"\"\n+ Restricts repeated usage of the same string constant.\n+\n+ NB: Some short strings are ignored, as their use is very common and\n+ forcing assignment would not make much sense (i.e. newlines or \"\").\n+ \"\"\"\n+\n+ _ignored_string_constants = frozenset((\n+ ' ',\n+ '',\n+ '\\n',\n+ '\\r\\n',\n+ '\\t',\n+ b' ',\n+ b'',\n+ b'\\n',\n+ b'\\r\\n',\n+ b'\\t',\n+ ))\n \n def __init__(self, *args, **kwargs) -> None:\n \"\"\"Inits the counter for constants.\"\"\"\n@@ -46,6 +64,11 @@\n if overuses.is_annotation(node):\n return\n \n+ # Some strings are so common, that it makes no sense to check if\n+ # they are overused.\n+ if node.s in self._ignored_string_constants:\n+ return\n+\n self._string_constants[node.s] += 1\n \n def _post_visit(self) -> None:\n", "issue": "Add Exceptions for String Constant Overuse\n# Rule request\r\n\r\nI feel it would be good to be able to declare exceptions for WPS226\r\n\r\n## Thesis / Reasoning\r\n\r\nIn a file on a current project, I tend to use `\" \".join(iterable)` a lot.\r\nIt feels unnatural to declare a CONSTANT for that.\r\nAlso, I don't think it is good in this case to disable WPS226 for the entire file as there could be other common string constants.\r\n\r\nAlternative: Excluse `\" \"`, `\"\"`, \"`\\n\"` & friends from this by default.\n", "before_files": [{"content": "import ast\nfrom collections import defaultdict\nfrom typing import Callable, ClassVar, DefaultDict, List, Tuple\n\nfrom typing_extensions import final\n\nfrom wemake_python_styleguide.compat.aliases import FunctionNodes\nfrom wemake_python_styleguide.logic import source, walk\nfrom wemake_python_styleguide.logic.complexity import overuses\nfrom wemake_python_styleguide.types import AnyNodes, AnyText, AnyTextPrimitive\nfrom wemake_python_styleguide.violations import complexity\nfrom wemake_python_styleguide.visitors import base, decorators\n\n#: We use these types to store the number of nodes usage in different contexts.\n_Expressions = DefaultDict[str, List[ast.AST]]\n_FunctionExpressions = DefaultDict[ast.AST, _Expressions]\n\n\n@final\[email protected]('visit_any_string', (\n 'visit_Str',\n 'visit_Bytes',\n))\nclass StringOveruseVisitor(base.BaseNodeVisitor):\n \"\"\"Restricts several string usages.\"\"\"\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"Inits the counter for constants.\"\"\"\n super().__init__(*args, **kwargs)\n self._string_constants: DefaultDict[\n AnyTextPrimitive, int,\n ] = defaultdict(int)\n\n def visit_any_string(self, node: AnyText) -> None:\n \"\"\"\n Restricts to over-use string constants.\n\n Raises:\n OverusedStringViolation\n\n \"\"\"\n self._check_string_constant(node)\n self.generic_visit(node)\n\n def _check_string_constant(self, node: AnyText) -> None:\n if overuses.is_annotation(node):\n return\n\n self._string_constants[node.s] += 1\n\n def _post_visit(self) -> None:\n for string, usage_count in self._string_constants.items():\n if usage_count > self.options.max_string_usages:\n self.add_violation(\n complexity.OverusedStringViolation(\n text=source.render_string(string) or \"''\",\n baseline=self.options.max_string_usages,\n ),\n )\n\n\n@final\nclass ExpressionOveruseVisitor(base.BaseNodeVisitor):\n \"\"\"Finds overused expressions.\"\"\"\n\n _expressions: ClassVar[AnyNodes] = (\n # We do not treat `ast.Attribute`s as expressions\n # because they are too widely used. That's a compromise.\n ast.Assert,\n ast.BoolOp,\n ast.BinOp,\n ast.Call,\n ast.Compare,\n ast.Subscript,\n ast.UnaryOp,\n ast.Lambda,\n\n ast.DictComp,\n ast.Dict,\n ast.List,\n ast.ListComp,\n ast.Tuple,\n ast.GeneratorExp,\n ast.Set,\n ast.SetComp,\n )\n\n _ignore_predicates: Tuple[Callable[[ast.AST], bool], ...] = (\n overuses.is_decorator,\n overuses.is_self,\n overuses.is_annotation,\n overuses.is_class_context,\n overuses.is_super_call,\n overuses.is_primitive,\n )\n\n _msg: ClassVar[str] = '{0}; used {1}'\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"We need to track expression usage in functions and modules.\"\"\"\n super().__init__(*args, **kwargs)\n self._module_expressions: _Expressions = defaultdict(list)\n self._function_expressions: _FunctionExpressions = defaultdict(\n lambda: defaultdict(list),\n )\n\n def visit(self, node: ast.AST) -> None:\n \"\"\"\n Visits all nodes in a module to find overused values.\n\n Raises:\n OverusedExpressionViolation\n\n \"\"\"\n if isinstance(node, self._expressions):\n self._add_expression(node)\n self.generic_visit(node)\n\n def _add_expression(self, node: ast.AST) -> None:\n if any(ignore(node) for ignore in self._ignore_predicates):\n return\n\n source_code = source.node_to_string(node)\n self._module_expressions[source_code].append(node)\n\n maybe_function = walk.get_closest_parent(node, FunctionNodes)\n if maybe_function is not None:\n self._function_expressions[maybe_function][source_code].append(\n node,\n )\n\n def _post_visit(self) -> None:\n for mod_source, module_nodes in self._module_expressions.items():\n if len(module_nodes) > self.options.max_module_expressions:\n self.add_violation(\n complexity.OverusedExpressionViolation(\n module_nodes[0],\n text=self._msg.format(mod_source, len(module_nodes)),\n baseline=self.options.max_module_expressions,\n ),\n )\n\n for function_contexts in self._function_expressions.values():\n for src, function_nodes in function_contexts.items():\n if len(function_nodes) > self.options.max_function_expressions:\n self.add_violation(\n complexity.OverusedExpressionViolation(\n function_nodes[0],\n text=self._msg.format(src, len(function_nodes)),\n baseline=self.options.max_function_expressions,\n ),\n )\n", "path": "wemake_python_styleguide/visitors/ast/complexity/overuses.py"}], "after_files": [{"content": "import ast\nfrom collections import defaultdict\nfrom typing import Callable, ClassVar, DefaultDict, List, Tuple\n\nfrom typing_extensions import final\n\nfrom wemake_python_styleguide.compat.aliases import FunctionNodes\nfrom wemake_python_styleguide.logic import source, walk\nfrom wemake_python_styleguide.logic.complexity import overuses\nfrom wemake_python_styleguide.types import AnyNodes, AnyText, AnyTextPrimitive\nfrom wemake_python_styleguide.violations import complexity\nfrom wemake_python_styleguide.visitors import base, decorators\n\n#: We use these types to store the number of nodes usage in different contexts.\n_Expressions = DefaultDict[str, List[ast.AST]]\n_FunctionExpressions = DefaultDict[ast.AST, _Expressions]\n\n\n@final\[email protected]('visit_any_string', (\n 'visit_Str',\n 'visit_Bytes',\n))\nclass StringOveruseVisitor(base.BaseNodeVisitor):\n \"\"\"\n Restricts repeated usage of the same string constant.\n\n NB: Some short strings are ignored, as their use is very common and\n forcing assignment would not make much sense (i.e. newlines or \"\").\n \"\"\"\n\n _ignored_string_constants = frozenset((\n ' ',\n '',\n '\\n',\n '\\r\\n',\n '\\t',\n b' ',\n b'',\n b'\\n',\n b'\\r\\n',\n b'\\t',\n ))\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"Inits the counter for constants.\"\"\"\n super().__init__(*args, **kwargs)\n self._string_constants: DefaultDict[\n AnyTextPrimitive, int,\n ] = defaultdict(int)\n\n def visit_any_string(self, node: AnyText) -> None:\n \"\"\"\n Restricts to over-use string constants.\n\n Raises:\n OverusedStringViolation\n\n \"\"\"\n self._check_string_constant(node)\n self.generic_visit(node)\n\n def _check_string_constant(self, node: AnyText) -> None:\n if overuses.is_annotation(node):\n return\n\n # Some strings are so common, that it makes no sense to check if\n # they are overused.\n if node.s in self._ignored_string_constants:\n return\n\n self._string_constants[node.s] += 1\n\n def _post_visit(self) -> None:\n for string, usage_count in self._string_constants.items():\n if usage_count > self.options.max_string_usages:\n self.add_violation(\n complexity.OverusedStringViolation(\n text=source.render_string(string) or \"''\",\n baseline=self.options.max_string_usages,\n ),\n )\n\n\n@final\nclass ExpressionOveruseVisitor(base.BaseNodeVisitor):\n \"\"\"Finds overused expressions.\"\"\"\n\n _expressions: ClassVar[AnyNodes] = (\n # We do not treat `ast.Attribute`s as expressions\n # because they are too widely used. That's a compromise.\n ast.Assert,\n ast.BoolOp,\n ast.BinOp,\n ast.Call,\n ast.Compare,\n ast.Subscript,\n ast.UnaryOp,\n ast.Lambda,\n\n ast.DictComp,\n ast.Dict,\n ast.List,\n ast.ListComp,\n ast.Tuple,\n ast.GeneratorExp,\n ast.Set,\n ast.SetComp,\n )\n\n _ignore_predicates: Tuple[Callable[[ast.AST], bool], ...] = (\n overuses.is_decorator,\n overuses.is_self,\n overuses.is_annotation,\n overuses.is_class_context,\n overuses.is_super_call,\n overuses.is_primitive,\n )\n\n _msg: ClassVar[str] = '{0}; used {1}'\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"We need to track expression usage in functions and modules.\"\"\"\n super().__init__(*args, **kwargs)\n self._module_expressions: _Expressions = defaultdict(list)\n self._function_expressions: _FunctionExpressions = defaultdict(\n lambda: defaultdict(list),\n )\n\n def visit(self, node: ast.AST) -> None:\n \"\"\"\n Visits all nodes in a module to find overused values.\n\n Raises:\n OverusedExpressionViolation\n\n \"\"\"\n if isinstance(node, self._expressions):\n self._add_expression(node)\n self.generic_visit(node)\n\n def _add_expression(self, node: ast.AST) -> None:\n if any(ignore(node) for ignore in self._ignore_predicates):\n return\n\n source_code = source.node_to_string(node)\n self._module_expressions[source_code].append(node)\n\n maybe_function = walk.get_closest_parent(node, FunctionNodes)\n if maybe_function is not None:\n self._function_expressions[maybe_function][source_code].append(\n node,\n )\n\n def _post_visit(self) -> None:\n for mod_source, module_nodes in self._module_expressions.items():\n if len(module_nodes) > self.options.max_module_expressions:\n self.add_violation(\n complexity.OverusedExpressionViolation(\n module_nodes[0],\n text=self._msg.format(mod_source, len(module_nodes)),\n baseline=self.options.max_module_expressions,\n ),\n )\n\n for function_contexts in self._function_expressions.values():\n for src, function_nodes in function_contexts.items():\n if len(function_nodes) > self.options.max_function_expressions:\n self.add_violation(\n complexity.OverusedExpressionViolation(\n function_nodes[0],\n text=self._msg.format(src, len(function_nodes)),\n baseline=self.options.max_function_expressions,\n ),\n )\n", "path": "wemake_python_styleguide/visitors/ast/complexity/overuses.py"}]}
1,818
361
gh_patches_debug_9022
rasdani/github-patches
git_diff
python-trio__trio-1423
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Potential memory leak on windows I was recently running some scripts overnight that failed due to a MemoryError, but was unable to find anything obvious that would be leaking memory in my scripts. During my investigation process, I have found that the following script increases memory usage by ~1 MB/s while running. If the `await trio.sleep(0)` is replaced by `pass` the process runs at a constant 9.7 MB of memory usage. ```python import trio async def main(): while True: await trio.sleep(0) if __name__ == '__main__': trio.run(main) ``` ## Version Info Python 3.8.1 Trio 0.13.0 Windows 10 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 from setuptools import setup, find_packages 2 3 exec(open("trio/_version.py", encoding="utf-8").read()) 4 5 LONG_DESC = """\ 6 .. image:: https://cdn.rawgit.com/python-trio/trio/9b0bec646a31e0d0f67b8b6ecc6939726faf3e17/logo/logo-with-background.svg 7 :width: 200px 8 :align: right 9 10 The Trio project's goal is to produce a production-quality, `permissively 11 licensed <https://github.com/python-trio/trio/blob/master/LICENSE>`__, 12 async/await-native I/O library for Python. Like all async libraries, 13 its main purpose is to help you write programs that do **multiple 14 things at the same time** with **parallelized I/O**. A web spider that 15 wants to fetch lots of pages in parallel, a web server that needs to 16 juggle lots of downloads and websocket connections at the same time, a 17 process supervisor monitoring multiple subprocesses... that sort of 18 thing. Compared to other libraries, Trio attempts to distinguish 19 itself with an obsessive focus on **usability** and 20 **correctness**. Concurrency is complicated; we try to make it *easy* 21 to get things *right*. 22 23 Trio was built from the ground up to take advantage of the `latest 24 Python features <https://www.python.org/dev/peps/pep-0492/>`__, and 25 draws inspiration from `many sources 26 <https://github.com/python-trio/trio/wiki/Reading-list>`__, in 27 particular Dave Beazley's `Curio <https://curio.readthedocs.io/>`__. 28 The resulting design is radically simpler than older competitors like 29 `asyncio <https://docs.python.org/3/library/asyncio.html>`__ and 30 `Twisted <https://twistedmatrix.com/>`__, yet just as capable. Trio is 31 the Python I/O library I always wanted; I find it makes building 32 I/O-oriented programs easier, less error-prone, and just plain more 33 fun. `Perhaps you'll find the same 34 <https://github.com/python-trio/trio/wiki/Testimonials>`__. 35 36 This project is young and still somewhat experimental: the overall 37 design is solid and the existing features are fully tested and 38 documented, but you may encounter missing functionality or rough 39 edges. We *do* encourage you do use it, but you should `read and 40 subscribe to issue #1 41 <https://github.com/python-trio/trio/issues/1>`__ to get warning and a 42 chance to give feedback about any compatibility-breaking changes. 43 44 Vital statistics: 45 46 * Supported environments: Linux, macOS, or Windows running some kind of Python 47 3.5-or-better (either CPython or PyPy3 is fine). \\*BSD and illumos likely 48 work too, but are not tested. 49 50 * Install: ``python3 -m pip install -U trio`` (or on Windows, maybe 51 ``py -3 -m pip install -U trio``). No compiler needed. 52 53 * Tutorial and reference manual: https://trio.readthedocs.io 54 55 * Bug tracker and source code: https://github.com/python-trio/trio 56 57 * Real-time chat: https://gitter.im/python-trio/general 58 59 * Discussion forum: https://trio.discourse.group 60 61 * License: MIT or Apache 2, your choice 62 63 * Contributor guide: https://trio.readthedocs.io/en/latest/contributing.html 64 65 * Code of conduct: Contributors are requested to follow our `code of 66 conduct 67 <https://trio.readthedocs.io/en/latest/code-of-conduct.html>`_ 68 in all project spaces. 69 """ 70 71 setup( 72 name="trio", 73 version=__version__, 74 description="A friendly Python library for async concurrency and I/O", 75 long_description=LONG_DESC, 76 author="Nathaniel J. Smith", 77 author_email="[email protected]", 78 url="https://github.com/python-trio/trio", 79 license="MIT -or- Apache License 2.0", 80 packages=find_packages(), 81 install_requires=[ 82 "attrs >= 19.2.0", # for eq 83 "sortedcontainers", 84 "async_generator >= 1.9", 85 "idna", 86 "outcome", 87 "sniffio", 88 # cffi 1.12 adds from_buffer(require_writable=True) and ffi.release() 89 "cffi>=1.12; os_name == 'nt'", # "cffi is required on windows" 90 "contextvars>=2.1; python_version < '3.7'" 91 ], 92 # This means, just install *everything* you see under trio/, even if it 93 # doesn't look like a source file, so long as it appears in MANIFEST.in: 94 include_package_data=True, 95 python_requires=">=3.5", 96 keywords=["async", "io", "networking", "trio"], 97 classifiers=[ 98 "Development Status :: 3 - Alpha", 99 "Intended Audience :: Developers", 100 "License :: OSI Approved :: MIT License", 101 "License :: OSI Approved :: Apache Software License", 102 "Operating System :: POSIX :: Linux", 103 "Operating System :: MacOS :: MacOS X", 104 "Operating System :: POSIX :: BSD", 105 "Operating System :: Microsoft :: Windows", 106 "Programming Language :: Python :: Implementation :: CPython", 107 "Programming Language :: Python :: Implementation :: PyPy", 108 "Programming Language :: Python :: 3 :: Only", 109 "Programming Language :: Python :: 3.5", 110 "Programming Language :: Python :: 3.6", 111 "Topic :: System :: Networking", 112 "Framework :: Trio", 113 ], 114 ) 115 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -86,7 +86,8 @@ "outcome", "sniffio", # cffi 1.12 adds from_buffer(require_writable=True) and ffi.release() - "cffi>=1.12; os_name == 'nt'", # "cffi is required on windows" + # cffi 1.14 fixes memory leak inside ffi.getwinerror() + "cffi>=1.14; os_name == 'nt'", # "cffi is required on windows" "contextvars>=2.1; python_version < '3.7'" ], # This means, just install *everything* you see under trio/, even if it
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -86,7 +86,8 @@\n \"outcome\",\n \"sniffio\",\n # cffi 1.12 adds from_buffer(require_writable=True) and ffi.release()\n- \"cffi>=1.12; os_name == 'nt'\", # \"cffi is required on windows\"\n+ # cffi 1.14 fixes memory leak inside ffi.getwinerror()\n+ \"cffi>=1.14; os_name == 'nt'\", # \"cffi is required on windows\"\n \"contextvars>=2.1; python_version < '3.7'\"\n ],\n # This means, just install *everything* you see under trio/, even if it\n", "issue": "Potential memory leak on windows\nI was recently running some scripts overnight that failed due to a MemoryError, but was unable to find anything obvious that would be leaking memory in my scripts.\r\n\r\nDuring my investigation process, I have found that the following script increases memory usage by ~1 MB/s while running. If the `await trio.sleep(0)` is replaced by `pass` the process runs at a constant 9.7 MB of memory usage.\r\n\r\n```python\r\nimport trio\r\n\r\nasync def main():\r\n while True:\r\n await trio.sleep(0)\r\n\r\nif __name__ == '__main__':\r\n trio.run(main)\r\n```\r\n\r\n## Version Info\r\n\r\nPython 3.8.1\r\nTrio 0.13.0\r\nWindows 10\r\n\n", "before_files": [{"content": "from setuptools import setup, find_packages\n\nexec(open(\"trio/_version.py\", encoding=\"utf-8\").read())\n\nLONG_DESC = \"\"\"\\\n.. image:: https://cdn.rawgit.com/python-trio/trio/9b0bec646a31e0d0f67b8b6ecc6939726faf3e17/logo/logo-with-background.svg\n :width: 200px\n :align: right\n\nThe Trio project's goal is to produce a production-quality, `permissively\nlicensed <https://github.com/python-trio/trio/blob/master/LICENSE>`__,\nasync/await-native I/O library for Python. Like all async libraries,\nits main purpose is to help you write programs that do **multiple\nthings at the same time** with **parallelized I/O**. A web spider that\nwants to fetch lots of pages in parallel, a web server that needs to\njuggle lots of downloads and websocket connections at the same time, a\nprocess supervisor monitoring multiple subprocesses... that sort of\nthing. Compared to other libraries, Trio attempts to distinguish\nitself with an obsessive focus on **usability** and\n**correctness**. Concurrency is complicated; we try to make it *easy*\nto get things *right*.\n\nTrio was built from the ground up to take advantage of the `latest\nPython features <https://www.python.org/dev/peps/pep-0492/>`__, and\ndraws inspiration from `many sources\n<https://github.com/python-trio/trio/wiki/Reading-list>`__, in\nparticular Dave Beazley's `Curio <https://curio.readthedocs.io/>`__.\nThe resulting design is radically simpler than older competitors like\n`asyncio <https://docs.python.org/3/library/asyncio.html>`__ and\n`Twisted <https://twistedmatrix.com/>`__, yet just as capable. Trio is\nthe Python I/O library I always wanted; I find it makes building\nI/O-oriented programs easier, less error-prone, and just plain more\nfun. `Perhaps you'll find the same\n<https://github.com/python-trio/trio/wiki/Testimonials>`__.\n\nThis project is young and still somewhat experimental: the overall\ndesign is solid and the existing features are fully tested and\ndocumented, but you may encounter missing functionality or rough\nedges. We *do* encourage you do use it, but you should `read and\nsubscribe to issue #1\n<https://github.com/python-trio/trio/issues/1>`__ to get warning and a\nchance to give feedback about any compatibility-breaking changes.\n\nVital statistics:\n\n* Supported environments: Linux, macOS, or Windows running some kind of Python\n 3.5-or-better (either CPython or PyPy3 is fine). \\\\*BSD and illumos likely\n work too, but are not tested.\n\n* Install: ``python3 -m pip install -U trio`` (or on Windows, maybe\n ``py -3 -m pip install -U trio``). No compiler needed.\n\n* Tutorial and reference manual: https://trio.readthedocs.io\n\n* Bug tracker and source code: https://github.com/python-trio/trio\n\n* Real-time chat: https://gitter.im/python-trio/general\n\n* Discussion forum: https://trio.discourse.group\n\n* License: MIT or Apache 2, your choice\n\n* Contributor guide: https://trio.readthedocs.io/en/latest/contributing.html\n\n* Code of conduct: Contributors are requested to follow our `code of\n conduct\n <https://trio.readthedocs.io/en/latest/code-of-conduct.html>`_\n in all project spaces.\n\"\"\"\n\nsetup(\n name=\"trio\",\n version=__version__,\n description=\"A friendly Python library for async concurrency and I/O\",\n long_description=LONG_DESC,\n author=\"Nathaniel J. Smith\",\n author_email=\"[email protected]\",\n url=\"https://github.com/python-trio/trio\",\n license=\"MIT -or- Apache License 2.0\",\n packages=find_packages(),\n install_requires=[\n \"attrs >= 19.2.0\", # for eq\n \"sortedcontainers\",\n \"async_generator >= 1.9\",\n \"idna\",\n \"outcome\",\n \"sniffio\",\n # cffi 1.12 adds from_buffer(require_writable=True) and ffi.release()\n \"cffi>=1.12; os_name == 'nt'\", # \"cffi is required on windows\"\n \"contextvars>=2.1; python_version < '3.7'\"\n ],\n # This means, just install *everything* you see under trio/, even if it\n # doesn't look like a source file, so long as it appears in MANIFEST.in:\n include_package_data=True,\n python_requires=\">=3.5\",\n keywords=[\"async\", \"io\", \"networking\", \"trio\"],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Topic :: System :: Networking\",\n \"Framework :: Trio\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup, find_packages\n\nexec(open(\"trio/_version.py\", encoding=\"utf-8\").read())\n\nLONG_DESC = \"\"\"\\\n.. image:: https://cdn.rawgit.com/python-trio/trio/9b0bec646a31e0d0f67b8b6ecc6939726faf3e17/logo/logo-with-background.svg\n :width: 200px\n :align: right\n\nThe Trio project's goal is to produce a production-quality, `permissively\nlicensed <https://github.com/python-trio/trio/blob/master/LICENSE>`__,\nasync/await-native I/O library for Python. Like all async libraries,\nits main purpose is to help you write programs that do **multiple\nthings at the same time** with **parallelized I/O**. A web spider that\nwants to fetch lots of pages in parallel, a web server that needs to\njuggle lots of downloads and websocket connections at the same time, a\nprocess supervisor monitoring multiple subprocesses... that sort of\nthing. Compared to other libraries, Trio attempts to distinguish\nitself with an obsessive focus on **usability** and\n**correctness**. Concurrency is complicated; we try to make it *easy*\nto get things *right*.\n\nTrio was built from the ground up to take advantage of the `latest\nPython features <https://www.python.org/dev/peps/pep-0492/>`__, and\ndraws inspiration from `many sources\n<https://github.com/python-trio/trio/wiki/Reading-list>`__, in\nparticular Dave Beazley's `Curio <https://curio.readthedocs.io/>`__.\nThe resulting design is radically simpler than older competitors like\n`asyncio <https://docs.python.org/3/library/asyncio.html>`__ and\n`Twisted <https://twistedmatrix.com/>`__, yet just as capable. Trio is\nthe Python I/O library I always wanted; I find it makes building\nI/O-oriented programs easier, less error-prone, and just plain more\nfun. `Perhaps you'll find the same\n<https://github.com/python-trio/trio/wiki/Testimonials>`__.\n\nThis project is young and still somewhat experimental: the overall\ndesign is solid and the existing features are fully tested and\ndocumented, but you may encounter missing functionality or rough\nedges. We *do* encourage you do use it, but you should `read and\nsubscribe to issue #1\n<https://github.com/python-trio/trio/issues/1>`__ to get warning and a\nchance to give feedback about any compatibility-breaking changes.\n\nVital statistics:\n\n* Supported environments: Linux, macOS, or Windows running some kind of Python\n 3.5-or-better (either CPython or PyPy3 is fine). \\\\*BSD and illumos likely\n work too, but are not tested.\n\n* Install: ``python3 -m pip install -U trio`` (or on Windows, maybe\n ``py -3 -m pip install -U trio``). No compiler needed.\n\n* Tutorial and reference manual: https://trio.readthedocs.io\n\n* Bug tracker and source code: https://github.com/python-trio/trio\n\n* Real-time chat: https://gitter.im/python-trio/general\n\n* Discussion forum: https://trio.discourse.group\n\n* License: MIT or Apache 2, your choice\n\n* Contributor guide: https://trio.readthedocs.io/en/latest/contributing.html\n\n* Code of conduct: Contributors are requested to follow our `code of\n conduct\n <https://trio.readthedocs.io/en/latest/code-of-conduct.html>`_\n in all project spaces.\n\"\"\"\n\nsetup(\n name=\"trio\",\n version=__version__,\n description=\"A friendly Python library for async concurrency and I/O\",\n long_description=LONG_DESC,\n author=\"Nathaniel J. Smith\",\n author_email=\"[email protected]\",\n url=\"https://github.com/python-trio/trio\",\n license=\"MIT -or- Apache License 2.0\",\n packages=find_packages(),\n install_requires=[\n \"attrs >= 19.2.0\", # for eq\n \"sortedcontainers\",\n \"async_generator >= 1.9\",\n \"idna\",\n \"outcome\",\n \"sniffio\",\n # cffi 1.12 adds from_buffer(require_writable=True) and ffi.release()\n # cffi 1.14 fixes memory leak inside ffi.getwinerror()\n \"cffi>=1.14; os_name == 'nt'\", # \"cffi is required on windows\"\n \"contextvars>=2.1; python_version < '3.7'\"\n ],\n # This means, just install *everything* you see under trio/, even if it\n # doesn't look like a source file, so long as it appears in MANIFEST.in:\n include_package_data=True,\n python_requires=\">=3.5\",\n keywords=[\"async\", \"io\", \"networking\", \"trio\"],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Topic :: System :: Networking\",\n \"Framework :: Trio\",\n ],\n)\n", "path": "setup.py"}]}
1,881
174
gh_patches_debug_4088
rasdani/github-patches
git_diff
plotly__plotly.py-2015
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Error when using two image scrappers together ### Introduction to the problem I am trying to move the current Jupyter Notebook examples of the [poliastro project](https://github.com/poliastro/poliastro) to an [Sphinx-Gallery](https://github.com/sphinx-gallery/sphinx-gallery) set. Since we are making use of **plotly figures** we need to **capture them** as output figures and therefore, make use of the **plotly image scrapper**. We also need to capture `matplotlib` figures, so this image scrapper must be also added to the `conf.py` file. #### How to reproduce this issue :beetle: If you download the [official example repository](https://github.com/plotly/plotly-sphinx-gallery) from @emmanuelle for achieving this task and you add the following [simple Python file](https://gist.github.com/jorgepiloto/db807a7ee3a0bcfbaea38fc9cd7ac95e) in the `examples/` directory for plotting a sinusoidal wave with `matplotlib` and error is raised: ```bash generating gallery... generating gallery for auto_examples... [ 25%] plot_sin.py Exception occurred: File "/home/lobo/anaconda3/envs/poliastro/lib/python3.7/site-packages/plotly/io/_sg_scraper.py", line 91, in figure_rst figure_name = figure_paths[0] IndexError: list index out of range ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `packages/python/plotly/plotly/io/_sg_scraper.py` Content: ``` 1 # This module defines an image scraper for sphinx-gallery 2 # https://sphinx-gallery.github.io/ 3 # which can be used by projects using plotly in their documentation. 4 import inspect, os 5 6 import plotly 7 from glob import glob 8 import shutil 9 10 plotly.io.renderers.default = "sphinx_gallery" 11 12 13 def plotly_sg_scraper(block, block_vars, gallery_conf, **kwargs): 14 """Scrape Plotly figures for galleries of examples using 15 sphinx-gallery. 16 17 Examples should use ``plotly.io.show()`` to display the figure with 18 the custom sphinx_gallery renderer. 19 20 Since the sphinx_gallery renderer generates both html and static png 21 files, we simply crawl these files and give them the appropriate path. 22 23 Parameters 24 ---------- 25 block : tuple 26 A tuple containing the (label, content, line_number) of the block. 27 block_vars : dict 28 Dict of block variables. 29 gallery_conf : dict 30 Contains the configuration of Sphinx-Gallery 31 **kwargs : dict 32 Additional keyword arguments to pass to 33 :meth:`~matplotlib.figure.Figure.savefig`, e.g. ``format='svg'``. 34 The ``format`` kwarg in particular is used to set the file extension 35 of the output file (currently only 'png' and 'svg' are supported). 36 37 Returns 38 ------- 39 rst : str 40 The ReSTructuredText that will be rendered to HTML containing 41 the images. 42 43 Notes 44 ----- 45 Add this function to the image scrapers 46 """ 47 examples_dirs = gallery_conf["examples_dirs"] 48 if isinstance(examples_dirs, (list, tuple)): 49 examples_dirs = examples_dirs[0] 50 pngs = sorted(glob(os.path.join(examples_dirs, "*.png"))) 51 htmls = sorted(glob(os.path.join(examples_dirs, "*.html"))) 52 image_path_iterator = block_vars["image_path_iterator"] 53 image_names = list() 54 seen = set() 55 for html, png in zip(htmls, pngs): 56 if png not in seen: 57 seen |= set(png) 58 this_image_path_png = next(image_path_iterator) 59 this_image_path_html = os.path.splitext(this_image_path_png)[0] + ".html" 60 image_names.append(this_image_path_html) 61 shutil.move(png, this_image_path_png) 62 shutil.move(html, this_image_path_html) 63 # Use the `figure_rst` helper function to generate rST for image files 64 return figure_rst(image_names, gallery_conf["src_dir"]) 65 66 67 def figure_rst(figure_list, sources_dir): 68 """Generate RST for a list of PNG filenames. 69 70 Depending on whether we have one or more figures, we use a 71 single rst call to 'image' or a horizontal list. 72 73 Parameters 74 ---------- 75 figure_list : list 76 List of strings of the figures' absolute paths. 77 sources_dir : str 78 absolute path of Sphinx documentation sources 79 80 Returns 81 ------- 82 images_rst : str 83 rst code to embed the images in the document 84 """ 85 86 figure_paths = [ 87 os.path.relpath(figure_path, sources_dir).replace(os.sep, "/").lstrip("/") 88 for figure_path in figure_list 89 ] 90 images_rst = "" 91 figure_name = figure_paths[0] 92 ext = os.path.splitext(figure_name)[1] 93 figure_path = os.path.join("images", os.path.basename(figure_name)) 94 images_rst = SINGLE_HTML % figure_path 95 return images_rst 96 97 98 SINGLE_HTML = """ 99 .. raw:: html 100 :file: %s 101 """ 102 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/packages/python/plotly/plotly/io/_sg_scraper.py b/packages/python/plotly/plotly/io/_sg_scraper.py --- a/packages/python/plotly/plotly/io/_sg_scraper.py +++ b/packages/python/plotly/plotly/io/_sg_scraper.py @@ -88,6 +88,8 @@ for figure_path in figure_list ] images_rst = "" + if not figure_paths: + return images_rst figure_name = figure_paths[0] ext = os.path.splitext(figure_name)[1] figure_path = os.path.join("images", os.path.basename(figure_name))
{"golden_diff": "diff --git a/packages/python/plotly/plotly/io/_sg_scraper.py b/packages/python/plotly/plotly/io/_sg_scraper.py\n--- a/packages/python/plotly/plotly/io/_sg_scraper.py\n+++ b/packages/python/plotly/plotly/io/_sg_scraper.py\n@@ -88,6 +88,8 @@\n for figure_path in figure_list\n ]\n images_rst = \"\"\n+ if not figure_paths:\n+ return images_rst\n figure_name = figure_paths[0]\n ext = os.path.splitext(figure_name)[1]\n figure_path = os.path.join(\"images\", os.path.basename(figure_name))\n", "issue": "Error when using two image scrappers together\n### Introduction to the problem \r\nI am trying to move the current Jupyter Notebook examples of the [poliastro project](https://github.com/poliastro/poliastro) to an [Sphinx-Gallery](https://github.com/sphinx-gallery/sphinx-gallery) set. Since we are making use of **plotly figures** we need to **capture them** as output figures and therefore, make use of the **plotly image scrapper**. We also need to capture `matplotlib` figures, so this image scrapper must be also added to the `conf.py` file.\r\n\r\n#### How to reproduce this issue :beetle: \r\n\r\nIf you download the [official example repository](https://github.com/plotly/plotly-sphinx-gallery) from @emmanuelle for achieving this task and you add the following [simple Python file](https://gist.github.com/jorgepiloto/db807a7ee3a0bcfbaea38fc9cd7ac95e) in the `examples/` directory for plotting a sinusoidal wave with `matplotlib` and error is raised:\r\n\r\n```bash\r\ngenerating gallery...\r\ngenerating gallery for auto_examples... [ 25%] plot_sin.py \r\nException occurred:\r\n File \"/home/lobo/anaconda3/envs/poliastro/lib/python3.7/site-packages/plotly/io/_sg_scraper.py\", line 91, in figure_rst\r\n figure_name = figure_paths[0]\r\nIndexError: list index out of range\r\n```\n", "before_files": [{"content": "# This module defines an image scraper for sphinx-gallery\n# https://sphinx-gallery.github.io/\n# which can be used by projects using plotly in their documentation.\nimport inspect, os\n\nimport plotly\nfrom glob import glob\nimport shutil\n\nplotly.io.renderers.default = \"sphinx_gallery\"\n\n\ndef plotly_sg_scraper(block, block_vars, gallery_conf, **kwargs):\n \"\"\"Scrape Plotly figures for galleries of examples using\n sphinx-gallery.\n\n Examples should use ``plotly.io.show()`` to display the figure with\n the custom sphinx_gallery renderer.\n\n Since the sphinx_gallery renderer generates both html and static png\n files, we simply crawl these files and give them the appropriate path.\n\n Parameters\n ----------\n block : tuple\n A tuple containing the (label, content, line_number) of the block.\n block_vars : dict\n Dict of block variables.\n gallery_conf : dict\n Contains the configuration of Sphinx-Gallery\n **kwargs : dict\n Additional keyword arguments to pass to\n :meth:`~matplotlib.figure.Figure.savefig`, e.g. ``format='svg'``.\n The ``format`` kwarg in particular is used to set the file extension\n of the output file (currently only 'png' and 'svg' are supported).\n\n Returns\n -------\n rst : str\n The ReSTructuredText that will be rendered to HTML containing\n the images.\n\n Notes\n -----\n Add this function to the image scrapers \n \"\"\"\n examples_dirs = gallery_conf[\"examples_dirs\"]\n if isinstance(examples_dirs, (list, tuple)):\n examples_dirs = examples_dirs[0]\n pngs = sorted(glob(os.path.join(examples_dirs, \"*.png\")))\n htmls = sorted(glob(os.path.join(examples_dirs, \"*.html\")))\n image_path_iterator = block_vars[\"image_path_iterator\"]\n image_names = list()\n seen = set()\n for html, png in zip(htmls, pngs):\n if png not in seen:\n seen |= set(png)\n this_image_path_png = next(image_path_iterator)\n this_image_path_html = os.path.splitext(this_image_path_png)[0] + \".html\"\n image_names.append(this_image_path_html)\n shutil.move(png, this_image_path_png)\n shutil.move(html, this_image_path_html)\n # Use the `figure_rst` helper function to generate rST for image files\n return figure_rst(image_names, gallery_conf[\"src_dir\"])\n\n\ndef figure_rst(figure_list, sources_dir):\n \"\"\"Generate RST for a list of PNG filenames.\n\n Depending on whether we have one or more figures, we use a\n single rst call to 'image' or a horizontal list.\n\n Parameters\n ----------\n figure_list : list\n List of strings of the figures' absolute paths.\n sources_dir : str\n absolute path of Sphinx documentation sources\n\n Returns\n -------\n images_rst : str\n rst code to embed the images in the document\n \"\"\"\n\n figure_paths = [\n os.path.relpath(figure_path, sources_dir).replace(os.sep, \"/\").lstrip(\"/\")\n for figure_path in figure_list\n ]\n images_rst = \"\"\n figure_name = figure_paths[0]\n ext = os.path.splitext(figure_name)[1]\n figure_path = os.path.join(\"images\", os.path.basename(figure_name))\n images_rst = SINGLE_HTML % figure_path\n return images_rst\n\n\nSINGLE_HTML = \"\"\"\n.. raw:: html\n :file: %s\n\"\"\"\n", "path": "packages/python/plotly/plotly/io/_sg_scraper.py"}], "after_files": [{"content": "# This module defines an image scraper for sphinx-gallery\n# https://sphinx-gallery.github.io/\n# which can be used by projects using plotly in their documentation.\nimport inspect, os\n\nimport plotly\nfrom glob import glob\nimport shutil\n\nplotly.io.renderers.default = \"sphinx_gallery\"\n\n\ndef plotly_sg_scraper(block, block_vars, gallery_conf, **kwargs):\n \"\"\"Scrape Plotly figures for galleries of examples using\n sphinx-gallery.\n\n Examples should use ``plotly.io.show()`` to display the figure with\n the custom sphinx_gallery renderer.\n\n Since the sphinx_gallery renderer generates both html and static png\n files, we simply crawl these files and give them the appropriate path.\n\n Parameters\n ----------\n block : tuple\n A tuple containing the (label, content, line_number) of the block.\n block_vars : dict\n Dict of block variables.\n gallery_conf : dict\n Contains the configuration of Sphinx-Gallery\n **kwargs : dict\n Additional keyword arguments to pass to\n :meth:`~matplotlib.figure.Figure.savefig`, e.g. ``format='svg'``.\n The ``format`` kwarg in particular is used to set the file extension\n of the output file (currently only 'png' and 'svg' are supported).\n\n Returns\n -------\n rst : str\n The ReSTructuredText that will be rendered to HTML containing\n the images.\n\n Notes\n -----\n Add this function to the image scrapers \n \"\"\"\n examples_dirs = gallery_conf[\"examples_dirs\"]\n if isinstance(examples_dirs, (list, tuple)):\n examples_dirs = examples_dirs[0]\n pngs = sorted(glob(os.path.join(examples_dirs, \"*.png\")))\n htmls = sorted(glob(os.path.join(examples_dirs, \"*.html\")))\n image_path_iterator = block_vars[\"image_path_iterator\"]\n image_names = list()\n seen = set()\n for html, png in zip(htmls, pngs):\n if png not in seen:\n seen |= set(png)\n this_image_path_png = next(image_path_iterator)\n this_image_path_html = os.path.splitext(this_image_path_png)[0] + \".html\"\n image_names.append(this_image_path_html)\n shutil.move(png, this_image_path_png)\n shutil.move(html, this_image_path_html)\n # Use the `figure_rst` helper function to generate rST for image files\n return figure_rst(image_names, gallery_conf[\"src_dir\"])\n\n\ndef figure_rst(figure_list, sources_dir):\n \"\"\"Generate RST for a list of PNG filenames.\n\n Depending on whether we have one or more figures, we use a\n single rst call to 'image' or a horizontal list.\n\n Parameters\n ----------\n figure_list : list\n List of strings of the figures' absolute paths.\n sources_dir : str\n absolute path of Sphinx documentation sources\n\n Returns\n -------\n images_rst : str\n rst code to embed the images in the document\n \"\"\"\n\n figure_paths = [\n os.path.relpath(figure_path, sources_dir).replace(os.sep, \"/\").lstrip(\"/\")\n for figure_path in figure_list\n ]\n images_rst = \"\"\n if not figure_paths:\n return images_rst\n figure_name = figure_paths[0]\n ext = os.path.splitext(figure_name)[1]\n figure_path = os.path.join(\"images\", os.path.basename(figure_name))\n images_rst = SINGLE_HTML % figure_path\n return images_rst\n\n\nSINGLE_HTML = \"\"\"\n.. raw:: html\n :file: %s\n\"\"\"\n", "path": "packages/python/plotly/plotly/io/_sg_scraper.py"}]}
1,572
148
gh_patches_debug_3006
rasdani/github-patches
git_diff
googleapis__google-api-python-client-1629
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Python 3.10 compatibility issue #### Environment details - OS type and version: Windows 10 - Python version: `python --version` 3.10.1 - pip version: `pip --version` 21.2.4 - `google-api-python-client` version: `pip show google-api-python-client` - 2.33.0 uritemplate package 3.0.0 is not compatible with python 3.10. Need to update the requirements. Partial Stack Trace service = build('gmail', 'v1', credentials=creds) File "C:\JA\Envs\GIC\lib\site-packages\googleapiclient\_helpers.py", line 130, in positional_wrapper return wrapped(*args, **kwargs) File "C:\JA\Envs\GIC\lib\site-packages\googleapiclient\discovery.py", line 219, in build requested_url = uritemplate.expand(discovery_url, params) File "C:\JA\Envs\GIC\lib\site-packages\uritemplate\api.py", line 33, in expand return URITemplate(uri).expand(var_dict, **kwargs) File "C:\JA\Envs\GIC\lib\site-packages\uritemplate\template.py", line 132, in expand return self._expand(_merge(var_dict, kwargs), False) File "C:\JA\Envs\GIC\lib\site-packages\uritemplate\template.py", line 97, in _expand expanded.update(v.expand(expansion)) File "C:\JA\Envs\GIC\lib\site-packages\uritemplate\variable.py", line 338, in expand expanded = expansion(name, value, opts['explode'], opts['prefix']) File "C:\JA\Envs\GIC\lib\site-packages\uritemplate\variable.py", line 278, in _string_expansion if dict_test(value) or tuples: File "C:\JA\Envs\GIC\lib\site-packages\uritemplate\variable.py", line 363, in dict_test return isinstance(value, (dict, collections.MutableMapping)) AttributeError: module 'collections' has no attribute 'MutableMapping' --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 # Copyright 2014 Google Inc. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Setup script for Google API Python client. 16 17 Also installs included versions of third party libraries, if those libraries 18 are not already installed. 19 """ 20 from __future__ import print_function 21 22 import sys 23 24 if sys.version_info < (3, 6): 25 print("google-api-python-client requires python3 version >= 3.6.", file=sys.stderr) 26 sys.exit(1) 27 28 import io 29 import os 30 from setuptools import setup 31 32 packages = ["apiclient", "googleapiclient", "googleapiclient/discovery_cache"] 33 34 install_requires = [ 35 "httplib2>=0.15.0,<1dev", 36 # NOTE: Maintainers, please do not require google-auth>=2.x.x 37 # Until this issue is closed 38 # https://github.com/googleapis/google-cloud-python/issues/10566 39 "google-auth>=1.16.0,<3.0.0dev", 40 "google-auth-httplib2>=0.1.0", 41 # NOTE: Maintainers, please do not require google-api-core>=2.x.x 42 # Until this issue is closed 43 # https://github.com/googleapis/google-cloud-python/issues/10566 44 "google-api-core>=1.21.0,<3.0.0dev", 45 "uritemplate>=3.0.0,<5", 46 ] 47 48 package_root = os.path.abspath(os.path.dirname(__file__)) 49 50 readme_filename = os.path.join(package_root, "README.md") 51 with io.open(readme_filename, encoding="utf-8") as readme_file: 52 readme = readme_file.read() 53 54 package_root = os.path.abspath(os.path.dirname(__file__)) 55 56 version = {} 57 with open(os.path.join(package_root, "googleapiclient/version.py")) as fp: 58 exec(fp.read(), version) 59 version = version["__version__"] 60 61 setup( 62 name="google-api-python-client", 63 version=version, 64 description="Google API Client Library for Python", 65 long_description=readme, 66 long_description_content_type='text/markdown', 67 author="Google LLC", 68 author_email="[email protected]", 69 url="https://github.com/googleapis/google-api-python-client/", 70 install_requires=install_requires, 71 python_requires=">=3.6", 72 packages=packages, 73 package_data={"googleapiclient": ["discovery_cache/documents/*.json"]}, 74 license="Apache 2.0", 75 keywords="google api client", 76 classifiers=[ 77 "Programming Language :: Python :: 3", 78 "Programming Language :: Python :: 3.6", 79 "Programming Language :: Python :: 3.7", 80 "Programming Language :: Python :: 3.8", 81 "Programming Language :: Python :: 3.9", 82 "Programming Language :: Python :: 3.10", 83 "Development Status :: 5 - Production/Stable", 84 "Intended Audience :: Developers", 85 "License :: OSI Approved :: Apache Software License", 86 "Operating System :: OS Independent", 87 "Topic :: Internet :: WWW/HTTP", 88 ], 89 ) 90 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -42,7 +42,7 @@ # Until this issue is closed # https://github.com/googleapis/google-cloud-python/issues/10566 "google-api-core>=1.21.0,<3.0.0dev", - "uritemplate>=3.0.0,<5", + "uritemplate>=3.0.1,<5", ] package_root = os.path.abspath(os.path.dirname(__file__))
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -42,7 +42,7 @@\n # Until this issue is closed\n # https://github.com/googleapis/google-cloud-python/issues/10566\n \"google-api-core>=1.21.0,<3.0.0dev\",\n- \"uritemplate>=3.0.0,<5\",\n+ \"uritemplate>=3.0.1,<5\",\n ]\n \n package_root = os.path.abspath(os.path.dirname(__file__))\n", "issue": "Python 3.10 compatibility issue\n\r\n\r\n#### Environment details\r\n\r\n - OS type and version: Windows 10\r\n - Python version: `python --version` 3.10.1\r\n - pip version: `pip --version` 21.2.4\r\n - `google-api-python-client` version: `pip show google-api-python-client` - 2.33.0\r\n\r\nuritemplate package 3.0.0 is not compatible with python 3.10. Need to update the requirements.\r\n\r\nPartial Stack Trace\r\n\r\nservice = build('gmail', 'v1', credentials=creds)\r\n File \"C:\\JA\\Envs\\GIC\\lib\\site-packages\\googleapiclient\\_helpers.py\", line 130, in positional_wrapper\r\n return wrapped(*args, **kwargs)\r\n File \"C:\\JA\\Envs\\GIC\\lib\\site-packages\\googleapiclient\\discovery.py\", line 219, in build\r\n requested_url = uritemplate.expand(discovery_url, params)\r\n File \"C:\\JA\\Envs\\GIC\\lib\\site-packages\\uritemplate\\api.py\", line 33, in expand\r\n return URITemplate(uri).expand(var_dict, **kwargs)\r\n File \"C:\\JA\\Envs\\GIC\\lib\\site-packages\\uritemplate\\template.py\", line 132, in expand\r\n return self._expand(_merge(var_dict, kwargs), False)\r\n File \"C:\\JA\\Envs\\GIC\\lib\\site-packages\\uritemplate\\template.py\", line 97, in _expand\r\n expanded.update(v.expand(expansion))\r\n File \"C:\\JA\\Envs\\GIC\\lib\\site-packages\\uritemplate\\variable.py\", line 338, in expand\r\n expanded = expansion(name, value, opts['explode'], opts['prefix'])\r\n File \"C:\\JA\\Envs\\GIC\\lib\\site-packages\\uritemplate\\variable.py\", line 278, in _string_expansion\r\n if dict_test(value) or tuples:\r\n File \"C:\\JA\\Envs\\GIC\\lib\\site-packages\\uritemplate\\variable.py\", line 363, in dict_test\r\n return isinstance(value, (dict, collections.MutableMapping))\r\nAttributeError: module 'collections' has no attribute 'MutableMapping'\r\n\n", "before_files": [{"content": "# Copyright 2014 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Setup script for Google API Python client.\n\nAlso installs included versions of third party libraries, if those libraries\nare not already installed.\n\"\"\"\nfrom __future__ import print_function\n\nimport sys\n\nif sys.version_info < (3, 6):\n print(\"google-api-python-client requires python3 version >= 3.6.\", file=sys.stderr)\n sys.exit(1)\n\nimport io\nimport os\nfrom setuptools import setup\n\npackages = [\"apiclient\", \"googleapiclient\", \"googleapiclient/discovery_cache\"]\n\ninstall_requires = [\n \"httplib2>=0.15.0,<1dev\",\n # NOTE: Maintainers, please do not require google-auth>=2.x.x\n # Until this issue is closed\n # https://github.com/googleapis/google-cloud-python/issues/10566\n \"google-auth>=1.16.0,<3.0.0dev\",\n \"google-auth-httplib2>=0.1.0\",\n # NOTE: Maintainers, please do not require google-api-core>=2.x.x\n # Until this issue is closed\n # https://github.com/googleapis/google-cloud-python/issues/10566\n \"google-api-core>=1.21.0,<3.0.0dev\",\n \"uritemplate>=3.0.0,<5\",\n]\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.md\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nversion = {}\nwith open(os.path.join(package_root, \"googleapiclient/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\nsetup(\n name=\"google-api-python-client\",\n version=version,\n description=\"Google API Client Library for Python\",\n long_description=readme,\n long_description_content_type='text/markdown',\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n url=\"https://github.com/googleapis/google-api-python-client/\",\n install_requires=install_requires,\n python_requires=\">=3.6\",\n packages=packages,\n package_data={\"googleapiclient\": [\"discovery_cache/documents/*.json\"]},\n license=\"Apache 2.0\",\n keywords=\"google api client\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2014 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Setup script for Google API Python client.\n\nAlso installs included versions of third party libraries, if those libraries\nare not already installed.\n\"\"\"\nfrom __future__ import print_function\n\nimport sys\n\nif sys.version_info < (3, 6):\n print(\"google-api-python-client requires python3 version >= 3.6.\", file=sys.stderr)\n sys.exit(1)\n\nimport io\nimport os\nfrom setuptools import setup\n\npackages = [\"apiclient\", \"googleapiclient\", \"googleapiclient/discovery_cache\"]\n\ninstall_requires = [\n \"httplib2>=0.15.0,<1dev\",\n # NOTE: Maintainers, please do not require google-auth>=2.x.x\n # Until this issue is closed\n # https://github.com/googleapis/google-cloud-python/issues/10566\n \"google-auth>=1.16.0,<3.0.0dev\",\n \"google-auth-httplib2>=0.1.0\",\n # NOTE: Maintainers, please do not require google-api-core>=2.x.x\n # Until this issue is closed\n # https://github.com/googleapis/google-cloud-python/issues/10566\n \"google-api-core>=1.21.0,<3.0.0dev\",\n \"uritemplate>=3.0.1,<5\",\n]\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.md\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nversion = {}\nwith open(os.path.join(package_root, \"googleapiclient/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\nsetup(\n name=\"google-api-python-client\",\n version=version,\n description=\"Google API Client Library for Python\",\n long_description=readme,\n long_description_content_type='text/markdown',\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n url=\"https://github.com/googleapis/google-api-python-client/\",\n install_requires=install_requires,\n python_requires=\">=3.6\",\n packages=packages,\n package_data={\"googleapiclient\": [\"discovery_cache/documents/*.json\"]},\n license=\"Apache 2.0\",\n keywords=\"google api client\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n ],\n)\n", "path": "setup.py"}]}
1,740
123
gh_patches_debug_21659
rasdani/github-patches
git_diff
readthedocs__readthedocs.org-4721
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Adopting a projected as gold user fails ### How to reproduce it 1. sign up as Gold member 1. go to https://readthedocs.org/accounts/gold/subscription/ 1. select the project that you want to adopt ### Expected Result Adopts the project. ### Actual Result Fails with a 500. https://sentry.io/read-the-docs/readthedocs-org/issues/587668658/ ### The problem This line https://github.com/rtfd/readthedocs.org/blob/44e02def230b937e4eca396864de9fc81f4ef33f/readthedocs/gold/views.py#L109 cause the problem since we are receiving a "project name" and using it as "project slug". --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `readthedocs/gold/forms.py` Content: ``` 1 """Gold subscription forms.""" 2 3 from __future__ import absolute_import 4 5 from builtins import object 6 from django import forms 7 8 from readthedocs.payments.forms import StripeModelForm, StripeResourceMixin 9 10 from .models import LEVEL_CHOICES, GoldUser 11 12 13 class GoldSubscriptionForm(StripeResourceMixin, StripeModelForm): 14 15 """ 16 Gold subscription payment form. 17 18 This extends the common base form for handling Stripe subscriptions. Credit 19 card fields for card number, expiry, and CVV are extended from 20 :py:class:`StripeModelForm`, with additional methods from 21 :py:class:`StripeResourceMixin` for common operations against the Stripe API. 22 """ 23 24 class Meta(object): 25 model = GoldUser 26 fields = ['last_4_card_digits', 'level'] 27 28 last_4_card_digits = forms.CharField( 29 required=True, 30 min_length=4, 31 max_length=4, 32 widget=forms.HiddenInput(attrs={ 33 'data-bind': 'valueInit: last_4_card_digits, value: last_4_card_digits', 34 }) 35 ) 36 37 level = forms.ChoiceField( 38 required=True, 39 choices=LEVEL_CHOICES, 40 ) 41 42 def clean(self): 43 self.instance.user = self.customer 44 return super(GoldSubscriptionForm, self).clean() 45 46 def validate_stripe(self): 47 subscription = self.get_subscription() 48 self.instance.stripe_id = subscription.customer 49 self.instance.subscribed = True 50 51 def get_customer_kwargs(self): 52 return { 53 'description': self.customer.get_full_name() or self.customer.username, 54 'email': self.customer.email, 55 'id': self.instance.stripe_id or None 56 } 57 58 def get_subscription(self): 59 customer = self.get_customer() 60 61 # TODO get the first subscription more intelligently 62 subscriptions = customer.subscriptions.all(limit=5) 63 if subscriptions.data: 64 # Update an existing subscription - Stripe prorates by default 65 subscription = subscriptions.data[0] 66 subscription.plan = self.cleaned_data['level'] 67 if 'stripe_token' in self.cleaned_data and self.cleaned_data['stripe_token']: 68 # Optionally update the card 69 subscription.source = self.cleaned_data['stripe_token'] 70 subscription.save() 71 else: 72 # Add a new subscription 73 subscription = customer.subscriptions.create( 74 plan=self.cleaned_data['level'], 75 source=self.cleaned_data['stripe_token'] 76 ) 77 78 return subscription 79 80 81 class GoldProjectForm(forms.Form): 82 project = forms.CharField( 83 required=True, 84 ) 85 86 def __init__(self, *args, **kwargs): 87 self.user = kwargs.pop('user', None) 88 self.projects = kwargs.pop('projects', None) 89 super(GoldProjectForm, self).__init__(*args, **kwargs) 90 91 def clean(self): 92 cleaned_data = super(GoldProjectForm, self).clean() 93 if self.projects.count() < self.user.num_supported_projects: 94 return cleaned_data 95 96 self.add_error(None, 'You already have the max number of supported projects.') 97 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/readthedocs/gold/forms.py b/readthedocs/gold/forms.py --- a/readthedocs/gold/forms.py +++ b/readthedocs/gold/forms.py @@ -5,7 +5,10 @@ from builtins import object from django import forms +from django.utils.translation import ugettext_lazy as _ + from readthedocs.payments.forms import StripeModelForm, StripeResourceMixin +from readthedocs.projects.models import Project from .models import LEVEL_CHOICES, GoldUser @@ -88,6 +91,14 @@ self.projects = kwargs.pop('projects', None) super(GoldProjectForm, self).__init__(*args, **kwargs) + def clean_project(self): + project_slug = self.cleaned_data.get('project', '') + project_instance = Project.objects.filter(slug=project_slug) + if not project_instance.exists(): + raise forms.ValidationError(_('No project found.')) + else: + return project_slug + def clean(self): cleaned_data = super(GoldProjectForm, self).clean() if self.projects.count() < self.user.num_supported_projects:
{"golden_diff": "diff --git a/readthedocs/gold/forms.py b/readthedocs/gold/forms.py\n--- a/readthedocs/gold/forms.py\n+++ b/readthedocs/gold/forms.py\n@@ -5,7 +5,10 @@\n from builtins import object\n from django import forms\n \n+from django.utils.translation import ugettext_lazy as _\n+\n from readthedocs.payments.forms import StripeModelForm, StripeResourceMixin\n+from readthedocs.projects.models import Project\n \n from .models import LEVEL_CHOICES, GoldUser\n \n@@ -88,6 +91,14 @@\n self.projects = kwargs.pop('projects', None)\n super(GoldProjectForm, self).__init__(*args, **kwargs)\n \n+ def clean_project(self):\n+ project_slug = self.cleaned_data.get('project', '')\n+ project_instance = Project.objects.filter(slug=project_slug)\n+ if not project_instance.exists():\n+ raise forms.ValidationError(_('No project found.'))\n+ else:\n+ return project_slug\n+\n def clean(self):\n cleaned_data = super(GoldProjectForm, self).clean()\n if self.projects.count() < self.user.num_supported_projects:\n", "issue": "Adopting a projected as gold user fails\n### How to reproduce it\r\n\r\n1. sign up as Gold member\r\n1. go to https://readthedocs.org/accounts/gold/subscription/\r\n1. select the project that you want to adopt\r\n\r\n### Expected Result\r\n\r\nAdopts the project.\r\n\r\n### Actual Result\r\n\r\nFails with a 500.\r\n\r\nhttps://sentry.io/read-the-docs/readthedocs-org/issues/587668658/\r\n\r\n### The problem\r\n\r\nThis line\r\n\r\nhttps://github.com/rtfd/readthedocs.org/blob/44e02def230b937e4eca396864de9fc81f4ef33f/readthedocs/gold/views.py#L109\r\n\r\ncause the problem since we are receiving a \"project name\" and using it as \"project slug\".\n", "before_files": [{"content": "\"\"\"Gold subscription forms.\"\"\"\n\nfrom __future__ import absolute_import\n\nfrom builtins import object\nfrom django import forms\n\nfrom readthedocs.payments.forms import StripeModelForm, StripeResourceMixin\n\nfrom .models import LEVEL_CHOICES, GoldUser\n\n\nclass GoldSubscriptionForm(StripeResourceMixin, StripeModelForm):\n\n \"\"\"\n Gold subscription payment form.\n\n This extends the common base form for handling Stripe subscriptions. Credit\n card fields for card number, expiry, and CVV are extended from\n :py:class:`StripeModelForm`, with additional methods from\n :py:class:`StripeResourceMixin` for common operations against the Stripe API.\n \"\"\"\n\n class Meta(object):\n model = GoldUser\n fields = ['last_4_card_digits', 'level']\n\n last_4_card_digits = forms.CharField(\n required=True,\n min_length=4,\n max_length=4,\n widget=forms.HiddenInput(attrs={\n 'data-bind': 'valueInit: last_4_card_digits, value: last_4_card_digits',\n })\n )\n\n level = forms.ChoiceField(\n required=True,\n choices=LEVEL_CHOICES,\n )\n\n def clean(self):\n self.instance.user = self.customer\n return super(GoldSubscriptionForm, self).clean()\n\n def validate_stripe(self):\n subscription = self.get_subscription()\n self.instance.stripe_id = subscription.customer\n self.instance.subscribed = True\n\n def get_customer_kwargs(self):\n return {\n 'description': self.customer.get_full_name() or self.customer.username,\n 'email': self.customer.email,\n 'id': self.instance.stripe_id or None\n }\n\n def get_subscription(self):\n customer = self.get_customer()\n\n # TODO get the first subscription more intelligently\n subscriptions = customer.subscriptions.all(limit=5)\n if subscriptions.data:\n # Update an existing subscription - Stripe prorates by default\n subscription = subscriptions.data[0]\n subscription.plan = self.cleaned_data['level']\n if 'stripe_token' in self.cleaned_data and self.cleaned_data['stripe_token']:\n # Optionally update the card\n subscription.source = self.cleaned_data['stripe_token']\n subscription.save()\n else:\n # Add a new subscription\n subscription = customer.subscriptions.create(\n plan=self.cleaned_data['level'],\n source=self.cleaned_data['stripe_token']\n )\n\n return subscription\n\n\nclass GoldProjectForm(forms.Form):\n project = forms.CharField(\n required=True,\n )\n\n def __init__(self, *args, **kwargs):\n self.user = kwargs.pop('user', None)\n self.projects = kwargs.pop('projects', None)\n super(GoldProjectForm, self).__init__(*args, **kwargs)\n\n def clean(self):\n cleaned_data = super(GoldProjectForm, self).clean()\n if self.projects.count() < self.user.num_supported_projects:\n return cleaned_data\n\n self.add_error(None, 'You already have the max number of supported projects.')\n", "path": "readthedocs/gold/forms.py"}], "after_files": [{"content": "\"\"\"Gold subscription forms.\"\"\"\n\nfrom __future__ import absolute_import\n\nfrom builtins import object\nfrom django import forms\n\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom readthedocs.payments.forms import StripeModelForm, StripeResourceMixin\nfrom readthedocs.projects.models import Project\n\nfrom .models import LEVEL_CHOICES, GoldUser\n\n\nclass GoldSubscriptionForm(StripeResourceMixin, StripeModelForm):\n\n \"\"\"\n Gold subscription payment form.\n\n This extends the common base form for handling Stripe subscriptions. Credit\n card fields for card number, expiry, and CVV are extended from\n :py:class:`StripeModelForm`, with additional methods from\n :py:class:`StripeResourceMixin` for common operations against the Stripe API.\n \"\"\"\n\n class Meta(object):\n model = GoldUser\n fields = ['last_4_card_digits', 'level']\n\n last_4_card_digits = forms.CharField(\n required=True,\n min_length=4,\n max_length=4,\n widget=forms.HiddenInput(attrs={\n 'data-bind': 'valueInit: last_4_card_digits, value: last_4_card_digits',\n })\n )\n\n level = forms.ChoiceField(\n required=True,\n choices=LEVEL_CHOICES,\n )\n\n def clean(self):\n self.instance.user = self.customer\n return super(GoldSubscriptionForm, self).clean()\n\n def validate_stripe(self):\n subscription = self.get_subscription()\n self.instance.stripe_id = subscription.customer\n self.instance.subscribed = True\n\n def get_customer_kwargs(self):\n return {\n 'description': self.customer.get_full_name() or self.customer.username,\n 'email': self.customer.email,\n 'id': self.instance.stripe_id or None\n }\n\n def get_subscription(self):\n customer = self.get_customer()\n\n # TODO get the first subscription more intelligently\n subscriptions = customer.subscriptions.all(limit=5)\n if subscriptions.data:\n # Update an existing subscription - Stripe prorates by default\n subscription = subscriptions.data[0]\n subscription.plan = self.cleaned_data['level']\n if 'stripe_token' in self.cleaned_data and self.cleaned_data['stripe_token']:\n # Optionally update the card\n subscription.source = self.cleaned_data['stripe_token']\n subscription.save()\n else:\n # Add a new subscription\n subscription = customer.subscriptions.create(\n plan=self.cleaned_data['level'],\n source=self.cleaned_data['stripe_token']\n )\n\n return subscription\n\n\nclass GoldProjectForm(forms.Form):\n project = forms.CharField(\n required=True,\n )\n\n def __init__(self, *args, **kwargs):\n self.user = kwargs.pop('user', None)\n self.projects = kwargs.pop('projects', None)\n super(GoldProjectForm, self).__init__(*args, **kwargs)\n\n def clean_project(self):\n project_slug = self.cleaned_data.get('project', '')\n project_instance = Project.objects.filter(slug=project_slug)\n if not project_instance.exists():\n raise forms.ValidationError(_('No project found.'))\n else:\n return project_slug\n\n def clean(self):\n cleaned_data = super(GoldProjectForm, self).clean()\n if self.projects.count() < self.user.num_supported_projects:\n return cleaned_data\n\n self.add_error(None, 'You already have the max number of supported projects.')\n", "path": "readthedocs/gold/forms.py"}]}
1,270
246
gh_patches_debug_17614
rasdani/github-patches
git_diff
svthalia__concrexit-1164
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- FieldError: Cannot resolve keyword 'registration' into field. Choices are: cancel_deadline, category, descrip... Sentry Issue: [CONCREXIT-2Z](https://sentry.io/organizations/thalia/issues/1761243401/?referrer=github_integration) ``` FieldError: Cannot resolve keyword 'registration' into field. Choices are: cancel_deadline, category, description_en, description_nl, documents, end, eventregistration, fine, id, location_en, location_nl, map_location, max_participants, no_registration_message_en, no_registration_message_nl, organiser, organiser_id, pizzaevent, price, published, registration_end, registration_reminder, registration_reminder_id, registration_start, registrationinformationfield, send_cancel_email, slide, slide_id, start, start_reminde... (14 additional frame(s) were not displayed) ... File "django/db/models/sql/query.py", line 1371, in _add_q child_clause, needed_inner = self._add_q( File "django/db/models/sql/query.py", line 1378, in _add_q child_clause, needed_inner = self.build_filter( File "django/db/models/sql/query.py", line 1251, in build_filter lookups, parts, reffed_expression = self.solve_lookup_type(arg) File "django/db/models/sql/query.py", line 1088, in solve_lookup_type _, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta()) File "django/db/models/sql/query.py", line 1483, in names_to_path raise FieldError("Cannot resolve keyword '%s' into field. " ``` FieldError: Cannot resolve keyword 'registration' into field. Choices are: cancel_deadline, category, descrip... Sentry Issue: [CONCREXIT-2Z](https://sentry.io/organizations/thalia/issues/1761243401/?referrer=github_integration) ``` FieldError: Cannot resolve keyword 'registration' into field. Choices are: cancel_deadline, category, description_en, description_nl, documents, end, eventregistration, fine, id, location_en, location_nl, map_location, max_participants, no_registration_message_en, no_registration_message_nl, organiser, organiser_id, pizzaevent, price, published, registration_end, registration_reminder, registration_reminder_id, registration_start, registrationinformationfield, send_cancel_email, slide, slide_id, start, start_reminde... (14 additional frame(s) were not displayed) ... File "django/db/models/sql/query.py", line 1371, in _add_q child_clause, needed_inner = self._add_q( File "django/db/models/sql/query.py", line 1378, in _add_q child_clause, needed_inner = self.build_filter( File "django/db/models/sql/query.py", line 1251, in build_filter lookups, parts, reffed_expression = self.solve_lookup_type(arg) File "django/db/models/sql/query.py", line 1088, in solve_lookup_type _, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta()) File "django/db/models/sql/query.py", line 1483, in names_to_path raise FieldError("Cannot resolve keyword '%s' into field. " ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `website/events/feeds.py` Content: ``` 1 """The feeds defined by the events package""" 2 from django.conf import settings 3 from django.db.models.query_utils import Q 4 from django.urls import reverse 5 from django.utils.translation import activate 6 from django.utils.translation import gettext as _ 7 from django_ical.views import ICalFeed 8 9 from events.models import Event, FeedToken 10 11 12 class EventFeed(ICalFeed): 13 """Output an iCal feed containing all published events""" 14 15 def __init__(self, lang="en"): 16 super().__init__() 17 self.lang = lang 18 self.user = None 19 20 def __call__(self, request, *args, **kwargs): 21 if "u" in request.GET: 22 self.user = FeedToken.get_member(request.GET["u"]) 23 else: 24 self.user = None 25 26 return super().__call__(request, args, kwargs) 27 28 def product_id(self): 29 return f"-//{settings.SITE_DOMAIN}//EventCalendar//{self.lang.upper()}" 30 31 def file_name(self): 32 return "thalia_{}.ics".format(self.lang) 33 34 def title(self): 35 activate(self.lang) 36 return _("Study Association Thalia event calendar") 37 38 def items(self): 39 query = Q(published=True) 40 41 if self.user: 42 query &= Q(registration_start__isnull=True) | ( 43 Q(registration__member=self.user) & Q(registration__date_cancelled=None) 44 ) 45 46 return Event.objects.filter(query).order_by("-start") 47 48 def item_title(self, item): 49 return item.title 50 51 def item_description(self, item): 52 return f'{item.description} <a href="' f'{self.item_link(item)}">Website</a>' 53 54 def item_start_datetime(self, item): 55 return item.start 56 57 def item_end_datetime(self, item): 58 return item.end 59 60 def item_link(self, item): 61 return settings.BASE_URL + reverse("events:event", kwargs={"pk": item.id}) 62 63 def item_location(self, item): 64 return "{} - {}".format(item.location, item.map_location) 65 ``` Path: `website/events/decorators.py` Content: ``` 1 """The decorators defined by the events package""" 2 from django.core.exceptions import PermissionDenied 3 4 from events import services 5 from events.models import Event 6 7 8 def organiser_only(view_function): 9 """See OrganiserOnly""" 10 return OrganiserOnly(view_function) 11 12 13 class OrganiserOnly: 14 """ 15 Decorator that denies access to the page if: 16 1. There is no `pk` or `registration` in the request 17 2. The specified event does not exist 18 3. The user is no organiser of the specified event 19 """ 20 21 def __init__(self, view_function): 22 self.view_function = view_function 23 24 def __call__(self, request, *args, **kwargs): 25 event = None 26 27 if "pk" in kwargs: 28 try: 29 event = Event.objects.get(pk=kwargs.get("pk")) 30 except Event.DoesNotExist: 31 pass 32 elif "registration" in kwargs: 33 try: 34 event = Event.objects.get(registration__pk=kwargs.get("registration")) 35 except Event.DoesNotExist: 36 pass 37 38 if event and services.is_organiser(request.member, event): 39 return self.view_function(request, *args, **kwargs) 40 41 raise PermissionDenied 42 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/website/events/decorators.py b/website/events/decorators.py --- a/website/events/decorators.py +++ b/website/events/decorators.py @@ -31,7 +31,9 @@ pass elif "registration" in kwargs: try: - event = Event.objects.get(registration__pk=kwargs.get("registration")) + event = Event.objects.get( + eventregistration__pk=kwargs.get("registration") + ) except Event.DoesNotExist: pass diff --git a/website/events/feeds.py b/website/events/feeds.py --- a/website/events/feeds.py +++ b/website/events/feeds.py @@ -40,7 +40,8 @@ if self.user: query &= Q(registration_start__isnull=True) | ( - Q(registration__member=self.user) & Q(registration__date_cancelled=None) + Q(eventregistration__member=self.user) + & Q(eventregistration__date_cancelled=None) ) return Event.objects.filter(query).order_by("-start")
{"golden_diff": "diff --git a/website/events/decorators.py b/website/events/decorators.py\n--- a/website/events/decorators.py\n+++ b/website/events/decorators.py\n@@ -31,7 +31,9 @@\n pass\n elif \"registration\" in kwargs:\n try:\n- event = Event.objects.get(registration__pk=kwargs.get(\"registration\"))\n+ event = Event.objects.get(\n+ eventregistration__pk=kwargs.get(\"registration\")\n+ )\n except Event.DoesNotExist:\n pass\n \ndiff --git a/website/events/feeds.py b/website/events/feeds.py\n--- a/website/events/feeds.py\n+++ b/website/events/feeds.py\n@@ -40,7 +40,8 @@\n \n if self.user:\n query &= Q(registration_start__isnull=True) | (\n- Q(registration__member=self.user) & Q(registration__date_cancelled=None)\n+ Q(eventregistration__member=self.user)\n+ & Q(eventregistration__date_cancelled=None)\n )\n \n return Event.objects.filter(query).order_by(\"-start\")\n", "issue": "FieldError: Cannot resolve keyword 'registration' into field. Choices are: cancel_deadline, category, descrip...\nSentry Issue: [CONCREXIT-2Z](https://sentry.io/organizations/thalia/issues/1761243401/?referrer=github_integration)\n\n```\nFieldError: Cannot resolve keyword 'registration' into field. Choices are: cancel_deadline, category, description_en, description_nl, documents, end, eventregistration, fine, id, location_en, location_nl, map_location, max_participants, no_registration_message_en, no_registration_message_nl, organiser, organiser_id, pizzaevent, price, published, registration_end, registration_reminder, registration_reminder_id, registration_start, registrationinformationfield, send_cancel_email, slide, slide_id, start, start_reminde...\n(14 additional frame(s) were not displayed)\n...\n File \"django/db/models/sql/query.py\", line 1371, in _add_q\n child_clause, needed_inner = self._add_q(\n File \"django/db/models/sql/query.py\", line 1378, in _add_q\n child_clause, needed_inner = self.build_filter(\n File \"django/db/models/sql/query.py\", line 1251, in build_filter\n lookups, parts, reffed_expression = self.solve_lookup_type(arg)\n File \"django/db/models/sql/query.py\", line 1088, in solve_lookup_type\n _, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta())\n File \"django/db/models/sql/query.py\", line 1483, in names_to_path\n raise FieldError(\"Cannot resolve keyword '%s' into field. \"\n```\nFieldError: Cannot resolve keyword 'registration' into field. Choices are: cancel_deadline, category, descrip...\nSentry Issue: [CONCREXIT-2Z](https://sentry.io/organizations/thalia/issues/1761243401/?referrer=github_integration)\n\n```\nFieldError: Cannot resolve keyword 'registration' into field. Choices are: cancel_deadline, category, description_en, description_nl, documents, end, eventregistration, fine, id, location_en, location_nl, map_location, max_participants, no_registration_message_en, no_registration_message_nl, organiser, organiser_id, pizzaevent, price, published, registration_end, registration_reminder, registration_reminder_id, registration_start, registrationinformationfield, send_cancel_email, slide, slide_id, start, start_reminde...\n(14 additional frame(s) were not displayed)\n...\n File \"django/db/models/sql/query.py\", line 1371, in _add_q\n child_clause, needed_inner = self._add_q(\n File \"django/db/models/sql/query.py\", line 1378, in _add_q\n child_clause, needed_inner = self.build_filter(\n File \"django/db/models/sql/query.py\", line 1251, in build_filter\n lookups, parts, reffed_expression = self.solve_lookup_type(arg)\n File \"django/db/models/sql/query.py\", line 1088, in solve_lookup_type\n _, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta())\n File \"django/db/models/sql/query.py\", line 1483, in names_to_path\n raise FieldError(\"Cannot resolve keyword '%s' into field. \"\n```\n", "before_files": [{"content": "\"\"\"The feeds defined by the events package\"\"\"\nfrom django.conf import settings\nfrom django.db.models.query_utils import Q\nfrom django.urls import reverse\nfrom django.utils.translation import activate\nfrom django.utils.translation import gettext as _\nfrom django_ical.views import ICalFeed\n\nfrom events.models import Event, FeedToken\n\n\nclass EventFeed(ICalFeed):\n \"\"\"Output an iCal feed containing all published events\"\"\"\n\n def __init__(self, lang=\"en\"):\n super().__init__()\n self.lang = lang\n self.user = None\n\n def __call__(self, request, *args, **kwargs):\n if \"u\" in request.GET:\n self.user = FeedToken.get_member(request.GET[\"u\"])\n else:\n self.user = None\n\n return super().__call__(request, args, kwargs)\n\n def product_id(self):\n return f\"-//{settings.SITE_DOMAIN}//EventCalendar//{self.lang.upper()}\"\n\n def file_name(self):\n return \"thalia_{}.ics\".format(self.lang)\n\n def title(self):\n activate(self.lang)\n return _(\"Study Association Thalia event calendar\")\n\n def items(self):\n query = Q(published=True)\n\n if self.user:\n query &= Q(registration_start__isnull=True) | (\n Q(registration__member=self.user) & Q(registration__date_cancelled=None)\n )\n\n return Event.objects.filter(query).order_by(\"-start\")\n\n def item_title(self, item):\n return item.title\n\n def item_description(self, item):\n return f'{item.description} <a href=\"' f'{self.item_link(item)}\">Website</a>'\n\n def item_start_datetime(self, item):\n return item.start\n\n def item_end_datetime(self, item):\n return item.end\n\n def item_link(self, item):\n return settings.BASE_URL + reverse(\"events:event\", kwargs={\"pk\": item.id})\n\n def item_location(self, item):\n return \"{} - {}\".format(item.location, item.map_location)\n", "path": "website/events/feeds.py"}, {"content": "\"\"\"The decorators defined by the events package\"\"\"\nfrom django.core.exceptions import PermissionDenied\n\nfrom events import services\nfrom events.models import Event\n\n\ndef organiser_only(view_function):\n \"\"\"See OrganiserOnly\"\"\"\n return OrganiserOnly(view_function)\n\n\nclass OrganiserOnly:\n \"\"\"\n Decorator that denies access to the page if:\n 1. There is no `pk` or `registration` in the request\n 2. The specified event does not exist\n 3. The user is no organiser of the specified event\n \"\"\"\n\n def __init__(self, view_function):\n self.view_function = view_function\n\n def __call__(self, request, *args, **kwargs):\n event = None\n\n if \"pk\" in kwargs:\n try:\n event = Event.objects.get(pk=kwargs.get(\"pk\"))\n except Event.DoesNotExist:\n pass\n elif \"registration\" in kwargs:\n try:\n event = Event.objects.get(registration__pk=kwargs.get(\"registration\"))\n except Event.DoesNotExist:\n pass\n\n if event and services.is_organiser(request.member, event):\n return self.view_function(request, *args, **kwargs)\n\n raise PermissionDenied\n", "path": "website/events/decorators.py"}], "after_files": [{"content": "\"\"\"The feeds defined by the events package\"\"\"\nfrom django.conf import settings\nfrom django.db.models.query_utils import Q\nfrom django.urls import reverse\nfrom django.utils.translation import activate\nfrom django.utils.translation import gettext as _\nfrom django_ical.views import ICalFeed\n\nfrom events.models import Event, FeedToken\n\n\nclass EventFeed(ICalFeed):\n \"\"\"Output an iCal feed containing all published events\"\"\"\n\n def __init__(self, lang=\"en\"):\n super().__init__()\n self.lang = lang\n self.user = None\n\n def __call__(self, request, *args, **kwargs):\n if \"u\" in request.GET:\n self.user = FeedToken.get_member(request.GET[\"u\"])\n else:\n self.user = None\n\n return super().__call__(request, args, kwargs)\n\n def product_id(self):\n return f\"-//{settings.SITE_DOMAIN}//EventCalendar//{self.lang.upper()}\"\n\n def file_name(self):\n return \"thalia_{}.ics\".format(self.lang)\n\n def title(self):\n activate(self.lang)\n return _(\"Study Association Thalia event calendar\")\n\n def items(self):\n query = Q(published=True)\n\n if self.user:\n query &= Q(registration_start__isnull=True) | (\n Q(eventregistration__member=self.user)\n & Q(eventregistration__date_cancelled=None)\n )\n\n return Event.objects.filter(query).order_by(\"-start\")\n\n def item_title(self, item):\n return item.title\n\n def item_description(self, item):\n return f'{item.description} <a href=\"' f'{self.item_link(item)}\">Website</a>'\n\n def item_start_datetime(self, item):\n return item.start\n\n def item_end_datetime(self, item):\n return item.end\n\n def item_link(self, item):\n return settings.BASE_URL + reverse(\"events:event\", kwargs={\"pk\": item.id})\n\n def item_location(self, item):\n return \"{} - {}\".format(item.location, item.map_location)\n", "path": "website/events/feeds.py"}, {"content": "\"\"\"The decorators defined by the events package\"\"\"\nfrom django.core.exceptions import PermissionDenied\n\nfrom events import services\nfrom events.models import Event\n\n\ndef organiser_only(view_function):\n \"\"\"See OrganiserOnly\"\"\"\n return OrganiserOnly(view_function)\n\n\nclass OrganiserOnly:\n \"\"\"\n Decorator that denies access to the page if:\n 1. There is no `pk` or `registration` in the request\n 2. The specified event does not exist\n 3. The user is no organiser of the specified event\n \"\"\"\n\n def __init__(self, view_function):\n self.view_function = view_function\n\n def __call__(self, request, *args, **kwargs):\n event = None\n\n if \"pk\" in kwargs:\n try:\n event = Event.objects.get(pk=kwargs.get(\"pk\"))\n except Event.DoesNotExist:\n pass\n elif \"registration\" in kwargs:\n try:\n event = Event.objects.get(\n eventregistration__pk=kwargs.get(\"registration\")\n )\n except Event.DoesNotExist:\n pass\n\n if event and services.is_organiser(request.member, event):\n return self.view_function(request, *args, **kwargs)\n\n raise PermissionDenied\n", "path": "website/events/decorators.py"}]}
1,921
239
gh_patches_debug_34965
rasdani/github-patches
git_diff
fidals__shopelectro-987
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Render only first-leveled children at the header menu Now we have too many categories at the header menu ![image](https://user-images.githubusercontent.com/3526796/65932829-d6c31280-e417-11e9-8b01-3ec7f32c76b5.png) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `shopelectro/logic/header.py` Content: ``` 1 from django.conf import settings 2 from django.db.models import Q 3 4 from pages import models as pages_models 5 from shopelectro import models 6 7 8 def menu_qs() -> pages_models.PageQuerySet: 9 """ 10 QuerySet with header menu items. 11 12 Contains root categories. 13 Result can be tuned HEADER_LINKS settings option. 14 """ 15 return ( 16 pages_models.Page.objects.active() 17 .filter( 18 Q(slug__in=settings.HEADER_LINKS['add']) 19 | ( 20 # @todo #974:30m Optimize the header menu query. 21 # Fetch catalog page for the header menu at the same query. 22 # root category pages. 23 Q(parent=pages_models.CustomPage.objects.filter(slug='catalog')) 24 & Q(type='model') 25 & Q(related_model_name=models.Category._meta.db_table) 26 & ~Q(slug__in=settings.HEADER_LINKS['exclude']) 27 ) 28 ) 29 .order_by('position') 30 ) 31 ``` Path: `shopelectro/templatetags/se_extras.py` Content: ``` 1 import datetime 2 import math 3 4 from django import template 5 from django.conf import settings 6 from django.contrib.humanize.templatetags.humanize import intcomma 7 from django.template.defaultfilters import floatformat 8 from django.urls import reverse 9 10 from images.models import ImageMixin 11 from pages.models import Page 12 from shopelectro import logic 13 14 register = template.Library() 15 16 17 @register.simple_tag 18 def roots(): 19 return logic.header.menu_qs() 20 21 22 @register.simple_tag 23 def footer_links(): 24 return settings.FOOTER_LINKS 25 26 27 @register.filter 28 def class_name(model): 29 """Return Model name.""" 30 return type(model).__name__ 31 32 33 @register.simple_tag 34 def time_to_call(): 35 def is_weekend(t): 36 return t.weekday() > 4 37 38 def is_friday(t): 39 return t.weekday() == 4 40 41 def not_yet_opened(t): 42 current_time = (t.hour, t.minute) 43 open_time = (10, 00) 44 return current_time < open_time and not is_weekend(t) 45 46 def is_closed(t): 47 current_time = (t.hour, t.minute) 48 closing_time = (16, 30) if is_friday(t) else (17, 30) 49 return current_time > closing_time 50 51 when_we_call = { 52 lambda now: is_weekend(now) or (is_friday(now) and is_closed(now)): 'В понедельник в 10:30', 53 lambda now: not_yet_opened(now): 'Сегодня в 10:30', 54 lambda now: is_closed(now) and not (is_friday(now) or is_weekend(now)): 'Завтра в 10:30', 55 lambda _: True: 'В течение 30 минут' 56 } 57 58 time_ = datetime.datetime.now() 59 call = ' позвонит менеджер и обсудит детали доставки.' 60 for condition, time in when_we_call.items(): 61 if condition(time_): 62 return time + call 63 64 65 @register.simple_tag 66 def full_url(url_name, *args): 67 return settings.BASE_URL + reverse(url_name, args=args) 68 69 70 @register.filter 71 def humanize_price(price): 72 return intcomma(floatformat(price, 0)) 73 74 75 @register.filter 76 def show_price_in_units(item): 77 if (getattr(item, 'in_pack', 1) > 1): 78 return 'руб / упаковка' 79 return 'руб / шт' 80 81 82 # Not good code, but duker at 06/10/2016 don't know how to fix it. 83 # It makes Image model very complex. 84 @register.simple_tag 85 def get_img_alt(entity: ImageMixin): 86 product_alt = 'Фотография {}' 87 logo_alt = 'Логотип компании Shopelectro' 88 89 if not isinstance(entity, Page): 90 return logo_alt 91 92 # try one of this attributes to get pages name 93 name_attrs = ['h1', 'title', 'name'] 94 entity_name = next( 95 getattr(entity, attr) 96 for attr in name_attrs 97 if getattr(entity, attr) 98 ) 99 return product_alt.format(entity_name) 100 101 102 @register.simple_tag 103 def main_image_or_logo(page: Page): 104 """Used for microdata.""" 105 if hasattr(page, 'main_image') and page.main_image: 106 return page.main_image.url 107 else: 108 return settings.STATIC_URL + 'images/logo.png' 109 110 111 @register.inclusion_tag('catalog/product_feedbacks_icons.html') 112 def icon_stars(rating=0): 113 """Render set of rating icons based on 1 through 5 rating values.""" 114 full_icons = int(math.floor(rating)) 115 half_icons = 0 if rating == int(rating) else 1 116 empty_icons = 5 - full_icons - half_icons 117 118 return { 119 'full_icons': range(full_icons), 120 'half_icons': range(half_icons), 121 'empty_icons': range(empty_icons), 122 } 123 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/shopelectro/logic/header.py b/shopelectro/logic/header.py --- a/shopelectro/logic/header.py +++ b/shopelectro/logic/header.py @@ -1,3 +1,6 @@ +import typing +from functools import lru_cache + from django.conf import settings from django.db.models import Q @@ -5,26 +8,41 @@ from shopelectro import models -def menu_qs() -> pages_models.PageQuerySet: - """ - QuerySet with header menu items. +class Menu: + DICT_TYPE = typing.Dict[models.CategoryPage, typing.List[models.CategoryPage]] + + @staticmethod + def roots() -> pages_models.PageQuerySet: + """ + QuerySet with header menu items. - Contains root categories. - Result can be tuned HEADER_LINKS settings option. - """ - return ( - pages_models.Page.objects.active() - .filter( - Q(slug__in=settings.HEADER_LINKS['add']) - | ( - # @todo #974:30m Optimize the header menu query. - # Fetch catalog page for the header menu at the same query. - # root category pages. - Q(parent=pages_models.CustomPage.objects.filter(slug='catalog')) - & Q(type='model') - & Q(related_model_name=models.Category._meta.db_table) - & ~Q(slug__in=settings.HEADER_LINKS['exclude']) + Contains root categories. + Result can be tuned HEADER_LINKS settings option. + """ + return ( + pages_models.Page.objects.active() + .filter( + Q(slug__in=settings.HEADER_LINKS['add']) + | ( + # @todo #974:30m Optimize the header menu query. + # Fetch catalog page for the header menu at the same query. + # root category pages. + Q(parent=pages_models.CustomPage.objects.filter(slug='catalog')) + & Q(type='model') + & Q(related_model_name=models.Category._meta.db_table) + & ~Q(slug__in=settings.HEADER_LINKS['exclude']) + ) ) + .order_by('position') ) - .order_by('position') - ) + + @lru_cache(maxsize=1) + def as_dict(self) -> DICT_TYPE: + return { + root: list( + root.get_children() + .filter(type='model') + .filter(related_model_name=models.Category._meta.db_table) + ) + for root in self.roots().iterator() + } diff --git a/shopelectro/templatetags/se_extras.py b/shopelectro/templatetags/se_extras.py --- a/shopelectro/templatetags/se_extras.py +++ b/shopelectro/templatetags/se_extras.py @@ -15,8 +15,8 @@ @register.simple_tag -def roots(): - return logic.header.menu_qs() +def header_menu() -> logic.header.Menu: + return logic.header.Menu() @register.simple_tag
{"golden_diff": "diff --git a/shopelectro/logic/header.py b/shopelectro/logic/header.py\n--- a/shopelectro/logic/header.py\n+++ b/shopelectro/logic/header.py\n@@ -1,3 +1,6 @@\n+import typing\n+from functools import lru_cache\n+\n from django.conf import settings\n from django.db.models import Q\n \n@@ -5,26 +8,41 @@\n from shopelectro import models\n \n \n-def menu_qs() -> pages_models.PageQuerySet:\n- \"\"\"\n- QuerySet with header menu items.\n+class Menu:\n+ DICT_TYPE = typing.Dict[models.CategoryPage, typing.List[models.CategoryPage]]\n+\n+ @staticmethod\n+ def roots() -> pages_models.PageQuerySet:\n+ \"\"\"\n+ QuerySet with header menu items.\n \n- Contains root categories.\n- Result can be tuned HEADER_LINKS settings option.\n- \"\"\"\n- return (\n- pages_models.Page.objects.active()\n- .filter(\n- Q(slug__in=settings.HEADER_LINKS['add'])\n- | (\n- # @todo #974:30m Optimize the header menu query.\n- # Fetch catalog page for the header menu at the same query.\n- # root category pages.\n- Q(parent=pages_models.CustomPage.objects.filter(slug='catalog'))\n- & Q(type='model')\n- & Q(related_model_name=models.Category._meta.db_table)\n- & ~Q(slug__in=settings.HEADER_LINKS['exclude'])\n+ Contains root categories.\n+ Result can be tuned HEADER_LINKS settings option.\n+ \"\"\"\n+ return (\n+ pages_models.Page.objects.active()\n+ .filter(\n+ Q(slug__in=settings.HEADER_LINKS['add'])\n+ | (\n+ # @todo #974:30m Optimize the header menu query.\n+ # Fetch catalog page for the header menu at the same query.\n+ # root category pages.\n+ Q(parent=pages_models.CustomPage.objects.filter(slug='catalog'))\n+ & Q(type='model')\n+ & Q(related_model_name=models.Category._meta.db_table)\n+ & ~Q(slug__in=settings.HEADER_LINKS['exclude'])\n+ )\n )\n+ .order_by('position')\n )\n- .order_by('position')\n- )\n+\n+ @lru_cache(maxsize=1)\n+ def as_dict(self) -> DICT_TYPE:\n+ return {\n+ root: list(\n+ root.get_children()\n+ .filter(type='model')\n+ .filter(related_model_name=models.Category._meta.db_table)\n+ )\n+ for root in self.roots().iterator()\n+ }\ndiff --git a/shopelectro/templatetags/se_extras.py b/shopelectro/templatetags/se_extras.py\n--- a/shopelectro/templatetags/se_extras.py\n+++ b/shopelectro/templatetags/se_extras.py\n@@ -15,8 +15,8 @@\n \n \n @register.simple_tag\n-def roots():\n- return logic.header.menu_qs()\n+def header_menu() -> logic.header.Menu:\n+ return logic.header.Menu()\n \n \n @register.simple_tag\n", "issue": "Render only first-leveled children at the header menu\nNow we have too many categories at the header menu\r\n![image](https://user-images.githubusercontent.com/3526796/65932829-d6c31280-e417-11e9-8b01-3ec7f32c76b5.png)\r\n\n", "before_files": [{"content": "from django.conf import settings\nfrom django.db.models import Q\n\nfrom pages import models as pages_models\nfrom shopelectro import models\n\n\ndef menu_qs() -> pages_models.PageQuerySet:\n \"\"\"\n QuerySet with header menu items.\n\n Contains root categories.\n Result can be tuned HEADER_LINKS settings option.\n \"\"\"\n return (\n pages_models.Page.objects.active()\n .filter(\n Q(slug__in=settings.HEADER_LINKS['add'])\n | (\n # @todo #974:30m Optimize the header menu query.\n # Fetch catalog page for the header menu at the same query.\n # root category pages.\n Q(parent=pages_models.CustomPage.objects.filter(slug='catalog'))\n & Q(type='model')\n & Q(related_model_name=models.Category._meta.db_table)\n & ~Q(slug__in=settings.HEADER_LINKS['exclude'])\n )\n )\n .order_by('position')\n )\n", "path": "shopelectro/logic/header.py"}, {"content": "import datetime\nimport math\n\nfrom django import template\nfrom django.conf import settings\nfrom django.contrib.humanize.templatetags.humanize import intcomma\nfrom django.template.defaultfilters import floatformat\nfrom django.urls import reverse\n\nfrom images.models import ImageMixin\nfrom pages.models import Page\nfrom shopelectro import logic\n\nregister = template.Library()\n\n\[email protected]_tag\ndef roots():\n return logic.header.menu_qs()\n\n\[email protected]_tag\ndef footer_links():\n return settings.FOOTER_LINKS\n\n\[email protected]\ndef class_name(model):\n \"\"\"Return Model name.\"\"\"\n return type(model).__name__\n\n\[email protected]_tag\ndef time_to_call():\n def is_weekend(t):\n return t.weekday() > 4\n\n def is_friday(t):\n return t.weekday() == 4\n\n def not_yet_opened(t):\n current_time = (t.hour, t.minute)\n open_time = (10, 00)\n return current_time < open_time and not is_weekend(t)\n\n def is_closed(t):\n current_time = (t.hour, t.minute)\n closing_time = (16, 30) if is_friday(t) else (17, 30)\n return current_time > closing_time\n\n when_we_call = {\n lambda now: is_weekend(now) or (is_friday(now) and is_closed(now)): '\u0412 \u043f\u043e\u043d\u0435\u0434\u0435\u043b\u044c\u043d\u0438\u043a \u0432 10:30',\n lambda now: not_yet_opened(now): '\u0421\u0435\u0433\u043e\u0434\u043d\u044f \u0432 10:30',\n lambda now: is_closed(now) and not (is_friday(now) or is_weekend(now)): '\u0417\u0430\u0432\u0442\u0440\u0430 \u0432 10:30',\n lambda _: True: '\u0412 \u0442\u0435\u0447\u0435\u043d\u0438\u0435 30 \u043c\u0438\u043d\u0443\u0442'\n }\n\n time_ = datetime.datetime.now()\n call = ' \u043f\u043e\u0437\u0432\u043e\u043d\u0438\u0442 \u043c\u0435\u043d\u0435\u0434\u0436\u0435\u0440 \u0438 \u043e\u0431\u0441\u0443\u0434\u0438\u0442 \u0434\u0435\u0442\u0430\u043b\u0438 \u0434\u043e\u0441\u0442\u0430\u0432\u043a\u0438.'\n for condition, time in when_we_call.items():\n if condition(time_):\n return time + call\n\n\[email protected]_tag\ndef full_url(url_name, *args):\n return settings.BASE_URL + reverse(url_name, args=args)\n\n\[email protected]\ndef humanize_price(price):\n return intcomma(floatformat(price, 0))\n\n\[email protected]\ndef show_price_in_units(item):\n if (getattr(item, 'in_pack', 1) > 1):\n return '\u0440\u0443\u0431 / \u0443\u043f\u0430\u043a\u043e\u0432\u043a\u0430'\n return '\u0440\u0443\u0431 / \u0448\u0442'\n\n\n# Not good code, but duker at 06/10/2016 don't know how to fix it.\n# It makes Image model very complex.\[email protected]_tag\ndef get_img_alt(entity: ImageMixin):\n product_alt = '\u0424\u043e\u0442\u043e\u0433\u0440\u0430\u0444\u0438\u044f {}'\n logo_alt = '\u041b\u043e\u0433\u043e\u0442\u0438\u043f \u043a\u043e\u043c\u043f\u0430\u043d\u0438\u0438 Shopelectro'\n\n if not isinstance(entity, Page):\n return logo_alt\n\n # try one of this attributes to get pages name\n name_attrs = ['h1', 'title', 'name']\n entity_name = next(\n getattr(entity, attr)\n for attr in name_attrs\n if getattr(entity, attr)\n )\n return product_alt.format(entity_name)\n\n\[email protected]_tag\ndef main_image_or_logo(page: Page):\n \"\"\"Used for microdata.\"\"\"\n if hasattr(page, 'main_image') and page.main_image:\n return page.main_image.url\n else:\n return settings.STATIC_URL + 'images/logo.png'\n\n\[email protected]_tag('catalog/product_feedbacks_icons.html')\ndef icon_stars(rating=0):\n \"\"\"Render set of rating icons based on 1 through 5 rating values.\"\"\"\n full_icons = int(math.floor(rating))\n half_icons = 0 if rating == int(rating) else 1\n empty_icons = 5 - full_icons - half_icons\n\n return {\n 'full_icons': range(full_icons),\n 'half_icons': range(half_icons),\n 'empty_icons': range(empty_icons),\n }\n", "path": "shopelectro/templatetags/se_extras.py"}], "after_files": [{"content": "import typing\nfrom functools import lru_cache\n\nfrom django.conf import settings\nfrom django.db.models import Q\n\nfrom pages import models as pages_models\nfrom shopelectro import models\n\n\nclass Menu:\n DICT_TYPE = typing.Dict[models.CategoryPage, typing.List[models.CategoryPage]]\n\n @staticmethod\n def roots() -> pages_models.PageQuerySet:\n \"\"\"\n QuerySet with header menu items.\n\n Contains root categories.\n Result can be tuned HEADER_LINKS settings option.\n \"\"\"\n return (\n pages_models.Page.objects.active()\n .filter(\n Q(slug__in=settings.HEADER_LINKS['add'])\n | (\n # @todo #974:30m Optimize the header menu query.\n # Fetch catalog page for the header menu at the same query.\n # root category pages.\n Q(parent=pages_models.CustomPage.objects.filter(slug='catalog'))\n & Q(type='model')\n & Q(related_model_name=models.Category._meta.db_table)\n & ~Q(slug__in=settings.HEADER_LINKS['exclude'])\n )\n )\n .order_by('position')\n )\n\n @lru_cache(maxsize=1)\n def as_dict(self) -> DICT_TYPE:\n return {\n root: list(\n root.get_children()\n .filter(type='model')\n .filter(related_model_name=models.Category._meta.db_table)\n )\n for root in self.roots().iterator()\n }\n", "path": "shopelectro/logic/header.py"}, {"content": "import datetime\nimport math\n\nfrom django import template\nfrom django.conf import settings\nfrom django.contrib.humanize.templatetags.humanize import intcomma\nfrom django.template.defaultfilters import floatformat\nfrom django.urls import reverse\n\nfrom images.models import ImageMixin\nfrom pages.models import Page\nfrom shopelectro import logic\n\nregister = template.Library()\n\n\[email protected]_tag\ndef header_menu() -> logic.header.Menu:\n return logic.header.Menu()\n\n\[email protected]_tag\ndef footer_links():\n return settings.FOOTER_LINKS\n\n\[email protected]\ndef class_name(model):\n \"\"\"Return Model name.\"\"\"\n return type(model).__name__\n\n\[email protected]_tag\ndef time_to_call():\n def is_weekend(t):\n return t.weekday() > 4\n\n def is_friday(t):\n return t.weekday() == 4\n\n def not_yet_opened(t):\n current_time = (t.hour, t.minute)\n open_time = (10, 00)\n return current_time < open_time and not is_weekend(t)\n\n def is_closed(t):\n current_time = (t.hour, t.minute)\n closing_time = (16, 30) if is_friday(t) else (17, 30)\n return current_time > closing_time\n\n when_we_call = {\n lambda now: is_weekend(now) or (is_friday(now) and is_closed(now)): '\u0412 \u043f\u043e\u043d\u0435\u0434\u0435\u043b\u044c\u043d\u0438\u043a \u0432 10:30',\n lambda now: not_yet_opened(now): '\u0421\u0435\u0433\u043e\u0434\u043d\u044f \u0432 10:30',\n lambda now: is_closed(now) and not (is_friday(now) or is_weekend(now)): '\u0417\u0430\u0432\u0442\u0440\u0430 \u0432 10:30',\n lambda _: True: '\u0412 \u0442\u0435\u0447\u0435\u043d\u0438\u0435 30 \u043c\u0438\u043d\u0443\u0442'\n }\n\n time_ = datetime.datetime.now()\n call = ' \u043f\u043e\u0437\u0432\u043e\u043d\u0438\u0442 \u043c\u0435\u043d\u0435\u0434\u0436\u0435\u0440 \u0438 \u043e\u0431\u0441\u0443\u0434\u0438\u0442 \u0434\u0435\u0442\u0430\u043b\u0438 \u0434\u043e\u0441\u0442\u0430\u0432\u043a\u0438.'\n for condition, time in when_we_call.items():\n if condition(time_):\n return time + call\n\n\[email protected]_tag\ndef full_url(url_name, *args):\n return settings.BASE_URL + reverse(url_name, args=args)\n\n\[email protected]\ndef humanize_price(price):\n return intcomma(floatformat(price, 0))\n\n\[email protected]\ndef show_price_in_units(item):\n if (getattr(item, 'in_pack', 1) > 1):\n return '\u0440\u0443\u0431 / \u0443\u043f\u0430\u043a\u043e\u0432\u043a\u0430'\n return '\u0440\u0443\u0431 / \u0448\u0442'\n\n\n# Not good code, but duker at 06/10/2016 don't know how to fix it.\n# It makes Image model very complex.\[email protected]_tag\ndef get_img_alt(entity: ImageMixin):\n product_alt = '\u0424\u043e\u0442\u043e\u0433\u0440\u0430\u0444\u0438\u044f {}'\n logo_alt = '\u041b\u043e\u0433\u043e\u0442\u0438\u043f \u043a\u043e\u043c\u043f\u0430\u043d\u0438\u0438 Shopelectro'\n\n if not isinstance(entity, Page):\n return logo_alt\n\n # try one of this attributes to get pages name\n name_attrs = ['h1', 'title', 'name']\n entity_name = next(\n getattr(entity, attr)\n for attr in name_attrs\n if getattr(entity, attr)\n )\n return product_alt.format(entity_name)\n\n\[email protected]_tag\ndef main_image_or_logo(page: Page):\n \"\"\"Used for microdata.\"\"\"\n if hasattr(page, 'main_image') and page.main_image:\n return page.main_image.url\n else:\n return settings.STATIC_URL + 'images/logo.png'\n\n\[email protected]_tag('catalog/product_feedbacks_icons.html')\ndef icon_stars(rating=0):\n \"\"\"Render set of rating icons based on 1 through 5 rating values.\"\"\"\n full_icons = int(math.floor(rating))\n half_icons = 0 if rating == int(rating) else 1\n empty_icons = 5 - full_icons - half_icons\n\n return {\n 'full_icons': range(full_icons),\n 'half_icons': range(half_icons),\n 'empty_icons': range(empty_icons),\n }\n", "path": "shopelectro/templatetags/se_extras.py"}]}
1,776
711
gh_patches_debug_4043
rasdani/github-patches
git_diff
hylang__hy-139
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Quote doesn't return valid lists ``` => (car (quote [if 1 2 3])) u'if' => (cdr (quote [if 1 2 3])) [1, 2, 3] ``` => OK ``` => (car (quote (if 1 2 3))) u'_hy_hoisted_fn_1' => (car (car (quote (if 1 2 3)))) u'_' => (cdr (quote (if 1 2 3))) [] ``` => Not ok --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `hy/core/mangles.py` Content: ``` 1 # Copyright (c) 2013 Paul Tagliamonte <[email protected]> 2 # 3 # Permission is hereby granted, free of charge, to any person obtaining a 4 # copy of this software and associated documentation files (the "Software"), 5 # to deal in the Software without restriction, including without limitation 6 # the rights to use, copy, modify, merge, publish, distribute, sublicense, 7 # and/or sell copies of the Software, and to permit persons to whom the 8 # Software is furnished to do so, subject to the following conditions: 9 # 10 # The above copyright notice and this permission notice shall be included in 11 # all copies or substantial portions of the Software. 12 # 13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 16 # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 18 # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 # DEALINGS IN THE SOFTWARE. 20 21 from hy.models.expression import HyExpression 22 from hy.models.symbol import HySymbol 23 from hy.models.list import HyList 24 25 import hy.mangle 26 27 28 class HoistableMangle(hy.mangle.Mangle): 29 def should_hoist(self): 30 for frame in self.stack: 31 if frame is self.scope: 32 return False 33 34 if isinstance(frame, HyExpression) and frame != []: 35 call = frame[0] 36 if call in self.ignore: 37 continue 38 return True 39 return False 40 41 42 class FunctionMangle(HoistableMangle): 43 hoistable = ["fn"] 44 ignore = ["def", "decorate_with", "setf", "setv", "foreach", "do"] 45 46 def __init__(self): 47 self.series = 0 48 49 def unique_name(self): 50 self.series += 1 51 return "_hy_hoisted_fn_%s" % (self.series) 52 53 def visit(self, tree): 54 if isinstance(tree, HyExpression) and tree != []: 55 call = tree[0] 56 if call == "fn" and self.should_hoist(): 57 new_name = HySymbol(self.unique_name()) 58 new_name.replace(tree) 59 fn_def = HyExpression([HySymbol("def"), 60 new_name, 61 tree]) 62 fn_def.replace(tree) 63 self.hoist(fn_def) 64 return new_name 65 66 67 class IfMangle(HoistableMangle): 68 ignore = ["foreach", "do"] 69 70 def __init__(self): 71 self.series = 0 72 73 def visit(self, tree): 74 if isinstance(tree, HyExpression) and tree != []: 75 call = tree[0] 76 if call == "if" and self.should_hoist(): 77 fn = HyExpression([HyExpression([HySymbol("fn"), 78 HyList([]), 79 tree])]) 80 fn.replace(tree) 81 return fn 82 83 84 hy.mangle.MANGLES.append(IfMangle) 85 hy.mangle.MANGLES.append(FunctionMangle) 86 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/hy/core/mangles.py b/hy/core/mangles.py --- a/hy/core/mangles.py +++ b/hy/core/mangles.py @@ -27,6 +27,11 @@ class HoistableMangle(hy.mangle.Mangle): def should_hoist(self): + for frame in self.stack: + if (isinstance(frame, HyExpression) and + frame and frame[0] == "quote"): + return False + for frame in self.stack: if frame is self.scope: return False
{"golden_diff": "diff --git a/hy/core/mangles.py b/hy/core/mangles.py\n--- a/hy/core/mangles.py\n+++ b/hy/core/mangles.py\n@@ -27,6 +27,11 @@\n \n class HoistableMangle(hy.mangle.Mangle):\n def should_hoist(self):\n+ for frame in self.stack:\n+ if (isinstance(frame, HyExpression) and\n+ frame and frame[0] == \"quote\"):\n+ return False\n+\n for frame in self.stack:\n if frame is self.scope:\n return False\n", "issue": "Quote doesn't return valid lists\n```\n=> (car (quote [if 1 2 3]))\nu'if'\n=> (cdr (quote [if 1 2 3]))\n[1, 2, 3]\n```\n\n=> OK\n\n```\n=> (car (quote (if 1 2 3)))\nu'_hy_hoisted_fn_1'\n=> (car (car (quote (if 1 2 3))))\nu'_'\n=> (cdr (quote (if 1 2 3)))\n[]\n```\n\n=> Not ok\n\n", "before_files": [{"content": "# Copyright (c) 2013 Paul Tagliamonte <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nfrom hy.models.expression import HyExpression\nfrom hy.models.symbol import HySymbol\nfrom hy.models.list import HyList\n\nimport hy.mangle\n\n\nclass HoistableMangle(hy.mangle.Mangle):\n def should_hoist(self):\n for frame in self.stack:\n if frame is self.scope:\n return False\n\n if isinstance(frame, HyExpression) and frame != []:\n call = frame[0]\n if call in self.ignore:\n continue\n return True\n return False\n\n\nclass FunctionMangle(HoistableMangle):\n hoistable = [\"fn\"]\n ignore = [\"def\", \"decorate_with\", \"setf\", \"setv\", \"foreach\", \"do\"]\n\n def __init__(self):\n self.series = 0\n\n def unique_name(self):\n self.series += 1\n return \"_hy_hoisted_fn_%s\" % (self.series)\n\n def visit(self, tree):\n if isinstance(tree, HyExpression) and tree != []:\n call = tree[0]\n if call == \"fn\" and self.should_hoist():\n new_name = HySymbol(self.unique_name())\n new_name.replace(tree)\n fn_def = HyExpression([HySymbol(\"def\"),\n new_name,\n tree])\n fn_def.replace(tree)\n self.hoist(fn_def)\n return new_name\n\n\nclass IfMangle(HoistableMangle):\n ignore = [\"foreach\", \"do\"]\n\n def __init__(self):\n self.series = 0\n\n def visit(self, tree):\n if isinstance(tree, HyExpression) and tree != []:\n call = tree[0]\n if call == \"if\" and self.should_hoist():\n fn = HyExpression([HyExpression([HySymbol(\"fn\"),\n HyList([]),\n tree])])\n fn.replace(tree)\n return fn\n\n\nhy.mangle.MANGLES.append(IfMangle)\nhy.mangle.MANGLES.append(FunctionMangle)\n", "path": "hy/core/mangles.py"}], "after_files": [{"content": "# Copyright (c) 2013 Paul Tagliamonte <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nfrom hy.models.expression import HyExpression\nfrom hy.models.symbol import HySymbol\nfrom hy.models.list import HyList\n\nimport hy.mangle\n\n\nclass HoistableMangle(hy.mangle.Mangle):\n def should_hoist(self):\n for frame in self.stack:\n if (isinstance(frame, HyExpression) and\n frame and frame[0] == \"quote\"):\n return False\n\n for frame in self.stack:\n if frame is self.scope:\n return False\n\n if isinstance(frame, HyExpression) and frame != []:\n call = frame[0]\n if call in self.ignore:\n continue\n return True\n return False\n\n\nclass FunctionMangle(HoistableMangle):\n hoistable = [\"fn\"]\n ignore = [\"def\", \"decorate_with\", \"setf\", \"setv\", \"foreach\", \"do\"]\n\n def __init__(self):\n self.series = 0\n\n def unique_name(self):\n self.series += 1\n return \"_hy_hoisted_fn_%s\" % (self.series)\n\n def visit(self, tree):\n if isinstance(tree, HyExpression) and tree != []:\n call = tree[0]\n if call == \"fn\" and self.should_hoist():\n new_name = HySymbol(self.unique_name())\n new_name.replace(tree)\n fn_def = HyExpression([HySymbol(\"def\"),\n new_name,\n tree])\n fn_def.replace(tree)\n self.hoist(fn_def)\n return new_name\n\n\nclass IfMangle(HoistableMangle):\n ignore = [\"foreach\", \"do\"]\n\n def __init__(self):\n self.series = 0\n\n def visit(self, tree):\n if isinstance(tree, HyExpression) and tree != []:\n call = tree[0]\n if call == \"if\" and self.should_hoist():\n fn = HyExpression([HyExpression([HySymbol(\"fn\"),\n HyList([]),\n tree])])\n fn.replace(tree)\n return fn\n\n\nhy.mangle.MANGLES.append(IfMangle)\nhy.mangle.MANGLES.append(FunctionMangle)\n", "path": "hy/core/mangles.py"}]}
1,214
128
gh_patches_debug_6693
rasdani/github-patches
git_diff
sosreport__sos-3342
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [hpssm] controller collection misses Gen10+ controllers and above slot 9. 1. Gen10+ controllers changed the naming from **Smart Array** for at least some controllers. * `HPE SR932i-p Gen10+ in Slot 3` 2. Controllers are showing up above slot 9 even when there is only 1 or two total controllers. * `HPE Smart Array P816i-a SR Gen10 in Slot 12` This system had no controller in slot 0, a new naming style in slot 3, and the old naming style in slot 12. The `ssacli ctrl all show status` still lists them, and `ssacli ctrl all show config detail` still gets the config details of each. The current pattern fails to identify the slots in both cases, leading to not executing individual slot based commands or executing on the wrong slot. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sos/report/plugins/hpssm.py` Content: ``` 1 # This file is part of the sos project: https://github.com/sosreport/sos 2 # 3 # This copyrighted material is made available to anyone wishing to use, 4 # modify, copy, or redistribute it subject to the terms and conditions of 5 # version 2 of the GNU General Public License. 6 # 7 # See the LICENSE file in the source distribution for further information. 8 9 from sos.report.plugins import Plugin, IndependentPlugin, PluginOpt 10 import re 11 12 13 class Hpssm(Plugin, IndependentPlugin): 14 """ 15 This plugin will capture details for each controller from Smart Storage 16 Array Administrator, an Array diagnostic report from Smart Storage 17 Administrator Diagnostics Utility and, when the plugins debug option is 18 enabled will gather the Active Health System log via the RESTful Interface 19 Tool (iLOREST). 20 """ 21 short_desc = 'HP Smart Storage Management' 22 23 plugin_name = 'hpssm' 24 profiles = ('system', 'storage', 'hardware',) 25 packages = ('ilorest', 'ssacli', 'ssaducli',) 26 27 option_list = [ 28 PluginOpt('debug', default=False, desc='capture debug data') 29 ] 30 31 def setup(self): 32 cmd = 'ssacli' 33 subcmds = [ 34 'ctrl all show status' 35 ] 36 slot_subcmds = [ 37 'array all show detail', 38 'ld all show', 39 'ld all show detail', 40 'pd all show', 41 'pd all show detail', 42 'show detail' 43 ] 44 self.add_cmd_output( 45 ["%s %s" % (cmd, subcmd) for subcmd in subcmds] 46 ) 47 48 pattern = re.compile("^HP.*Smart Array (.*) in Slot ([0123456789])") 49 config_detail_cmd = cmd + ' ctrl all show config detail' 50 config_detail = self.collect_cmd_output(config_detail_cmd) 51 ctrl_slots = [] 52 if config_detail['status'] == 0: 53 ctrl_slots = [m.group(2) 54 for line in config_detail['output'].splitlines() 55 for m in [pattern.search(line)] if m] 56 ssacli_ctrl_slot_cmd = cmd + ' ctrl slot=' 57 self.add_cmd_output( 58 ["%s%s %s" % ( 59 ssacli_ctrl_slot_cmd, 60 slot, 61 slot_subcmd 62 ) 63 for slot in ctrl_slots 64 for slot_subcmd in slot_subcmds] 65 ) 66 67 logpath = self.get_cmd_output_path() 68 69 self.add_cmd_output( 70 'ssaducli -v -adu -f %s/adu-log.zip' % logpath, 71 suggest_filename='ssaducli_-v_-adu.log' 72 ) 73 74 if self.get_option("debug"): 75 self.do_debug(logpath) 76 77 def do_debug(self, logpath): 78 self.add_cmd_output( 79 'ilorest serverlogs --selectlog=AHS --directorypath=%s' % logpath, 80 runat=logpath, suggest_filename='ilorest.log' 81 ) 82 83 # vim: set et ts=4 sw=4 : 84 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sos/report/plugins/hpssm.py b/sos/report/plugins/hpssm.py --- a/sos/report/plugins/hpssm.py +++ b/sos/report/plugins/hpssm.py @@ -45,7 +45,7 @@ ["%s %s" % (cmd, subcmd) for subcmd in subcmds] ) - pattern = re.compile("^HP.*Smart Array (.*) in Slot ([0123456789])") + pattern = re.compile("^HP[E] (.*) in Slot ([0123456789]+)") config_detail_cmd = cmd + ' ctrl all show config detail' config_detail = self.collect_cmd_output(config_detail_cmd) ctrl_slots = []
{"golden_diff": "diff --git a/sos/report/plugins/hpssm.py b/sos/report/plugins/hpssm.py\n--- a/sos/report/plugins/hpssm.py\n+++ b/sos/report/plugins/hpssm.py\n@@ -45,7 +45,7 @@\n [\"%s %s\" % (cmd, subcmd) for subcmd in subcmds]\n )\n \n- pattern = re.compile(\"^HP.*Smart Array (.*) in Slot ([0123456789])\")\n+ pattern = re.compile(\"^HP[E] (.*) in Slot ([0123456789]+)\")\n config_detail_cmd = cmd + ' ctrl all show config detail'\n config_detail = self.collect_cmd_output(config_detail_cmd)\n ctrl_slots = []\n", "issue": "[hpssm] controller collection misses Gen10+ controllers and above slot 9.\n1. Gen10+ controllers changed the naming from **Smart Array** for at least some controllers.\r\n * `HPE SR932i-p Gen10+ in Slot 3`\r\n2. Controllers are showing up above slot 9 even when there is only 1 or two total controllers.\r\n * `HPE Smart Array P816i-a SR Gen10 in Slot 12`\r\n\r\nThis system had no controller in slot 0, a new naming style in slot 3, and the old naming style in slot 12. \r\nThe `ssacli ctrl all show status` still lists them, and `ssacli ctrl all show config detail` still gets the config details of each. The current pattern fails to identify the slots in both cases, leading to not executing individual slot based commands or executing on the wrong slot.\r\n\r\n\n", "before_files": [{"content": "# This file is part of the sos project: https://github.com/sosreport/sos\n#\n# This copyrighted material is made available to anyone wishing to use,\n# modify, copy, or redistribute it subject to the terms and conditions of\n# version 2 of the GNU General Public License.\n#\n# See the LICENSE file in the source distribution for further information.\n\nfrom sos.report.plugins import Plugin, IndependentPlugin, PluginOpt\nimport re\n\n\nclass Hpssm(Plugin, IndependentPlugin):\n \"\"\"\n This plugin will capture details for each controller from Smart Storage\n Array Administrator, an Array diagnostic report from Smart Storage\n Administrator Diagnostics Utility and, when the plugins debug option is\n enabled will gather the Active Health System log via the RESTful Interface\n Tool (iLOREST).\n \"\"\"\n short_desc = 'HP Smart Storage Management'\n\n plugin_name = 'hpssm'\n profiles = ('system', 'storage', 'hardware',)\n packages = ('ilorest', 'ssacli', 'ssaducli',)\n\n option_list = [\n PluginOpt('debug', default=False, desc='capture debug data')\n ]\n\n def setup(self):\n cmd = 'ssacli'\n subcmds = [\n 'ctrl all show status'\n ]\n slot_subcmds = [\n 'array all show detail',\n 'ld all show',\n 'ld all show detail',\n 'pd all show',\n 'pd all show detail',\n 'show detail'\n ]\n self.add_cmd_output(\n [\"%s %s\" % (cmd, subcmd) for subcmd in subcmds]\n )\n\n pattern = re.compile(\"^HP.*Smart Array (.*) in Slot ([0123456789])\")\n config_detail_cmd = cmd + ' ctrl all show config detail'\n config_detail = self.collect_cmd_output(config_detail_cmd)\n ctrl_slots = []\n if config_detail['status'] == 0:\n ctrl_slots = [m.group(2)\n for line in config_detail['output'].splitlines()\n for m in [pattern.search(line)] if m]\n ssacli_ctrl_slot_cmd = cmd + ' ctrl slot='\n self.add_cmd_output(\n [\"%s%s %s\" % (\n ssacli_ctrl_slot_cmd,\n slot,\n slot_subcmd\n )\n for slot in ctrl_slots\n for slot_subcmd in slot_subcmds]\n )\n\n logpath = self.get_cmd_output_path()\n\n self.add_cmd_output(\n 'ssaducli -v -adu -f %s/adu-log.zip' % logpath,\n suggest_filename='ssaducli_-v_-adu.log'\n )\n\n if self.get_option(\"debug\"):\n self.do_debug(logpath)\n\n def do_debug(self, logpath):\n self.add_cmd_output(\n 'ilorest serverlogs --selectlog=AHS --directorypath=%s' % logpath,\n runat=logpath, suggest_filename='ilorest.log'\n )\n\n# vim: set et ts=4 sw=4 :\n", "path": "sos/report/plugins/hpssm.py"}], "after_files": [{"content": "# This file is part of the sos project: https://github.com/sosreport/sos\n#\n# This copyrighted material is made available to anyone wishing to use,\n# modify, copy, or redistribute it subject to the terms and conditions of\n# version 2 of the GNU General Public License.\n#\n# See the LICENSE file in the source distribution for further information.\n\nfrom sos.report.plugins import Plugin, IndependentPlugin, PluginOpt\nimport re\n\n\nclass Hpssm(Plugin, IndependentPlugin):\n \"\"\"\n This plugin will capture details for each controller from Smart Storage\n Array Administrator, an Array diagnostic report from Smart Storage\n Administrator Diagnostics Utility and, when the plugins debug option is\n enabled will gather the Active Health System log via the RESTful Interface\n Tool (iLOREST).\n \"\"\"\n short_desc = 'HP Smart Storage Management'\n\n plugin_name = 'hpssm'\n profiles = ('system', 'storage', 'hardware',)\n packages = ('ilorest', 'ssacli', 'ssaducli',)\n\n option_list = [\n PluginOpt('debug', default=False, desc='capture debug data')\n ]\n\n def setup(self):\n cmd = 'ssacli'\n subcmds = [\n 'ctrl all show status'\n ]\n slot_subcmds = [\n 'array all show detail',\n 'ld all show',\n 'ld all show detail',\n 'pd all show',\n 'pd all show detail',\n 'show detail'\n ]\n self.add_cmd_output(\n [\"%s %s\" % (cmd, subcmd) for subcmd in subcmds]\n )\n\n pattern = re.compile(\"^HP[E] (.*) in Slot ([0123456789]+)\")\n config_detail_cmd = cmd + ' ctrl all show config detail'\n config_detail = self.collect_cmd_output(config_detail_cmd)\n ctrl_slots = []\n if config_detail['status'] == 0:\n ctrl_slots = [m.group(2)\n for line in config_detail['output'].splitlines()\n for m in [pattern.search(line)] if m]\n ssacli_ctrl_slot_cmd = cmd + ' ctrl slot='\n self.add_cmd_output(\n [\"%s%s %s\" % (\n ssacli_ctrl_slot_cmd,\n slot,\n slot_subcmd\n )\n for slot in ctrl_slots\n for slot_subcmd in slot_subcmds]\n )\n\n logpath = self.get_cmd_output_path()\n\n self.add_cmd_output(\n 'ssaducli -v -adu -f %s/adu-log.zip' % logpath,\n suggest_filename='ssaducli_-v_-adu.log'\n )\n\n if self.get_option(\"debug\"):\n self.do_debug(logpath)\n\n def do_debug(self, logpath):\n self.add_cmd_output(\n 'ilorest serverlogs --selectlog=AHS --directorypath=%s' % logpath,\n runat=logpath, suggest_filename='ilorest.log'\n )\n\n# vim: set et ts=4 sw=4 :\n", "path": "sos/report/plugins/hpssm.py"}]}
1,274
170
gh_patches_debug_56607
rasdani/github-patches
git_diff
spacetelescope__jwql-662
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Switch EDB to use MAST token from config.json always Currently the EDB feature of the JWQL web app attempts to authenticate users with whichever MAST token is currently cached (https://github.com/spacetelescope/jwql/blob/develop/jwql/utils/credentials.py#L45), and if that doesn't succeed, _then_ it uses the `mast_token` key in the `config.json` file. This seems problematic if users are creating new tokens but then attempting to perform EDB queries in the same browser. We should probably switch this to just use the `mast_token` key in `config.json` file always. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `jwql/utils/credentials.py` Content: ``` 1 """Utility functions related to accessing remote services and databases. 2 3 Authors 4 ------- 5 6 - Johannes Sahlmann 7 - Lauren Chambers 8 9 Use 10 --- 11 12 This module can be imported as such: 13 :: 14 15 import credentials 16 token = credentials.get_mast_token() 17 18 """ 19 import os 20 21 from astroquery.mast import Mast 22 23 from jwql.utils.utils import get_config, check_config_for_key 24 25 26 def get_mast_token(request=None): 27 """Return MAST token from either Astroquery.Mast, webpage cookies, the 28 JWQL configuration file, or an environment variable. 29 30 Parameters 31 ---------- 32 request : HttpRequest object 33 Incoming request from the webpage 34 35 Returns 36 ------- 37 token : str or None 38 User-specific MAST token string, if available 39 """ 40 if Mast.authenticated(): 41 print('Authenticated with Astroquery MAST magic') 42 return None 43 else: 44 if request is not None: 45 token = str(request.POST.get('access_token')) 46 if token != 'None': 47 print('Authenticated with cached MAST token.') 48 return token 49 try: 50 # check if token is available via config file 51 check_config_for_key('mast_token') 52 token = get_config()['mast_token'] 53 print('Authenticated with config.json MAST token.') 54 return token 55 except (KeyError, ValueError): 56 # check if token is available via environment variable 57 # see https://auth.mast.stsci.edu/info 58 try: 59 token = os.environ['MAST_API_TOKEN'] 60 print('Authenticated with MAST token environment variable.') 61 return token 62 except KeyError: 63 return None 64 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/jwql/utils/credentials.py b/jwql/utils/credentials.py --- a/jwql/utils/credentials.py +++ b/jwql/utils/credentials.py @@ -41,11 +41,6 @@ print('Authenticated with Astroquery MAST magic') return None else: - if request is not None: - token = str(request.POST.get('access_token')) - if token != 'None': - print('Authenticated with cached MAST token.') - return token try: # check if token is available via config file check_config_for_key('mast_token')
{"golden_diff": "diff --git a/jwql/utils/credentials.py b/jwql/utils/credentials.py\n--- a/jwql/utils/credentials.py\n+++ b/jwql/utils/credentials.py\n@@ -41,11 +41,6 @@\n print('Authenticated with Astroquery MAST magic')\n return None\n else:\n- if request is not None:\n- token = str(request.POST.get('access_token'))\n- if token != 'None':\n- print('Authenticated with cached MAST token.')\n- return token\n try:\n # check if token is available via config file\n check_config_for_key('mast_token')\n", "issue": "Switch EDB to use MAST token from config.json always\nCurrently the EDB feature of the JWQL web app attempts to authenticate users with whichever MAST token is currently cached (https://github.com/spacetelescope/jwql/blob/develop/jwql/utils/credentials.py#L45), and if that doesn't succeed, _then_ it uses the `mast_token` key in the `config.json` file. This seems problematic if users are creating new tokens but then attempting to perform EDB queries in the same browser. We should probably switch this to just use the `mast_token` key in `config.json` file always. \n", "before_files": [{"content": "\"\"\"Utility functions related to accessing remote services and databases.\n\nAuthors\n-------\n\n - Johannes Sahlmann\n - Lauren Chambers\n\nUse\n---\n\n This module can be imported as such:\n ::\n\n import credentials\n token = credentials.get_mast_token()\n\n \"\"\"\nimport os\n\nfrom astroquery.mast import Mast\n\nfrom jwql.utils.utils import get_config, check_config_for_key\n\n\ndef get_mast_token(request=None):\n \"\"\"Return MAST token from either Astroquery.Mast, webpage cookies, the\n JWQL configuration file, or an environment variable.\n\n Parameters\n ----------\n request : HttpRequest object\n Incoming request from the webpage\n\n Returns\n -------\n token : str or None\n User-specific MAST token string, if available\n \"\"\"\n if Mast.authenticated():\n print('Authenticated with Astroquery MAST magic')\n return None\n else:\n if request is not None:\n token = str(request.POST.get('access_token'))\n if token != 'None':\n print('Authenticated with cached MAST token.')\n return token\n try:\n # check if token is available via config file\n check_config_for_key('mast_token')\n token = get_config()['mast_token']\n print('Authenticated with config.json MAST token.')\n return token\n except (KeyError, ValueError):\n # check if token is available via environment variable\n # see https://auth.mast.stsci.edu/info\n try:\n token = os.environ['MAST_API_TOKEN']\n print('Authenticated with MAST token environment variable.')\n return token\n except KeyError:\n return None\n", "path": "jwql/utils/credentials.py"}], "after_files": [{"content": "\"\"\"Utility functions related to accessing remote services and databases.\n\nAuthors\n-------\n\n - Johannes Sahlmann\n - Lauren Chambers\n\nUse\n---\n\n This module can be imported as such:\n ::\n\n import credentials\n token = credentials.get_mast_token()\n\n \"\"\"\nimport os\n\nfrom astroquery.mast import Mast\n\nfrom jwql.utils.utils import get_config, check_config_for_key\n\n\ndef get_mast_token(request=None):\n \"\"\"Return MAST token from either Astroquery.Mast, webpage cookies, the\n JWQL configuration file, or an environment variable.\n\n Parameters\n ----------\n request : HttpRequest object\n Incoming request from the webpage\n\n Returns\n -------\n token : str or None\n User-specific MAST token string, if available\n \"\"\"\n if Mast.authenticated():\n print('Authenticated with Astroquery MAST magic')\n return None\n else:\n try:\n # check if token is available via config file\n check_config_for_key('mast_token')\n token = get_config()['mast_token']\n print('Authenticated with config.json MAST token.')\n return token\n except (KeyError, ValueError):\n # check if token is available via environment variable\n # see https://auth.mast.stsci.edu/info\n try:\n token = os.environ['MAST_API_TOKEN']\n print('Authenticated with MAST token environment variable.')\n return token\n except KeyError:\n return None\n", "path": "jwql/utils/credentials.py"}]}
868
138
gh_patches_debug_19238
rasdani/github-patches
git_diff
bridgecrewio__checkov-3921
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Duplicate checks CKV_K8S_29 and CKV_K8S_30 ? It seem that both checks are the same, at least in the description : ``` Check: CKV_K8S_29: "Apply security context to your pods and containers" FAILED for resource: module.some_module.kubernetes_deployment.app File: /base/main.tf:12-355 Calling File: /some_module.tf:1-116 Check: CKV_K8S_30: "Apply security context to your pods and containers" FAILED for resource: module.some_module.kubernetes_deployment.app File: /base/main.tf:12-355 Calling File: /some_module.tf:1-116 ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `checkov/kubernetes/checks/resource/k8s/ContainerSecurityContext.py` Content: ``` 1 from typing import Any, Dict 2 3 from checkov.common.models.enums import CheckResult 4 from checkov.kubernetes.checks.resource.base_container_check import BaseK8sContainerCheck 5 6 7 class ContainerSecurityContext(BaseK8sContainerCheck): 8 def __init__(self) -> None: 9 # CIS-1.5 5.7.3 10 name = "Apply security context to your pods and containers" 11 # Security context can be set at pod or container level. 12 # Location: container .securityContext 13 id = "CKV_K8S_30" 14 super().__init__(name=name, id=id) 15 16 def scan_container_conf(self, metadata: Dict[str, Any], conf: Dict[str, Any]) -> CheckResult: 17 self.evaluated_container_keys = ["securityContext"] 18 if conf.get("securityContext"): 19 return CheckResult.PASSED 20 return CheckResult.FAILED 21 22 23 check = ContainerSecurityContext() 24 ``` Path: `checkov/terraform/checks/resource/kubernetes/PodSecurityContext.py` Content: ``` 1 from checkov.common.models.enums import CheckCategories, CheckResult 2 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck 3 4 5 class PodSecurityContext(BaseResourceCheck): 6 7 def __init__(self): 8 # CIS-1.5 5.7.3 9 name = "Apply security context to your pods and containers" 10 # Security context can be set at pod or container level. 11 id = "CKV_K8S_29" 12 13 supported_resources = ('kubernetes_pod', 'kubernetes_pod_v1', 14 'kubernetes_deployment', 'kubernetes_deployment_v1', 15 'kubernetes_daemonset', 'kubernetes_daemon_set_v1') 16 categories = (CheckCategories.GENERAL_SECURITY,) 17 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) 18 19 def scan_resource_conf(self, conf) -> CheckResult: 20 if "spec" not in conf: 21 self.evaluated_keys = [""] 22 return CheckResult.FAILED 23 spec = conf['spec'][0] 24 if spec.get("container"): 25 containers = spec.get("container") 26 27 for idx, container in enumerate(containers): 28 if type(container) != dict: 29 return CheckResult.UNKNOWN 30 31 if not container.get("security_context"): 32 self.evaluated_keys = [f"spec/[0]/container/{idx}"] 33 return CheckResult.FAILED 34 return CheckResult.PASSED 35 36 if spec.get("template") and isinstance(spec.get("template"), list): 37 template = spec.get("template")[0] 38 if template.get("spec") and isinstance(template.get("spec"), list): 39 temp_spec = template.get("spec")[0] 40 if temp_spec.get("container"): 41 containers = temp_spec.get("container") 42 43 for idx, container in enumerate(containers): 44 if type(container) != dict: 45 return CheckResult.UNKNOWN 46 47 if not container.get("security_context"): 48 self.evaluated_keys = [f"spec/[0]/template/[0]/spec/[0]/container/{idx}"] 49 return CheckResult.FAILED 50 return CheckResult.PASSED 51 return CheckResult.FAILED 52 53 54 check = PodSecurityContext() 55 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/checkov/kubernetes/checks/resource/k8s/ContainerSecurityContext.py b/checkov/kubernetes/checks/resource/k8s/ContainerSecurityContext.py --- a/checkov/kubernetes/checks/resource/k8s/ContainerSecurityContext.py +++ b/checkov/kubernetes/checks/resource/k8s/ContainerSecurityContext.py @@ -7,7 +7,7 @@ class ContainerSecurityContext(BaseK8sContainerCheck): def __init__(self) -> None: # CIS-1.5 5.7.3 - name = "Apply security context to your pods and containers" + name = "Apply security context to your containers" # Security context can be set at pod or container level. # Location: container .securityContext id = "CKV_K8S_30" diff --git a/checkov/terraform/checks/resource/kubernetes/PodSecurityContext.py b/checkov/terraform/checks/resource/kubernetes/PodSecurityContext.py --- a/checkov/terraform/checks/resource/kubernetes/PodSecurityContext.py +++ b/checkov/terraform/checks/resource/kubernetes/PodSecurityContext.py @@ -6,7 +6,7 @@ def __init__(self): # CIS-1.5 5.7.3 - name = "Apply security context to your pods and containers" + name = "Apply security context to your pods, deployments and daemon_sets" # Security context can be set at pod or container level. id = "CKV_K8S_29"
{"golden_diff": "diff --git a/checkov/kubernetes/checks/resource/k8s/ContainerSecurityContext.py b/checkov/kubernetes/checks/resource/k8s/ContainerSecurityContext.py\n--- a/checkov/kubernetes/checks/resource/k8s/ContainerSecurityContext.py\n+++ b/checkov/kubernetes/checks/resource/k8s/ContainerSecurityContext.py\n@@ -7,7 +7,7 @@\n class ContainerSecurityContext(BaseK8sContainerCheck):\n def __init__(self) -> None:\n # CIS-1.5 5.7.3\n- name = \"Apply security context to your pods and containers\"\n+ name = \"Apply security context to your containers\"\n # Security context can be set at pod or container level.\n # Location: container .securityContext\n id = \"CKV_K8S_30\"\ndiff --git a/checkov/terraform/checks/resource/kubernetes/PodSecurityContext.py b/checkov/terraform/checks/resource/kubernetes/PodSecurityContext.py\n--- a/checkov/terraform/checks/resource/kubernetes/PodSecurityContext.py\n+++ b/checkov/terraform/checks/resource/kubernetes/PodSecurityContext.py\n@@ -6,7 +6,7 @@\n \n def __init__(self):\n # CIS-1.5 5.7.3\n- name = \"Apply security context to your pods and containers\"\n+ name = \"Apply security context to your pods, deployments and daemon_sets\"\n # Security context can be set at pod or container level.\n id = \"CKV_K8S_29\"\n", "issue": "Duplicate checks CKV_K8S_29 and CKV_K8S_30 ?\nIt seem that both checks are the same, at least in the description :\r\n\r\n```\r\nCheck: CKV_K8S_29: \"Apply security context to your pods and containers\"\r\n\tFAILED for resource: module.some_module.kubernetes_deployment.app\r\n\tFile: /base/main.tf:12-355\r\n\tCalling File: /some_module.tf:1-116\r\n\r\nCheck: CKV_K8S_30: \"Apply security context to your pods and containers\"\r\n\tFAILED for resource: module.some_module.kubernetes_deployment.app\r\n\tFile: /base/main.tf:12-355\r\n\tCalling File: /some_module.tf:1-116\r\n```\n", "before_files": [{"content": "from typing import Any, Dict\n\nfrom checkov.common.models.enums import CheckResult\nfrom checkov.kubernetes.checks.resource.base_container_check import BaseK8sContainerCheck\n\n\nclass ContainerSecurityContext(BaseK8sContainerCheck):\n def __init__(self) -> None:\n # CIS-1.5 5.7.3\n name = \"Apply security context to your pods and containers\"\n # Security context can be set at pod or container level.\n # Location: container .securityContext\n id = \"CKV_K8S_30\"\n super().__init__(name=name, id=id)\n\n def scan_container_conf(self, metadata: Dict[str, Any], conf: Dict[str, Any]) -> CheckResult:\n self.evaluated_container_keys = [\"securityContext\"]\n if conf.get(\"securityContext\"):\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n\ncheck = ContainerSecurityContext()\n", "path": "checkov/kubernetes/checks/resource/k8s/ContainerSecurityContext.py"}, {"content": "from checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass PodSecurityContext(BaseResourceCheck):\n\n def __init__(self):\n # CIS-1.5 5.7.3\n name = \"Apply security context to your pods and containers\"\n # Security context can be set at pod or container level.\n id = \"CKV_K8S_29\"\n\n supported_resources = ('kubernetes_pod', 'kubernetes_pod_v1',\n 'kubernetes_deployment', 'kubernetes_deployment_v1',\n 'kubernetes_daemonset', 'kubernetes_daemon_set_v1')\n categories = (CheckCategories.GENERAL_SECURITY,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf) -> CheckResult:\n if \"spec\" not in conf:\n self.evaluated_keys = [\"\"]\n return CheckResult.FAILED\n spec = conf['spec'][0]\n if spec.get(\"container\"):\n containers = spec.get(\"container\")\n\n for idx, container in enumerate(containers):\n if type(container) != dict:\n return CheckResult.UNKNOWN\n\n if not container.get(\"security_context\"):\n self.evaluated_keys = [f\"spec/[0]/container/{idx}\"]\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n if spec.get(\"template\") and isinstance(spec.get(\"template\"), list):\n template = spec.get(\"template\")[0]\n if template.get(\"spec\") and isinstance(template.get(\"spec\"), list):\n temp_spec = template.get(\"spec\")[0]\n if temp_spec.get(\"container\"):\n containers = temp_spec.get(\"container\")\n\n for idx, container in enumerate(containers):\n if type(container) != dict:\n return CheckResult.UNKNOWN\n\n if not container.get(\"security_context\"):\n self.evaluated_keys = [f\"spec/[0]/template/[0]/spec/[0]/container/{idx}\"]\n return CheckResult.FAILED\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n\ncheck = PodSecurityContext()\n", "path": "checkov/terraform/checks/resource/kubernetes/PodSecurityContext.py"}], "after_files": [{"content": "from typing import Any, Dict\n\nfrom checkov.common.models.enums import CheckResult\nfrom checkov.kubernetes.checks.resource.base_container_check import BaseK8sContainerCheck\n\n\nclass ContainerSecurityContext(BaseK8sContainerCheck):\n def __init__(self) -> None:\n # CIS-1.5 5.7.3\n name = \"Apply security context to your containers\"\n # Security context can be set at pod or container level.\n # Location: container .securityContext\n id = \"CKV_K8S_30\"\n super().__init__(name=name, id=id)\n\n def scan_container_conf(self, metadata: Dict[str, Any], conf: Dict[str, Any]) -> CheckResult:\n self.evaluated_container_keys = [\"securityContext\"]\n if conf.get(\"securityContext\"):\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n\ncheck = ContainerSecurityContext()\n", "path": "checkov/kubernetes/checks/resource/k8s/ContainerSecurityContext.py"}, {"content": "from checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass PodSecurityContext(BaseResourceCheck):\n\n def __init__(self):\n # CIS-1.5 5.7.3\n name = \"Apply security context to your pods, deployments and daemon_sets\"\n # Security context can be set at pod or container level.\n id = \"CKV_K8S_29\"\n\n supported_resources = ('kubernetes_pod', 'kubernetes_pod_v1',\n 'kubernetes_deployment', 'kubernetes_deployment_v1',\n 'kubernetes_daemonset', 'kubernetes_daemon_set_v1')\n categories = (CheckCategories.GENERAL_SECURITY,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf) -> CheckResult:\n if \"spec\" not in conf:\n self.evaluated_keys = [\"\"]\n return CheckResult.FAILED\n spec = conf['spec'][0]\n if spec.get(\"container\"):\n containers = spec.get(\"container\")\n\n for idx, container in enumerate(containers):\n if type(container) != dict:\n return CheckResult.UNKNOWN\n\n if not container.get(\"security_context\"):\n self.evaluated_keys = [f\"spec/[0]/container/{idx}\"]\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n if spec.get(\"template\") and isinstance(spec.get(\"template\"), list):\n template = spec.get(\"template\")[0]\n if template.get(\"spec\") and isinstance(template.get(\"spec\"), list):\n temp_spec = template.get(\"spec\")[0]\n if temp_spec.get(\"container\"):\n containers = temp_spec.get(\"container\")\n\n for idx, container in enumerate(containers):\n if type(container) != dict:\n return CheckResult.UNKNOWN\n\n if not container.get(\"security_context\"):\n self.evaluated_keys = [f\"spec/[0]/template/[0]/spec/[0]/container/{idx}\"]\n return CheckResult.FAILED\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n\ncheck = PodSecurityContext()\n", "path": "checkov/terraform/checks/resource/kubernetes/PodSecurityContext.py"}]}
1,280
327
gh_patches_debug_1977
rasdani/github-patches
git_diff
xorbitsai__inference-1096
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ENH: Add the option to use CPU to inference even there is GPU device ### Is your feature request related to a problem? Please describe There is a GPU in my server, but when load some LLM model, I need load it into my memory because the model size is bigger than GPU memory. However, when I launch the model from web page, the N-GPU setting only contains auto, 0, 1 options, if I select 0, system will complain the following error: > Server error: 400 - [address=0.0.0.0:19270, pid=2063850] The parameter `n_gpu` must be greater than 0 and not greater than the number of GPUs: 1 on the machine. ### Describe the solution you'd like I think when the N GPU setting is set to 0, it should use CPU as inference device. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `xinference/device_utils.py` Content: ``` 1 # Copyright 2022-2023 XProbe Inc. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import os 16 17 import torch 18 from typing_extensions import Literal, Union 19 20 DeviceType = Literal["cuda", "mps", "xpu", "cpu"] 21 22 23 def is_xpu_available() -> bool: 24 return hasattr(torch, "xpu") and torch.xpu.is_available() 25 26 27 def get_available_device() -> DeviceType: 28 if torch.cuda.is_available(): 29 return "cuda" 30 elif torch.backends.mps.is_available(): 31 return "mps" 32 elif is_xpu_available(): 33 return "xpu" 34 return "cpu" 35 36 37 def is_device_available(device: str) -> bool: 38 if device == "cuda": 39 return torch.cuda.is_available() 40 elif device == "mps": 41 return torch.backends.mps.is_available() 42 elif device == "xpu": 43 return is_xpu_available() 44 elif device == "cpu": 45 return True 46 47 return False 48 49 50 def move_model_to_available_device(model): 51 device = get_available_device() 52 53 if device == "cpu": 54 return model 55 56 return model.to(device) 57 58 59 def get_device_preferred_dtype(device: str) -> Union[torch.dtype, None]: 60 if device == "cpu": 61 return torch.float32 62 elif device == "cuda" or device == "mps": 63 return torch.float16 64 elif device == "xpu": 65 return torch.bfloat16 66 67 return None 68 69 70 def is_hf_accelerate_supported(device: str) -> bool: 71 return device == "cuda" or device == "xpu" 72 73 74 def empty_cache(): 75 if torch.cuda.is_available(): 76 torch.cuda.empty_cache() 77 if torch.backends.mps.is_available(): 78 torch.mps.empty_cache() 79 if is_xpu_available(): 80 torch.xpu.empty_cache() 81 82 83 def gpu_count(): 84 if torch.cuda.is_available(): 85 cuda_visible_devices_env = os.getenv("CUDA_VISIBLE_DEVICES", None) 86 87 if cuda_visible_devices_env is None: 88 return torch.cuda.device_count() 89 90 cuda_visible_devices = ( 91 cuda_visible_devices_env.split(",") if cuda_visible_devices_env else [] 92 ) 93 94 return min(torch.cuda.device_count(), len(cuda_visible_devices)) 95 elif torch.backends.mps.is_available(): 96 return 1 97 elif is_xpu_available(): 98 return torch.xpu.device_count() 99 else: 100 return 0 101 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/xinference/device_utils.py b/xinference/device_utils.py --- a/xinference/device_utils.py +++ b/xinference/device_utils.py @@ -92,8 +92,6 @@ ) return min(torch.cuda.device_count(), len(cuda_visible_devices)) - elif torch.backends.mps.is_available(): - return 1 elif is_xpu_available(): return torch.xpu.device_count() else:
{"golden_diff": "diff --git a/xinference/device_utils.py b/xinference/device_utils.py\n--- a/xinference/device_utils.py\n+++ b/xinference/device_utils.py\n@@ -92,8 +92,6 @@\n )\n \n return min(torch.cuda.device_count(), len(cuda_visible_devices))\n- elif torch.backends.mps.is_available():\n- return 1\n elif is_xpu_available():\n return torch.xpu.device_count()\n else:\n", "issue": "ENH: Add the option to use CPU to inference even there is GPU device\n### Is your feature request related to a problem? Please describe\r\nThere is a GPU in my server, but when load some LLM model, I need load it into my memory because the model size is bigger\r\nthan GPU memory.\r\n\r\nHowever, when I launch the model from web page, the N-GPU setting only contains auto, 0, 1 options, if I select 0, system will complain the following error:\r\n\r\n> Server error: 400 - [address=0.0.0.0:19270, pid=2063850] The parameter `n_gpu` must be greater than 0 and not greater than the number of GPUs: 1 on the machine.\r\n\r\n### Describe the solution you'd like\r\nI think when the N GPU setting is set to 0, it should use CPU as inference device.\r\n\n", "before_files": [{"content": "# Copyright 2022-2023 XProbe Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nimport torch\nfrom typing_extensions import Literal, Union\n\nDeviceType = Literal[\"cuda\", \"mps\", \"xpu\", \"cpu\"]\n\n\ndef is_xpu_available() -> bool:\n return hasattr(torch, \"xpu\") and torch.xpu.is_available()\n\n\ndef get_available_device() -> DeviceType:\n if torch.cuda.is_available():\n return \"cuda\"\n elif torch.backends.mps.is_available():\n return \"mps\"\n elif is_xpu_available():\n return \"xpu\"\n return \"cpu\"\n\n\ndef is_device_available(device: str) -> bool:\n if device == \"cuda\":\n return torch.cuda.is_available()\n elif device == \"mps\":\n return torch.backends.mps.is_available()\n elif device == \"xpu\":\n return is_xpu_available()\n elif device == \"cpu\":\n return True\n\n return False\n\n\ndef move_model_to_available_device(model):\n device = get_available_device()\n\n if device == \"cpu\":\n return model\n\n return model.to(device)\n\n\ndef get_device_preferred_dtype(device: str) -> Union[torch.dtype, None]:\n if device == \"cpu\":\n return torch.float32\n elif device == \"cuda\" or device == \"mps\":\n return torch.float16\n elif device == \"xpu\":\n return torch.bfloat16\n\n return None\n\n\ndef is_hf_accelerate_supported(device: str) -> bool:\n return device == \"cuda\" or device == \"xpu\"\n\n\ndef empty_cache():\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n if torch.backends.mps.is_available():\n torch.mps.empty_cache()\n if is_xpu_available():\n torch.xpu.empty_cache()\n\n\ndef gpu_count():\n if torch.cuda.is_available():\n cuda_visible_devices_env = os.getenv(\"CUDA_VISIBLE_DEVICES\", None)\n\n if cuda_visible_devices_env is None:\n return torch.cuda.device_count()\n\n cuda_visible_devices = (\n cuda_visible_devices_env.split(\",\") if cuda_visible_devices_env else []\n )\n\n return min(torch.cuda.device_count(), len(cuda_visible_devices))\n elif torch.backends.mps.is_available():\n return 1\n elif is_xpu_available():\n return torch.xpu.device_count()\n else:\n return 0\n", "path": "xinference/device_utils.py"}], "after_files": [{"content": "# Copyright 2022-2023 XProbe Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nimport torch\nfrom typing_extensions import Literal, Union\n\nDeviceType = Literal[\"cuda\", \"mps\", \"xpu\", \"cpu\"]\n\n\ndef is_xpu_available() -> bool:\n return hasattr(torch, \"xpu\") and torch.xpu.is_available()\n\n\ndef get_available_device() -> DeviceType:\n if torch.cuda.is_available():\n return \"cuda\"\n elif torch.backends.mps.is_available():\n return \"mps\"\n elif is_xpu_available():\n return \"xpu\"\n return \"cpu\"\n\n\ndef is_device_available(device: str) -> bool:\n if device == \"cuda\":\n return torch.cuda.is_available()\n elif device == \"mps\":\n return torch.backends.mps.is_available()\n elif device == \"xpu\":\n return is_xpu_available()\n elif device == \"cpu\":\n return True\n\n return False\n\n\ndef move_model_to_available_device(model):\n device = get_available_device()\n\n if device == \"cpu\":\n return model\n\n return model.to(device)\n\n\ndef get_device_preferred_dtype(device: str) -> Union[torch.dtype, None]:\n if device == \"cpu\":\n return torch.float32\n elif device == \"cuda\" or device == \"mps\":\n return torch.float16\n elif device == \"xpu\":\n return torch.bfloat16\n\n return None\n\n\ndef is_hf_accelerate_supported(device: str) -> bool:\n return device == \"cuda\" or device == \"xpu\"\n\n\ndef empty_cache():\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n if torch.backends.mps.is_available():\n torch.mps.empty_cache()\n if is_xpu_available():\n torch.xpu.empty_cache()\n\n\ndef gpu_count():\n if torch.cuda.is_available():\n cuda_visible_devices_env = os.getenv(\"CUDA_VISIBLE_DEVICES\", None)\n\n if cuda_visible_devices_env is None:\n return torch.cuda.device_count()\n\n cuda_visible_devices = (\n cuda_visible_devices_env.split(\",\") if cuda_visible_devices_env else []\n )\n\n return min(torch.cuda.device_count(), len(cuda_visible_devices))\n elif is_xpu_available():\n return torch.xpu.device_count()\n else:\n return 0\n", "path": "xinference/device_utils.py"}]}
1,288
98
gh_patches_debug_12186
rasdani/github-patches
git_diff
cookiecutter__cookiecutter-528
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- chg: be more precise in error message on config file. We can be more helpful when the main config file throws a parser error. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cookiecutter/config.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 4 """ 5 cookiecutter.config 6 ------------------- 7 8 Global configuration handling 9 """ 10 11 from __future__ import unicode_literals 12 import copy 13 import logging 14 import os 15 import io 16 17 import yaml 18 19 from .exceptions import ConfigDoesNotExistException 20 from .exceptions import InvalidConfiguration 21 22 23 logger = logging.getLogger(__name__) 24 25 DEFAULT_CONFIG = { 26 'cookiecutters_dir': os.path.expanduser('~/.cookiecutters/'), 27 'replay_dir': os.path.expanduser('~/.cookiecutter_replay/'), 28 'default_context': {} 29 } 30 31 32 def get_config(config_path): 33 """ 34 Retrieve the config from the specified path, returning it as a config dict. 35 """ 36 37 if not os.path.exists(config_path): 38 raise ConfigDoesNotExistException 39 40 logger.debug('config_path is {0}'.format(config_path)) 41 with io.open(config_path, encoding='utf-8') as file_handle: 42 try: 43 yaml_dict = yaml.safe_load(file_handle) 44 except yaml.scanner.ScannerError: 45 raise InvalidConfiguration( 46 '{0} is no a valid YAML file'.format(config_path)) 47 48 config_dict = copy.copy(DEFAULT_CONFIG) 49 config_dict.update(yaml_dict) 50 51 return config_dict 52 53 54 def get_user_config(): 55 """ 56 Retrieve config from the user's ~/.cookiecutterrc, if it exists. 57 Otherwise, return None. 58 """ 59 60 # TODO: test on windows... 61 USER_CONFIG_PATH = os.path.expanduser('~/.cookiecutterrc') 62 63 if os.path.exists(USER_CONFIG_PATH): 64 return get_config(USER_CONFIG_PATH) 65 return copy.copy(DEFAULT_CONFIG) 66 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cookiecutter/config.py b/cookiecutter/config.py --- a/cookiecutter/config.py +++ b/cookiecutter/config.py @@ -41,9 +41,12 @@ with io.open(config_path, encoding='utf-8') as file_handle: try: yaml_dict = yaml.safe_load(file_handle) - except yaml.scanner.ScannerError: + except yaml.scanner.ScannerError as e: raise InvalidConfiguration( - '{0} is no a valid YAML file'.format(config_path)) + '{0} is not a valid YAML file: line {1}: {2}'.format( + config_path, + e.problem_mark.line, + e.problem)) config_dict = copy.copy(DEFAULT_CONFIG) config_dict.update(yaml_dict)
{"golden_diff": "diff --git a/cookiecutter/config.py b/cookiecutter/config.py\n--- a/cookiecutter/config.py\n+++ b/cookiecutter/config.py\n@@ -41,9 +41,12 @@\n with io.open(config_path, encoding='utf-8') as file_handle:\n try:\n yaml_dict = yaml.safe_load(file_handle)\n- except yaml.scanner.ScannerError:\n+ except yaml.scanner.ScannerError as e:\n raise InvalidConfiguration(\n- '{0} is no a valid YAML file'.format(config_path))\n+ '{0} is not a valid YAML file: line {1}: {2}'.format(\n+ config_path,\n+ e.problem_mark.line,\n+ e.problem))\n \n config_dict = copy.copy(DEFAULT_CONFIG)\n config_dict.update(yaml_dict)\n", "issue": "chg: be more precise in error message on config file.\nWe can be more helpful when the main config file throws a parser error.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.config\n-------------------\n\nGlobal configuration handling\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport copy\nimport logging\nimport os\nimport io\n\nimport yaml\n\nfrom .exceptions import ConfigDoesNotExistException\nfrom .exceptions import InvalidConfiguration\n\n\nlogger = logging.getLogger(__name__)\n\nDEFAULT_CONFIG = {\n 'cookiecutters_dir': os.path.expanduser('~/.cookiecutters/'),\n 'replay_dir': os.path.expanduser('~/.cookiecutter_replay/'),\n 'default_context': {}\n}\n\n\ndef get_config(config_path):\n \"\"\"\n Retrieve the config from the specified path, returning it as a config dict.\n \"\"\"\n\n if not os.path.exists(config_path):\n raise ConfigDoesNotExistException\n\n logger.debug('config_path is {0}'.format(config_path))\n with io.open(config_path, encoding='utf-8') as file_handle:\n try:\n yaml_dict = yaml.safe_load(file_handle)\n except yaml.scanner.ScannerError:\n raise InvalidConfiguration(\n '{0} is no a valid YAML file'.format(config_path))\n\n config_dict = copy.copy(DEFAULT_CONFIG)\n config_dict.update(yaml_dict)\n\n return config_dict\n\n\ndef get_user_config():\n \"\"\"\n Retrieve config from the user's ~/.cookiecutterrc, if it exists.\n Otherwise, return None.\n \"\"\"\n\n # TODO: test on windows...\n USER_CONFIG_PATH = os.path.expanduser('~/.cookiecutterrc')\n\n if os.path.exists(USER_CONFIG_PATH):\n return get_config(USER_CONFIG_PATH)\n return copy.copy(DEFAULT_CONFIG)\n", "path": "cookiecutter/config.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.config\n-------------------\n\nGlobal configuration handling\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport copy\nimport logging\nimport os\nimport io\n\nimport yaml\n\nfrom .exceptions import ConfigDoesNotExistException\nfrom .exceptions import InvalidConfiguration\n\n\nlogger = logging.getLogger(__name__)\n\nDEFAULT_CONFIG = {\n 'cookiecutters_dir': os.path.expanduser('~/.cookiecutters/'),\n 'replay_dir': os.path.expanduser('~/.cookiecutter_replay/'),\n 'default_context': {}\n}\n\n\ndef get_config(config_path):\n \"\"\"\n Retrieve the config from the specified path, returning it as a config dict.\n \"\"\"\n\n if not os.path.exists(config_path):\n raise ConfigDoesNotExistException\n\n logger.debug('config_path is {0}'.format(config_path))\n with io.open(config_path, encoding='utf-8') as file_handle:\n try:\n yaml_dict = yaml.safe_load(file_handle)\n except yaml.scanner.ScannerError as e:\n raise InvalidConfiguration(\n '{0} is not a valid YAML file: line {1}: {2}'.format(\n config_path,\n e.problem_mark.line,\n e.problem))\n\n config_dict = copy.copy(DEFAULT_CONFIG)\n config_dict.update(yaml_dict)\n\n return config_dict\n\n\ndef get_user_config():\n \"\"\"\n Retrieve config from the user's ~/.cookiecutterrc, if it exists.\n Otherwise, return None.\n \"\"\"\n\n # TODO: test on windows...\n USER_CONFIG_PATH = os.path.expanduser('~/.cookiecutterrc')\n\n if os.path.exists(USER_CONFIG_PATH):\n return get_config(USER_CONFIG_PATH)\n return copy.copy(DEFAULT_CONFIG)\n", "path": "cookiecutter/config.py"}]}
765
176
gh_patches_debug_7505
rasdani/github-patches
git_diff
encode__starlette-813
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Gzip Middleware content-length is incorrect The following exception is thrown when I use uvicorn to drive my starlette project. After control variates, I am sure this is caused by Gzip Middleware. ``` File "C:\Users\AberS\Documents\Github\index.py\.venv\lib\site-packages\h11\_writers.py", line 102, in send_eom raise LocalProtocolError("Too little data for declared Content-Length") h11._util.LocalProtocolError: Too little data for declared Content-Length ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `starlette/middleware/wsgi.py` Content: ``` 1 import asyncio 2 import io 3 import sys 4 import typing 5 6 from starlette.concurrency import run_in_threadpool 7 from starlette.types import Message, Receive, Scope, Send 8 9 10 def build_environ(scope: Scope, body: bytes) -> dict: 11 """ 12 Builds a scope and request body into a WSGI environ object. 13 """ 14 environ = { 15 "REQUEST_METHOD": scope["method"], 16 "SCRIPT_NAME": scope.get("root_path", ""), 17 "PATH_INFO": scope["path"], 18 "QUERY_STRING": scope["query_string"].decode("ascii"), 19 "SERVER_PROTOCOL": f"HTTP/{scope['http_version']}", 20 "wsgi.version": (1, 0), 21 "wsgi.url_scheme": scope.get("scheme", "http"), 22 "wsgi.input": io.BytesIO(body), 23 "wsgi.errors": sys.stdout, 24 "wsgi.multithread": True, 25 "wsgi.multiprocess": True, 26 "wsgi.run_once": False, 27 } 28 29 # Get server name and port - required in WSGI, not in ASGI 30 server = scope.get("server") or ("localhost", 80) 31 environ["SERVER_NAME"] = server[0] 32 environ["SERVER_PORT"] = server[1] 33 34 # Get client IP address 35 if scope.get("client"): 36 environ["REMOTE_ADDR"] = scope["client"][0] 37 38 # Go through headers and make them into environ entries 39 for name, value in scope.get("headers", []): 40 name = name.decode("latin1") 41 if name == "content-length": 42 corrected_name = "CONTENT_LENGTH" 43 elif name == "content-type": 44 corrected_name = "CONTENT_TYPE" 45 else: 46 corrected_name = f"HTTP_{name}".upper().replace("-", "_") 47 # HTTPbis say only ASCII chars are allowed in headers, but we latin1 just in case 48 value = value.decode("latin1") 49 if corrected_name in environ: 50 value = environ[corrected_name] + "," + value 51 environ[corrected_name] = value 52 return environ 53 54 55 class WSGIMiddleware: 56 def __init__(self, app: typing.Callable, workers: int = 10) -> None: 57 self.app = app 58 59 async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: 60 assert scope["type"] == "http" 61 responder = WSGIResponder(self.app, scope) 62 await responder(receive, send) 63 64 65 class WSGIResponder: 66 def __init__(self, app: typing.Callable, scope: Scope) -> None: 67 self.app = app 68 self.scope = scope 69 self.status = None 70 self.response_headers = None 71 self.send_event = asyncio.Event() 72 self.send_queue = [] # type: typing.List[typing.Optional[Message]] 73 self.loop = asyncio.get_event_loop() 74 self.response_started = False 75 self.exc_info = None # type: typing.Any 76 77 async def __call__(self, receive: Receive, send: Send) -> None: 78 body = b"" 79 more_body = True 80 while more_body: 81 message = await receive() 82 body += message.get("body", b"") 83 more_body = message.get("more_body", False) 84 environ = build_environ(self.scope, body) 85 sender = None 86 try: 87 sender = self.loop.create_task(self.sender(send)) 88 await run_in_threadpool(self.wsgi, environ, self.start_response) 89 self.send_queue.append(None) 90 self.send_event.set() 91 await asyncio.wait_for(sender, None) 92 if self.exc_info is not None: 93 raise self.exc_info[0].with_traceback( 94 self.exc_info[1], self.exc_info[2] 95 ) 96 finally: 97 if sender and not sender.done(): 98 sender.cancel() # pragma: no cover 99 100 async def sender(self, send: Send) -> None: 101 while True: 102 if self.send_queue: 103 message = self.send_queue.pop(0) 104 if message is None: 105 return 106 await send(message) 107 else: 108 await self.send_event.wait() 109 self.send_event.clear() 110 111 def start_response( 112 self, 113 status: str, 114 response_headers: typing.List[typing.Tuple[str, str]], 115 exc_info: typing.Any = None, 116 ) -> None: 117 self.exc_info = exc_info 118 if not self.response_started: 119 self.response_started = True 120 status_code_string, _ = status.split(" ", 1) 121 status_code = int(status_code_string) 122 headers = [ 123 (name.strip().encode("ascii"), value.strip().encode("ascii")) 124 for name, value in response_headers 125 ] 126 self.send_queue.append( 127 { 128 "type": "http.response.start", 129 "status": status_code, 130 "headers": headers, 131 } 132 ) 133 self.loop.call_soon_threadsafe(self.send_event.set) 134 135 def wsgi(self, environ: dict, start_response: typing.Callable) -> None: 136 for chunk in self.app(environ, start_response): 137 self.send_queue.append( 138 {"type": "http.response.body", "body": chunk, "more_body": True} 139 ) 140 self.loop.call_soon_threadsafe(self.send_event.set) 141 142 self.send_queue.append({"type": "http.response.body", "body": b""}) 143 self.loop.call_soon_threadsafe(self.send_event.set) 144 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/starlette/middleware/wsgi.py b/starlette/middleware/wsgi.py --- a/starlette/middleware/wsgi.py +++ b/starlette/middleware/wsgi.py @@ -120,7 +120,7 @@ status_code_string, _ = status.split(" ", 1) status_code = int(status_code_string) headers = [ - (name.strip().encode("ascii"), value.strip().encode("ascii")) + (name.strip().encode("ascii").lower(), value.strip().encode("ascii")) for name, value in response_headers ] self.send_queue.append(
{"golden_diff": "diff --git a/starlette/middleware/wsgi.py b/starlette/middleware/wsgi.py\n--- a/starlette/middleware/wsgi.py\n+++ b/starlette/middleware/wsgi.py\n@@ -120,7 +120,7 @@\n status_code_string, _ = status.split(\" \", 1)\n status_code = int(status_code_string)\n headers = [\n- (name.strip().encode(\"ascii\"), value.strip().encode(\"ascii\"))\n+ (name.strip().encode(\"ascii\").lower(), value.strip().encode(\"ascii\"))\n for name, value in response_headers\n ]\n self.send_queue.append(\n", "issue": "Gzip Middleware content-length is incorrect\nThe following exception is thrown when I use uvicorn to drive my starlette project. After control variates, I am sure this is caused by Gzip Middleware.\r\n\r\n```\r\n File \"C:\\Users\\AberS\\Documents\\Github\\index.py\\.venv\\lib\\site-packages\\h11\\_writers.py\", line 102, in send_eom\r\n raise LocalProtocolError(\"Too little data for declared Content-Length\") \r\nh11._util.LocalProtocolError: Too little data for declared Content-Length\r\n```\r\n\n", "before_files": [{"content": "import asyncio\nimport io\nimport sys\nimport typing\n\nfrom starlette.concurrency import run_in_threadpool\nfrom starlette.types import Message, Receive, Scope, Send\n\n\ndef build_environ(scope: Scope, body: bytes) -> dict:\n \"\"\"\n Builds a scope and request body into a WSGI environ object.\n \"\"\"\n environ = {\n \"REQUEST_METHOD\": scope[\"method\"],\n \"SCRIPT_NAME\": scope.get(\"root_path\", \"\"),\n \"PATH_INFO\": scope[\"path\"],\n \"QUERY_STRING\": scope[\"query_string\"].decode(\"ascii\"),\n \"SERVER_PROTOCOL\": f\"HTTP/{scope['http_version']}\",\n \"wsgi.version\": (1, 0),\n \"wsgi.url_scheme\": scope.get(\"scheme\", \"http\"),\n \"wsgi.input\": io.BytesIO(body),\n \"wsgi.errors\": sys.stdout,\n \"wsgi.multithread\": True,\n \"wsgi.multiprocess\": True,\n \"wsgi.run_once\": False,\n }\n\n # Get server name and port - required in WSGI, not in ASGI\n server = scope.get(\"server\") or (\"localhost\", 80)\n environ[\"SERVER_NAME\"] = server[0]\n environ[\"SERVER_PORT\"] = server[1]\n\n # Get client IP address\n if scope.get(\"client\"):\n environ[\"REMOTE_ADDR\"] = scope[\"client\"][0]\n\n # Go through headers and make them into environ entries\n for name, value in scope.get(\"headers\", []):\n name = name.decode(\"latin1\")\n if name == \"content-length\":\n corrected_name = \"CONTENT_LENGTH\"\n elif name == \"content-type\":\n corrected_name = \"CONTENT_TYPE\"\n else:\n corrected_name = f\"HTTP_{name}\".upper().replace(\"-\", \"_\")\n # HTTPbis say only ASCII chars are allowed in headers, but we latin1 just in case\n value = value.decode(\"latin1\")\n if corrected_name in environ:\n value = environ[corrected_name] + \",\" + value\n environ[corrected_name] = value\n return environ\n\n\nclass WSGIMiddleware:\n def __init__(self, app: typing.Callable, workers: int = 10) -> None:\n self.app = app\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n assert scope[\"type\"] == \"http\"\n responder = WSGIResponder(self.app, scope)\n await responder(receive, send)\n\n\nclass WSGIResponder:\n def __init__(self, app: typing.Callable, scope: Scope) -> None:\n self.app = app\n self.scope = scope\n self.status = None\n self.response_headers = None\n self.send_event = asyncio.Event()\n self.send_queue = [] # type: typing.List[typing.Optional[Message]]\n self.loop = asyncio.get_event_loop()\n self.response_started = False\n self.exc_info = None # type: typing.Any\n\n async def __call__(self, receive: Receive, send: Send) -> None:\n body = b\"\"\n more_body = True\n while more_body:\n message = await receive()\n body += message.get(\"body\", b\"\")\n more_body = message.get(\"more_body\", False)\n environ = build_environ(self.scope, body)\n sender = None\n try:\n sender = self.loop.create_task(self.sender(send))\n await run_in_threadpool(self.wsgi, environ, self.start_response)\n self.send_queue.append(None)\n self.send_event.set()\n await asyncio.wait_for(sender, None)\n if self.exc_info is not None:\n raise self.exc_info[0].with_traceback(\n self.exc_info[1], self.exc_info[2]\n )\n finally:\n if sender and not sender.done():\n sender.cancel() # pragma: no cover\n\n async def sender(self, send: Send) -> None:\n while True:\n if self.send_queue:\n message = self.send_queue.pop(0)\n if message is None:\n return\n await send(message)\n else:\n await self.send_event.wait()\n self.send_event.clear()\n\n def start_response(\n self,\n status: str,\n response_headers: typing.List[typing.Tuple[str, str]],\n exc_info: typing.Any = None,\n ) -> None:\n self.exc_info = exc_info\n if not self.response_started:\n self.response_started = True\n status_code_string, _ = status.split(\" \", 1)\n status_code = int(status_code_string)\n headers = [\n (name.strip().encode(\"ascii\"), value.strip().encode(\"ascii\"))\n for name, value in response_headers\n ]\n self.send_queue.append(\n {\n \"type\": \"http.response.start\",\n \"status\": status_code,\n \"headers\": headers,\n }\n )\n self.loop.call_soon_threadsafe(self.send_event.set)\n\n def wsgi(self, environ: dict, start_response: typing.Callable) -> None:\n for chunk in self.app(environ, start_response):\n self.send_queue.append(\n {\"type\": \"http.response.body\", \"body\": chunk, \"more_body\": True}\n )\n self.loop.call_soon_threadsafe(self.send_event.set)\n\n self.send_queue.append({\"type\": \"http.response.body\", \"body\": b\"\"})\n self.loop.call_soon_threadsafe(self.send_event.set)\n", "path": "starlette/middleware/wsgi.py"}], "after_files": [{"content": "import asyncio\nimport io\nimport sys\nimport typing\n\nfrom starlette.concurrency import run_in_threadpool\nfrom starlette.types import Message, Receive, Scope, Send\n\n\ndef build_environ(scope: Scope, body: bytes) -> dict:\n \"\"\"\n Builds a scope and request body into a WSGI environ object.\n \"\"\"\n environ = {\n \"REQUEST_METHOD\": scope[\"method\"],\n \"SCRIPT_NAME\": scope.get(\"root_path\", \"\"),\n \"PATH_INFO\": scope[\"path\"],\n \"QUERY_STRING\": scope[\"query_string\"].decode(\"ascii\"),\n \"SERVER_PROTOCOL\": f\"HTTP/{scope['http_version']}\",\n \"wsgi.version\": (1, 0),\n \"wsgi.url_scheme\": scope.get(\"scheme\", \"http\"),\n \"wsgi.input\": io.BytesIO(body),\n \"wsgi.errors\": sys.stdout,\n \"wsgi.multithread\": True,\n \"wsgi.multiprocess\": True,\n \"wsgi.run_once\": False,\n }\n\n # Get server name and port - required in WSGI, not in ASGI\n server = scope.get(\"server\") or (\"localhost\", 80)\n environ[\"SERVER_NAME\"] = server[0]\n environ[\"SERVER_PORT\"] = server[1]\n\n # Get client IP address\n if scope.get(\"client\"):\n environ[\"REMOTE_ADDR\"] = scope[\"client\"][0]\n\n # Go through headers and make them into environ entries\n for name, value in scope.get(\"headers\", []):\n name = name.decode(\"latin1\")\n if name == \"content-length\":\n corrected_name = \"CONTENT_LENGTH\"\n elif name == \"content-type\":\n corrected_name = \"CONTENT_TYPE\"\n else:\n corrected_name = f\"HTTP_{name}\".upper().replace(\"-\", \"_\")\n # HTTPbis say only ASCII chars are allowed in headers, but we latin1 just in case\n value = value.decode(\"latin1\")\n if corrected_name in environ:\n value = environ[corrected_name] + \",\" + value\n environ[corrected_name] = value\n return environ\n\n\nclass WSGIMiddleware:\n def __init__(self, app: typing.Callable, workers: int = 10) -> None:\n self.app = app\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n assert scope[\"type\"] == \"http\"\n responder = WSGIResponder(self.app, scope)\n await responder(receive, send)\n\n\nclass WSGIResponder:\n def __init__(self, app: typing.Callable, scope: Scope) -> None:\n self.app = app\n self.scope = scope\n self.status = None\n self.response_headers = None\n self.send_event = asyncio.Event()\n self.send_queue = [] # type: typing.List[typing.Optional[Message]]\n self.loop = asyncio.get_event_loop()\n self.response_started = False\n self.exc_info = None # type: typing.Any\n\n async def __call__(self, receive: Receive, send: Send) -> None:\n body = b\"\"\n more_body = True\n while more_body:\n message = await receive()\n body += message.get(\"body\", b\"\")\n more_body = message.get(\"more_body\", False)\n environ = build_environ(self.scope, body)\n sender = None\n try:\n sender = self.loop.create_task(self.sender(send))\n await run_in_threadpool(self.wsgi, environ, self.start_response)\n self.send_queue.append(None)\n self.send_event.set()\n await asyncio.wait_for(sender, None)\n if self.exc_info is not None:\n raise self.exc_info[0].with_traceback(\n self.exc_info[1], self.exc_info[2]\n )\n finally:\n if sender and not sender.done():\n sender.cancel() # pragma: no cover\n\n async def sender(self, send: Send) -> None:\n while True:\n if self.send_queue:\n message = self.send_queue.pop(0)\n if message is None:\n return\n await send(message)\n else:\n await self.send_event.wait()\n self.send_event.clear()\n\n def start_response(\n self,\n status: str,\n response_headers: typing.List[typing.Tuple[str, str]],\n exc_info: typing.Any = None,\n ) -> None:\n self.exc_info = exc_info\n if not self.response_started:\n self.response_started = True\n status_code_string, _ = status.split(\" \", 1)\n status_code = int(status_code_string)\n headers = [\n (name.strip().encode(\"ascii\").lower(), value.strip().encode(\"ascii\"))\n for name, value in response_headers\n ]\n self.send_queue.append(\n {\n \"type\": \"http.response.start\",\n \"status\": status_code,\n \"headers\": headers,\n }\n )\n self.loop.call_soon_threadsafe(self.send_event.set)\n\n def wsgi(self, environ: dict, start_response: typing.Callable) -> None:\n for chunk in self.app(environ, start_response):\n self.send_queue.append(\n {\"type\": \"http.response.body\", \"body\": chunk, \"more_body\": True}\n )\n self.loop.call_soon_threadsafe(self.send_event.set)\n\n self.send_queue.append({\"type\": \"http.response.body\", \"body\": b\"\"})\n self.loop.call_soon_threadsafe(self.send_event.set)\n", "path": "starlette/middleware/wsgi.py"}]}
1,885
134
gh_patches_debug_1013
rasdani/github-patches
git_diff
magenta__magenta-785
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- numpy dependency missing? magenta/models/sketch_rnn/utils.py has ```import numpy as np```, but magenta/tools/pip/setup.py doesn't list it as a dependency. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `magenta/tools/pip/setup.py` Content: ``` 1 # Copyright 2016 Google Inc. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 """A setuptools based setup module for magenta.""" 15 16 from setuptools import find_packages 17 from setuptools import setup 18 19 # Bit of a hack to parse the version string stored in version.py without 20 # executing __init__.py, which will end up requiring a bunch of dependencies to 21 # execute (e.g., tensorflow, pretty_midi, etc.). 22 # Makes the __version__ variable available. 23 execfile('magenta/version.py') 24 25 26 REQUIRED_PACKAGES = [ 27 'IPython', 28 'Pillow >= 3.4.2', 29 'bokeh >= 0.12.0', 30 'futures', 31 'intervaltree >= 2.1.0', 32 'matplotlib >= 1.5.3', 33 'mido == 1.2.6', 34 'pandas >= 0.18.1', 35 'pretty_midi >= 0.2.6', 36 'python-rtmidi', 37 'scipy >= 0.18.1', 38 'tensorflow >= 1.1.0', 39 'wheel', 40 ] 41 42 CONSOLE_SCRIPTS = [ 43 'magenta.interfaces.midi.magenta_midi', 44 'magenta.interfaces.midi.midi_clock', 45 'magenta.models.drums_rnn.drums_rnn_create_dataset', 46 'magenta.models.drums_rnn.drums_rnn_generate', 47 'magenta.models.drums_rnn.drums_rnn_train', 48 'magenta.models.image_stylization.image_stylization_create_dataset', 49 'magenta.models.image_stylization.image_stylization_evaluate', 50 'magenta.models.image_stylization.image_stylization_finetune', 51 'magenta.models.image_stylization.image_stylization_train', 52 'magenta.models.image_stylization.image_stylization_transform', 53 'magenta.models.improv_rnn.improv_rnn_create_dataset', 54 'magenta.models.improv_rnn.improv_rnn_generate', 55 'magenta.models.improv_rnn.improv_rnn_train', 56 'magenta.models.melody_rnn.melody_rnn_create_dataset', 57 'magenta.models.melody_rnn.melody_rnn_generate', 58 'magenta.models.melody_rnn.melody_rnn_train', 59 'magenta.models.nsynth.wavenet.nsynth_generate', 60 'magenta.models.nsynth.wavenet.nsynth_save_embeddings', 61 'magenta.models.performance_rnn.performance_rnn_create_dataset', 62 'magenta.models.performance_rnn.performance_rnn_generate', 63 'magenta.models.performance_rnn.performance_rnn_train', 64 'magenta.models.pianoroll_rnn_nade.pianoroll_rnn_nade_create_dataset', 65 'magenta.models.pianoroll_rnn_nade.pianoroll_rnn_nade_generate', 66 'magenta.models.pianoroll_rnn_nade.pianoroll_rnn_nade_train', 67 'magenta.models.polyphony_rnn.polyphony_rnn_create_dataset', 68 'magenta.models.polyphony_rnn.polyphony_rnn_generate', 69 'magenta.models.polyphony_rnn.polyphony_rnn_train', 70 'magenta.models.rl_tuner.rl_tuner_train', 71 'magenta.models.sketch_rnn.sketch_rnn_train', 72 'magenta.scripts.convert_dir_to_note_sequences', 73 ] 74 75 setup( 76 name='magenta', 77 version=__version__, # pylint: disable=undefined-variable 78 description='Use machine learning to create art and music', 79 long_description='', 80 url='https://magenta.tensorflow.org/', 81 author='Google Inc.', 82 author_email='[email protected]', 83 license='Apache 2', 84 # PyPI package information. 85 classifiers=[ 86 'Development Status :: 4 - Beta', 87 'Intended Audience :: Developers', 88 'Intended Audience :: Education', 89 'Intended Audience :: Science/Research', 90 'License :: OSI Approved :: Apache Software License', 91 'Programming Language :: Python :: 2.7', 92 'Topic :: Scientific/Engineering :: Mathematics', 93 'Topic :: Software Development :: Libraries :: Python Modules', 94 'Topic :: Software Development :: Libraries', 95 ], 96 keywords='tensorflow machine learning magenta music art', 97 98 packages=find_packages(), 99 install_requires=REQUIRED_PACKAGES, 100 entry_points={ 101 'console_scripts': ['%s = %s:console_entry_point' % (n, p) for n, p in 102 ((s.split('.')[-1], s) for s in CONSOLE_SCRIPTS)], 103 }, 104 105 include_package_data=True, 106 package_data={ 107 'magenta': ['models/image_stylization/evaluation_images/*.jpg'], 108 }, 109 ) 110 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/magenta/tools/pip/setup.py b/magenta/tools/pip/setup.py --- a/magenta/tools/pip/setup.py +++ b/magenta/tools/pip/setup.py @@ -31,6 +31,7 @@ 'intervaltree >= 2.1.0', 'matplotlib >= 1.5.3', 'mido == 1.2.6', + 'numpy >= 1.11.0', 'pandas >= 0.18.1', 'pretty_midi >= 0.2.6', 'python-rtmidi',
{"golden_diff": "diff --git a/magenta/tools/pip/setup.py b/magenta/tools/pip/setup.py\n--- a/magenta/tools/pip/setup.py\n+++ b/magenta/tools/pip/setup.py\n@@ -31,6 +31,7 @@\n 'intervaltree >= 2.1.0',\n 'matplotlib >= 1.5.3',\n 'mido == 1.2.6',\n+ 'numpy >= 1.11.0',\n 'pandas >= 0.18.1',\n 'pretty_midi >= 0.2.6',\n 'python-rtmidi',\n", "issue": "numpy dependency missing?\nmagenta/models/sketch_rnn/utils.py has ```import numpy as np```, but magenta/tools/pip/setup.py doesn't list it as a dependency.\n", "before_files": [{"content": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"A setuptools based setup module for magenta.\"\"\"\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n# Bit of a hack to parse the version string stored in version.py without\n# executing __init__.py, which will end up requiring a bunch of dependencies to\n# execute (e.g., tensorflow, pretty_midi, etc.).\n# Makes the __version__ variable available.\nexecfile('magenta/version.py')\n\n\nREQUIRED_PACKAGES = [\n 'IPython',\n 'Pillow >= 3.4.2',\n 'bokeh >= 0.12.0',\n 'futures',\n 'intervaltree >= 2.1.0',\n 'matplotlib >= 1.5.3',\n 'mido == 1.2.6',\n 'pandas >= 0.18.1',\n 'pretty_midi >= 0.2.6',\n 'python-rtmidi',\n 'scipy >= 0.18.1',\n 'tensorflow >= 1.1.0',\n 'wheel',\n]\n\nCONSOLE_SCRIPTS = [\n 'magenta.interfaces.midi.magenta_midi',\n 'magenta.interfaces.midi.midi_clock',\n 'magenta.models.drums_rnn.drums_rnn_create_dataset',\n 'magenta.models.drums_rnn.drums_rnn_generate',\n 'magenta.models.drums_rnn.drums_rnn_train',\n 'magenta.models.image_stylization.image_stylization_create_dataset',\n 'magenta.models.image_stylization.image_stylization_evaluate',\n 'magenta.models.image_stylization.image_stylization_finetune',\n 'magenta.models.image_stylization.image_stylization_train',\n 'magenta.models.image_stylization.image_stylization_transform',\n 'magenta.models.improv_rnn.improv_rnn_create_dataset',\n 'magenta.models.improv_rnn.improv_rnn_generate',\n 'magenta.models.improv_rnn.improv_rnn_train',\n 'magenta.models.melody_rnn.melody_rnn_create_dataset',\n 'magenta.models.melody_rnn.melody_rnn_generate',\n 'magenta.models.melody_rnn.melody_rnn_train',\n 'magenta.models.nsynth.wavenet.nsynth_generate',\n 'magenta.models.nsynth.wavenet.nsynth_save_embeddings',\n 'magenta.models.performance_rnn.performance_rnn_create_dataset',\n 'magenta.models.performance_rnn.performance_rnn_generate',\n 'magenta.models.performance_rnn.performance_rnn_train',\n 'magenta.models.pianoroll_rnn_nade.pianoroll_rnn_nade_create_dataset',\n 'magenta.models.pianoroll_rnn_nade.pianoroll_rnn_nade_generate',\n 'magenta.models.pianoroll_rnn_nade.pianoroll_rnn_nade_train',\n 'magenta.models.polyphony_rnn.polyphony_rnn_create_dataset',\n 'magenta.models.polyphony_rnn.polyphony_rnn_generate',\n 'magenta.models.polyphony_rnn.polyphony_rnn_train',\n 'magenta.models.rl_tuner.rl_tuner_train',\n 'magenta.models.sketch_rnn.sketch_rnn_train',\n 'magenta.scripts.convert_dir_to_note_sequences',\n]\n\nsetup(\n name='magenta',\n version=__version__, # pylint: disable=undefined-variable\n description='Use machine learning to create art and music',\n long_description='',\n url='https://magenta.tensorflow.org/',\n author='Google Inc.',\n author_email='[email protected]',\n license='Apache 2',\n # PyPI package information.\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Libraries',\n ],\n keywords='tensorflow machine learning magenta music art',\n\n packages=find_packages(),\n install_requires=REQUIRED_PACKAGES,\n entry_points={\n 'console_scripts': ['%s = %s:console_entry_point' % (n, p) for n, p in\n ((s.split('.')[-1], s) for s in CONSOLE_SCRIPTS)],\n },\n\n include_package_data=True,\n package_data={\n 'magenta': ['models/image_stylization/evaluation_images/*.jpg'],\n },\n)\n", "path": "magenta/tools/pip/setup.py"}], "after_files": [{"content": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"A setuptools based setup module for magenta.\"\"\"\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n# Bit of a hack to parse the version string stored in version.py without\n# executing __init__.py, which will end up requiring a bunch of dependencies to\n# execute (e.g., tensorflow, pretty_midi, etc.).\n# Makes the __version__ variable available.\nexecfile('magenta/version.py')\n\n\nREQUIRED_PACKAGES = [\n 'IPython',\n 'Pillow >= 3.4.2',\n 'bokeh >= 0.12.0',\n 'futures',\n 'intervaltree >= 2.1.0',\n 'matplotlib >= 1.5.3',\n 'mido == 1.2.6',\n 'numpy >= 1.11.0',\n 'pandas >= 0.18.1',\n 'pretty_midi >= 0.2.6',\n 'python-rtmidi',\n 'scipy >= 0.18.1',\n 'tensorflow >= 1.1.0',\n 'wheel',\n]\n\nCONSOLE_SCRIPTS = [\n 'magenta.interfaces.midi.magenta_midi',\n 'magenta.interfaces.midi.midi_clock',\n 'magenta.models.drums_rnn.drums_rnn_create_dataset',\n 'magenta.models.drums_rnn.drums_rnn_generate',\n 'magenta.models.drums_rnn.drums_rnn_train',\n 'magenta.models.image_stylization.image_stylization_create_dataset',\n 'magenta.models.image_stylization.image_stylization_evaluate',\n 'magenta.models.image_stylization.image_stylization_finetune',\n 'magenta.models.image_stylization.image_stylization_train',\n 'magenta.models.image_stylization.image_stylization_transform',\n 'magenta.models.improv_rnn.improv_rnn_create_dataset',\n 'magenta.models.improv_rnn.improv_rnn_generate',\n 'magenta.models.improv_rnn.improv_rnn_train',\n 'magenta.models.melody_rnn.melody_rnn_create_dataset',\n 'magenta.models.melody_rnn.melody_rnn_generate',\n 'magenta.models.melody_rnn.melody_rnn_train',\n 'magenta.models.nsynth.wavenet.nsynth_generate',\n 'magenta.models.nsynth.wavenet.nsynth_save_embeddings',\n 'magenta.models.performance_rnn.performance_rnn_create_dataset',\n 'magenta.models.performance_rnn.performance_rnn_generate',\n 'magenta.models.performance_rnn.performance_rnn_train',\n 'magenta.models.pianoroll_rnn_nade.pianoroll_rnn_nade_create_dataset',\n 'magenta.models.pianoroll_rnn_nade.pianoroll_rnn_nade_generate',\n 'magenta.models.pianoroll_rnn_nade.pianoroll_rnn_nade_train',\n 'magenta.models.polyphony_rnn.polyphony_rnn_create_dataset',\n 'magenta.models.polyphony_rnn.polyphony_rnn_generate',\n 'magenta.models.polyphony_rnn.polyphony_rnn_train',\n 'magenta.models.rl_tuner.rl_tuner_train',\n 'magenta.models.sketch_rnn.sketch_rnn_train',\n 'magenta.scripts.convert_dir_to_note_sequences',\n]\n\nsetup(\n name='magenta',\n version=__version__, # pylint: disable=undefined-variable\n description='Use machine learning to create art and music',\n long_description='',\n url='https://magenta.tensorflow.org/',\n author='Google Inc.',\n author_email='[email protected]',\n license='Apache 2',\n # PyPI package information.\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Libraries',\n ],\n keywords='tensorflow machine learning magenta music art',\n\n packages=find_packages(),\n install_requires=REQUIRED_PACKAGES,\n entry_points={\n 'console_scripts': ['%s = %s:console_entry_point' % (n, p) for n, p in\n ((s.split('.')[-1], s) for s in CONSOLE_SCRIPTS)],\n },\n\n include_package_data=True,\n package_data={\n 'magenta': ['models/image_stylization/evaluation_images/*.jpg'],\n },\n)\n", "path": "magenta/tools/pip/setup.py"}]}
1,611
135
gh_patches_debug_1278
rasdani/github-patches
git_diff
microsoft__botbuilder-python-1637
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- botbuilder-testing is missing install requirements ## Version botbuilder-testing 4.12.0 ## Describe the bug While installing botbuilder-testing for CI I got errors about missing dependencies. ## To Reproduce 1. `python3 -m venv .venv` 2. `. .venv/bin/activate` 3. `pip install -U pip wheel` 4. `pip install botbuilder-testing` 5. `python -c "from botbuilder.testing import DialogTestClient"` First error is missing `pytest`: ```python Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/home/calum/sureswift/jell/jell-bot-teams-v2/.venv-test/lib/python3.8/site-packages/botbuilder/testing/__init__.py", line 6, in <module> from .storage_base_tests import StorageBaseTests File "/home/calum/sureswift/jell/jell-bot-teams-v2/.venv-test/lib/python3.8/site-packages/botbuilder/testing/storage_base_tests.py", line 26, in <module> import pytest ModuleNotFoundError: No module named 'pytest' ``` 6. `pip install pytest` 7. `python -c 'from botbuilder.testing import DialogTestClient'` Next error is missing `botbuilder-azure`: ```python Traceback (most recent call last): File "<string>", line 1, in <module> File "/home/calum/sureswift/jell/jell-bot-teams-v2/.venv-test/lib/python3.8/site-packages/botbuilder/testing/__init__.py", line 6, in <module> from .storage_base_tests import StorageBaseTests File "/home/calum/sureswift/jell/jell-bot-teams-v2/.venv-test/lib/python3.8/site-packages/botbuilder/testing/storage_base_tests.py", line 27, in <module> from botbuilder.azure import CosmosDbStorage ModuleNotFoundError: No module named 'botbuilder.azure' ``` 8. `pip install botbuilder-azure` 9. `python -c 'from botbuilder.testing import DialogTestClient'` Command works! ## Expected behavior No errors after installing botbuilder-testing and importing module I do wonder if the requirement for pytest is not necessary, leaving the lib test-suite agnostic and could be refactored out? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `libraries/botbuilder-testing/setup.py` Content: ``` 1 # Copyright (c) Microsoft Corporation. All rights reserved. 2 # Licensed under the MIT License. 3 4 import os 5 from setuptools import setup 6 7 REQUIRES = [ 8 "botbuilder-schema==4.13.0", 9 "botbuilder-core==4.13.0", 10 "botbuilder-dialogs==4.13.0", 11 ] 12 13 TESTS_REQUIRES = ["aiounittest==1.3.0"] 14 15 root = os.path.abspath(os.path.dirname(__file__)) 16 17 with open(os.path.join(root, "botbuilder", "testing", "about.py")) as f: 18 package_info = {} 19 info = f.read() 20 exec(info, package_info) 21 22 with open(os.path.join(root, "README.rst"), encoding="utf-8") as f: 23 long_description = f.read() 24 25 setup( 26 name=package_info["__title__"], 27 version=package_info["__version__"], 28 url=package_info["__uri__"], 29 author=package_info["__author__"], 30 description=package_info["__description__"], 31 keywords="botbuilder-testing bots ai testing botframework botbuilder", 32 long_description=long_description, 33 long_description_content_type="text/x-rst", 34 license=package_info["__license__"], 35 packages=["botbuilder.testing"], 36 install_requires=REQUIRES + TESTS_REQUIRES, 37 tests_require=TESTS_REQUIRES, 38 include_package_data=True, 39 classifiers=[ 40 "Programming Language :: Python :: 3.7", 41 "Intended Audience :: Developers", 42 "License :: OSI Approved :: MIT License", 43 "Operating System :: OS Independent", 44 "Development Status :: 5 - Production/Stable", 45 "Topic :: Scientific/Engineering :: Artificial Intelligence", 46 ], 47 ) 48 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/libraries/botbuilder-testing/setup.py b/libraries/botbuilder-testing/setup.py --- a/libraries/botbuilder-testing/setup.py +++ b/libraries/botbuilder-testing/setup.py @@ -8,6 +8,8 @@ "botbuilder-schema==4.13.0", "botbuilder-core==4.13.0", "botbuilder-dialogs==4.13.0", + "botbuilder-azure==4.13.0", + "pytest~=6.2.3", ] TESTS_REQUIRES = ["aiounittest==1.3.0"]
{"golden_diff": "diff --git a/libraries/botbuilder-testing/setup.py b/libraries/botbuilder-testing/setup.py\n--- a/libraries/botbuilder-testing/setup.py\n+++ b/libraries/botbuilder-testing/setup.py\n@@ -8,6 +8,8 @@\n \"botbuilder-schema==4.13.0\",\n \"botbuilder-core==4.13.0\",\n \"botbuilder-dialogs==4.13.0\",\n+ \"botbuilder-azure==4.13.0\",\n+ \"pytest~=6.2.3\",\n ]\n \n TESTS_REQUIRES = [\"aiounittest==1.3.0\"]\n", "issue": "botbuilder-testing is missing install requirements\n## Version\r\n\r\nbotbuilder-testing 4.12.0\r\n\r\n## Describe the bug\r\nWhile installing botbuilder-testing for CI I got errors about missing dependencies. \r\n\r\n## To Reproduce\r\n\r\n1. `python3 -m venv .venv`\r\n2. `. .venv/bin/activate`\r\n3. `pip install -U pip wheel`\r\n4. `pip install botbuilder-testing`\r\n5. `python -c \"from botbuilder.testing import DialogTestClient\"`\r\n\r\nFirst error is missing `pytest`:\r\n```python\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/calum/sureswift/jell/jell-bot-teams-v2/.venv-test/lib/python3.8/site-packages/botbuilder/testing/__init__.py\", line 6, in <module>\r\n from .storage_base_tests import StorageBaseTests\r\n File \"/home/calum/sureswift/jell/jell-bot-teams-v2/.venv-test/lib/python3.8/site-packages/botbuilder/testing/storage_base_tests.py\", line 26, in <module>\r\n import pytest\r\nModuleNotFoundError: No module named 'pytest'\r\n```\r\n\r\n6. `pip install pytest`\r\n7. `python -c 'from botbuilder.testing import DialogTestClient'`\r\n\r\nNext error is missing `botbuilder-azure`:\r\n```python\r\nTraceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\n File \"/home/calum/sureswift/jell/jell-bot-teams-v2/.venv-test/lib/python3.8/site-packages/botbuilder/testing/__init__.py\", line 6, in <module>\r\n from .storage_base_tests import StorageBaseTests\r\n File \"/home/calum/sureswift/jell/jell-bot-teams-v2/.venv-test/lib/python3.8/site-packages/botbuilder/testing/storage_base_tests.py\", line 27, in <module>\r\n from botbuilder.azure import CosmosDbStorage\r\nModuleNotFoundError: No module named 'botbuilder.azure'\r\n```\r\n\r\n8. `pip install botbuilder-azure`\r\n9. `python -c 'from botbuilder.testing import DialogTestClient'`\r\n\r\nCommand works!\r\n\r\n## Expected behavior\r\nNo errors after installing botbuilder-testing and importing module\r\n\r\nI do wonder if the requirement for pytest is not necessary, leaving the lib test-suite agnostic and could be refactored out?\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nREQUIRES = [\n \"botbuilder-schema==4.13.0\",\n \"botbuilder-core==4.13.0\",\n \"botbuilder-dialogs==4.13.0\",\n]\n\nTESTS_REQUIRES = [\"aiounittest==1.3.0\"]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"botbuilder\", \"testing\", \"about.py\")) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=\"botbuilder-testing bots ai testing botframework botbuilder\",\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\"botbuilder.testing\"],\n install_requires=REQUIRES + TESTS_REQUIRES,\n tests_require=TESTS_REQUIRES,\n include_package_data=True,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botbuilder-testing/setup.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nREQUIRES = [\n \"botbuilder-schema==4.13.0\",\n \"botbuilder-core==4.13.0\",\n \"botbuilder-dialogs==4.13.0\",\n \"botbuilder-azure==4.13.0\",\n \"pytest~=6.2.3\",\n]\n\nTESTS_REQUIRES = [\"aiounittest==1.3.0\"]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"botbuilder\", \"testing\", \"about.py\")) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=\"botbuilder-testing bots ai testing botframework botbuilder\",\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\"botbuilder.testing\"],\n install_requires=REQUIRES + TESTS_REQUIRES,\n tests_require=TESTS_REQUIRES,\n include_package_data=True,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botbuilder-testing/setup.py"}]}
1,240
138
gh_patches_debug_10966
rasdani/github-patches
git_diff
docker__docker-py-2534
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Support OpenSSH's IdentityFile config option # Description As a developer I want `docker-py` to load the identity file according to my setting specified in `~/.ssh/config`, not only the "default" `id_rsa` file. # Example I have a `~/.ssh/config` file with contents: ```ssh Host myHost User myUser HostName myIp IdentityFile C:/Users/me/.ssh/id_rsa_custom_file IdentitiesOnly yes ``` Now I would like `docker-py` to pick up `C:/Users/me/.ssh/id_rsa_custom_file` for trying to connect to the remote host (For example when I want to run `docker-compose -H "ssh://myHost" ps`. However right now it does not. When I rename `C:/Users/me/.ssh/id_rsa_custom_file` to `C:/Users/me/.ssh/id_rsa` it works just fine. However this is not an option for me since I have multiple identity files and hosts that I want to use. # Proposal Add something like this to the [`sshcon.py`](https://github.com/docker/docker-py/blob/c285bee1bc59f6b2d65cee952b5522c88047a3bc/docker/transport/sshconn.py#L105): ```python if 'identityfile' in host_config: self.ssh_params['key_filename '] = host_config['identityfile'] ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docker/transport/sshconn.py` Content: ``` 1 import paramiko 2 import requests.adapters 3 import six 4 import logging 5 import os 6 7 from docker.transport.basehttpadapter import BaseHTTPAdapter 8 from .. import constants 9 10 if six.PY3: 11 import http.client as httplib 12 else: 13 import httplib 14 15 try: 16 import requests.packages.urllib3 as urllib3 17 except ImportError: 18 import urllib3 19 20 RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer 21 22 23 class SSHConnection(httplib.HTTPConnection, object): 24 def __init__(self, ssh_transport, timeout=60): 25 super(SSHConnection, self).__init__( 26 'localhost', timeout=timeout 27 ) 28 self.ssh_transport = ssh_transport 29 self.timeout = timeout 30 31 def connect(self): 32 sock = self.ssh_transport.open_session() 33 sock.settimeout(self.timeout) 34 sock.exec_command('docker system dial-stdio') 35 self.sock = sock 36 37 38 class SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool): 39 scheme = 'ssh' 40 41 def __init__(self, ssh_client, timeout=60, maxsize=10): 42 super(SSHConnectionPool, self).__init__( 43 'localhost', timeout=timeout, maxsize=maxsize 44 ) 45 self.ssh_transport = ssh_client.get_transport() 46 self.timeout = timeout 47 48 def _new_conn(self): 49 return SSHConnection(self.ssh_transport, self.timeout) 50 51 # When re-using connections, urllib3 calls fileno() on our 52 # SSH channel instance, quickly overloading our fd limit. To avoid this, 53 # we override _get_conn 54 def _get_conn(self, timeout): 55 conn = None 56 try: 57 conn = self.pool.get(block=self.block, timeout=timeout) 58 59 except AttributeError: # self.pool is None 60 raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.") 61 62 except six.moves.queue.Empty: 63 if self.block: 64 raise urllib3.exceptions.EmptyPoolError( 65 self, 66 "Pool reached maximum size and no more " 67 "connections are allowed." 68 ) 69 pass # Oh well, we'll create a new connection then 70 71 return conn or self._new_conn() 72 73 74 class SSHHTTPAdapter(BaseHTTPAdapter): 75 76 __attrs__ = requests.adapters.HTTPAdapter.__attrs__ + [ 77 'pools', 'timeout', 'ssh_client', 'ssh_params' 78 ] 79 80 def __init__(self, base_url, timeout=60, 81 pool_connections=constants.DEFAULT_NUM_POOLS): 82 logging.getLogger("paramiko").setLevel(logging.WARNING) 83 self.ssh_client = paramiko.SSHClient() 84 base_url = six.moves.urllib_parse.urlparse(base_url) 85 self.ssh_params = { 86 "hostname": base_url.hostname, 87 "port": base_url.port, 88 "username": base_url.username 89 } 90 ssh_config_file = os.path.expanduser("~/.ssh/config") 91 if os.path.exists(ssh_config_file): 92 conf = paramiko.SSHConfig() 93 with open(ssh_config_file) as f: 94 conf.parse(f) 95 host_config = conf.lookup(base_url.hostname) 96 self.ssh_conf = host_config 97 if 'proxycommand' in host_config: 98 self.ssh_params["sock"] = paramiko.ProxyCommand( 99 self.ssh_conf['proxycommand'] 100 ) 101 if 'hostname' in host_config: 102 self.ssh_params['hostname'] = host_config['hostname'] 103 if base_url.port is None and 'port' in host_config: 104 self.ssh_params['port'] = self.ssh_conf['port'] 105 if base_url.username is None and 'user' in host_config: 106 self.ssh_params['username'] = self.ssh_conf['user'] 107 108 self.ssh_client.load_system_host_keys() 109 self.ssh_client.set_missing_host_key_policy(paramiko.WarningPolicy()) 110 111 self._connect() 112 self.timeout = timeout 113 self.pools = RecentlyUsedContainer( 114 pool_connections, dispose_func=lambda p: p.close() 115 ) 116 super(SSHHTTPAdapter, self).__init__() 117 118 def _connect(self): 119 self.ssh_client.connect(**self.ssh_params) 120 121 def get_connection(self, url, proxies=None): 122 with self.pools.lock: 123 pool = self.pools.get(url) 124 if pool: 125 return pool 126 127 # Connection is closed try a reconnect 128 if not self.ssh_client.get_transport(): 129 self._connect() 130 131 pool = SSHConnectionPool( 132 self.ssh_client, self.timeout 133 ) 134 self.pools[url] = pool 135 136 return pool 137 138 def close(self): 139 super(SSHHTTPAdapter, self).close() 140 self.ssh_client.close() 141 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docker/transport/sshconn.py b/docker/transport/sshconn.py --- a/docker/transport/sshconn.py +++ b/docker/transport/sshconn.py @@ -100,6 +100,8 @@ ) if 'hostname' in host_config: self.ssh_params['hostname'] = host_config['hostname'] + if 'identityfile' in host_config: + self.ssh_params['key_filename'] = host_config['identityfile'] if base_url.port is None and 'port' in host_config: self.ssh_params['port'] = self.ssh_conf['port'] if base_url.username is None and 'user' in host_config:
{"golden_diff": "diff --git a/docker/transport/sshconn.py b/docker/transport/sshconn.py\n--- a/docker/transport/sshconn.py\n+++ b/docker/transport/sshconn.py\n@@ -100,6 +100,8 @@\n )\n if 'hostname' in host_config:\n self.ssh_params['hostname'] = host_config['hostname']\n+ if 'identityfile' in host_config:\n+ self.ssh_params['key_filename'] = host_config['identityfile']\n if base_url.port is None and 'port' in host_config:\n self.ssh_params['port'] = self.ssh_conf['port']\n if base_url.username is None and 'user' in host_config:\n", "issue": "Support OpenSSH's IdentityFile config option\n# Description\r\nAs a developer I want `docker-py` to load the identity file according to my setting specified in `~/.ssh/config`, not only the \"default\" `id_rsa` file.\r\n\r\n# Example\r\nI have a `~/.ssh/config` file with contents:\r\n```ssh\r\nHost myHost\r\n User myUser\r\n HostName myIp\r\n IdentityFile C:/Users/me/.ssh/id_rsa_custom_file\r\n IdentitiesOnly yes\r\n```\r\n\r\nNow I would like `docker-py` to pick up `C:/Users/me/.ssh/id_rsa_custom_file` for trying to connect to the remote host (For example when I want to run `docker-compose -H \"ssh://myHost\" ps`. However right now it does not.\r\n\r\nWhen I rename `C:/Users/me/.ssh/id_rsa_custom_file` to `C:/Users/me/.ssh/id_rsa` it works just fine. However this is not an option for me since I have multiple identity files and hosts that I want to use.\r\n\r\n# Proposal\r\nAdd something like this to the [`sshcon.py`](https://github.com/docker/docker-py/blob/c285bee1bc59f6b2d65cee952b5522c88047a3bc/docker/transport/sshconn.py#L105):\r\n```python\r\nif 'identityfile' in host_config:\r\n self.ssh_params['key_filename '] = host_config['identityfile']\r\n```\n", "before_files": [{"content": "import paramiko\nimport requests.adapters\nimport six\nimport logging\nimport os\n\nfrom docker.transport.basehttpadapter import BaseHTTPAdapter\nfrom .. import constants\n\nif six.PY3:\n import http.client as httplib\nelse:\n import httplib\n\ntry:\n import requests.packages.urllib3 as urllib3\nexcept ImportError:\n import urllib3\n\nRecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer\n\n\nclass SSHConnection(httplib.HTTPConnection, object):\n def __init__(self, ssh_transport, timeout=60):\n super(SSHConnection, self).__init__(\n 'localhost', timeout=timeout\n )\n self.ssh_transport = ssh_transport\n self.timeout = timeout\n\n def connect(self):\n sock = self.ssh_transport.open_session()\n sock.settimeout(self.timeout)\n sock.exec_command('docker system dial-stdio')\n self.sock = sock\n\n\nclass SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool):\n scheme = 'ssh'\n\n def __init__(self, ssh_client, timeout=60, maxsize=10):\n super(SSHConnectionPool, self).__init__(\n 'localhost', timeout=timeout, maxsize=maxsize\n )\n self.ssh_transport = ssh_client.get_transport()\n self.timeout = timeout\n\n def _new_conn(self):\n return SSHConnection(self.ssh_transport, self.timeout)\n\n # When re-using connections, urllib3 calls fileno() on our\n # SSH channel instance, quickly overloading our fd limit. To avoid this,\n # we override _get_conn\n def _get_conn(self, timeout):\n conn = None\n try:\n conn = self.pool.get(block=self.block, timeout=timeout)\n\n except AttributeError: # self.pool is None\n raise urllib3.exceptions.ClosedPoolError(self, \"Pool is closed.\")\n\n except six.moves.queue.Empty:\n if self.block:\n raise urllib3.exceptions.EmptyPoolError(\n self,\n \"Pool reached maximum size and no more \"\n \"connections are allowed.\"\n )\n pass # Oh well, we'll create a new connection then\n\n return conn or self._new_conn()\n\n\nclass SSHHTTPAdapter(BaseHTTPAdapter):\n\n __attrs__ = requests.adapters.HTTPAdapter.__attrs__ + [\n 'pools', 'timeout', 'ssh_client', 'ssh_params'\n ]\n\n def __init__(self, base_url, timeout=60,\n pool_connections=constants.DEFAULT_NUM_POOLS):\n logging.getLogger(\"paramiko\").setLevel(logging.WARNING)\n self.ssh_client = paramiko.SSHClient()\n base_url = six.moves.urllib_parse.urlparse(base_url)\n self.ssh_params = {\n \"hostname\": base_url.hostname,\n \"port\": base_url.port,\n \"username\": base_url.username\n }\n ssh_config_file = os.path.expanduser(\"~/.ssh/config\")\n if os.path.exists(ssh_config_file):\n conf = paramiko.SSHConfig()\n with open(ssh_config_file) as f:\n conf.parse(f)\n host_config = conf.lookup(base_url.hostname)\n self.ssh_conf = host_config\n if 'proxycommand' in host_config:\n self.ssh_params[\"sock\"] = paramiko.ProxyCommand(\n self.ssh_conf['proxycommand']\n )\n if 'hostname' in host_config:\n self.ssh_params['hostname'] = host_config['hostname']\n if base_url.port is None and 'port' in host_config:\n self.ssh_params['port'] = self.ssh_conf['port']\n if base_url.username is None and 'user' in host_config:\n self.ssh_params['username'] = self.ssh_conf['user']\n\n self.ssh_client.load_system_host_keys()\n self.ssh_client.set_missing_host_key_policy(paramiko.WarningPolicy())\n\n self._connect()\n self.timeout = timeout\n self.pools = RecentlyUsedContainer(\n pool_connections, dispose_func=lambda p: p.close()\n )\n super(SSHHTTPAdapter, self).__init__()\n\n def _connect(self):\n self.ssh_client.connect(**self.ssh_params)\n\n def get_connection(self, url, proxies=None):\n with self.pools.lock:\n pool = self.pools.get(url)\n if pool:\n return pool\n\n # Connection is closed try a reconnect\n if not self.ssh_client.get_transport():\n self._connect()\n\n pool = SSHConnectionPool(\n self.ssh_client, self.timeout\n )\n self.pools[url] = pool\n\n return pool\n\n def close(self):\n super(SSHHTTPAdapter, self).close()\n self.ssh_client.close()\n", "path": "docker/transport/sshconn.py"}], "after_files": [{"content": "import paramiko\nimport requests.adapters\nimport six\nimport logging\nimport os\n\nfrom docker.transport.basehttpadapter import BaseHTTPAdapter\nfrom .. import constants\n\nif six.PY3:\n import http.client as httplib\nelse:\n import httplib\n\ntry:\n import requests.packages.urllib3 as urllib3\nexcept ImportError:\n import urllib3\n\nRecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer\n\n\nclass SSHConnection(httplib.HTTPConnection, object):\n def __init__(self, ssh_transport, timeout=60):\n super(SSHConnection, self).__init__(\n 'localhost', timeout=timeout\n )\n self.ssh_transport = ssh_transport\n self.timeout = timeout\n\n def connect(self):\n sock = self.ssh_transport.open_session()\n sock.settimeout(self.timeout)\n sock.exec_command('docker system dial-stdio')\n self.sock = sock\n\n\nclass SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool):\n scheme = 'ssh'\n\n def __init__(self, ssh_client, timeout=60, maxsize=10):\n super(SSHConnectionPool, self).__init__(\n 'localhost', timeout=timeout, maxsize=maxsize\n )\n self.ssh_transport = ssh_client.get_transport()\n self.timeout = timeout\n\n def _new_conn(self):\n return SSHConnection(self.ssh_transport, self.timeout)\n\n # When re-using connections, urllib3 calls fileno() on our\n # SSH channel instance, quickly overloading our fd limit. To avoid this,\n # we override _get_conn\n def _get_conn(self, timeout):\n conn = None\n try:\n conn = self.pool.get(block=self.block, timeout=timeout)\n\n except AttributeError: # self.pool is None\n raise urllib3.exceptions.ClosedPoolError(self, \"Pool is closed.\")\n\n except six.moves.queue.Empty:\n if self.block:\n raise urllib3.exceptions.EmptyPoolError(\n self,\n \"Pool reached maximum size and no more \"\n \"connections are allowed.\"\n )\n pass # Oh well, we'll create a new connection then\n\n return conn or self._new_conn()\n\n\nclass SSHHTTPAdapter(BaseHTTPAdapter):\n\n __attrs__ = requests.adapters.HTTPAdapter.__attrs__ + [\n 'pools', 'timeout', 'ssh_client', 'ssh_params'\n ]\n\n def __init__(self, base_url, timeout=60,\n pool_connections=constants.DEFAULT_NUM_POOLS):\n logging.getLogger(\"paramiko\").setLevel(logging.WARNING)\n self.ssh_client = paramiko.SSHClient()\n base_url = six.moves.urllib_parse.urlparse(base_url)\n self.ssh_params = {\n \"hostname\": base_url.hostname,\n \"port\": base_url.port,\n \"username\": base_url.username\n }\n ssh_config_file = os.path.expanduser(\"~/.ssh/config\")\n if os.path.exists(ssh_config_file):\n conf = paramiko.SSHConfig()\n with open(ssh_config_file) as f:\n conf.parse(f)\n host_config = conf.lookup(base_url.hostname)\n self.ssh_conf = host_config\n if 'proxycommand' in host_config:\n self.ssh_params[\"sock\"] = paramiko.ProxyCommand(\n self.ssh_conf['proxycommand']\n )\n if 'hostname' in host_config:\n self.ssh_params['hostname'] = host_config['hostname']\n if 'identityfile' in host_config:\n self.ssh_params['key_filename'] = host_config['identityfile']\n if base_url.port is None and 'port' in host_config:\n self.ssh_params['port'] = self.ssh_conf['port']\n if base_url.username is None and 'user' in host_config:\n self.ssh_params['username'] = self.ssh_conf['user']\n\n self.ssh_client.load_system_host_keys()\n self.ssh_client.set_missing_host_key_policy(paramiko.WarningPolicy())\n\n self._connect()\n self.timeout = timeout\n self.pools = RecentlyUsedContainer(\n pool_connections, dispose_func=lambda p: p.close()\n )\n super(SSHHTTPAdapter, self).__init__()\n\n def _connect(self):\n self.ssh_client.connect(**self.ssh_params)\n\n def get_connection(self, url, proxies=None):\n with self.pools.lock:\n pool = self.pools.get(url)\n if pool:\n return pool\n\n # Connection is closed try a reconnect\n if not self.ssh_client.get_transport():\n self._connect()\n\n pool = SSHConnectionPool(\n self.ssh_client, self.timeout\n )\n self.pools[url] = pool\n\n return pool\n\n def close(self):\n super(SSHHTTPAdapter, self).close()\n self.ssh_client.close()\n", "path": "docker/transport/sshconn.py"}]}
1,903
153
gh_patches_debug_61215
rasdani/github-patches
git_diff
scikit-hep__pyhf-1124
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Top-Level Python API methods don't have docstrings rendered in docs # Description The top level Python API methods pages on the docs website doesn't contain any of the rendered docstrings. For example, the `pyhf.set_backend()` API has examples (and it rather important for new users) https://github.com/scikit-hep/pyhf/blob/e55eea408d7c28e3109338de96252119ac63f87a/src/pyhf/__init__.py#L42-L52 but the docs website doesn't show any of this ![not_rendered](https://user-images.githubusercontent.com/5142394/88368070-def79000-cd52-11ea-9f0e-5acbb4d45c6b.png) # Expected Behavior Have the docstrings be rendered in the docs # Actual Behavior c.f. above # Steps to Reproduce Build the docs # Checklist - [x] Run `git fetch` to get the most up to date version of `master` - [x] Searched through existing Issues to confirm this is not a duplicate issue - [x] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/pyhf/events.py` Content: ``` 1 import weakref 2 3 __events = {} 4 __disabled_events = set([]) 5 6 7 def noop(*args, **kwargs): 8 pass 9 10 11 class WeakList(list): 12 def append(self, item): 13 list.append(self, weakref.WeakMethod(item, self.remove)) 14 15 16 class Callables(WeakList): 17 def __call__(self, *args, **kwargs): 18 for func in self: 19 # weakref: needs to be de-ref'd first before calling 20 func()(*args, **kwargs) 21 22 def __repr__(self): 23 return "Callables(%s)" % list.__repr__(self) 24 25 26 def subscribe(event): 27 """ 28 This is meant to be used as a decorator. 29 """ 30 # Example: 31 # 32 # >>> @pyhf.events.subscribe('myevent') 33 # ... def test(a,b): 34 # ... print a+b 35 # ... 36 # >>> pyhf.events.trigger_myevent(1,2) 37 # 3 38 global __events 39 40 def __decorator(func): 41 __events.setdefault(event, Callables()).append(func) 42 return func 43 44 return __decorator 45 46 47 def register(event): 48 """ 49 This is meant to be used as a decorator to register a function for triggering events. 50 51 This creates two events: "<event_name>::before" and "<event_name>::after" 52 """ 53 # Examples: 54 # 55 # >>> @pyhf.events.register('test_func') 56 # ... def test(a,b): 57 # ... print a+b 58 # ... 59 # >>> @pyhf.events.subscribe('test_func::before') 60 # ... def precall(): 61 # ... print 'before call' 62 # ... 63 # >>> @pyhf.events.subscribe('test_func::after') 64 # ... def postcall(): 65 # ... print 'after call' 66 # ... 67 # >>> test(1,2) 68 # "before call" 69 # 3 70 # "after call" 71 # >>> 72 73 def _register(func): 74 def register_wrapper(*args, **kwargs): 75 trigger("{0:s}::before".format(event))() 76 result = func(*args, **kwargs) 77 trigger("{0:s}::after".format(event))() 78 return result 79 80 return register_wrapper 81 82 return _register 83 84 85 def trigger(event): 86 """ 87 Trigger an event if not disabled. 88 """ 89 global __events, __disabled_events, noop 90 is_noop = bool(event in __disabled_events or event not in __events) 91 return noop if is_noop else __events.get(event) 92 93 94 def disable(event): 95 """ 96 Disable an event from firing. 97 """ 98 global __disabled_events 99 __disabled_events.add(event) 100 101 102 def enable(event): 103 """ 104 Enable an event to be fired if disabled. 105 """ 106 global __disabled_events 107 __disabled_events.remove(event) 108 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/pyhf/events.py b/src/pyhf/events.py --- a/src/pyhf/events.py +++ b/src/pyhf/events.py @@ -1,4 +1,5 @@ import weakref +from functools import wraps __events = {} __disabled_events = set([]) @@ -71,6 +72,7 @@ # >>> def _register(func): + @wraps(func) def register_wrapper(*args, **kwargs): trigger("{0:s}::before".format(event))() result = func(*args, **kwargs)
{"golden_diff": "diff --git a/src/pyhf/events.py b/src/pyhf/events.py\n--- a/src/pyhf/events.py\n+++ b/src/pyhf/events.py\n@@ -1,4 +1,5 @@\n import weakref\n+from functools import wraps\n \n __events = {}\n __disabled_events = set([])\n@@ -71,6 +72,7 @@\n # >>>\n \n def _register(func):\n+ @wraps(func)\n def register_wrapper(*args, **kwargs):\n trigger(\"{0:s}::before\".format(event))()\n result = func(*args, **kwargs)\n", "issue": "Top-Level Python API methods don't have docstrings rendered in docs\n# Description\r\n\r\nThe top level Python API methods pages on the docs website doesn't contain any of the rendered docstrings. For example, the `pyhf.set_backend()` API has examples (and it rather important for new users)\r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/e55eea408d7c28e3109338de96252119ac63f87a/src/pyhf/__init__.py#L42-L52\r\n\r\nbut the docs website doesn't show any of this\r\n\r\n![not_rendered](https://user-images.githubusercontent.com/5142394/88368070-def79000-cd52-11ea-9f0e-5acbb4d45c6b.png)\r\n\r\n\r\n# Expected Behavior\r\n\r\nHave the docstrings be rendered in the docs \r\n\r\n# Actual Behavior\r\n\r\nc.f. above\r\n\r\n# Steps to Reproduce\r\n\r\nBuild the docs\r\n\r\n# Checklist\r\n\r\n- [x] Run `git fetch` to get the most up to date version of `master`\r\n- [x] Searched through existing Issues to confirm this is not a duplicate issue\r\n- [x] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue\r\n\n", "before_files": [{"content": "import weakref\n\n__events = {}\n__disabled_events = set([])\n\n\ndef noop(*args, **kwargs):\n pass\n\n\nclass WeakList(list):\n def append(self, item):\n list.append(self, weakref.WeakMethod(item, self.remove))\n\n\nclass Callables(WeakList):\n def __call__(self, *args, **kwargs):\n for func in self:\n # weakref: needs to be de-ref'd first before calling\n func()(*args, **kwargs)\n\n def __repr__(self):\n return \"Callables(%s)\" % list.__repr__(self)\n\n\ndef subscribe(event):\n \"\"\"\n This is meant to be used as a decorator.\n \"\"\"\n # Example:\n #\n # >>> @pyhf.events.subscribe('myevent')\n # ... def test(a,b):\n # ... print a+b\n # ...\n # >>> pyhf.events.trigger_myevent(1,2)\n # 3\n global __events\n\n def __decorator(func):\n __events.setdefault(event, Callables()).append(func)\n return func\n\n return __decorator\n\n\ndef register(event):\n \"\"\"\n This is meant to be used as a decorator to register a function for triggering events.\n\n This creates two events: \"<event_name>::before\" and \"<event_name>::after\"\n \"\"\"\n # Examples:\n #\n # >>> @pyhf.events.register('test_func')\n # ... def test(a,b):\n # ... print a+b\n # ...\n # >>> @pyhf.events.subscribe('test_func::before')\n # ... def precall():\n # ... print 'before call'\n # ...\n # >>> @pyhf.events.subscribe('test_func::after')\n # ... def postcall():\n # ... print 'after call'\n # ...\n # >>> test(1,2)\n # \"before call\"\n # 3\n # \"after call\"\n # >>>\n\n def _register(func):\n def register_wrapper(*args, **kwargs):\n trigger(\"{0:s}::before\".format(event))()\n result = func(*args, **kwargs)\n trigger(\"{0:s}::after\".format(event))()\n return result\n\n return register_wrapper\n\n return _register\n\n\ndef trigger(event):\n \"\"\"\n Trigger an event if not disabled.\n \"\"\"\n global __events, __disabled_events, noop\n is_noop = bool(event in __disabled_events or event not in __events)\n return noop if is_noop else __events.get(event)\n\n\ndef disable(event):\n \"\"\"\n Disable an event from firing.\n \"\"\"\n global __disabled_events\n __disabled_events.add(event)\n\n\ndef enable(event):\n \"\"\"\n Enable an event to be fired if disabled.\n \"\"\"\n global __disabled_events\n __disabled_events.remove(event)\n", "path": "src/pyhf/events.py"}], "after_files": [{"content": "import weakref\nfrom functools import wraps\n\n__events = {}\n__disabled_events = set([])\n\n\ndef noop(*args, **kwargs):\n pass\n\n\nclass WeakList(list):\n def append(self, item):\n list.append(self, weakref.WeakMethod(item, self.remove))\n\n\nclass Callables(WeakList):\n def __call__(self, *args, **kwargs):\n for func in self:\n # weakref: needs to be de-ref'd first before calling\n func()(*args, **kwargs)\n\n def __repr__(self):\n return \"Callables(%s)\" % list.__repr__(self)\n\n\ndef subscribe(event):\n \"\"\"\n This is meant to be used as a decorator.\n \"\"\"\n # Example:\n #\n # >>> @pyhf.events.subscribe('myevent')\n # ... def test(a,b):\n # ... print a+b\n # ...\n # >>> pyhf.events.trigger_myevent(1,2)\n # 3\n global __events\n\n def __decorator(func):\n __events.setdefault(event, Callables()).append(func)\n return func\n\n return __decorator\n\n\ndef register(event):\n \"\"\"\n This is meant to be used as a decorator to register a function for triggering events.\n\n This creates two events: \"<event_name>::before\" and \"<event_name>::after\"\n \"\"\"\n # Examples:\n #\n # >>> @pyhf.events.register('test_func')\n # ... def test(a,b):\n # ... print a+b\n # ...\n # >>> @pyhf.events.subscribe('test_func::before')\n # ... def precall():\n # ... print 'before call'\n # ...\n # >>> @pyhf.events.subscribe('test_func::after')\n # ... def postcall():\n # ... print 'after call'\n # ...\n # >>> test(1,2)\n # \"before call\"\n # 3\n # \"after call\"\n # >>>\n\n def _register(func):\n @wraps(func)\n def register_wrapper(*args, **kwargs):\n trigger(\"{0:s}::before\".format(event))()\n result = func(*args, **kwargs)\n trigger(\"{0:s}::after\".format(event))()\n return result\n\n return register_wrapper\n\n return _register\n\n\ndef trigger(event):\n \"\"\"\n Trigger an event if not disabled.\n \"\"\"\n global __events, __disabled_events, noop\n is_noop = bool(event in __disabled_events or event not in __events)\n return noop if is_noop else __events.get(event)\n\n\ndef disable(event):\n \"\"\"\n Disable an event from firing.\n \"\"\"\n global __disabled_events\n __disabled_events.add(event)\n\n\ndef enable(event):\n \"\"\"\n Enable an event to be fired if disabled.\n \"\"\"\n global __disabled_events\n __disabled_events.remove(event)\n", "path": "src/pyhf/events.py"}]}
1,401
125
gh_patches_debug_22018
rasdani/github-patches
git_diff
MycroftAI__mycroft-core-2535
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Italian language with Google TTS I'm running Mycroft on Manjaro Linux, with Italian language. I tried both the "British male" and "American male" voices, and they do not speak Italian at all. So I decided to try the Google Voice. That way, no sound is emitted. Whenever Mycroft tries to speak, I see this error in logs (file audio.log): ``` 2020-04-13 10:45:39.632 | INFO | 195922 | mycroft.audio.speech:mute_and_speak:127 | Speak: Va uno spettacolo 2020-04-13 10:45:40.070 | ERROR | 195922 | mycroft.audio.speech:handle_speak:99 | Error in mute_and_speak Traceback (most recent call last): File "/home/luke/git/mycroft-core/mycroft/audio/speech.py", line 95, in handle_speak mute_and_speak(chunk, ident, listen) File "/home/luke/git/mycroft-core/mycroft/audio/speech.py", line 129, in mute_and_speak tts.execute(utterance, ident, listen) File "/home/luke/git/mycroft-core/mycroft/tts/tts.py", line 337, in execute wav_file, phonemes = self.get_tts(sentence, wav_file) File "/home/luke/git/mycroft-core/mycroft/tts/google_tts.py", line 35, in get_tts tts = gTTS(text=sentence, lang=self.lang) File "/home/luke/git/mycroft-core/.venv/lib/python3.8/site-packages/gtts/tts.py", line 121, in __init__ raise ValueError("Language not supported: %s" % lang) ValueError: Language not supported: it-it ``` The problem is that Google TTS can handle "it", but it cannot handle "it-it". I will release a PR shortly for fixing this. (So far, Google Voice seems my only option for Italian language, so I really need that). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mycroft/tts/google_tts.py` Content: ``` 1 # Copyright 2017 Mycroft AI Inc. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 # 15 from gtts import gTTS 16 17 from .tts import TTS, TTSValidator 18 19 20 class GoogleTTS(TTS): 21 """Interface to google TTS.""" 22 def __init__(self, lang, config): 23 super(GoogleTTS, self).__init__(lang, config, GoogleTTSValidator( 24 self), 'mp3') 25 26 def get_tts(self, sentence, wav_file): 27 """Fetch tts audio using gTTS. 28 29 Arguments: 30 sentence (str): Sentence to generate audio for 31 wav_file (str): output file path 32 Returns: 33 Tuple ((str) written file, None) 34 """ 35 tts = gTTS(text=sentence, lang=self.lang) 36 tts.save(wav_file) 37 return (wav_file, None) # No phonemes 38 39 40 class GoogleTTSValidator(TTSValidator): 41 def __init__(self, tts): 42 super(GoogleTTSValidator, self).__init__(tts) 43 44 def validate_lang(self): 45 # TODO 46 pass 47 48 def validate_connection(self): 49 try: 50 gTTS(text='Hi').save(self.tts.filename) 51 except Exception: 52 raise Exception( 53 'GoogleTTS server could not be verified. Please check your ' 54 'internet connection.') 55 56 def get_tts_class(self): 57 return GoogleTTS 58 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mycroft/tts/google_tts.py b/mycroft/tts/google_tts.py --- a/mycroft/tts/google_tts.py +++ b/mycroft/tts/google_tts.py @@ -13,13 +13,19 @@ # limitations under the License. # from gtts import gTTS +from gtts.lang import tts_langs from .tts import TTS, TTSValidator +supported_langs = tts_langs() + class GoogleTTS(TTS): """Interface to google TTS.""" def __init__(self, lang, config): + if lang.lower() not in supported_langs and \ + lang[:2].lower() in supported_langs: + lang = lang[:2] super(GoogleTTS, self).__init__(lang, config, GoogleTTSValidator( self), 'mp3') @@ -42,8 +48,10 @@ super(GoogleTTSValidator, self).__init__(tts) def validate_lang(self): - # TODO - pass + lang = self.tts.lang + if lang.lower() not in supported_langs: + raise ValueError("Language not supported by gTTS: {}" + .format(lang)) def validate_connection(self): try:
{"golden_diff": "diff --git a/mycroft/tts/google_tts.py b/mycroft/tts/google_tts.py\n--- a/mycroft/tts/google_tts.py\n+++ b/mycroft/tts/google_tts.py\n@@ -13,13 +13,19 @@\n # limitations under the License.\n #\n from gtts import gTTS\n+from gtts.lang import tts_langs\n \n from .tts import TTS, TTSValidator\n \n+supported_langs = tts_langs()\n+\n \n class GoogleTTS(TTS):\n \"\"\"Interface to google TTS.\"\"\"\n def __init__(self, lang, config):\n+ if lang.lower() not in supported_langs and \\\n+ lang[:2].lower() in supported_langs:\n+ lang = lang[:2]\n super(GoogleTTS, self).__init__(lang, config, GoogleTTSValidator(\n self), 'mp3')\n \n@@ -42,8 +48,10 @@\n super(GoogleTTSValidator, self).__init__(tts)\n \n def validate_lang(self):\n- # TODO\n- pass\n+ lang = self.tts.lang\n+ if lang.lower() not in supported_langs:\n+ raise ValueError(\"Language not supported by gTTS: {}\"\n+ .format(lang))\n \n def validate_connection(self):\n try:\n", "issue": "Italian language with Google TTS\nI'm running Mycroft on Manjaro Linux, with Italian language.\r\nI tried both the \"British male\" and \"American male\" voices, and they do not speak Italian at all. So I decided to try the Google Voice.\r\nThat way, no sound is emitted. Whenever Mycroft tries to speak, I see this error in logs (file audio.log):\r\n\r\n```\r\n2020-04-13 10:45:39.632 | INFO | 195922 | mycroft.audio.speech:mute_and_speak:127 | Speak: Va uno spettacolo\r\n2020-04-13 10:45:40.070 | ERROR | 195922 | mycroft.audio.speech:handle_speak:99 | Error in mute_and_speak\r\nTraceback (most recent call last):\r\n File \"/home/luke/git/mycroft-core/mycroft/audio/speech.py\", line 95, in handle_speak\r\n mute_and_speak(chunk, ident, listen)\r\n File \"/home/luke/git/mycroft-core/mycroft/audio/speech.py\", line 129, in mute_and_speak\r\n tts.execute(utterance, ident, listen)\r\n File \"/home/luke/git/mycroft-core/mycroft/tts/tts.py\", line 337, in execute\r\n wav_file, phonemes = self.get_tts(sentence, wav_file)\r\n File \"/home/luke/git/mycroft-core/mycroft/tts/google_tts.py\", line 35, in get_tts\r\n tts = gTTS(text=sentence, lang=self.lang)\r\n File \"/home/luke/git/mycroft-core/.venv/lib/python3.8/site-packages/gtts/tts.py\", line 121, in __init__\r\n raise ValueError(\"Language not supported: %s\" % lang)\r\nValueError: Language not supported: it-it\r\n```\r\n\r\nThe problem is that Google TTS can handle \"it\", but it cannot handle \"it-it\".\r\n\r\nI will release a PR shortly for fixing this. (So far, Google Voice seems my only option for Italian language, so I really need that).\r\n\n", "before_files": [{"content": "# Copyright 2017 Mycroft AI Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom gtts import gTTS\n\nfrom .tts import TTS, TTSValidator\n\n\nclass GoogleTTS(TTS):\n \"\"\"Interface to google TTS.\"\"\"\n def __init__(self, lang, config):\n super(GoogleTTS, self).__init__(lang, config, GoogleTTSValidator(\n self), 'mp3')\n\n def get_tts(self, sentence, wav_file):\n \"\"\"Fetch tts audio using gTTS.\n\n Arguments:\n sentence (str): Sentence to generate audio for\n wav_file (str): output file path\n Returns:\n Tuple ((str) written file, None)\n \"\"\"\n tts = gTTS(text=sentence, lang=self.lang)\n tts.save(wav_file)\n return (wav_file, None) # No phonemes\n\n\nclass GoogleTTSValidator(TTSValidator):\n def __init__(self, tts):\n super(GoogleTTSValidator, self).__init__(tts)\n\n def validate_lang(self):\n # TODO\n pass\n\n def validate_connection(self):\n try:\n gTTS(text='Hi').save(self.tts.filename)\n except Exception:\n raise Exception(\n 'GoogleTTS server could not be verified. Please check your '\n 'internet connection.')\n\n def get_tts_class(self):\n return GoogleTTS\n", "path": "mycroft/tts/google_tts.py"}], "after_files": [{"content": "# Copyright 2017 Mycroft AI Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom gtts import gTTS\nfrom gtts.lang import tts_langs\n\nfrom .tts import TTS, TTSValidator\n\nsupported_langs = tts_langs()\n\n\nclass GoogleTTS(TTS):\n \"\"\"Interface to google TTS.\"\"\"\n def __init__(self, lang, config):\n if lang.lower() not in supported_langs and \\\n lang[:2].lower() in supported_langs:\n lang = lang[:2]\n super(GoogleTTS, self).__init__(lang, config, GoogleTTSValidator(\n self), 'mp3')\n\n def get_tts(self, sentence, wav_file):\n \"\"\"Fetch tts audio using gTTS.\n\n Arguments:\n sentence (str): Sentence to generate audio for\n wav_file (str): output file path\n Returns:\n Tuple ((str) written file, None)\n \"\"\"\n tts = gTTS(text=sentence, lang=self.lang)\n tts.save(wav_file)\n return (wav_file, None) # No phonemes\n\n\nclass GoogleTTSValidator(TTSValidator):\n def __init__(self, tts):\n super(GoogleTTSValidator, self).__init__(tts)\n\n def validate_lang(self):\n lang = self.tts.lang\n if lang.lower() not in supported_langs:\n raise ValueError(\"Language not supported by gTTS: {}\"\n .format(lang))\n\n def validate_connection(self):\n try:\n gTTS(text='Hi').save(self.tts.filename)\n except Exception:\n raise Exception(\n 'GoogleTTS server could not be verified. Please check your '\n 'internet connection.')\n\n def get_tts_class(self):\n return GoogleTTS\n", "path": "mycroft/tts/google_tts.py"}]}
1,282
290
gh_patches_debug_27633
rasdani/github-patches
git_diff
fedora-infra__bodhi-417
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Markdown unordered lists do not work in update notes Filling the update notes under https://bodhi.fedoraproject.org/updates/new with unordered lists in markdown syntax (https://help.github.com/articles/markdown-basics/) does not work, neither in the preview nor after submitting, visit e.g. https://bodhi.fedoraproject.org/updates/phpMyAdmin-4.4.14-1.fc23 with Firefox 38 ESR from RHEL/CentOS 6. It seems to work properly with a text browser such as w3m (CSS issue?). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bodhi/ffmarkdown.py` Content: ``` 1 # This program is free software; you can redistribute it and/or 2 # modify it under the terms of the GNU General Public License 3 # as published by the Free Software Foundation; either version 2 4 # of the License, or (at your option) any later version. 5 # 6 # This program is distributed in the hope that it will be useful, 7 # but WITHOUT ANY WARRANTY; without even the implied warranty of 8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 9 # GNU General Public License for more details. 10 # 11 # You should have received a copy of the GNU General Public License 12 # along with this program; if not, write to the Free Software 13 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, 14 # USA. 15 16 """ Fedora-flavored Markdown 17 18 Author: Ralph Bean <[email protected]> 19 """ 20 21 import markdown.inlinepatterns 22 import markdown.util 23 import pyramid.threadlocal 24 25 26 def user_url(name): 27 request = pyramid.threadlocal.get_current_request() 28 return request.route_url('user', name=name) 29 30 31 def bugzilla_url(idx): 32 return "https://bugzilla.redhat.com/show_bug.cgi?id=%s" % idx 33 34 35 def inject(): 36 """ Hack out python-markdown to do the autolinking that we want. """ 37 38 # First, make it so that bare links get automatically linkified. 39 markdown.inlinepatterns.AUTOLINK_RE = '(%s)' % '|'.join([ 40 r'<(?:f|ht)tps?://[^>]*>', 41 r'\b(?:f|ht)tps?://[^)<>\s]+[^.,)<>\s]', 42 r'\bwww\.[^)<>\s]+[^.,)<>\s]', 43 r'[^(<\s]+\.(?:com|net|org)\b', 44 ]) 45 46 # Second, build some Pattern objects for @mentions, #bugs, etc... 47 class MentionPattern(markdown.inlinepatterns.Pattern): 48 def handleMatch(self, m): 49 el = markdown.util.etree.Element("a") 50 name = markdown.util.AtomicString(m.group(2)) 51 el.set('href', user_url(name[1:])) 52 el.text = name 53 return el 54 55 class BugzillaPattern(markdown.inlinepatterns.Pattern): 56 def handleMatch(self, m): 57 el = markdown.util.etree.Element("a") 58 idx = markdown.util.AtomicString(m.group(2)) 59 el.set('href', bugzilla_url(idx[1:])) 60 el.text = idx 61 return el 62 63 MENTION_RE = r'(@\w+)' 64 BUGZILLA_RE = r'(#[0-9]{5,})' 65 66 # Lastly, monkey-patch the build_inlinepatterns func to insert our patterns 67 original_builder = markdown.build_inlinepatterns 68 69 def extended_builder(md_instance, **kwargs): 70 patterns = original_builder(md_instance, **kwargs) 71 patterns['mention'] = MentionPattern(MENTION_RE, md_instance) 72 patterns['bugzillas'] = BugzillaPattern(BUGZILLA_RE, md_instance) 73 return patterns 74 75 markdown.build_inlinepatterns = extended_builder 76 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bodhi/ffmarkdown.py b/bodhi/ffmarkdown.py --- a/bodhi/ffmarkdown.py +++ b/bodhi/ffmarkdown.py @@ -19,6 +19,7 @@ """ import markdown.inlinepatterns +import markdown.postprocessors import markdown.util import pyramid.threadlocal @@ -63,13 +64,26 @@ MENTION_RE = r'(@\w+)' BUGZILLA_RE = r'(#[0-9]{5,})' + class SurroundProcessor(markdown.postprocessors.Postprocessor): + def run(self, text): + return "<div class='markdown'>" + text + "</div>" + # Lastly, monkey-patch the build_inlinepatterns func to insert our patterns - original_builder = markdown.build_inlinepatterns + original_pattern_builder = markdown.build_inlinepatterns - def extended_builder(md_instance, **kwargs): - patterns = original_builder(md_instance, **kwargs) + def extended_pattern_builder(md_instance, **kwargs): + patterns = original_pattern_builder(md_instance, **kwargs) patterns['mention'] = MentionPattern(MENTION_RE, md_instance) patterns['bugzillas'] = BugzillaPattern(BUGZILLA_RE, md_instance) return patterns - markdown.build_inlinepatterns = extended_builder + markdown.build_inlinepatterns = extended_pattern_builder + + original_postprocessor_builder = markdown.build_postprocessors + + def extended_postprocessor_builder(md_instance, **kwargs): + processors = original_postprocessor_builder(md_instance, **kwargs) + processors['surround'] = SurroundProcessor(md_instance) + return processors + + markdown.build_postprocessors = extended_postprocessor_builder
{"golden_diff": "diff --git a/bodhi/ffmarkdown.py b/bodhi/ffmarkdown.py\n--- a/bodhi/ffmarkdown.py\n+++ b/bodhi/ffmarkdown.py\n@@ -19,6 +19,7 @@\n \"\"\"\n \n import markdown.inlinepatterns\n+import markdown.postprocessors\n import markdown.util\n import pyramid.threadlocal\n \n@@ -63,13 +64,26 @@\n MENTION_RE = r'(@\\w+)'\n BUGZILLA_RE = r'(#[0-9]{5,})'\n \n+ class SurroundProcessor(markdown.postprocessors.Postprocessor):\n+ def run(self, text):\n+ return \"<div class='markdown'>\" + text + \"</div>\"\n+\n # Lastly, monkey-patch the build_inlinepatterns func to insert our patterns\n- original_builder = markdown.build_inlinepatterns\n+ original_pattern_builder = markdown.build_inlinepatterns\n \n- def extended_builder(md_instance, **kwargs):\n- patterns = original_builder(md_instance, **kwargs)\n+ def extended_pattern_builder(md_instance, **kwargs):\n+ patterns = original_pattern_builder(md_instance, **kwargs)\n patterns['mention'] = MentionPattern(MENTION_RE, md_instance)\n patterns['bugzillas'] = BugzillaPattern(BUGZILLA_RE, md_instance)\n return patterns\n \n- markdown.build_inlinepatterns = extended_builder\n+ markdown.build_inlinepatterns = extended_pattern_builder\n+\n+ original_postprocessor_builder = markdown.build_postprocessors\n+\n+ def extended_postprocessor_builder(md_instance, **kwargs):\n+ processors = original_postprocessor_builder(md_instance, **kwargs)\n+ processors['surround'] = SurroundProcessor(md_instance)\n+ return processors\n+\n+ markdown.build_postprocessors = extended_postprocessor_builder\n", "issue": "Markdown unordered lists do not work in update notes\nFilling the update notes under https://bodhi.fedoraproject.org/updates/new with unordered lists in markdown syntax (https://help.github.com/articles/markdown-basics/) does not work, neither in the preview nor after submitting, visit e.g. https://bodhi.fedoraproject.org/updates/phpMyAdmin-4.4.14-1.fc23 with Firefox 38 ESR from RHEL/CentOS 6. It seems to work properly with a text browser such as w3m (CSS issue?).\n\n", "before_files": [{"content": "# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,\n# USA.\n\n\"\"\" Fedora-flavored Markdown\n\nAuthor: Ralph Bean <[email protected]>\n\"\"\"\n\nimport markdown.inlinepatterns\nimport markdown.util\nimport pyramid.threadlocal\n\n\ndef user_url(name):\n request = pyramid.threadlocal.get_current_request()\n return request.route_url('user', name=name)\n\n\ndef bugzilla_url(idx):\n return \"https://bugzilla.redhat.com/show_bug.cgi?id=%s\" % idx\n\n\ndef inject():\n \"\"\" Hack out python-markdown to do the autolinking that we want. \"\"\"\n\n # First, make it so that bare links get automatically linkified.\n markdown.inlinepatterns.AUTOLINK_RE = '(%s)' % '|'.join([\n r'<(?:f|ht)tps?://[^>]*>',\n r'\\b(?:f|ht)tps?://[^)<>\\s]+[^.,)<>\\s]',\n r'\\bwww\\.[^)<>\\s]+[^.,)<>\\s]',\n r'[^(<\\s]+\\.(?:com|net|org)\\b',\n ])\n\n # Second, build some Pattern objects for @mentions, #bugs, etc...\n class MentionPattern(markdown.inlinepatterns.Pattern):\n def handleMatch(self, m):\n el = markdown.util.etree.Element(\"a\")\n name = markdown.util.AtomicString(m.group(2))\n el.set('href', user_url(name[1:]))\n el.text = name\n return el\n\n class BugzillaPattern(markdown.inlinepatterns.Pattern):\n def handleMatch(self, m):\n el = markdown.util.etree.Element(\"a\")\n idx = markdown.util.AtomicString(m.group(2))\n el.set('href', bugzilla_url(idx[1:]))\n el.text = idx\n return el\n\n MENTION_RE = r'(@\\w+)'\n BUGZILLA_RE = r'(#[0-9]{5,})'\n\n # Lastly, monkey-patch the build_inlinepatterns func to insert our patterns\n original_builder = markdown.build_inlinepatterns\n\n def extended_builder(md_instance, **kwargs):\n patterns = original_builder(md_instance, **kwargs)\n patterns['mention'] = MentionPattern(MENTION_RE, md_instance)\n patterns['bugzillas'] = BugzillaPattern(BUGZILLA_RE, md_instance)\n return patterns\n\n markdown.build_inlinepatterns = extended_builder\n", "path": "bodhi/ffmarkdown.py"}], "after_files": [{"content": "# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,\n# USA.\n\n\"\"\" Fedora-flavored Markdown\n\nAuthor: Ralph Bean <[email protected]>\n\"\"\"\n\nimport markdown.inlinepatterns\nimport markdown.postprocessors\nimport markdown.util\nimport pyramid.threadlocal\n\n\ndef user_url(name):\n request = pyramid.threadlocal.get_current_request()\n return request.route_url('user', name=name)\n\n\ndef bugzilla_url(idx):\n return \"https://bugzilla.redhat.com/show_bug.cgi?id=%s\" % idx\n\n\ndef inject():\n \"\"\" Hack out python-markdown to do the autolinking that we want. \"\"\"\n\n # First, make it so that bare links get automatically linkified.\n markdown.inlinepatterns.AUTOLINK_RE = '(%s)' % '|'.join([\n r'<(?:f|ht)tps?://[^>]*>',\n r'\\b(?:f|ht)tps?://[^)<>\\s]+[^.,)<>\\s]',\n r'\\bwww\\.[^)<>\\s]+[^.,)<>\\s]',\n r'[^(<\\s]+\\.(?:com|net|org)\\b',\n ])\n\n # Second, build some Pattern objects for @mentions, #bugs, etc...\n class MentionPattern(markdown.inlinepatterns.Pattern):\n def handleMatch(self, m):\n el = markdown.util.etree.Element(\"a\")\n name = markdown.util.AtomicString(m.group(2))\n el.set('href', user_url(name[1:]))\n el.text = name\n return el\n\n class BugzillaPattern(markdown.inlinepatterns.Pattern):\n def handleMatch(self, m):\n el = markdown.util.etree.Element(\"a\")\n idx = markdown.util.AtomicString(m.group(2))\n el.set('href', bugzilla_url(idx[1:]))\n el.text = idx\n return el\n\n MENTION_RE = r'(@\\w+)'\n BUGZILLA_RE = r'(#[0-9]{5,})'\n\n class SurroundProcessor(markdown.postprocessors.Postprocessor):\n def run(self, text):\n return \"<div class='markdown'>\" + text + \"</div>\"\n\n # Lastly, monkey-patch the build_inlinepatterns func to insert our patterns\n original_pattern_builder = markdown.build_inlinepatterns\n\n def extended_pattern_builder(md_instance, **kwargs):\n patterns = original_pattern_builder(md_instance, **kwargs)\n patterns['mention'] = MentionPattern(MENTION_RE, md_instance)\n patterns['bugzillas'] = BugzillaPattern(BUGZILLA_RE, md_instance)\n return patterns\n\n markdown.build_inlinepatterns = extended_pattern_builder\n\n original_postprocessor_builder = markdown.build_postprocessors\n\n def extended_postprocessor_builder(md_instance, **kwargs):\n processors = original_postprocessor_builder(md_instance, **kwargs)\n processors['surround'] = SurroundProcessor(md_instance)\n return processors\n\n markdown.build_postprocessors = extended_postprocessor_builder\n", "path": "bodhi/ffmarkdown.py"}]}
1,201
372
gh_patches_debug_177
rasdani/github-patches
git_diff
encode__starlette-455
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- py.typed missing in published artifacts I didn’t check for earlier versions, but at least 0.11.4 on PyPI does not include `py.typed`. I assume this is an oversight, given it is mentioned in `setup.py`? https://github.com/encode/starlette/blob/77b84a08c1e4de0db64a197b58ac363a26c51d4f/setup.py#L49 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 4 import os 5 import re 6 7 from setuptools import setup 8 9 10 def get_version(package): 11 """ 12 Return package version as listed in `__version__` in `init.py`. 13 """ 14 with open(os.path.join(package, "__init__.py")) as f: 15 return re.search("__version__ = ['\"]([^'\"]+)['\"]", f.read()).group(1) 16 17 18 def get_long_description(): 19 """ 20 Return the README. 21 """ 22 with open("README.md", encoding="utf8") as f: 23 return f.read() 24 25 26 def get_packages(package): 27 """ 28 Return root package and all sub-packages. 29 """ 30 return [ 31 dirpath 32 for dirpath, dirnames, filenames in os.walk(package) 33 if os.path.exists(os.path.join(dirpath, "__init__.py")) 34 ] 35 36 37 setup( 38 name="starlette", 39 python_requires=">=3.6", 40 version=get_version("starlette"), 41 url="https://github.com/encode/starlette", 42 license="BSD", 43 description="The little ASGI library that shines.", 44 long_description=get_long_description(), 45 long_description_content_type="text/markdown", 46 author="Tom Christie", 47 author_email="[email protected]", 48 packages=get_packages("starlette"), 49 package_data={"starlette": ["py.typed"]}, 50 data_files=[("", ["LICENSE.md"])], 51 extras_require={ 52 "full": [ 53 "aiofiles", 54 "asyncpg", 55 "graphene", 56 "itsdangerous", 57 "jinja2", 58 "python-multipart", 59 "pyyaml", 60 "requests", 61 "ujson", 62 ] 63 }, 64 classifiers=[ 65 "Development Status :: 3 - Alpha", 66 "Environment :: Web Environment", 67 "Intended Audience :: Developers", 68 "License :: OSI Approved :: BSD License", 69 "Operating System :: OS Independent", 70 "Topic :: Internet :: WWW/HTTP", 71 "Programming Language :: Python :: 3", 72 "Programming Language :: Python :: 3.6", 73 "Programming Language :: Python :: 3.7", 74 ], 75 ) 76 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -72,4 +72,5 @@ "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", ], + zip_safe=False, )
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -72,4 +72,5 @@\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n+ zip_safe=False,\n )\n", "issue": "py.typed missing in published artifacts\nI didn\u2019t check for earlier versions, but at least 0.11.4 on PyPI does not include `py.typed`. I assume this is an oversight, given it is mentioned in `setup.py`?\r\n\r\nhttps://github.com/encode/starlette/blob/77b84a08c1e4de0db64a197b58ac363a26c51d4f/setup.py#L49\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n with open(os.path.join(package, \"__init__.py\")) as f:\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", f.read()).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n with open(\"README.md\", encoding=\"utf8\") as f:\n return f.read()\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [\n dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, \"__init__.py\"))\n ]\n\n\nsetup(\n name=\"starlette\",\n python_requires=\">=3.6\",\n version=get_version(\"starlette\"),\n url=\"https://github.com/encode/starlette\",\n license=\"BSD\",\n description=\"The little ASGI library that shines.\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Tom Christie\",\n author_email=\"[email protected]\",\n packages=get_packages(\"starlette\"),\n package_data={\"starlette\": [\"py.typed\"]},\n data_files=[(\"\", [\"LICENSE.md\"])],\n extras_require={\n \"full\": [\n \"aiofiles\",\n \"asyncpg\",\n \"graphene\",\n \"itsdangerous\",\n \"jinja2\",\n \"python-multipart\",\n \"pyyaml\",\n \"requests\",\n \"ujson\",\n ]\n },\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n with open(os.path.join(package, \"__init__.py\")) as f:\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", f.read()).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n with open(\"README.md\", encoding=\"utf8\") as f:\n return f.read()\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [\n dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, \"__init__.py\"))\n ]\n\n\nsetup(\n name=\"starlette\",\n python_requires=\">=3.6\",\n version=get_version(\"starlette\"),\n url=\"https://github.com/encode/starlette\",\n license=\"BSD\",\n description=\"The little ASGI library that shines.\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Tom Christie\",\n author_email=\"[email protected]\",\n packages=get_packages(\"starlette\"),\n package_data={\"starlette\": [\"py.typed\"]},\n data_files=[(\"\", [\"LICENSE.md\"])],\n extras_require={\n \"full\": [\n \"aiofiles\",\n \"asyncpg\",\n \"graphene\",\n \"itsdangerous\",\n \"jinja2\",\n \"python-multipart\",\n \"pyyaml\",\n \"requests\",\n \"ujson\",\n ]\n },\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n zip_safe=False,\n)\n", "path": "setup.py"}]}
973
65
gh_patches_debug_1159
rasdani/github-patches
git_diff
nltk__nltk-1274
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Tox fails with "ERROR: Failure: ImportError (No module named 'six')" When I try to run the tests with Tox (on Ubuntu) from within a local clone of the repo, it manages to install the dependencies but blows up when trying to import things from within NLTK. I imagine I can work around this by figuring out how to manually run just the tests I care about, but it's inconvenient. I'm not sure whether I'm doing something dumb or whether the Tox setup is broken; if the former, the CONTRIBUTING docs should probably mention what needs to be done besides just running Tox; if the latter, it should probably be fixed. Here's the full output (had to pastebin it due to GitHub's post length limit): http://pastebin.com/ENuCLnv6 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `nltk/tokenize/api.py` Content: ``` 1 # Natural Language Toolkit: Tokenizer Interface 2 # 3 # Copyright (C) 2001-2015 NLTK Project 4 # Author: Edward Loper <[email protected]> 5 # Steven Bird <[email protected]> 6 # URL: <http://nltk.org/> 7 # For license information, see LICENSE.TXT 8 9 """ 10 Tokenizer Interface 11 """ 12 13 from abc import ABCMeta, abstractmethod 14 from six import add_metaclass 15 16 from nltk.internals import overridden 17 from nltk.tokenize.util import string_span_tokenize 18 19 @add_metaclass(ABCMeta) 20 class TokenizerI(object): 21 """ 22 A processing interface for tokenizing a string. 23 Subclasses must define ``tokenize()`` or ``tokenize_sents()`` (or both). 24 """ 25 @abstractmethod 26 def tokenize(self, s): 27 """ 28 Return a tokenized copy of *s*. 29 30 :rtype: list of str 31 """ 32 if overridden(self.tokenize_sents): 33 return self.tokenize_sents([s])[0] 34 35 def span_tokenize(self, s): 36 """ 37 Identify the tokens using integer offsets ``(start_i, end_i)``, 38 where ``s[start_i:end_i]`` is the corresponding token. 39 40 :rtype: iter(tuple(int, int)) 41 """ 42 raise NotImplementedError() 43 44 def tokenize_sents(self, strings): 45 """ 46 Apply ``self.tokenize()`` to each element of ``strings``. I.e.: 47 48 return [self.tokenize(s) for s in strings] 49 50 :rtype: list(list(str)) 51 """ 52 return [self.tokenize(s) for s in strings] 53 54 def span_tokenize_sents(self, strings): 55 """ 56 Apply ``self.span_tokenize()`` to each element of ``strings``. I.e.: 57 58 return [self.span_tokenize(s) for s in strings] 59 60 :rtype: iter(list(tuple(int, int))) 61 """ 62 for s in strings: 63 yield list(self.span_tokenize(s)) 64 65 66 class StringTokenizer(TokenizerI): 67 """A tokenizer that divides a string into substrings by splitting 68 on the specified string (defined in subclasses). 69 """ 70 71 def tokenize(self, s): 72 return s.split(self._string) 73 74 def span_tokenize(self, s): 75 for span in string_span_tokenize(s, self._string): 76 yield span 77 78 79 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/nltk/tokenize/api.py b/nltk/tokenize/api.py --- a/nltk/tokenize/api.py +++ b/nltk/tokenize/api.py @@ -11,7 +11,7 @@ """ from abc import ABCMeta, abstractmethod -from six import add_metaclass +from nltk.six import add_metaclass from nltk.internals import overridden from nltk.tokenize.util import string_span_tokenize
{"golden_diff": "diff --git a/nltk/tokenize/api.py b/nltk/tokenize/api.py\n--- a/nltk/tokenize/api.py\n+++ b/nltk/tokenize/api.py\n@@ -11,7 +11,7 @@\n \"\"\"\n \n from abc import ABCMeta, abstractmethod\n-from six import add_metaclass\n+from nltk.six import add_metaclass\n \n from nltk.internals import overridden\n from nltk.tokenize.util import string_span_tokenize\n", "issue": "Tox fails with \"ERROR: Failure: ImportError (No module named 'six')\"\nWhen I try to run the tests with Tox (on Ubuntu) from within a local clone of the repo, it manages to install the dependencies but blows up when trying to import things from within NLTK.\n\nI imagine I can work around this by figuring out how to manually run just the tests I care about, but it's inconvenient.\n\nI'm not sure whether I'm doing something dumb or whether the Tox setup is broken; if the former, the CONTRIBUTING docs should probably mention what needs to be done besides just running Tox; if the latter, it should probably be fixed.\n\nHere's the full output (had to pastebin it due to GitHub's post length limit):\n\nhttp://pastebin.com/ENuCLnv6\n\n", "before_files": [{"content": "# Natural Language Toolkit: Tokenizer Interface\n#\n# Copyright (C) 2001-2015 NLTK Project\n# Author: Edward Loper <[email protected]>\n# Steven Bird <[email protected]>\n# URL: <http://nltk.org/>\n# For license information, see LICENSE.TXT\n\n\"\"\"\nTokenizer Interface\n\"\"\"\n\nfrom abc import ABCMeta, abstractmethod\nfrom six import add_metaclass\n\nfrom nltk.internals import overridden\nfrom nltk.tokenize.util import string_span_tokenize\n\n@add_metaclass(ABCMeta)\nclass TokenizerI(object):\n \"\"\"\n A processing interface for tokenizing a string.\n Subclasses must define ``tokenize()`` or ``tokenize_sents()`` (or both).\n \"\"\"\n @abstractmethod\n def tokenize(self, s):\n \"\"\"\n Return a tokenized copy of *s*.\n\n :rtype: list of str\n \"\"\"\n if overridden(self.tokenize_sents):\n return self.tokenize_sents([s])[0]\n\n def span_tokenize(self, s):\n \"\"\"\n Identify the tokens using integer offsets ``(start_i, end_i)``,\n where ``s[start_i:end_i]`` is the corresponding token.\n\n :rtype: iter(tuple(int, int))\n \"\"\"\n raise NotImplementedError()\n\n def tokenize_sents(self, strings):\n \"\"\"\n Apply ``self.tokenize()`` to each element of ``strings``. I.e.:\n\n return [self.tokenize(s) for s in strings]\n\n :rtype: list(list(str))\n \"\"\"\n return [self.tokenize(s) for s in strings]\n\n def span_tokenize_sents(self, strings):\n \"\"\"\n Apply ``self.span_tokenize()`` to each element of ``strings``. I.e.:\n\n return [self.span_tokenize(s) for s in strings]\n\n :rtype: iter(list(tuple(int, int)))\n \"\"\"\n for s in strings:\n yield list(self.span_tokenize(s))\n\n\nclass StringTokenizer(TokenizerI):\n \"\"\"A tokenizer that divides a string into substrings by splitting\n on the specified string (defined in subclasses).\n \"\"\"\n\n def tokenize(self, s):\n return s.split(self._string)\n\n def span_tokenize(self, s):\n for span in string_span_tokenize(s, self._string):\n yield span\n\n\n", "path": "nltk/tokenize/api.py"}], "after_files": [{"content": "# Natural Language Toolkit: Tokenizer Interface\n#\n# Copyright (C) 2001-2015 NLTK Project\n# Author: Edward Loper <[email protected]>\n# Steven Bird <[email protected]>\n# URL: <http://nltk.org/>\n# For license information, see LICENSE.TXT\n\n\"\"\"\nTokenizer Interface\n\"\"\"\n\nfrom abc import ABCMeta, abstractmethod\nfrom nltk.six import add_metaclass\n\nfrom nltk.internals import overridden\nfrom nltk.tokenize.util import string_span_tokenize\n\n@add_metaclass(ABCMeta)\nclass TokenizerI(object):\n \"\"\"\n A processing interface for tokenizing a string.\n Subclasses must define ``tokenize()`` or ``tokenize_sents()`` (or both).\n \"\"\"\n @abstractmethod\n def tokenize(self, s):\n \"\"\"\n Return a tokenized copy of *s*.\n\n :rtype: list of str\n \"\"\"\n if overridden(self.tokenize_sents):\n return self.tokenize_sents([s])[0]\n\n def span_tokenize(self, s):\n \"\"\"\n Identify the tokens using integer offsets ``(start_i, end_i)``,\n where ``s[start_i:end_i]`` is the corresponding token.\n\n :rtype: iter(tuple(int, int))\n \"\"\"\n raise NotImplementedError()\n\n def tokenize_sents(self, strings):\n \"\"\"\n Apply ``self.tokenize()`` to each element of ``strings``. I.e.:\n\n return [self.tokenize(s) for s in strings]\n\n :rtype: list(list(str))\n \"\"\"\n return [self.tokenize(s) for s in strings]\n\n def span_tokenize_sents(self, strings):\n \"\"\"\n Apply ``self.span_tokenize()`` to each element of ``strings``. I.e.:\n\n return [self.span_tokenize(s) for s in strings]\n\n :rtype: iter(list(tuple(int, int)))\n \"\"\"\n for s in strings:\n yield list(self.span_tokenize(s))\n\n\nclass StringTokenizer(TokenizerI):\n \"\"\"A tokenizer that divides a string into substrings by splitting\n on the specified string (defined in subclasses).\n \"\"\"\n\n def tokenize(self, s):\n return s.split(self._string)\n\n def span_tokenize(self, s):\n for span in string_span_tokenize(s, self._string):\n yield span\n\n\n", "path": "nltk/tokenize/api.py"}]}
1,076
92
gh_patches_debug_26549
rasdani/github-patches
git_diff
pyjanitor-devs__pyjanitor-728
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add the ability to test and sort timestamps to be monotonic in a pandas data frame # Brief Description Following up on #703, this issue seeks to introduce the ability to sort the timestamps in a pandas data frame monotonically I would like to propose... # Example API ```python def _test_for_monotonicity( df: pd.DataFrame, column_name: str = None, direction: str = 'increasing' ) -> bool: """ Tests input data frame for monotonicity. Check if the data is monotonically increasing or decreasing. Direction is dependent on user input. Defaults to increasing :param df: data frame to be tested for monotonicity :param column_name: needs to be specified if and only if the date time is not in index. Defaults to None. :param direction: specifies the direction in which monotonicity is being tested for. Defaults to 'increasing' :return: single boolean flag indicating whether the test has passed or not """ def sort_monotonically( df: pd.DataFrame, column_name: str = None, direction: str ='increasing' ) -> pd.DataFrame: """ Sorts data frame monotonically. It assumes the data frame has an index of type pd.DateTimeIndex when index is datetime. If datetime is in a column, then the column is expected to be of type pd.Timestamp :param df: data frame to sort monotonically :param column_name: needs to be specified if and only if the date time is not in index. Defaults to None :param direction: specifies the direction in which monotonicity is being tested for. Defaults to 'increasing' :return: data frame with its index sorted """ # more examples below # ... ``` [ENH] Adding ability to sort timestamps monotonically Closes #707 # PR Description Please describe the changes proposed in the pull request: - Adding a function that allows monotonic sorting of timestamps in data frame **This PR resolves #707 ** # PR Checklist Please ensure that you have done the following: 1. [x] PR in from a fork off your branch. Do not PR from `<your_username>`:`dev`, but rather from `<your_username>`:`<feature-branch_name>`. 2. [x] If you're not on the contributors list, add yourself to `AUTHORS.rst`. 3. [x] Add a line to `CHANGELOG.rst` under the latest version header (i.e. the one that is "on deck") describing the contribution. - Do use some discretion here; if there are multiple PRs that are related, keep them in a single line. ## Quick Check To do a very quick check that everything is correct, follow these steps below: - [x] Run the command `make check` from pyjanitor's top-level directory. This will automatically run: - black formatting - flake8 checking - running the test suite - docs build Once done, please check off the check-box above. If `make check` does not work for you, you can execute the commands listed in the Makefile individually. ## Code Changes <!-- If you have not made code changes, please feel free to delete this section. --> If you are adding code changes, please ensure the following: - [x] Ensure that you have added tests. - [x] Run all tests (`$ pytest .`) locally on your machine. - [x] Check to ensure that test coverage covers the lines of code that you have added. - [x] Ensure that all tests pass. ## Documentation Changes If you are adding documentation changes, please ensure the following: - [x] Build the docs locally. - [ ] View the docs to check that it renders correctly. # Relevant Reviewers - @ericmjl - @samukweku Please tag maintainers to review. - @ericmjl --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `janitor/timeseries.py` Content: ``` 1 """ 2 Time series-specific data testing and cleaning functions. 3 """ 4 5 import pandas as pd 6 import pandas_flavor as pf 7 from janitor import check 8 9 10 @pf.register_dataframe_method 11 def fill_missing_timestamps( 12 df: pd.DataFrame, 13 frequency: str, 14 first_time_stamp: pd.Timestamp = None, 15 last_time_stamp: pd.Timestamp = None, 16 ) -> pd.DataFrame: 17 """ 18 Fill dataframe with missing timestamps based on a defined frequency. 19 20 If timestamps are missing, 21 this function will reindex the dataframe. 22 If timestamps are not missing, 23 then the function will return the dataframe unmodified. 24 Example usage: 25 .. code-block:: python 26 27 df = ( 28 pd.DataFrame(...) 29 .fill_missing_timestamps(frequency="1H") 30 ) 31 32 :param df: Dataframe which needs to be tested for missing timestamps 33 :param frequency: frequency i.e. sampling frequency of the data. 34 Acceptable frequency strings are available 35 `here <https://pandas.pydata.org/pandas-docs/stable/>`_ 36 Check offset aliases under time series in user guide 37 :param first_time_stamp: timestamp expected to start from 38 Defaults to None. 39 If no input is provided assumes the minimum value in time_series 40 :param last_time_stamp: timestamp expected to end with. 41 Defaults to None. 42 If no input is provided, assumes the maximum value in time_series 43 :returns: dataframe that has a complete set of contiguous datetimes. 44 """ 45 # Check all the inputs are the correct data type 46 check("frequency", frequency, [str]) 47 check("first_time_stamp", first_time_stamp, [pd.Timestamp, type(None)]) 48 check("last_time_stamp", last_time_stamp, [pd.Timestamp, type(None)]) 49 50 if first_time_stamp is None: 51 first_time_stamp = df.index.min() 52 if last_time_stamp is None: 53 last_time_stamp = df.index.max() 54 55 # Generate expected timestamps 56 expected_timestamps = pd.date_range( 57 start=first_time_stamp, end=last_time_stamp, freq=frequency 58 ) 59 60 return df.reindex(expected_timestamps) 61 62 63 def _get_missing_timestamps( 64 df: pd.DataFrame, 65 frequency: str, 66 first_time_stamp: pd.Timestamp = None, 67 last_time_stamp: pd.Timestamp = None, 68 ) -> pd.DataFrame: 69 """ 70 Return the timestamps that are missing in a dataframe. 71 72 This function takes in a dataframe, 73 and checks its index against a dataframe 74 that contains the expected timestamps. 75 Here, we assume that the expected timestamps 76 are going to be of a larger size 77 than the timestamps available in the input dataframe ``df``. 78 79 If there are any missing timestamps in the input dataframe, 80 this function will return those missing timestamps 81 from the expected dataframe. 82 """ 83 expected_df = df.fill_missing_timestamps( 84 frequency, first_time_stamp, last_time_stamp 85 ) 86 87 missing_timestamps = expected_df.index.difference(df.index) 88 89 return expected_df.loc[missing_timestamps] 90 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/janitor/timeseries.py b/janitor/timeseries.py --- a/janitor/timeseries.py +++ b/janitor/timeseries.py @@ -87,3 +87,58 @@ missing_timestamps = expected_df.index.difference(df.index) return expected_df.loc[missing_timestamps] + + [email protected]_dataframe_method +def sort_timestamps_monotonically( + df: pd.DataFrame, direction: str = "increasing", strict: bool = False +) -> pd.DataFrame: + """ + Sort dataframe such that index is monotonic. + + If timestamps are monotonic, + this function will return the dataframe unmodified. + If timestamps are not monotonic, + then the function will sort the dataframe. + + Example usage: + + .. code-block:: python + + df = ( + pd.DataFrame(...) + .sort_timestamps_monotonically(direction='increasing') + ) + + :param df: Dataframe which needs to be tested for monotonicity + :param direction: type of monotonicity desired. + Acceptable arguments are: + 1. increasing + 2. decreasing + :param strict: flag to enable/disable strict monotonicity. + If set to True, + will remove duplicates in the index, + by retaining first occurrence of value in index. + If set to False, + will not test for duplicates in the index. + Defaults to False. + :returns: Dataframe that has monotonically increasing + (or decreasing) timestamps. + """ + # Check all the inputs are the correct data type + check("df", df, [pd.DataFrame]) + check("direction", direction, [str]) + check("strict", strict, [bool]) + + # Remove duplicates if requested + if strict: + df = df[~df.index.duplicated(keep="first")] + + # Sort timestamps + if direction == "increasing": + df = df.sort_index() + else: + df = df.sort_index(ascending=False) + + # Return the dataframe + return df
{"golden_diff": "diff --git a/janitor/timeseries.py b/janitor/timeseries.py\n--- a/janitor/timeseries.py\n+++ b/janitor/timeseries.py\n@@ -87,3 +87,58 @@\n missing_timestamps = expected_df.index.difference(df.index)\n \n return expected_df.loc[missing_timestamps]\n+\n+\[email protected]_dataframe_method\n+def sort_timestamps_monotonically(\n+ df: pd.DataFrame, direction: str = \"increasing\", strict: bool = False\n+) -> pd.DataFrame:\n+ \"\"\"\n+ Sort dataframe such that index is monotonic.\n+\n+ If timestamps are monotonic,\n+ this function will return the dataframe unmodified.\n+ If timestamps are not monotonic,\n+ then the function will sort the dataframe.\n+\n+ Example usage:\n+\n+ .. code-block:: python\n+\n+ df = (\n+ pd.DataFrame(...)\n+ .sort_timestamps_monotonically(direction='increasing')\n+ )\n+\n+ :param df: Dataframe which needs to be tested for monotonicity\n+ :param direction: type of monotonicity desired.\n+ Acceptable arguments are:\n+ 1. increasing\n+ 2. decreasing\n+ :param strict: flag to enable/disable strict monotonicity.\n+ If set to True,\n+ will remove duplicates in the index,\n+ by retaining first occurrence of value in index.\n+ If set to False,\n+ will not test for duplicates in the index.\n+ Defaults to False.\n+ :returns: Dataframe that has monotonically increasing\n+ (or decreasing) timestamps.\n+ \"\"\"\n+ # Check all the inputs are the correct data type\n+ check(\"df\", df, [pd.DataFrame])\n+ check(\"direction\", direction, [str])\n+ check(\"strict\", strict, [bool])\n+\n+ # Remove duplicates if requested\n+ if strict:\n+ df = df[~df.index.duplicated(keep=\"first\")]\n+\n+ # Sort timestamps\n+ if direction == \"increasing\":\n+ df = df.sort_index()\n+ else:\n+ df = df.sort_index(ascending=False)\n+\n+ # Return the dataframe\n+ return df\n", "issue": "Add the ability to test and sort timestamps to be monotonic in a pandas data frame\n# Brief Description\r\n\r\nFollowing up on #703, this issue seeks to introduce the ability to sort the timestamps in a pandas data frame monotonically\r\n\r\nI would like to propose...\r\n\r\n# Example API\r\n```python\r\ndef _test_for_monotonicity(\r\n df: pd.DataFrame,\r\n column_name: str = None,\r\n direction: str = 'increasing'\r\n) -> bool:\r\n \"\"\"\r\n Tests input data frame for monotonicity.\r\n\r\n Check if the data is monotonically increasing or decreasing.\r\n Direction is dependent on user input.\r\n Defaults to increasing\r\n\r\n :param df: data frame to be tested for monotonicity\r\n :param column_name: needs to be specified if and only if the date time is not in index.\r\n Defaults to None.\r\n :param direction: specifies the direction in which monotonicity is being tested for.\r\n Defaults to 'increasing'\r\n :return: single boolean flag indicating whether the test has passed or not\r\n \"\"\"\r\n\r\ndef sort_monotonically(\r\n df: pd.DataFrame,\r\n column_name: str = None,\r\n direction: str ='increasing'\r\n) -> pd.DataFrame:\r\n \"\"\"\r\n Sorts data frame monotonically.\r\n It assumes the data frame has an index of type pd.DateTimeIndex when index is datetime.\r\n If datetime is in a column, then the column is expected to be of type pd.Timestamp\r\n\r\n :param df: data frame to sort monotonically\r\n :param column_name: needs to be specified if and only if the date time is not in index.\r\n Defaults to None\r\n :param direction: specifies the direction in which monotonicity is being tested for.\r\n Defaults to 'increasing'\r\n :return: data frame with its index sorted\r\n \"\"\"\r\n\r\n\r\n# more examples below\r\n# ...\r\n```\r\n\n[ENH] Adding ability to sort timestamps monotonically\nCloses #707 \r\n\r\n# PR Description\r\n\r\nPlease describe the changes proposed in the pull request:\r\n\r\n- Adding a function that allows monotonic sorting of timestamps in data frame \r\n\r\n**This PR resolves #707 **\r\n\r\n# PR Checklist\r\nPlease ensure that you have done the following:\r\n\r\n1. [x] PR in from a fork off your branch. Do not PR from `<your_username>`:`dev`, but rather from `<your_username>`:`<feature-branch_name>`.\r\n2. [x] If you're not on the contributors list, add yourself to `AUTHORS.rst`.\r\n3. [x] Add a line to `CHANGELOG.rst` under the latest version header (i.e. the one that is \"on deck\") describing the contribution.\r\n - Do use some discretion here; if there are multiple PRs that are related, keep them in a single line.\r\n\r\n## Quick Check\r\n\r\nTo do a very quick check that everything is correct, follow these steps below:\r\n\r\n- [x] Run the command `make check` from pyjanitor's top-level directory. This will automatically run:\r\n - black formatting\r\n - flake8 checking\r\n - running the test suite\r\n - docs build\r\n\r\nOnce done, please check off the check-box above.\r\n\r\nIf `make check` does not work for you, you can execute the commands listed in the Makefile individually.\r\n\r\n## Code Changes\r\n\r\n<!-- If you have not made code changes, please feel free to delete this section. -->\r\n\r\nIf you are adding code changes, please ensure the following:\r\n\r\n- [x] Ensure that you have added tests.\r\n- [x] Run all tests (`$ pytest .`) locally on your machine.\r\n - [x] Check to ensure that test coverage covers the lines of code that you have added.\r\n - [x] Ensure that all tests pass.\r\n\r\n## Documentation Changes\r\nIf you are adding documentation changes, please ensure the following:\r\n\r\n- [x] Build the docs locally.\r\n- [ ] View the docs to check that it renders correctly.\r\n\r\n# Relevant Reviewers\r\n- @ericmjl \r\n- @samukweku \r\n\r\nPlease tag maintainers to review.\r\n\r\n- @ericmjl\r\n\n", "before_files": [{"content": "\"\"\"\nTime series-specific data testing and cleaning functions.\n\"\"\"\n\nimport pandas as pd\nimport pandas_flavor as pf\nfrom janitor import check\n\n\[email protected]_dataframe_method\ndef fill_missing_timestamps(\n df: pd.DataFrame,\n frequency: str,\n first_time_stamp: pd.Timestamp = None,\n last_time_stamp: pd.Timestamp = None,\n) -> pd.DataFrame:\n \"\"\"\n Fill dataframe with missing timestamps based on a defined frequency.\n\n If timestamps are missing,\n this function will reindex the dataframe.\n If timestamps are not missing,\n then the function will return the dataframe unmodified.\n Example usage:\n .. code-block:: python\n\n df = (\n pd.DataFrame(...)\n .fill_missing_timestamps(frequency=\"1H\")\n )\n\n :param df: Dataframe which needs to be tested for missing timestamps\n :param frequency: frequency i.e. sampling frequency of the data.\n Acceptable frequency strings are available\n `here <https://pandas.pydata.org/pandas-docs/stable/>`_\n Check offset aliases under time series in user guide\n :param first_time_stamp: timestamp expected to start from\n Defaults to None.\n If no input is provided assumes the minimum value in time_series\n :param last_time_stamp: timestamp expected to end with.\n Defaults to None.\n If no input is provided, assumes the maximum value in time_series\n :returns: dataframe that has a complete set of contiguous datetimes.\n \"\"\"\n # Check all the inputs are the correct data type\n check(\"frequency\", frequency, [str])\n check(\"first_time_stamp\", first_time_stamp, [pd.Timestamp, type(None)])\n check(\"last_time_stamp\", last_time_stamp, [pd.Timestamp, type(None)])\n\n if first_time_stamp is None:\n first_time_stamp = df.index.min()\n if last_time_stamp is None:\n last_time_stamp = df.index.max()\n\n # Generate expected timestamps\n expected_timestamps = pd.date_range(\n start=first_time_stamp, end=last_time_stamp, freq=frequency\n )\n\n return df.reindex(expected_timestamps)\n\n\ndef _get_missing_timestamps(\n df: pd.DataFrame,\n frequency: str,\n first_time_stamp: pd.Timestamp = None,\n last_time_stamp: pd.Timestamp = None,\n) -> pd.DataFrame:\n \"\"\"\n Return the timestamps that are missing in a dataframe.\n\n This function takes in a dataframe,\n and checks its index against a dataframe\n that contains the expected timestamps.\n Here, we assume that the expected timestamps\n are going to be of a larger size\n than the timestamps available in the input dataframe ``df``.\n\n If there are any missing timestamps in the input dataframe,\n this function will return those missing timestamps\n from the expected dataframe.\n \"\"\"\n expected_df = df.fill_missing_timestamps(\n frequency, first_time_stamp, last_time_stamp\n )\n\n missing_timestamps = expected_df.index.difference(df.index)\n\n return expected_df.loc[missing_timestamps]\n", "path": "janitor/timeseries.py"}], "after_files": [{"content": "\"\"\"\nTime series-specific data testing and cleaning functions.\n\"\"\"\n\nimport pandas as pd\nimport pandas_flavor as pf\nfrom janitor import check\n\n\[email protected]_dataframe_method\ndef fill_missing_timestamps(\n df: pd.DataFrame,\n frequency: str,\n first_time_stamp: pd.Timestamp = None,\n last_time_stamp: pd.Timestamp = None,\n) -> pd.DataFrame:\n \"\"\"\n Fill dataframe with missing timestamps based on a defined frequency.\n\n If timestamps are missing,\n this function will reindex the dataframe.\n If timestamps are not missing,\n then the function will return the dataframe unmodified.\n Example usage:\n .. code-block:: python\n\n df = (\n pd.DataFrame(...)\n .fill_missing_timestamps(frequency=\"1H\")\n )\n\n :param df: Dataframe which needs to be tested for missing timestamps\n :param frequency: frequency i.e. sampling frequency of the data.\n Acceptable frequency strings are available\n `here <https://pandas.pydata.org/pandas-docs/stable/>`_\n Check offset aliases under time series in user guide\n :param first_time_stamp: timestamp expected to start from\n Defaults to None.\n If no input is provided assumes the minimum value in time_series\n :param last_time_stamp: timestamp expected to end with.\n Defaults to None.\n If no input is provided, assumes the maximum value in time_series\n :returns: dataframe that has a complete set of contiguous datetimes.\n \"\"\"\n # Check all the inputs are the correct data type\n check(\"frequency\", frequency, [str])\n check(\"first_time_stamp\", first_time_stamp, [pd.Timestamp, type(None)])\n check(\"last_time_stamp\", last_time_stamp, [pd.Timestamp, type(None)])\n\n if first_time_stamp is None:\n first_time_stamp = df.index.min()\n if last_time_stamp is None:\n last_time_stamp = df.index.max()\n\n # Generate expected timestamps\n expected_timestamps = pd.date_range(\n start=first_time_stamp, end=last_time_stamp, freq=frequency\n )\n\n return df.reindex(expected_timestamps)\n\n\ndef _get_missing_timestamps(\n df: pd.DataFrame,\n frequency: str,\n first_time_stamp: pd.Timestamp = None,\n last_time_stamp: pd.Timestamp = None,\n) -> pd.DataFrame:\n \"\"\"\n Return the timestamps that are missing in a dataframe.\n\n This function takes in a dataframe,\n and checks its index against a dataframe\n that contains the expected timestamps.\n Here, we assume that the expected timestamps\n are going to be of a larger size\n than the timestamps available in the input dataframe ``df``.\n\n If there are any missing timestamps in the input dataframe,\n this function will return those missing timestamps\n from the expected dataframe.\n \"\"\"\n expected_df = df.fill_missing_timestamps(\n frequency, first_time_stamp, last_time_stamp\n )\n\n missing_timestamps = expected_df.index.difference(df.index)\n\n return expected_df.loc[missing_timestamps]\n\n\[email protected]_dataframe_method\ndef sort_timestamps_monotonically(\n df: pd.DataFrame, direction: str = \"increasing\", strict: bool = False\n) -> pd.DataFrame:\n \"\"\"\n Sort dataframe such that index is monotonic.\n\n If timestamps are monotonic,\n this function will return the dataframe unmodified.\n If timestamps are not monotonic,\n then the function will sort the dataframe.\n\n Example usage:\n\n .. code-block:: python\n\n df = (\n pd.DataFrame(...)\n .sort_timestamps_monotonically(direction='increasing')\n )\n\n :param df: Dataframe which needs to be tested for monotonicity\n :param direction: type of monotonicity desired.\n Acceptable arguments are:\n 1. increasing\n 2. decreasing\n :param strict: flag to enable/disable strict monotonicity.\n If set to True,\n will remove duplicates in the index,\n by retaining first occurrence of value in index.\n If set to False,\n will not test for duplicates in the index.\n Defaults to False.\n :returns: Dataframe that has monotonically increasing\n (or decreasing) timestamps.\n \"\"\"\n # Check all the inputs are the correct data type\n check(\"df\", df, [pd.DataFrame])\n check(\"direction\", direction, [str])\n check(\"strict\", strict, [bool])\n\n # Remove duplicates if requested\n if strict:\n df = df[~df.index.duplicated(keep=\"first\")]\n\n # Sort timestamps\n if direction == \"increasing\":\n df = df.sort_index()\n else:\n df = df.sort_index(ascending=False)\n\n # Return the dataframe\n return df\n", "path": "janitor/timeseries.py"}]}
1,936
490
gh_patches_debug_18844
rasdani/github-patches
git_diff
Gallopsled__pwntools-2345
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- pwn constgrep a throws an exception Ugh, this seems wrong: ``` root@pwndbg:~# pwn constgrep a Traceback (most recent call last): File "/usr/local/bin/pwn", line 8, in <module> sys.exit(main()) File "/usr/local/lib/python3.10/dist-packages/pwnlib/commandline/main.py", line 58, in main commands[args.command](args) File "/usr/local/lib/python3.10/dist-packages/pwnlib/commandline/constgrep.py", line 110, in main for _, k in sorted(out): TypeError: '<' not supported between instances of 'Constant' and 'type' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pwnlib/commandline/constgrep.py` Content: ``` 1 from __future__ import absolute_import 2 from __future__ import division 3 4 import argparse 5 import functools 6 import re 7 8 import pwnlib.args 9 pwnlib.args.free_form = False 10 11 from pwn import * 12 from pwnlib.commandline import common 13 14 p = common.parser_commands.add_parser( 15 'constgrep', 16 help = "Looking up constants from header files.\n\nExample: constgrep -c freebsd -m ^PROT_ '3 + 4'", 17 description = "Looking up constants from header files.\n\nExample: constgrep -c freebsd -m ^PROT_ '3 + 4'", 18 formatter_class = argparse.RawDescriptionHelpFormatter, 19 ) 20 21 p.add_argument( 22 '-e', '--exact', 23 action='store_true', 24 help='Do an exact match for a constant instead of searching for a regex', 25 ) 26 27 p.add_argument( 28 'regex', 29 help='The regex matching constant you want to find', 30 ) 31 32 p.add_argument( 33 'constant', 34 nargs = '?', 35 default = None, 36 type = safeeval.expr, 37 help = 'The constant to find', 38 ) 39 40 p.add_argument( 41 '-i', '--case-insensitive', 42 action = 'store_true', 43 help = 'Search case insensitive', 44 ) 45 46 p.add_argument( 47 '-m', '--mask-mode', 48 action = 'store_true', 49 help = 'Instead of searching for a specific constant value, search for values not containing strictly less bits that the given value.', 50 ) 51 52 p.add_argument( 53 '-c', '--context', 54 metavar = 'arch_or_os', 55 action = 'append', 56 type = common.context_arg, 57 choices = common.choices, 58 help = 'The os/architecture/endianness/bits the shellcode will run in (default: linux/i386), choose from: %s' % common.choices, 59 ) 60 61 def main(args): 62 if args.exact: 63 # This is the simple case 64 print(cpp(args.regex).strip()) 65 else: 66 # New we search in the right module. 67 # But first: We find the right module 68 if context.os == 'freebsd': 69 mod = constants.freebsd 70 else: 71 mod = getattr(getattr(constants, context.os), context.arch) 72 73 # Compile the given regex, for optimized lookup 74 if args.case_insensitive: 75 matcher = re.compile(args.regex, re.IGNORECASE) 76 else: 77 matcher = re.compile(args.regex) 78 79 # The found matching constants and the length of the longest string 80 out = [] 81 maxlen = 0 82 83 constant = args.constant 84 85 for k in dir(mod): 86 # No python stuff 87 if k.endswith('__') and k.startswith('__'): 88 continue 89 90 # Run the regex 91 if not matcher.search(k): 92 continue 93 94 # Check the constant 95 if constant is not None: 96 val = getattr(mod, k) 97 if args.mask_mode: 98 if constant & val != val: 99 continue 100 else: 101 if constant != val: 102 continue 103 104 # Append it 105 out.append((getattr(mod, k), k)) 106 maxlen = max(len(k), maxlen) 107 108 # Output all matching constants 109 for _, k in sorted(out): 110 print('#define %s %s' % (k.ljust(maxlen), cpp(k).strip())) 111 112 # If we are in match_mode, then try to find a combination of 113 # constants that yield the exact given value 114 # We do not want to find combinations using the value 0. 115 if constant and args.mask_mode: 116 mask = constant 117 good = [] 118 out = [(v, k) for v, k in out if v != 0] 119 120 while mask and out: 121 cur = out.pop() 122 mask &= ~cur[0] 123 good.append(cur) 124 125 out = [(v, k) for v, k in out if mask & v == v] 126 127 if functools.reduce(lambda x, cur: x | cur[0], good, 0) == constant: 128 print('') 129 print('(%s) == %s' % (' | '.join(k for v, k in good), args.constant)) 130 131 if __name__ == '__main__': 132 pwnlib.commandline.common.main(__file__) 133 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pwnlib/commandline/constgrep.py b/pwnlib/commandline/constgrep.py --- a/pwnlib/commandline/constgrep.py +++ b/pwnlib/commandline/constgrep.py @@ -91,9 +91,13 @@ if not matcher.search(k): continue + # Check if the value has proper type + val = getattr(mod, k) + if not isinstance(val, pwnlib.constants.constant.Constant): + continue + # Check the constant if constant is not None: - val = getattr(mod, k) if args.mask_mode: if constant & val != val: continue @@ -102,7 +106,7 @@ continue # Append it - out.append((getattr(mod, k), k)) + out.append((val, k)) maxlen = max(len(k), maxlen) # Output all matching constants
{"golden_diff": "diff --git a/pwnlib/commandline/constgrep.py b/pwnlib/commandline/constgrep.py\n--- a/pwnlib/commandline/constgrep.py\n+++ b/pwnlib/commandline/constgrep.py\n@@ -91,9 +91,13 @@\n if not matcher.search(k):\n continue\n \n+ # Check if the value has proper type\n+ val = getattr(mod, k)\n+ if not isinstance(val, pwnlib.constants.constant.Constant):\n+ continue\n+\n # Check the constant\n if constant is not None:\n- val = getattr(mod, k)\n if args.mask_mode:\n if constant & val != val:\n continue\n@@ -102,7 +106,7 @@\n continue\n \n # Append it\n- out.append((getattr(mod, k), k))\n+ out.append((val, k))\n maxlen = max(len(k), maxlen)\n \n # Output all matching constants\n", "issue": "pwn constgrep a throws an exception\nUgh, this seems wrong:\r\n\r\n```\r\nroot@pwndbg:~# pwn constgrep a\r\nTraceback (most recent call last):\r\n File \"/usr/local/bin/pwn\", line 8, in <module>\r\n sys.exit(main())\r\n File \"/usr/local/lib/python3.10/dist-packages/pwnlib/commandline/main.py\", line 58, in main\r\n commands[args.command](args)\r\n File \"/usr/local/lib/python3.10/dist-packages/pwnlib/commandline/constgrep.py\", line 110, in main\r\n for _, k in sorted(out):\r\nTypeError: '<' not supported between instances of 'Constant' and 'type'\r\n```\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\n\nimport argparse\nimport functools\nimport re\n\nimport pwnlib.args\npwnlib.args.free_form = False\n\nfrom pwn import *\nfrom pwnlib.commandline import common\n\np = common.parser_commands.add_parser(\n 'constgrep',\n help = \"Looking up constants from header files.\\n\\nExample: constgrep -c freebsd -m ^PROT_ '3 + 4'\",\n description = \"Looking up constants from header files.\\n\\nExample: constgrep -c freebsd -m ^PROT_ '3 + 4'\",\n formatter_class = argparse.RawDescriptionHelpFormatter,\n)\n\np.add_argument(\n '-e', '--exact',\n action='store_true',\n help='Do an exact match for a constant instead of searching for a regex',\n)\n\np.add_argument(\n 'regex',\n help='The regex matching constant you want to find',\n)\n\np.add_argument(\n 'constant',\n nargs = '?',\n default = None,\n type = safeeval.expr,\n help = 'The constant to find',\n)\n\np.add_argument(\n '-i', '--case-insensitive',\n action = 'store_true',\n help = 'Search case insensitive',\n)\n\np.add_argument(\n '-m', '--mask-mode',\n action = 'store_true',\n help = 'Instead of searching for a specific constant value, search for values not containing strictly less bits that the given value.',\n)\n\np.add_argument(\n '-c', '--context',\n metavar = 'arch_or_os',\n action = 'append',\n type = common.context_arg,\n choices = common.choices,\n help = 'The os/architecture/endianness/bits the shellcode will run in (default: linux/i386), choose from: %s' % common.choices,\n)\n\ndef main(args):\n if args.exact:\n # This is the simple case\n print(cpp(args.regex).strip())\n else:\n # New we search in the right module.\n # But first: We find the right module\n if context.os == 'freebsd':\n mod = constants.freebsd\n else:\n mod = getattr(getattr(constants, context.os), context.arch)\n\n # Compile the given regex, for optimized lookup\n if args.case_insensitive:\n matcher = re.compile(args.regex, re.IGNORECASE)\n else:\n matcher = re.compile(args.regex)\n\n # The found matching constants and the length of the longest string\n out = []\n maxlen = 0\n\n constant = args.constant\n\n for k in dir(mod):\n # No python stuff\n if k.endswith('__') and k.startswith('__'):\n continue\n\n # Run the regex\n if not matcher.search(k):\n continue\n\n # Check the constant\n if constant is not None:\n val = getattr(mod, k)\n if args.mask_mode:\n if constant & val != val:\n continue\n else:\n if constant != val:\n continue\n\n # Append it\n out.append((getattr(mod, k), k))\n maxlen = max(len(k), maxlen)\n\n # Output all matching constants\n for _, k in sorted(out):\n print('#define %s %s' % (k.ljust(maxlen), cpp(k).strip()))\n\n # If we are in match_mode, then try to find a combination of\n # constants that yield the exact given value\n # We do not want to find combinations using the value 0.\n if constant and args.mask_mode:\n mask = constant\n good = []\n out = [(v, k) for v, k in out if v != 0]\n\n while mask and out:\n cur = out.pop()\n mask &= ~cur[0]\n good.append(cur)\n\n out = [(v, k) for v, k in out if mask & v == v]\n\n if functools.reduce(lambda x, cur: x | cur[0], good, 0) == constant:\n print('')\n print('(%s) == %s' % (' | '.join(k for v, k in good), args.constant))\n\nif __name__ == '__main__':\n pwnlib.commandline.common.main(__file__)\n", "path": "pwnlib/commandline/constgrep.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\n\nimport argparse\nimport functools\nimport re\n\nimport pwnlib.args\npwnlib.args.free_form = False\n\nfrom pwn import *\nfrom pwnlib.commandline import common\n\np = common.parser_commands.add_parser(\n 'constgrep',\n help = \"Looking up constants from header files.\\n\\nExample: constgrep -c freebsd -m ^PROT_ '3 + 4'\",\n description = \"Looking up constants from header files.\\n\\nExample: constgrep -c freebsd -m ^PROT_ '3 + 4'\",\n formatter_class = argparse.RawDescriptionHelpFormatter,\n)\n\np.add_argument(\n '-e', '--exact',\n action='store_true',\n help='Do an exact match for a constant instead of searching for a regex',\n)\n\np.add_argument(\n 'regex',\n help='The regex matching constant you want to find',\n)\n\np.add_argument(\n 'constant',\n nargs = '?',\n default = None,\n type = safeeval.expr,\n help = 'The constant to find',\n)\n\np.add_argument(\n '-i', '--case-insensitive',\n action = 'store_true',\n help = 'Search case insensitive',\n)\n\np.add_argument(\n '-m', '--mask-mode',\n action = 'store_true',\n help = 'Instead of searching for a specific constant value, search for values not containing strictly less bits that the given value.',\n)\n\np.add_argument(\n '-c', '--context',\n metavar = 'arch_or_os',\n action = 'append',\n type = common.context_arg,\n choices = common.choices,\n help = 'The os/architecture/endianness/bits the shellcode will run in (default: linux/i386), choose from: %s' % common.choices,\n)\n\ndef main(args):\n if args.exact:\n # This is the simple case\n print(cpp(args.regex).strip())\n else:\n # New we search in the right module.\n # But first: We find the right module\n if context.os == 'freebsd':\n mod = constants.freebsd\n else:\n mod = getattr(getattr(constants, context.os), context.arch)\n\n # Compile the given regex, for optimized lookup\n if args.case_insensitive:\n matcher = re.compile(args.regex, re.IGNORECASE)\n else:\n matcher = re.compile(args.regex)\n\n # The found matching constants and the length of the longest string\n out = []\n maxlen = 0\n\n constant = args.constant\n\n for k in dir(mod):\n # No python stuff\n if k.endswith('__') and k.startswith('__'):\n continue\n\n # Run the regex\n if not matcher.search(k):\n continue\n\n # Check if the value has proper type\n val = getattr(mod, k)\n if not isinstance(val, pwnlib.constants.constant.Constant):\n continue\n\n # Check the constant\n if constant is not None:\n if args.mask_mode:\n if constant & val != val:\n continue\n else:\n if constant != val:\n continue\n\n # Append it\n out.append((val, k))\n maxlen = max(len(k), maxlen)\n\n # Output all matching constants\n for _, k in sorted(out):\n print('#define %s %s' % (k.ljust(maxlen), cpp(k).strip()))\n\n # If we are in match_mode, then try to find a combination of\n # constants that yield the exact given value\n # We do not want to find combinations using the value 0.\n if constant and args.mask_mode:\n mask = constant\n good = []\n out = [(v, k) for v, k in out if v != 0]\n\n while mask and out:\n cur = out.pop()\n mask &= ~cur[0]\n good.append(cur)\n\n out = [(v, k) for v, k in out if mask & v == v]\n\n if functools.reduce(lambda x, cur: x | cur[0], good, 0) == constant:\n print('')\n print('(%s) == %s' % (' | '.join(k for v, k in good), args.constant))\n\nif __name__ == '__main__':\n pwnlib.commandline.common.main(__file__)\n", "path": "pwnlib/commandline/constgrep.py"}]}
1,630
209
gh_patches_debug_24092
rasdani/github-patches
git_diff
lk-geimfari__mimesis-929
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- PyCharm indicates wrong type for lambda # Bug report <!-- Hi, thanks for submitting a bug. We appreciate that. But, we will need some information about what's wrong to help you. --> ## What's wrong While using PyCharm, `lambda` type in `Schema` results in *Expected type 'FunctionType', got '() -> Dict[str, Union[str, Any]]' instead* ![Screenshot 2020-08-28 at 07 59 37](https://user-images.githubusercontent.com/55017335/91531501-ca378a80-e904-11ea-9558-5b44bcff7040.png) <!-- Describe what is not working. Please, attach a traceback. --> ## How is that should be Is this warning correct? The code runs perfectly fine but maybe the Type maybe be wrong here. On this [SO post](https://stackoverflow.com/a/33833896/12794150) they mentioned using the `from typing import Callable` for type hinting a lambda. <!-- Describe how it should work. --> ## System information <!-- Describe system information --> ``` ❯ python3 --version Python 3.8.5 ❯ sw_vers ProductName: macOS ProductVersion: 11.0 BuildVersion: 20A5354i ``` PyCharm 2020.2.1 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mimesis/schema.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 """Implements classes for generating data by schema.""" 4 5 from types import LambdaType 6 from typing import Any, Callable, List, Optional 7 8 from mimesis.exceptions import ( 9 UnacceptableField, 10 UndefinedField, 11 UndefinedSchema, 12 UnsupportedField, 13 ) 14 from mimesis.providers.generic import Generic 15 from mimesis.typing import JSON, Seed 16 17 __all__ = ['Field', 'Schema'] 18 19 20 class AbstractField(object): 21 """ 22 AbstractField is a class for generating data by the name of the method. 23 24 Instance of this object takes any string which represents name 25 of any method of any supported data provider (:class:`~mimesis.Generic`) 26 and the ``**kwargs`` of the method. 27 28 See :class:`~mimesis.schema.AbstractField.__call__` for more details. 29 """ 30 31 def __init__(self, locale: str = 'en', 32 seed: Optional[Seed] = None, 33 providers: Optional[Any] = None) -> None: 34 """Initialize field. 35 36 :param locale: Locale 37 :param seed: Seed for random. 38 """ 39 self.locale = locale 40 self.seed = seed 41 self._gen = Generic(self.locale, self.seed) 42 43 if providers: 44 self._gen.add_providers(*providers) 45 46 self._table = {} # type: ignore 47 48 def __call__(self, name: Optional[str] = None, 49 key: Optional[Callable] = None, **kwargs) -> Any: 50 """Override standard call. 51 52 This magic method overrides standard call so it takes any string 53 which represents the name of any method of any supported data 54 provider and the ``**kwargs`` of this method. 55 56 .. note:: Some data providers have methods with the same names 57 and in such cases, you can explicitly define that the method 58 belongs to data-provider ``name='provider.name'`` otherwise 59 it will return the data from the first provider which 60 has a method ``name``. 61 62 You can apply a *key function* to the result returned by 63 the method, bt passing a parameter **key** with a callable 64 object which returns the final result. 65 66 :param name: Name of the method. 67 :param key: A key function (or other callable object) 68 which will be applied to result. 69 :param kwargs: Kwargs of method. 70 :return: Value which represented by method. 71 :raises ValueError: if provider not 72 supported or if field not defined. 73 """ 74 if name is None: 75 raise UndefinedField() 76 77 def tail_parser(tails: str, obj: Any) -> Any: 78 """Return method from end of tail. 79 80 :param tails: Tail string 81 :param obj: Search tail from this object 82 :return last tailed method 83 """ 84 provider_name, method_name = tails.split('.', 1) 85 86 if '.' in method_name: 87 raise UnacceptableField() 88 89 attr = getattr(obj, provider_name) 90 if attr is not None: 91 return getattr(attr, method_name) 92 93 try: 94 if name not in self._table: 95 if '.' not in name: 96 # Fix https://github.com/lk-geimfari/mimesis/issues/619 97 if name == self._gen.choice.Meta.name: 98 self._table[name] = self._gen.choice 99 else: 100 for provider in dir(self._gen): 101 provider = getattr(self._gen, provider) 102 if name in dir(provider): 103 self._table[name] = getattr(provider, name) 104 else: 105 self._table[name] = tail_parser(name, self._gen) 106 107 result = self._table[name](**kwargs) 108 if key and callable(key): 109 return key(result) 110 return result 111 except KeyError: 112 raise UnsupportedField(name) 113 114 def __str__(self): 115 return '{} <{}>'.format( 116 self.__class__.__name__, self.locale) 117 118 119 class Schema(object): 120 """Class which return list of filled schemas.""" 121 122 def __init__(self, schema: LambdaType) -> None: 123 """Initialize schema. 124 125 :param schema: A schema. 126 """ 127 if isinstance(schema, LambdaType): 128 self.schema = schema 129 else: 130 raise UndefinedSchema() 131 132 def create(self, iterations: int = 1) -> List[JSON]: 133 """Return filled schema. 134 135 Create a list of a filled schemas with elements in 136 an amount of **iterations**. 137 138 :param iterations: Amount of iterations. 139 :return: List of willed schemas. 140 """ 141 return [self.schema() for _ in range(iterations)] 142 143 144 # Alias for AbstractField 145 Field = AbstractField 146 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mimesis/schema.py b/mimesis/schema.py --- a/mimesis/schema.py +++ b/mimesis/schema.py @@ -2,7 +2,6 @@ """Implements classes for generating data by schema.""" -from types import LambdaType from typing import Any, Callable, List, Optional from mimesis.exceptions import ( @@ -17,7 +16,7 @@ __all__ = ['Field', 'Schema'] -class AbstractField(object): +class AbstractField: """ AbstractField is a class for generating data by the name of the method. @@ -116,15 +115,15 @@ self.__class__.__name__, self.locale) -class Schema(object): +class Schema: """Class which return list of filled schemas.""" - def __init__(self, schema: LambdaType) -> None: + def __init__(self, schema: Callable) -> None: """Initialize schema. :param schema: A schema. """ - if isinstance(schema, LambdaType): + if callable(schema): self.schema = schema else: raise UndefinedSchema()
{"golden_diff": "diff --git a/mimesis/schema.py b/mimesis/schema.py\n--- a/mimesis/schema.py\n+++ b/mimesis/schema.py\n@@ -2,7 +2,6 @@\n \n \"\"\"Implements classes for generating data by schema.\"\"\"\n \n-from types import LambdaType\n from typing import Any, Callable, List, Optional\n \n from mimesis.exceptions import (\n@@ -17,7 +16,7 @@\n __all__ = ['Field', 'Schema']\n \n \n-class AbstractField(object):\n+class AbstractField:\n \"\"\"\n AbstractField is a class for generating data by the name of the method.\n \n@@ -116,15 +115,15 @@\n self.__class__.__name__, self.locale)\n \n \n-class Schema(object):\n+class Schema:\n \"\"\"Class which return list of filled schemas.\"\"\"\n \n- def __init__(self, schema: LambdaType) -> None:\n+ def __init__(self, schema: Callable) -> None:\n \"\"\"Initialize schema.\n \n :param schema: A schema.\n \"\"\"\n- if isinstance(schema, LambdaType):\n+ if callable(schema):\n self.schema = schema\n else:\n raise UndefinedSchema()\n", "issue": "PyCharm indicates wrong type for lambda\n# Bug report\r\n\r\n<!--\r\nHi, thanks for submitting a bug. We appreciate that.\r\n\r\nBut, we will need some information about what's wrong to help you.\r\n-->\r\n\r\n## What's wrong\r\n\r\nWhile using PyCharm, `lambda` type in `Schema` results in *Expected type 'FunctionType', got '() -> Dict[str, Union[str, Any]]' instead*\r\n![Screenshot 2020-08-28 at 07 59 37](https://user-images.githubusercontent.com/55017335/91531501-ca378a80-e904-11ea-9558-5b44bcff7040.png)\r\n\r\n<!-- Describe what is not working. Please, attach a traceback. -->\r\n\r\n## How is that should be\r\nIs this warning correct? The code runs perfectly fine but maybe the Type maybe be wrong here. On this [SO post](https://stackoverflow.com/a/33833896/12794150) they mentioned using the `from typing import Callable` for type hinting a lambda.\r\n<!-- Describe how it should work. -->\r\n\r\n## System information\r\n\r\n<!-- Describe system information -->\r\n```\r\n\u276f python3 --version\r\nPython 3.8.5\r\n\r\n\u276f sw_vers\r\nProductName:\tmacOS\r\nProductVersion:\t11.0\r\nBuildVersion:\t20A5354i\r\n```\r\nPyCharm 2020.2.1\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Implements classes for generating data by schema.\"\"\"\n\nfrom types import LambdaType\nfrom typing import Any, Callable, List, Optional\n\nfrom mimesis.exceptions import (\n UnacceptableField,\n UndefinedField,\n UndefinedSchema,\n UnsupportedField,\n)\nfrom mimesis.providers.generic import Generic\nfrom mimesis.typing import JSON, Seed\n\n__all__ = ['Field', 'Schema']\n\n\nclass AbstractField(object):\n \"\"\"\n AbstractField is a class for generating data by the name of the method.\n\n Instance of this object takes any string which represents name\n of any method of any supported data provider (:class:`~mimesis.Generic`)\n and the ``**kwargs`` of the method.\n\n See :class:`~mimesis.schema.AbstractField.__call__` for more details.\n \"\"\"\n\n def __init__(self, locale: str = 'en',\n seed: Optional[Seed] = None,\n providers: Optional[Any] = None) -> None:\n \"\"\"Initialize field.\n\n :param locale: Locale\n :param seed: Seed for random.\n \"\"\"\n self.locale = locale\n self.seed = seed\n self._gen = Generic(self.locale, self.seed)\n\n if providers:\n self._gen.add_providers(*providers)\n\n self._table = {} # type: ignore\n\n def __call__(self, name: Optional[str] = None,\n key: Optional[Callable] = None, **kwargs) -> Any:\n \"\"\"Override standard call.\n\n This magic method overrides standard call so it takes any string\n which represents the name of any method of any supported data\n provider and the ``**kwargs`` of this method.\n\n .. note:: Some data providers have methods with the same names\n and in such cases, you can explicitly define that the method\n belongs to data-provider ``name='provider.name'`` otherwise\n it will return the data from the first provider which\n has a method ``name``.\n\n You can apply a *key function* to the result returned by\n the method, bt passing a parameter **key** with a callable\n object which returns the final result.\n\n :param name: Name of the method.\n :param key: A key function (or other callable object)\n which will be applied to result.\n :param kwargs: Kwargs of method.\n :return: Value which represented by method.\n :raises ValueError: if provider not\n supported or if field not defined.\n \"\"\"\n if name is None:\n raise UndefinedField()\n\n def tail_parser(tails: str, obj: Any) -> Any:\n \"\"\"Return method from end of tail.\n\n :param tails: Tail string\n :param obj: Search tail from this object\n :return last tailed method\n \"\"\"\n provider_name, method_name = tails.split('.', 1)\n\n if '.' in method_name:\n raise UnacceptableField()\n\n attr = getattr(obj, provider_name)\n if attr is not None:\n return getattr(attr, method_name)\n\n try:\n if name not in self._table:\n if '.' not in name:\n # Fix https://github.com/lk-geimfari/mimesis/issues/619\n if name == self._gen.choice.Meta.name:\n self._table[name] = self._gen.choice\n else:\n for provider in dir(self._gen):\n provider = getattr(self._gen, provider)\n if name in dir(provider):\n self._table[name] = getattr(provider, name)\n else:\n self._table[name] = tail_parser(name, self._gen)\n\n result = self._table[name](**kwargs)\n if key and callable(key):\n return key(result)\n return result\n except KeyError:\n raise UnsupportedField(name)\n\n def __str__(self):\n return '{} <{}>'.format(\n self.__class__.__name__, self.locale)\n\n\nclass Schema(object):\n \"\"\"Class which return list of filled schemas.\"\"\"\n\n def __init__(self, schema: LambdaType) -> None:\n \"\"\"Initialize schema.\n\n :param schema: A schema.\n \"\"\"\n if isinstance(schema, LambdaType):\n self.schema = schema\n else:\n raise UndefinedSchema()\n\n def create(self, iterations: int = 1) -> List[JSON]:\n \"\"\"Return filled schema.\n\n Create a list of a filled schemas with elements in\n an amount of **iterations**.\n\n :param iterations: Amount of iterations.\n :return: List of willed schemas.\n \"\"\"\n return [self.schema() for _ in range(iterations)]\n\n\n# Alias for AbstractField\nField = AbstractField\n", "path": "mimesis/schema.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Implements classes for generating data by schema.\"\"\"\n\nfrom typing import Any, Callable, List, Optional\n\nfrom mimesis.exceptions import (\n UnacceptableField,\n UndefinedField,\n UndefinedSchema,\n UnsupportedField,\n)\nfrom mimesis.providers.generic import Generic\nfrom mimesis.typing import JSON, Seed\n\n__all__ = ['Field', 'Schema']\n\n\nclass AbstractField:\n \"\"\"\n AbstractField is a class for generating data by the name of the method.\n\n Instance of this object takes any string which represents name\n of any method of any supported data provider (:class:`~mimesis.Generic`)\n and the ``**kwargs`` of the method.\n\n See :class:`~mimesis.schema.AbstractField.__call__` for more details.\n \"\"\"\n\n def __init__(self, locale: str = 'en',\n seed: Optional[Seed] = None,\n providers: Optional[Any] = None) -> None:\n \"\"\"Initialize field.\n\n :param locale: Locale\n :param seed: Seed for random.\n \"\"\"\n self.locale = locale\n self.seed = seed\n self._gen = Generic(self.locale, self.seed)\n\n if providers:\n self._gen.add_providers(*providers)\n\n self._table = {} # type: ignore\n\n def __call__(self, name: Optional[str] = None,\n key: Optional[Callable] = None, **kwargs) -> Any:\n \"\"\"Override standard call.\n\n This magic method overrides standard call so it takes any string\n which represents the name of any method of any supported data\n provider and the ``**kwargs`` of this method.\n\n .. note:: Some data providers have methods with the same names\n and in such cases, you can explicitly define that the method\n belongs to data-provider ``name='provider.name'`` otherwise\n it will return the data from the first provider which\n has a method ``name``.\n\n You can apply a *key function* to the result returned by\n the method, bt passing a parameter **key** with a callable\n object which returns the final result.\n\n :param name: Name of the method.\n :param key: A key function (or other callable object)\n which will be applied to result.\n :param kwargs: Kwargs of method.\n :return: Value which represented by method.\n :raises ValueError: if provider not\n supported or if field not defined.\n \"\"\"\n if name is None:\n raise UndefinedField()\n\n def tail_parser(tails: str, obj: Any) -> Any:\n \"\"\"Return method from end of tail.\n\n :param tails: Tail string\n :param obj: Search tail from this object\n :return last tailed method\n \"\"\"\n provider_name, method_name = tails.split('.', 1)\n\n if '.' in method_name:\n raise UnacceptableField()\n\n attr = getattr(obj, provider_name)\n if attr is not None:\n return getattr(attr, method_name)\n\n try:\n if name not in self._table:\n if '.' not in name:\n # Fix https://github.com/lk-geimfari/mimesis/issues/619\n if name == self._gen.choice.Meta.name:\n self._table[name] = self._gen.choice\n else:\n for provider in dir(self._gen):\n provider = getattr(self._gen, provider)\n if name in dir(provider):\n self._table[name] = getattr(provider, name)\n else:\n self._table[name] = tail_parser(name, self._gen)\n\n result = self._table[name](**kwargs)\n if key and callable(key):\n return key(result)\n return result\n except KeyError:\n raise UnsupportedField(name)\n\n def __str__(self):\n return '{} <{}>'.format(\n self.__class__.__name__, self.locale)\n\n\nclass Schema:\n \"\"\"Class which return list of filled schemas.\"\"\"\n\n def __init__(self, schema: Callable) -> None:\n \"\"\"Initialize schema.\n\n :param schema: A schema.\n \"\"\"\n if callable(schema):\n self.schema = schema\n else:\n raise UndefinedSchema()\n\n def create(self, iterations: int = 1) -> List[JSON]:\n \"\"\"Return filled schema.\n\n Create a list of a filled schemas with elements in\n an amount of **iterations**.\n\n :param iterations: Amount of iterations.\n :return: List of willed schemas.\n \"\"\"\n return [self.schema() for _ in range(iterations)]\n\n\n# Alias for AbstractField\nField = AbstractField\n", "path": "mimesis/schema.py"}]}
1,943
252
gh_patches_debug_13831
rasdani/github-patches
git_diff
pypa__setuptools-3705
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [BUG] release v65.6.0 breaks packages downstream due to removal of `distutils.log.Log` ### setuptools version 65.6.0 ### Python version Python 3.10 ### OS Ubuntu ### Additional environment information _No response_ ### Description The `distutils.log.Log` class was removed in https://github.com/pypa/setuptools/commit/74652cabdeaacadc76ccf126563bed8ee2ccf3ef. This causes popular packages downstream, such as `numpy`, to fail: see https://github.com/numpy/numpy/issues/22623 ### Expected behavior The module `distutils.log` module was not officially marked as deprecated even though https://github.com/pypa/setuptools/commit/74652cabdeaacadc76ccf126563bed8ee2ccf3ef added to the docstring that the module is `Retained for compatibility and should not be used.`. It would be great if the removed class could be reinstated and a deprecation pathway be provided. ### How to Reproduce 1. `pip install setuptools==65.6.0` 2. `from numpy.distutils import Log` ### Output ``` In [4]: from numpy.distutils import Log --------------------------------------------------------------------------- ImportError Traceback (most recent call last) <ipython-input-4-f8e71815afcd> in <module> ----> 1 from numpy.distutils import Log ~/.virtualenvs/aiida_dev/lib/python3.9/site-packages/numpy/distutils/__init__.py in <module> 22 # Must import local ccompiler ASAP in order to get 23 # customized CCompiler.spawn effective. ---> 24 from . import ccompiler 25 from . import unixccompiler 26 ~/.virtualenvs/aiida_dev/lib/python3.9/site-packages/numpy/distutils/ccompiler.py in <module> 18 from distutils.version import LooseVersion 19 ---> 20 from numpy.distutils import log 21 from numpy.distutils.exec_command import ( 22 filepath_from_subprocess_output, forward_bytes_to_stdout ~/.virtualenvs/aiida_dev/lib/python3.9/site-packages/numpy/distutils/log.py in <module> 2 import sys 3 from distutils.log import * # noqa: F403 ----> 4 from distutils.log import Log as old_Log 5 from distutils.log import _global_log 6 ImportError: cannot import name 'Log' from 'distutils.log' (/home/sph/.virtualenvs/aiida_dev/lib/python3.9/site-packages/setuptools/_distutils/log.py) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setuptools/_distutils/log.py` Content: ``` 1 """ 2 A simple log mechanism styled after PEP 282. 3 4 Retained for compatibility and should not be used. 5 """ 6 7 import logging 8 9 from ._log import log as _global_log 10 11 12 DEBUG = logging.DEBUG 13 INFO = logging.INFO 14 WARN = logging.WARN 15 ERROR = logging.ERROR 16 FATAL = logging.FATAL 17 18 log = _global_log.log 19 debug = _global_log.debug 20 info = _global_log.info 21 warn = _global_log.warning 22 error = _global_log.error 23 fatal = _global_log.fatal 24 25 26 def set_threshold(level): 27 orig = _global_log.level 28 _global_log.setLevel(level) 29 return orig 30 31 32 def set_verbosity(v): 33 if v <= 0: 34 set_threshold(logging.WARN) 35 elif v == 1: 36 set_threshold(logging.INFO) 37 elif v >= 2: 38 set_threshold(logging.DEBUG) 39 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setuptools/_distutils/log.py b/setuptools/_distutils/log.py --- a/setuptools/_distutils/log.py +++ b/setuptools/_distutils/log.py @@ -5,6 +5,7 @@ """ import logging +import warnings from ._log import log as _global_log @@ -36,3 +37,21 @@ set_threshold(logging.INFO) elif v >= 2: set_threshold(logging.DEBUG) + + +class Log(logging.Logger): + """distutils.log.Log is deprecated, please use an alternative from `logging`.""" + + def __init__(self, threshold=WARN): + warnings.warn(Log.__doc__) # avoid DeprecationWarning to ensure warn is shown + super().__init__(__name__, level=threshold) + + @property + def threshold(self): + return self.level + + @threshold.setter + def threshold(self, level): + self.setLevel(level) + + warn = logging.Logger.warning
{"golden_diff": "diff --git a/setuptools/_distutils/log.py b/setuptools/_distutils/log.py\n--- a/setuptools/_distutils/log.py\n+++ b/setuptools/_distutils/log.py\n@@ -5,6 +5,7 @@\n \"\"\"\n \n import logging\n+import warnings\n \n from ._log import log as _global_log\n \n@@ -36,3 +37,21 @@\n set_threshold(logging.INFO)\n elif v >= 2:\n set_threshold(logging.DEBUG)\n+\n+\n+class Log(logging.Logger):\n+ \"\"\"distutils.log.Log is deprecated, please use an alternative from `logging`.\"\"\"\n+\n+ def __init__(self, threshold=WARN):\n+ warnings.warn(Log.__doc__) # avoid DeprecationWarning to ensure warn is shown\n+ super().__init__(__name__, level=threshold)\n+\n+ @property\n+ def threshold(self):\n+ return self.level\n+\n+ @threshold.setter\n+ def threshold(self, level):\n+ self.setLevel(level)\n+\n+ warn = logging.Logger.warning\n", "issue": "[BUG] release v65.6.0 breaks packages downstream due to removal of `distutils.log.Log`\n### setuptools version\n\n65.6.0\n\n### Python version\n\nPython 3.10\n\n### OS\n\nUbuntu\n\n### Additional environment information\n\n_No response_\n\n### Description\n\nThe `distutils.log.Log` class was removed in https://github.com/pypa/setuptools/commit/74652cabdeaacadc76ccf126563bed8ee2ccf3ef. This causes popular packages downstream, such as `numpy`, to fail: see https://github.com/numpy/numpy/issues/22623\n\n### Expected behavior\n\nThe module `distutils.log` module was not officially marked as deprecated even though https://github.com/pypa/setuptools/commit/74652cabdeaacadc76ccf126563bed8ee2ccf3ef added to the docstring that the module is `Retained for compatibility and should not be used.`. It would be great if the removed class could be reinstated and a deprecation pathway be provided.\n\n### How to Reproduce\n\n1. `pip install setuptools==65.6.0`\r\n2. `from numpy.distutils import Log`\n\n### Output\n\n```\r\nIn [4]: from numpy.distutils import Log\r\n---------------------------------------------------------------------------\r\nImportError Traceback (most recent call last)\r\n<ipython-input-4-f8e71815afcd> in <module>\r\n----> 1 from numpy.distutils import Log\r\n\r\n~/.virtualenvs/aiida_dev/lib/python3.9/site-packages/numpy/distutils/__init__.py in <module>\r\n 22 # Must import local ccompiler ASAP in order to get\r\n 23 # customized CCompiler.spawn effective.\r\n---> 24 from . import ccompiler\r\n 25 from . import unixccompiler\r\n 26 \r\n\r\n~/.virtualenvs/aiida_dev/lib/python3.9/site-packages/numpy/distutils/ccompiler.py in <module>\r\n 18 from distutils.version import LooseVersion\r\n 19 \r\n---> 20 from numpy.distutils import log\r\n 21 from numpy.distutils.exec_command import (\r\n 22 filepath_from_subprocess_output, forward_bytes_to_stdout\r\n\r\n~/.virtualenvs/aiida_dev/lib/python3.9/site-packages/numpy/distutils/log.py in <module>\r\n 2 import sys\r\n 3 from distutils.log import * # noqa: F403\r\n----> 4 from distutils.log import Log as old_Log\r\n 5 from distutils.log import _global_log\r\n 6 \r\n\r\nImportError: cannot import name 'Log' from 'distutils.log' (/home/sph/.virtualenvs/aiida_dev/lib/python3.9/site-packages/setuptools/_distutils/log.py)\r\n```\n", "before_files": [{"content": "\"\"\"\nA simple log mechanism styled after PEP 282.\n\nRetained for compatibility and should not be used.\n\"\"\"\n\nimport logging\n\nfrom ._log import log as _global_log\n\n\nDEBUG = logging.DEBUG\nINFO = logging.INFO\nWARN = logging.WARN\nERROR = logging.ERROR\nFATAL = logging.FATAL\n\nlog = _global_log.log\ndebug = _global_log.debug\ninfo = _global_log.info\nwarn = _global_log.warning\nerror = _global_log.error\nfatal = _global_log.fatal\n\n\ndef set_threshold(level):\n orig = _global_log.level\n _global_log.setLevel(level)\n return orig\n\n\ndef set_verbosity(v):\n if v <= 0:\n set_threshold(logging.WARN)\n elif v == 1:\n set_threshold(logging.INFO)\n elif v >= 2:\n set_threshold(logging.DEBUG)\n", "path": "setuptools/_distutils/log.py"}], "after_files": [{"content": "\"\"\"\nA simple log mechanism styled after PEP 282.\n\nRetained for compatibility and should not be used.\n\"\"\"\n\nimport logging\nimport warnings\n\nfrom ._log import log as _global_log\n\n\nDEBUG = logging.DEBUG\nINFO = logging.INFO\nWARN = logging.WARN\nERROR = logging.ERROR\nFATAL = logging.FATAL\n\nlog = _global_log.log\ndebug = _global_log.debug\ninfo = _global_log.info\nwarn = _global_log.warning\nerror = _global_log.error\nfatal = _global_log.fatal\n\n\ndef set_threshold(level):\n orig = _global_log.level\n _global_log.setLevel(level)\n return orig\n\n\ndef set_verbosity(v):\n if v <= 0:\n set_threshold(logging.WARN)\n elif v == 1:\n set_threshold(logging.INFO)\n elif v >= 2:\n set_threshold(logging.DEBUG)\n\n\nclass Log(logging.Logger):\n \"\"\"distutils.log.Log is deprecated, please use an alternative from `logging`.\"\"\"\n\n def __init__(self, threshold=WARN):\n warnings.warn(Log.__doc__) # avoid DeprecationWarning to ensure warn is shown\n super().__init__(__name__, level=threshold)\n\n @property\n def threshold(self):\n return self.level\n\n @threshold.setter\n def threshold(self, level):\n self.setLevel(level)\n\n warn = logging.Logger.warning\n", "path": "setuptools/_distutils/log.py"}]}
1,144
223
gh_patches_debug_38767
rasdani/github-patches
git_diff
microsoft__botbuilder-python-976
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Skill consumers should not be able to send Activities to skills without a recipient (Python) See [parent](https://github.com/microsoft/botframework-sdk/issues/5785). Issue may be specific to dotnet, need to verify if this is the case. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py` Content: ``` 1 # Copyright (c) Microsoft Corporation. All rights reserved. 2 # Licensed under the MIT License. 3 # pylint: disable=no-member 4 5 import json 6 from typing import Dict 7 from logging import Logger 8 9 import aiohttp 10 from botbuilder.core import InvokeResponse 11 from botbuilder.core.skills import BotFrameworkClient 12 from botbuilder.schema import ( 13 Activity, 14 ExpectedReplies, 15 ConversationReference, 16 ConversationAccount, 17 ) 18 from botframework.connector.auth import ( 19 ChannelProvider, 20 CredentialProvider, 21 MicrosoftAppCredentials, 22 AppCredentials, 23 MicrosoftGovernmentAppCredentials, 24 ) 25 26 27 class BotFrameworkHttpClient(BotFrameworkClient): 28 29 """ 30 A skill host adapter implements API to forward activity to a skill and 31 implements routing ChannelAPI calls from the Skill up through the bot/adapter. 32 """ 33 34 INVOKE_ACTIVITY_NAME = "SkillEvents.ChannelApiInvoke" 35 _BOT_IDENTITY_KEY = "BotIdentity" 36 _APP_CREDENTIALS_CACHE: Dict[str, MicrosoftAppCredentials] = {} 37 38 def __init__( 39 self, 40 credential_provider: CredentialProvider, 41 channel_provider: ChannelProvider = None, 42 logger: Logger = None, 43 ): 44 if not credential_provider: 45 raise TypeError("credential_provider can't be None") 46 47 self._credential_provider = credential_provider 48 self._channel_provider = channel_provider 49 self._logger = logger 50 self._session = aiohttp.ClientSession() 51 52 async def post_activity( 53 self, 54 from_bot_id: str, 55 to_bot_id: str, 56 to_url: str, 57 service_url: str, 58 conversation_id: str, 59 activity: Activity, 60 ) -> InvokeResponse: 61 app_credentials = await self._get_app_credentials(from_bot_id, to_bot_id) 62 63 if not app_credentials: 64 raise KeyError("Unable to get appCredentials to connect to the skill") 65 66 # Get token for the skill call 67 token = ( 68 app_credentials.get_access_token() 69 if app_credentials.microsoft_app_id 70 else None 71 ) 72 73 # Capture current activity settings before changing them. 74 original_conversation_id = activity.conversation.id 75 original_service_url = activity.service_url 76 original_relates_to = activity.relates_to 77 78 try: 79 activity.relates_to = ConversationReference( 80 service_url=activity.service_url, 81 activity_id=activity.id, 82 channel_id=activity.channel_id, 83 conversation=ConversationAccount( 84 id=activity.conversation.id, 85 name=activity.conversation.name, 86 conversation_type=activity.conversation.conversation_type, 87 aad_object_id=activity.conversation.aad_object_id, 88 is_group=activity.conversation.is_group, 89 role=activity.conversation.role, 90 tenant_id=activity.conversation.tenant_id, 91 properties=activity.conversation.properties, 92 ), 93 bot=None, 94 ) 95 activity.conversation.id = conversation_id 96 activity.service_url = service_url 97 98 headers_dict = { 99 "Content-type": "application/json; charset=utf-8", 100 } 101 if token: 102 headers_dict.update( 103 {"Authorization": f"Bearer {token}",} 104 ) 105 106 json_content = json.dumps(activity.serialize()) 107 resp = await self._session.post( 108 to_url, data=json_content.encode("utf-8"), headers=headers_dict, 109 ) 110 resp.raise_for_status() 111 data = (await resp.read()).decode() 112 content = json.loads(data) if data else None 113 114 return InvokeResponse(status=resp.status, body=content) 115 116 finally: 117 # Restore activity properties. 118 activity.conversation.id = original_conversation_id 119 activity.service_url = original_service_url 120 activity.relates_to = original_relates_to 121 122 async def post_buffered_activity( 123 self, 124 from_bot_id: str, 125 to_bot_id: str, 126 to_url: str, 127 service_url: str, 128 conversation_id: str, 129 activity: Activity, 130 ) -> [Activity]: 131 """ 132 Helper method to return a list of activities when an Activity is being 133 sent with DeliveryMode == expectReplies. 134 """ 135 response = await self.post_activity( 136 from_bot_id, to_bot_id, to_url, service_url, conversation_id, activity 137 ) 138 if not response or (response.status / 100) != 2: 139 return [] 140 return ExpectedReplies().deserialize(response.body).activities 141 142 async def _get_app_credentials( 143 self, app_id: str, oauth_scope: str 144 ) -> AppCredentials: 145 if not app_id: 146 return MicrosoftAppCredentials.empty() 147 148 # in the cache? 149 cache_key = f"{app_id}{oauth_scope}" 150 app_credentials = BotFrameworkHttpClient._APP_CREDENTIALS_CACHE.get(cache_key) 151 if app_credentials: 152 return app_credentials 153 154 # create a new AppCredentials 155 app_password = await self._credential_provider.get_app_password(app_id) 156 157 app_credentials = ( 158 MicrosoftGovernmentAppCredentials(app_id, app_password, scope=oauth_scope) 159 if self._credential_provider and self._channel_provider.is_government() 160 else MicrosoftAppCredentials(app_id, app_password, oauth_scope=oauth_scope) 161 ) 162 163 # put it in the cache 164 BotFrameworkHttpClient._APP_CREDENTIALS_CACHE[cache_key] = app_credentials 165 166 return app_credentials 167 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py b/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py --- a/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py +++ b/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py @@ -14,6 +14,7 @@ ExpectedReplies, ConversationReference, ConversationAccount, + ChannelAccount, ) from botframework.connector.auth import ( ChannelProvider, @@ -74,6 +75,7 @@ original_conversation_id = activity.conversation.id original_service_url = activity.service_url original_relates_to = activity.relates_to + original_recipient = activity.recipient try: activity.relates_to = ConversationReference( @@ -94,30 +96,38 @@ ) activity.conversation.id = conversation_id activity.service_url = service_url + if not activity.recipient: + activity.recipient = ChannelAccount() - headers_dict = { - "Content-type": "application/json; charset=utf-8", - } - if token: - headers_dict.update( - {"Authorization": f"Bearer {token}",} - ) - - json_content = json.dumps(activity.serialize()) - resp = await self._session.post( - to_url, data=json_content.encode("utf-8"), headers=headers_dict, - ) - resp.raise_for_status() - data = (await resp.read()).decode() - content = json.loads(data) if data else None + status, content = await self._post_content(to_url, token, activity) - return InvokeResponse(status=resp.status, body=content) + return InvokeResponse(status=status, body=content) finally: # Restore activity properties. activity.conversation.id = original_conversation_id activity.service_url = original_service_url activity.relates_to = original_relates_to + activity.recipient = original_recipient + + async def _post_content( + self, to_url: str, token: str, activity: Activity + ) -> (int, object): + headers_dict = { + "Content-type": "application/json; charset=utf-8", + } + if token: + headers_dict.update( + {"Authorization": f"Bearer {token}",} + ) + + json_content = json.dumps(activity.serialize()) + resp = await self._session.post( + to_url, data=json_content.encode("utf-8"), headers=headers_dict, + ) + resp.raise_for_status() + data = (await resp.read()).decode() + return resp.status, json.loads(data) if data else None async def post_buffered_activity( self,
{"golden_diff": "diff --git a/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py b/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py\n--- a/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py\n+++ b/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py\n@@ -14,6 +14,7 @@\n ExpectedReplies,\n ConversationReference,\n ConversationAccount,\n+ ChannelAccount,\n )\n from botframework.connector.auth import (\n ChannelProvider,\n@@ -74,6 +75,7 @@\n original_conversation_id = activity.conversation.id\n original_service_url = activity.service_url\n original_relates_to = activity.relates_to\n+ original_recipient = activity.recipient\n \n try:\n activity.relates_to = ConversationReference(\n@@ -94,30 +96,38 @@\n )\n activity.conversation.id = conversation_id\n activity.service_url = service_url\n+ if not activity.recipient:\n+ activity.recipient = ChannelAccount()\n \n- headers_dict = {\n- \"Content-type\": \"application/json; charset=utf-8\",\n- }\n- if token:\n- headers_dict.update(\n- {\"Authorization\": f\"Bearer {token}\",}\n- )\n-\n- json_content = json.dumps(activity.serialize())\n- resp = await self._session.post(\n- to_url, data=json_content.encode(\"utf-8\"), headers=headers_dict,\n- )\n- resp.raise_for_status()\n- data = (await resp.read()).decode()\n- content = json.loads(data) if data else None\n+ status, content = await self._post_content(to_url, token, activity)\n \n- return InvokeResponse(status=resp.status, body=content)\n+ return InvokeResponse(status=status, body=content)\n \n finally:\n # Restore activity properties.\n activity.conversation.id = original_conversation_id\n activity.service_url = original_service_url\n activity.relates_to = original_relates_to\n+ activity.recipient = original_recipient\n+\n+ async def _post_content(\n+ self, to_url: str, token: str, activity: Activity\n+ ) -> (int, object):\n+ headers_dict = {\n+ \"Content-type\": \"application/json; charset=utf-8\",\n+ }\n+ if token:\n+ headers_dict.update(\n+ {\"Authorization\": f\"Bearer {token}\",}\n+ )\n+\n+ json_content = json.dumps(activity.serialize())\n+ resp = await self._session.post(\n+ to_url, data=json_content.encode(\"utf-8\"), headers=headers_dict,\n+ )\n+ resp.raise_for_status()\n+ data = (await resp.read()).decode()\n+ return resp.status, json.loads(data) if data else None\n \n async def post_buffered_activity(\n self,\n", "issue": "Skill consumers should not be able to send Activities to skills without a recipient (Python)\nSee [parent](https://github.com/microsoft/botframework-sdk/issues/5785).\r\n\r\nIssue may be specific to dotnet, need to verify if this is the case.\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n# pylint: disable=no-member\n\nimport json\nfrom typing import Dict\nfrom logging import Logger\n\nimport aiohttp\nfrom botbuilder.core import InvokeResponse\nfrom botbuilder.core.skills import BotFrameworkClient\nfrom botbuilder.schema import (\n Activity,\n ExpectedReplies,\n ConversationReference,\n ConversationAccount,\n)\nfrom botframework.connector.auth import (\n ChannelProvider,\n CredentialProvider,\n MicrosoftAppCredentials,\n AppCredentials,\n MicrosoftGovernmentAppCredentials,\n)\n\n\nclass BotFrameworkHttpClient(BotFrameworkClient):\n\n \"\"\"\n A skill host adapter implements API to forward activity to a skill and\n implements routing ChannelAPI calls from the Skill up through the bot/adapter.\n \"\"\"\n\n INVOKE_ACTIVITY_NAME = \"SkillEvents.ChannelApiInvoke\"\n _BOT_IDENTITY_KEY = \"BotIdentity\"\n _APP_CREDENTIALS_CACHE: Dict[str, MicrosoftAppCredentials] = {}\n\n def __init__(\n self,\n credential_provider: CredentialProvider,\n channel_provider: ChannelProvider = None,\n logger: Logger = None,\n ):\n if not credential_provider:\n raise TypeError(\"credential_provider can't be None\")\n\n self._credential_provider = credential_provider\n self._channel_provider = channel_provider\n self._logger = logger\n self._session = aiohttp.ClientSession()\n\n async def post_activity(\n self,\n from_bot_id: str,\n to_bot_id: str,\n to_url: str,\n service_url: str,\n conversation_id: str,\n activity: Activity,\n ) -> InvokeResponse:\n app_credentials = await self._get_app_credentials(from_bot_id, to_bot_id)\n\n if not app_credentials:\n raise KeyError(\"Unable to get appCredentials to connect to the skill\")\n\n # Get token for the skill call\n token = (\n app_credentials.get_access_token()\n if app_credentials.microsoft_app_id\n else None\n )\n\n # Capture current activity settings before changing them.\n original_conversation_id = activity.conversation.id\n original_service_url = activity.service_url\n original_relates_to = activity.relates_to\n\n try:\n activity.relates_to = ConversationReference(\n service_url=activity.service_url,\n activity_id=activity.id,\n channel_id=activity.channel_id,\n conversation=ConversationAccount(\n id=activity.conversation.id,\n name=activity.conversation.name,\n conversation_type=activity.conversation.conversation_type,\n aad_object_id=activity.conversation.aad_object_id,\n is_group=activity.conversation.is_group,\n role=activity.conversation.role,\n tenant_id=activity.conversation.tenant_id,\n properties=activity.conversation.properties,\n ),\n bot=None,\n )\n activity.conversation.id = conversation_id\n activity.service_url = service_url\n\n headers_dict = {\n \"Content-type\": \"application/json; charset=utf-8\",\n }\n if token:\n headers_dict.update(\n {\"Authorization\": f\"Bearer {token}\",}\n )\n\n json_content = json.dumps(activity.serialize())\n resp = await self._session.post(\n to_url, data=json_content.encode(\"utf-8\"), headers=headers_dict,\n )\n resp.raise_for_status()\n data = (await resp.read()).decode()\n content = json.loads(data) if data else None\n\n return InvokeResponse(status=resp.status, body=content)\n\n finally:\n # Restore activity properties.\n activity.conversation.id = original_conversation_id\n activity.service_url = original_service_url\n activity.relates_to = original_relates_to\n\n async def post_buffered_activity(\n self,\n from_bot_id: str,\n to_bot_id: str,\n to_url: str,\n service_url: str,\n conversation_id: str,\n activity: Activity,\n ) -> [Activity]:\n \"\"\"\n Helper method to return a list of activities when an Activity is being\n sent with DeliveryMode == expectReplies.\n \"\"\"\n response = await self.post_activity(\n from_bot_id, to_bot_id, to_url, service_url, conversation_id, activity\n )\n if not response or (response.status / 100) != 2:\n return []\n return ExpectedReplies().deserialize(response.body).activities\n\n async def _get_app_credentials(\n self, app_id: str, oauth_scope: str\n ) -> AppCredentials:\n if not app_id:\n return MicrosoftAppCredentials.empty()\n\n # in the cache?\n cache_key = f\"{app_id}{oauth_scope}\"\n app_credentials = BotFrameworkHttpClient._APP_CREDENTIALS_CACHE.get(cache_key)\n if app_credentials:\n return app_credentials\n\n # create a new AppCredentials\n app_password = await self._credential_provider.get_app_password(app_id)\n\n app_credentials = (\n MicrosoftGovernmentAppCredentials(app_id, app_password, scope=oauth_scope)\n if self._credential_provider and self._channel_provider.is_government()\n else MicrosoftAppCredentials(app_id, app_password, oauth_scope=oauth_scope)\n )\n\n # put it in the cache\n BotFrameworkHttpClient._APP_CREDENTIALS_CACHE[cache_key] = app_credentials\n\n return app_credentials\n", "path": "libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n# pylint: disable=no-member\n\nimport json\nfrom typing import Dict\nfrom logging import Logger\n\nimport aiohttp\nfrom botbuilder.core import InvokeResponse\nfrom botbuilder.core.skills import BotFrameworkClient\nfrom botbuilder.schema import (\n Activity,\n ExpectedReplies,\n ConversationReference,\n ConversationAccount,\n ChannelAccount,\n)\nfrom botframework.connector.auth import (\n ChannelProvider,\n CredentialProvider,\n MicrosoftAppCredentials,\n AppCredentials,\n MicrosoftGovernmentAppCredentials,\n)\n\n\nclass BotFrameworkHttpClient(BotFrameworkClient):\n\n \"\"\"\n A skill host adapter implements API to forward activity to a skill and\n implements routing ChannelAPI calls from the Skill up through the bot/adapter.\n \"\"\"\n\n INVOKE_ACTIVITY_NAME = \"SkillEvents.ChannelApiInvoke\"\n _BOT_IDENTITY_KEY = \"BotIdentity\"\n _APP_CREDENTIALS_CACHE: Dict[str, MicrosoftAppCredentials] = {}\n\n def __init__(\n self,\n credential_provider: CredentialProvider,\n channel_provider: ChannelProvider = None,\n logger: Logger = None,\n ):\n if not credential_provider:\n raise TypeError(\"credential_provider can't be None\")\n\n self._credential_provider = credential_provider\n self._channel_provider = channel_provider\n self._logger = logger\n self._session = aiohttp.ClientSession()\n\n async def post_activity(\n self,\n from_bot_id: str,\n to_bot_id: str,\n to_url: str,\n service_url: str,\n conversation_id: str,\n activity: Activity,\n ) -> InvokeResponse:\n app_credentials = await self._get_app_credentials(from_bot_id, to_bot_id)\n\n if not app_credentials:\n raise KeyError(\"Unable to get appCredentials to connect to the skill\")\n\n # Get token for the skill call\n token = (\n app_credentials.get_access_token()\n if app_credentials.microsoft_app_id\n else None\n )\n\n # Capture current activity settings before changing them.\n original_conversation_id = activity.conversation.id\n original_service_url = activity.service_url\n original_relates_to = activity.relates_to\n original_recipient = activity.recipient\n\n try:\n activity.relates_to = ConversationReference(\n service_url=activity.service_url,\n activity_id=activity.id,\n channel_id=activity.channel_id,\n conversation=ConversationAccount(\n id=activity.conversation.id,\n name=activity.conversation.name,\n conversation_type=activity.conversation.conversation_type,\n aad_object_id=activity.conversation.aad_object_id,\n is_group=activity.conversation.is_group,\n role=activity.conversation.role,\n tenant_id=activity.conversation.tenant_id,\n properties=activity.conversation.properties,\n ),\n bot=None,\n )\n activity.conversation.id = conversation_id\n activity.service_url = service_url\n if not activity.recipient:\n activity.recipient = ChannelAccount()\n\n status, content = await self._post_content(to_url, token, activity)\n\n return InvokeResponse(status=status, body=content)\n\n finally:\n # Restore activity properties.\n activity.conversation.id = original_conversation_id\n activity.service_url = original_service_url\n activity.relates_to = original_relates_to\n activity.recipient = original_recipient\n\n async def _post_content(\n self, to_url: str, token: str, activity: Activity\n ) -> (int, object):\n headers_dict = {\n \"Content-type\": \"application/json; charset=utf-8\",\n }\n if token:\n headers_dict.update(\n {\"Authorization\": f\"Bearer {token}\",}\n )\n\n json_content = json.dumps(activity.serialize())\n resp = await self._session.post(\n to_url, data=json_content.encode(\"utf-8\"), headers=headers_dict,\n )\n resp.raise_for_status()\n data = (await resp.read()).decode()\n return resp.status, json.loads(data) if data else None\n\n async def post_buffered_activity(\n self,\n from_bot_id: str,\n to_bot_id: str,\n to_url: str,\n service_url: str,\n conversation_id: str,\n activity: Activity,\n ) -> [Activity]:\n \"\"\"\n Helper method to return a list of activities when an Activity is being\n sent with DeliveryMode == expectReplies.\n \"\"\"\n response = await self.post_activity(\n from_bot_id, to_bot_id, to_url, service_url, conversation_id, activity\n )\n if not response or (response.status / 100) != 2:\n return []\n return ExpectedReplies().deserialize(response.body).activities\n\n async def _get_app_credentials(\n self, app_id: str, oauth_scope: str\n ) -> AppCredentials:\n if not app_id:\n return MicrosoftAppCredentials.empty()\n\n # in the cache?\n cache_key = f\"{app_id}{oauth_scope}\"\n app_credentials = BotFrameworkHttpClient._APP_CREDENTIALS_CACHE.get(cache_key)\n if app_credentials:\n return app_credentials\n\n # create a new AppCredentials\n app_password = await self._credential_provider.get_app_password(app_id)\n\n app_credentials = (\n MicrosoftGovernmentAppCredentials(app_id, app_password, scope=oauth_scope)\n if self._credential_provider and self._channel_provider.is_government()\n else MicrosoftAppCredentials(app_id, app_password, oauth_scope=oauth_scope)\n )\n\n # put it in the cache\n BotFrameworkHttpClient._APP_CREDENTIALS_CACHE[cache_key] = app_credentials\n\n return app_credentials\n", "path": "libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py"}]}
1,846
656
gh_patches_debug_40283
rasdani/github-patches
git_diff
enthought__chaco-598
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Serializable mixin should be removed The class is not used by any current code, appears to be broken, and the problem it was meant to solve (selection of which traits to pickle) is better solved via the use of `transient` traits metadata. Technically this is a backwards-incompatible change, however. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `chaco/serializable.py` Content: ``` 1 """ Defines the Serializable mix-in class. 2 """ 3 4 5 class Serializable(object): 6 """ 7 Mix-in class to help serialization. Serializes just the attributes in 8 **_pickles**. 9 10 This mix-in works best when all the classes in a hierarchy subclass 11 from it. It solves the problem of allowing each class to specify 12 its own set of attributes to pickle and attributes to ignore, without 13 having to also implement __getstate__ and __setstate__. 14 """ 15 16 # The basic list of attributes to save. These get set without firing 17 # any trait events. 18 _pickles = None 19 20 # A list of the parents of this class that will be searched for their 21 # list of _pickles. Only the parents in this list that inherit from 22 # Serialized will be pickled. The process stops at the first item in 23 # __pickle_parents that is not a subclass of Serialized. 24 # 25 # This is a double-underscore variable so that Python's attribute name 26 # will shield base class 27 # __pickle_parents = None 28 29 def _get_pickle_parents(self): 30 """ 31 Subclasses can override this method to return the list of base 32 classes they want to have the serializer look at. 33 """ 34 bases = [] 35 for cls in self.__class__.__mro__: 36 if cls is Serializable: 37 # don't add Serializable to the list of parents 38 continue 39 elif issubclass(cls, Serializable): 40 bases.append(cls) 41 else: 42 break 43 return bases 44 45 def _pre_save(self): 46 """ 47 Called before __getstate__ to give the object a chance to tidy up 48 and get ready to be saved. This usually also calls the superclass. 49 """ 50 51 def _post_load(self): 52 """ 53 Called after __setstate__ finishes restoring the state on the object. 54 This method usually needs to include a call to super(cls, self)._post_load(). 55 Avoid explicitly calling a parent class by name, because in general 56 you want post_load() to happen in the same order as MRO, which super() 57 does automatically. 58 """ 59 print("Serializable._post_load") 60 pass 61 62 def _do_setstate(self, state): 63 """ 64 Called by __setstate__ to allow the subclass to set its state in a 65 special way. 66 67 Subclasses should override this instead of Serializable.__setstate__ 68 because we need Serializable's implementation to call _post_load() after 69 all the _do_setstate() have returned.) 70 """ 71 # Quietly set all the attributes 72 self.trait_setq(**state) 73 74 # ------------------------------------------------------------------------ 75 # Private methods 76 # ------------------------------------------------------------------------ 77 78 79 # def __getstate__(self): 80 # #idstring = self.__class__.__name__ + " id=" + str(id(self)) 81 # # Give the object a chance to tidy up before saving 82 # self._pre_save() 83 # 84 # # Get the attributes that this class needs to serialize. We do this by 85 # # marching up the list of parent classes in _pickle_parents and getting 86 # # their lists of _pickles. 87 # all_pickles = Set() 88 # pickle_parents = self._get_pickle_parents() 89 # for parent_class in pickle_parents: 90 # all_pickles.update(parent_class._pickles) 91 # 92 # if self._pickles is not None: 93 # all_pickles.update(self._pickles) 94 # 95 # state = {} 96 # for attrib in all_pickles: 97 # state[attrib] = getattr(self, attrib) 98 # 99 # print('<<<<<<<<<<<<<', self) 100 # for key,value in state.items(): 101 # print(key, type(value)) 102 # print '>>>>>>>>>>>>>' 103 # 104 # return state 105 106 # ~ def __setstate__(self, state): 107 # ~ idstring = self.__class__.__name__ + " id=" + str(id(self)) 108 # ~ self._do_setstate(state) 109 # ~ self._post_load() 110 # ~ return 111 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/chaco/serializable.py b/chaco/serializable.py deleted file mode 100644 --- a/chaco/serializable.py +++ /dev/null @@ -1,110 +0,0 @@ -""" Defines the Serializable mix-in class. -""" - - -class Serializable(object): - """ - Mix-in class to help serialization. Serializes just the attributes in - **_pickles**. - - This mix-in works best when all the classes in a hierarchy subclass - from it. It solves the problem of allowing each class to specify - its own set of attributes to pickle and attributes to ignore, without - having to also implement __getstate__ and __setstate__. - """ - - # The basic list of attributes to save. These get set without firing - # any trait events. - _pickles = None - - # A list of the parents of this class that will be searched for their - # list of _pickles. Only the parents in this list that inherit from - # Serialized will be pickled. The process stops at the first item in - # __pickle_parents that is not a subclass of Serialized. - # - # This is a double-underscore variable so that Python's attribute name - # will shield base class - # __pickle_parents = None - - def _get_pickle_parents(self): - """ - Subclasses can override this method to return the list of base - classes they want to have the serializer look at. - """ - bases = [] - for cls in self.__class__.__mro__: - if cls is Serializable: - # don't add Serializable to the list of parents - continue - elif issubclass(cls, Serializable): - bases.append(cls) - else: - break - return bases - - def _pre_save(self): - """ - Called before __getstate__ to give the object a chance to tidy up - and get ready to be saved. This usually also calls the superclass. - """ - - def _post_load(self): - """ - Called after __setstate__ finishes restoring the state on the object. - This method usually needs to include a call to super(cls, self)._post_load(). - Avoid explicitly calling a parent class by name, because in general - you want post_load() to happen in the same order as MRO, which super() - does automatically. - """ - print("Serializable._post_load") - pass - - def _do_setstate(self, state): - """ - Called by __setstate__ to allow the subclass to set its state in a - special way. - - Subclasses should override this instead of Serializable.__setstate__ - because we need Serializable's implementation to call _post_load() after - all the _do_setstate() have returned.) - """ - # Quietly set all the attributes - self.trait_setq(**state) - - # ------------------------------------------------------------------------ - # Private methods - # ------------------------------------------------------------------------ - - -# def __getstate__(self): -# #idstring = self.__class__.__name__ + " id=" + str(id(self)) -# # Give the object a chance to tidy up before saving -# self._pre_save() -# -# # Get the attributes that this class needs to serialize. We do this by -# # marching up the list of parent classes in _pickle_parents and getting -# # their lists of _pickles. -# all_pickles = Set() -# pickle_parents = self._get_pickle_parents() -# for parent_class in pickle_parents: -# all_pickles.update(parent_class._pickles) -# -# if self._pickles is not None: -# all_pickles.update(self._pickles) -# -# state = {} -# for attrib in all_pickles: -# state[attrib] = getattr(self, attrib) -# -# print('<<<<<<<<<<<<<', self) -# for key,value in state.items(): -# print(key, type(value)) -# print '>>>>>>>>>>>>>' -# -# return state - -# ~ def __setstate__(self, state): -# ~ idstring = self.__class__.__name__ + " id=" + str(id(self)) -# ~ self._do_setstate(state) -# ~ self._post_load() -# ~ return
{"golden_diff": "diff --git a/chaco/serializable.py b/chaco/serializable.py\ndeleted file mode 100644\n--- a/chaco/serializable.py\n+++ /dev/null\n@@ -1,110 +0,0 @@\n-\"\"\" Defines the Serializable mix-in class.\n-\"\"\"\n-\n-\n-class Serializable(object):\n- \"\"\"\n- Mix-in class to help serialization. Serializes just the attributes in\n- **_pickles**.\n-\n- This mix-in works best when all the classes in a hierarchy subclass\n- from it. It solves the problem of allowing each class to specify\n- its own set of attributes to pickle and attributes to ignore, without\n- having to also implement __getstate__ and __setstate__.\n- \"\"\"\n-\n- # The basic list of attributes to save. These get set without firing\n- # any trait events.\n- _pickles = None\n-\n- # A list of the parents of this class that will be searched for their\n- # list of _pickles. Only the parents in this list that inherit from\n- # Serialized will be pickled. The process stops at the first item in\n- # __pickle_parents that is not a subclass of Serialized.\n- #\n- # This is a double-underscore variable so that Python's attribute name\n- # will shield base class\n- # __pickle_parents = None\n-\n- def _get_pickle_parents(self):\n- \"\"\"\n- Subclasses can override this method to return the list of base\n- classes they want to have the serializer look at.\n- \"\"\"\n- bases = []\n- for cls in self.__class__.__mro__:\n- if cls is Serializable:\n- # don't add Serializable to the list of parents\n- continue\n- elif issubclass(cls, Serializable):\n- bases.append(cls)\n- else:\n- break\n- return bases\n-\n- def _pre_save(self):\n- \"\"\"\n- Called before __getstate__ to give the object a chance to tidy up\n- and get ready to be saved. This usually also calls the superclass.\n- \"\"\"\n-\n- def _post_load(self):\n- \"\"\"\n- Called after __setstate__ finishes restoring the state on the object.\n- This method usually needs to include a call to super(cls, self)._post_load().\n- Avoid explicitly calling a parent class by name, because in general\n- you want post_load() to happen in the same order as MRO, which super()\n- does automatically.\n- \"\"\"\n- print(\"Serializable._post_load\")\n- pass\n-\n- def _do_setstate(self, state):\n- \"\"\"\n- Called by __setstate__ to allow the subclass to set its state in a\n- special way.\n-\n- Subclasses should override this instead of Serializable.__setstate__\n- because we need Serializable's implementation to call _post_load() after\n- all the _do_setstate() have returned.)\n- \"\"\"\n- # Quietly set all the attributes\n- self.trait_setq(**state)\n-\n- # ------------------------------------------------------------------------\n- # Private methods\n- # ------------------------------------------------------------------------\n-\n-\n-# def __getstate__(self):\n-# #idstring = self.__class__.__name__ + \" id=\" + str(id(self))\n-# # Give the object a chance to tidy up before saving\n-# self._pre_save()\n-#\n-# # Get the attributes that this class needs to serialize. We do this by\n-# # marching up the list of parent classes in _pickle_parents and getting\n-# # their lists of _pickles.\n-# all_pickles = Set()\n-# pickle_parents = self._get_pickle_parents()\n-# for parent_class in pickle_parents:\n-# all_pickles.update(parent_class._pickles)\n-#\n-# if self._pickles is not None:\n-# all_pickles.update(self._pickles)\n-#\n-# state = {}\n-# for attrib in all_pickles:\n-# state[attrib] = getattr(self, attrib)\n-#\n-# print('<<<<<<<<<<<<<', self)\n-# for key,value in state.items():\n-# print(key, type(value))\n-# print '>>>>>>>>>>>>>'\n-#\n-# return state\n-\n-# ~ def __setstate__(self, state):\n-# ~ idstring = self.__class__.__name__ + \" id=\" + str(id(self))\n-# ~ self._do_setstate(state)\n-# ~ self._post_load()\n-# ~ return\n", "issue": "Serializable mixin should be removed\nThe class is not used by any current code, appears to be broken, and the problem it was meant to solve (selection of which traits to pickle) is better solved via the use of `transient` traits metadata.\n\nTechnically this is a backwards-incompatible change, however.\n\n", "before_files": [{"content": "\"\"\" Defines the Serializable mix-in class.\n\"\"\"\n\n\nclass Serializable(object):\n \"\"\"\n Mix-in class to help serialization. Serializes just the attributes in\n **_pickles**.\n\n This mix-in works best when all the classes in a hierarchy subclass\n from it. It solves the problem of allowing each class to specify\n its own set of attributes to pickle and attributes to ignore, without\n having to also implement __getstate__ and __setstate__.\n \"\"\"\n\n # The basic list of attributes to save. These get set without firing\n # any trait events.\n _pickles = None\n\n # A list of the parents of this class that will be searched for their\n # list of _pickles. Only the parents in this list that inherit from\n # Serialized will be pickled. The process stops at the first item in\n # __pickle_parents that is not a subclass of Serialized.\n #\n # This is a double-underscore variable so that Python's attribute name\n # will shield base class\n # __pickle_parents = None\n\n def _get_pickle_parents(self):\n \"\"\"\n Subclasses can override this method to return the list of base\n classes they want to have the serializer look at.\n \"\"\"\n bases = []\n for cls in self.__class__.__mro__:\n if cls is Serializable:\n # don't add Serializable to the list of parents\n continue\n elif issubclass(cls, Serializable):\n bases.append(cls)\n else:\n break\n return bases\n\n def _pre_save(self):\n \"\"\"\n Called before __getstate__ to give the object a chance to tidy up\n and get ready to be saved. This usually also calls the superclass.\n \"\"\"\n\n def _post_load(self):\n \"\"\"\n Called after __setstate__ finishes restoring the state on the object.\n This method usually needs to include a call to super(cls, self)._post_load().\n Avoid explicitly calling a parent class by name, because in general\n you want post_load() to happen in the same order as MRO, which super()\n does automatically.\n \"\"\"\n print(\"Serializable._post_load\")\n pass\n\n def _do_setstate(self, state):\n \"\"\"\n Called by __setstate__ to allow the subclass to set its state in a\n special way.\n\n Subclasses should override this instead of Serializable.__setstate__\n because we need Serializable's implementation to call _post_load() after\n all the _do_setstate() have returned.)\n \"\"\"\n # Quietly set all the attributes\n self.trait_setq(**state)\n\n # ------------------------------------------------------------------------\n # Private methods\n # ------------------------------------------------------------------------\n\n\n# def __getstate__(self):\n# #idstring = self.__class__.__name__ + \" id=\" + str(id(self))\n# # Give the object a chance to tidy up before saving\n# self._pre_save()\n#\n# # Get the attributes that this class needs to serialize. We do this by\n# # marching up the list of parent classes in _pickle_parents and getting\n# # their lists of _pickles.\n# all_pickles = Set()\n# pickle_parents = self._get_pickle_parents()\n# for parent_class in pickle_parents:\n# all_pickles.update(parent_class._pickles)\n#\n# if self._pickles is not None:\n# all_pickles.update(self._pickles)\n#\n# state = {}\n# for attrib in all_pickles:\n# state[attrib] = getattr(self, attrib)\n#\n# print('<<<<<<<<<<<<<', self)\n# for key,value in state.items():\n# print(key, type(value))\n# print '>>>>>>>>>>>>>'\n#\n# return state\n\n# ~ def __setstate__(self, state):\n# ~ idstring = self.__class__.__name__ + \" id=\" + str(id(self))\n# ~ self._do_setstate(state)\n# ~ self._post_load()\n# ~ return\n", "path": "chaco/serializable.py"}], "after_files": [{"content": null, "path": "chaco/serializable.py"}]}
1,411
1,013
gh_patches_debug_21876
rasdani/github-patches
git_diff
scrapy__scrapy-4663
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- SCRAPY_CHECK is not set while running contract ### Description Hi, it seems that #3739 is not doing what the [documentation describe](https://docs.scrapy.org/en/latest/topics/contracts.html#detecting-check-runs): `os.environ.get('SCRAPY_CHECK')` is returning `None` in my contract check. ### Steps to Reproduce 1. Create a project from scratch 2. Add a random spider 3. Contract code is as follow ``` def parse(self, response): """ @url http://www.amazon.com/s?field-keywords=selfish+gene @returns requests 1 1 """ print("test", os.environ.get('SCRAPY_CHECK')) if os.environ.get('SCRAPY_CHECK'): yield scrapy.Request(url="next_url") ``` **Expected behavior:** Request should be yielded as per the documentation **Actual behavior:** Nothing happen **Reproduces how often:** In my local project and with fresh project ### Versions Windows ``` (globenv) C:\Users\johnl>scrapy version --verbose Scrapy : 1.8.0 lxml : 4.4.1.0 libxml2 : 2.9.5 cssselect : 1.1.0 parsel : 1.5.2 w3lib : 1.21.0 Twisted : 19.10.0 Python : 3.7.4 (tags/v3.7.4:e09359112e, Jul 8 2019, 19:29:22) [MSC v.1916 32 bit (Intel)] pyOpenSSL : 19.0.0 (OpenSSL 1.1.1c 28 May 2019) cryptography : 2.7 Platform : Windows-10-10.0.18362-SP0 ``` Linux ``` scrapy version --verbose Scrapy : 1.8.0 lxml : 4.4.1.0 libxml2 : 2.9.9 cssselect : 1.1.0 parsel : 1.5.2 w3lib : 1.21.0 Twisted : 19.7.0 Python : 3.6.8 (default, Oct 7 2019, 12:59:55) - [GCC 8.3.0] pyOpenSSL : 19.0.0 (OpenSSL 1.1.1d 10 Sep 2019) cryptography : 2.8 Platform : Linux-4.4.0-18362-Microsoft-x86_64-with-Ubuntu-18.04-bionic ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scrapy/commands/check.py` Content: ``` 1 import time 2 from collections import defaultdict 3 from unittest import TextTestRunner, TextTestResult as _TextTestResult 4 5 from scrapy.commands import ScrapyCommand 6 from scrapy.contracts import ContractsManager 7 from scrapy.utils.misc import load_object, set_environ 8 from scrapy.utils.conf import build_component_list 9 10 11 class TextTestResult(_TextTestResult): 12 def printSummary(self, start, stop): 13 write = self.stream.write 14 writeln = self.stream.writeln 15 16 run = self.testsRun 17 plural = "s" if run != 1 else "" 18 19 writeln(self.separator2) 20 writeln("Ran %d contract%s in %.3fs" % (run, plural, stop - start)) 21 writeln() 22 23 infos = [] 24 if not self.wasSuccessful(): 25 write("FAILED") 26 failed, errored = map(len, (self.failures, self.errors)) 27 if failed: 28 infos.append("failures=%d" % failed) 29 if errored: 30 infos.append("errors=%d" % errored) 31 else: 32 write("OK") 33 34 if infos: 35 writeln(" (%s)" % (", ".join(infos),)) 36 else: 37 write("\n") 38 39 40 class Command(ScrapyCommand): 41 requires_project = True 42 default_settings = {'LOG_ENABLED': False} 43 44 def syntax(self): 45 return "[options] <spider>" 46 47 def short_desc(self): 48 return "Check spider contracts" 49 50 def add_options(self, parser): 51 ScrapyCommand.add_options(self, parser) 52 parser.add_option("-l", "--list", dest="list", action="store_true", 53 help="only list contracts, without checking them") 54 parser.add_option("-v", "--verbose", dest="verbose", default=False, action='store_true', 55 help="print contract tests for all spiders") 56 57 def run(self, args, opts): 58 # load contracts 59 contracts = build_component_list(self.settings.getwithbase('SPIDER_CONTRACTS')) 60 conman = ContractsManager(load_object(c) for c in contracts) 61 runner = TextTestRunner(verbosity=2 if opts.verbose else 1) 62 result = TextTestResult(runner.stream, runner.descriptions, runner.verbosity) 63 64 # contract requests 65 contract_reqs = defaultdict(list) 66 67 spider_loader = self.crawler_process.spider_loader 68 69 with set_environ(SCRAPY_CHECK='true'): 70 for spidername in args or spider_loader.list(): 71 spidercls = spider_loader.load(spidername) 72 spidercls.start_requests = lambda s: conman.from_spider(s, result) 73 74 tested_methods = conman.tested_methods_from_spidercls(spidercls) 75 if opts.list: 76 for method in tested_methods: 77 contract_reqs[spidercls.name].append(method) 78 elif tested_methods: 79 self.crawler_process.crawl(spidercls) 80 81 # start checks 82 if opts.list: 83 for spider, methods in sorted(contract_reqs.items()): 84 if not methods and not opts.verbose: 85 continue 86 print(spider) 87 for method in sorted(methods): 88 print(' * %s' % method) 89 else: 90 start = time.time() 91 self.crawler_process.start() 92 stop = time.time() 93 94 result.printErrors() 95 result.printSummary(start, stop) 96 self.exitcode = int(not result.wasSuccessful()) 97 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scrapy/commands/check.py b/scrapy/commands/check.py --- a/scrapy/commands/check.py +++ b/scrapy/commands/check.py @@ -78,19 +78,19 @@ elif tested_methods: self.crawler_process.crawl(spidercls) - # start checks - if opts.list: - for spider, methods in sorted(contract_reqs.items()): - if not methods and not opts.verbose: - continue - print(spider) - for method in sorted(methods): - print(' * %s' % method) - else: - start = time.time() - self.crawler_process.start() - stop = time.time() - - result.printErrors() - result.printSummary(start, stop) - self.exitcode = int(not result.wasSuccessful()) + # start checks + if opts.list: + for spider, methods in sorted(contract_reqs.items()): + if not methods and not opts.verbose: + continue + print(spider) + for method in sorted(methods): + print(' * %s' % method) + else: + start = time.time() + self.crawler_process.start() + stop = time.time() + + result.printErrors() + result.printSummary(start, stop) + self.exitcode = int(not result.wasSuccessful())
{"golden_diff": "diff --git a/scrapy/commands/check.py b/scrapy/commands/check.py\n--- a/scrapy/commands/check.py\n+++ b/scrapy/commands/check.py\n@@ -78,19 +78,19 @@\n elif tested_methods:\n self.crawler_process.crawl(spidercls)\n \n- # start checks\n- if opts.list:\n- for spider, methods in sorted(contract_reqs.items()):\n- if not methods and not opts.verbose:\n- continue\n- print(spider)\n- for method in sorted(methods):\n- print(' * %s' % method)\n- else:\n- start = time.time()\n- self.crawler_process.start()\n- stop = time.time()\n-\n- result.printErrors()\n- result.printSummary(start, stop)\n- self.exitcode = int(not result.wasSuccessful())\n+ # start checks\n+ if opts.list:\n+ for spider, methods in sorted(contract_reqs.items()):\n+ if not methods and not opts.verbose:\n+ continue\n+ print(spider)\n+ for method in sorted(methods):\n+ print(' * %s' % method)\n+ else:\n+ start = time.time()\n+ self.crawler_process.start()\n+ stop = time.time()\n+\n+ result.printErrors()\n+ result.printSummary(start, stop)\n+ self.exitcode = int(not result.wasSuccessful())\n", "issue": "SCRAPY_CHECK is not set while running contract\n### Description\r\n\r\nHi, it seems that #3739 is not doing what the [documentation describe](https://docs.scrapy.org/en/latest/topics/contracts.html#detecting-check-runs):\r\n\r\n`os.environ.get('SCRAPY_CHECK')` is returning `None` in my contract check.\r\n\r\n### Steps to Reproduce\r\n\r\n1. Create a project from scratch\r\n2. Add a random spider\r\n3. Contract code is as follow\r\n```\r\n def parse(self, response):\r\n \"\"\"\r\n @url http://www.amazon.com/s?field-keywords=selfish+gene\r\n @returns requests 1 1\r\n \"\"\"\r\n print(\"test\", os.environ.get('SCRAPY_CHECK'))\r\n if os.environ.get('SCRAPY_CHECK'):\r\n yield scrapy.Request(url=\"next_url\")\r\n```\r\n\r\n**Expected behavior:** Request should be yielded as per the documentation\r\n\r\n**Actual behavior:** Nothing happen\r\n\r\n**Reproduces how often:** In my local project and with fresh project\r\n\r\n### Versions\r\n\r\nWindows\r\n```\r\n(globenv) C:\\Users\\johnl>scrapy version --verbose\r\nScrapy : 1.8.0\r\nlxml : 4.4.1.0\r\nlibxml2 : 2.9.5\r\ncssselect : 1.1.0\r\nparsel : 1.5.2\r\nw3lib : 1.21.0\r\nTwisted : 19.10.0\r\nPython : 3.7.4 (tags/v3.7.4:e09359112e, Jul 8 2019, 19:29:22) [MSC v.1916 32 bit (Intel)]\r\npyOpenSSL : 19.0.0 (OpenSSL 1.1.1c 28 May 2019)\r\ncryptography : 2.7\r\nPlatform : Windows-10-10.0.18362-SP0\r\n```\r\n\r\nLinux\r\n```\r\nscrapy version --verbose\r\nScrapy : 1.8.0\r\nlxml : 4.4.1.0\r\nlibxml2 : 2.9.9\r\ncssselect : 1.1.0\r\nparsel : 1.5.2\r\nw3lib : 1.21.0\r\nTwisted : 19.7.0\r\nPython : 3.6.8 (default, Oct 7 2019, 12:59:55) - [GCC 8.3.0]\r\npyOpenSSL : 19.0.0 (OpenSSL 1.1.1d 10 Sep 2019)\r\ncryptography : 2.8\r\nPlatform : Linux-4.4.0-18362-Microsoft-x86_64-with-Ubuntu-18.04-bionic\r\n```\r\n\n", "before_files": [{"content": "import time\nfrom collections import defaultdict\nfrom unittest import TextTestRunner, TextTestResult as _TextTestResult\n\nfrom scrapy.commands import ScrapyCommand\nfrom scrapy.contracts import ContractsManager\nfrom scrapy.utils.misc import load_object, set_environ\nfrom scrapy.utils.conf import build_component_list\n\n\nclass TextTestResult(_TextTestResult):\n def printSummary(self, start, stop):\n write = self.stream.write\n writeln = self.stream.writeln\n\n run = self.testsRun\n plural = \"s\" if run != 1 else \"\"\n\n writeln(self.separator2)\n writeln(\"Ran %d contract%s in %.3fs\" % (run, plural, stop - start))\n writeln()\n\n infos = []\n if not self.wasSuccessful():\n write(\"FAILED\")\n failed, errored = map(len, (self.failures, self.errors))\n if failed:\n infos.append(\"failures=%d\" % failed)\n if errored:\n infos.append(\"errors=%d\" % errored)\n else:\n write(\"OK\")\n\n if infos:\n writeln(\" (%s)\" % (\", \".join(infos),))\n else:\n write(\"\\n\")\n\n\nclass Command(ScrapyCommand):\n requires_project = True\n default_settings = {'LOG_ENABLED': False}\n\n def syntax(self):\n return \"[options] <spider>\"\n\n def short_desc(self):\n return \"Check spider contracts\"\n\n def add_options(self, parser):\n ScrapyCommand.add_options(self, parser)\n parser.add_option(\"-l\", \"--list\", dest=\"list\", action=\"store_true\",\n help=\"only list contracts, without checking them\")\n parser.add_option(\"-v\", \"--verbose\", dest=\"verbose\", default=False, action='store_true',\n help=\"print contract tests for all spiders\")\n\n def run(self, args, opts):\n # load contracts\n contracts = build_component_list(self.settings.getwithbase('SPIDER_CONTRACTS'))\n conman = ContractsManager(load_object(c) for c in contracts)\n runner = TextTestRunner(verbosity=2 if opts.verbose else 1)\n result = TextTestResult(runner.stream, runner.descriptions, runner.verbosity)\n\n # contract requests\n contract_reqs = defaultdict(list)\n\n spider_loader = self.crawler_process.spider_loader\n\n with set_environ(SCRAPY_CHECK='true'):\n for spidername in args or spider_loader.list():\n spidercls = spider_loader.load(spidername)\n spidercls.start_requests = lambda s: conman.from_spider(s, result)\n\n tested_methods = conman.tested_methods_from_spidercls(spidercls)\n if opts.list:\n for method in tested_methods:\n contract_reqs[spidercls.name].append(method)\n elif tested_methods:\n self.crawler_process.crawl(spidercls)\n\n # start checks\n if opts.list:\n for spider, methods in sorted(contract_reqs.items()):\n if not methods and not opts.verbose:\n continue\n print(spider)\n for method in sorted(methods):\n print(' * %s' % method)\n else:\n start = time.time()\n self.crawler_process.start()\n stop = time.time()\n\n result.printErrors()\n result.printSummary(start, stop)\n self.exitcode = int(not result.wasSuccessful())\n", "path": "scrapy/commands/check.py"}], "after_files": [{"content": "import time\nfrom collections import defaultdict\nfrom unittest import TextTestRunner, TextTestResult as _TextTestResult\n\nfrom scrapy.commands import ScrapyCommand\nfrom scrapy.contracts import ContractsManager\nfrom scrapy.utils.misc import load_object, set_environ\nfrom scrapy.utils.conf import build_component_list\n\n\nclass TextTestResult(_TextTestResult):\n def printSummary(self, start, stop):\n write = self.stream.write\n writeln = self.stream.writeln\n\n run = self.testsRun\n plural = \"s\" if run != 1 else \"\"\n\n writeln(self.separator2)\n writeln(\"Ran %d contract%s in %.3fs\" % (run, plural, stop - start))\n writeln()\n\n infos = []\n if not self.wasSuccessful():\n write(\"FAILED\")\n failed, errored = map(len, (self.failures, self.errors))\n if failed:\n infos.append(\"failures=%d\" % failed)\n if errored:\n infos.append(\"errors=%d\" % errored)\n else:\n write(\"OK\")\n\n if infos:\n writeln(\" (%s)\" % (\", \".join(infos),))\n else:\n write(\"\\n\")\n\n\nclass Command(ScrapyCommand):\n requires_project = True\n default_settings = {'LOG_ENABLED': False}\n\n def syntax(self):\n return \"[options] <spider>\"\n\n def short_desc(self):\n return \"Check spider contracts\"\n\n def add_options(self, parser):\n ScrapyCommand.add_options(self, parser)\n parser.add_option(\"-l\", \"--list\", dest=\"list\", action=\"store_true\",\n help=\"only list contracts, without checking them\")\n parser.add_option(\"-v\", \"--verbose\", dest=\"verbose\", default=False, action='store_true',\n help=\"print contract tests for all spiders\")\n\n def run(self, args, opts):\n # load contracts\n contracts = build_component_list(self.settings.getwithbase('SPIDER_CONTRACTS'))\n conman = ContractsManager(load_object(c) for c in contracts)\n runner = TextTestRunner(verbosity=2 if opts.verbose else 1)\n result = TextTestResult(runner.stream, runner.descriptions, runner.verbosity)\n\n # contract requests\n contract_reqs = defaultdict(list)\n\n spider_loader = self.crawler_process.spider_loader\n\n with set_environ(SCRAPY_CHECK='true'):\n for spidername in args or spider_loader.list():\n spidercls = spider_loader.load(spidername)\n spidercls.start_requests = lambda s: conman.from_spider(s, result)\n\n tested_methods = conman.tested_methods_from_spidercls(spidercls)\n if opts.list:\n for method in tested_methods:\n contract_reqs[spidercls.name].append(method)\n elif tested_methods:\n self.crawler_process.crawl(spidercls)\n\n # start checks\n if opts.list:\n for spider, methods in sorted(contract_reqs.items()):\n if not methods and not opts.verbose:\n continue\n print(spider)\n for method in sorted(methods):\n print(' * %s' % method)\n else:\n start = time.time()\n self.crawler_process.start()\n stop = time.time()\n\n result.printErrors()\n result.printSummary(start, stop)\n self.exitcode = int(not result.wasSuccessful())\n", "path": "scrapy/commands/check.py"}]}
1,832
309
gh_patches_debug_27393
rasdani/github-patches
git_diff
nltk__nltk-633
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Remove prob_parse from ParserI The ParserI methods `prob_parse` and `prob_parse_sents` are not used anywhere, and are not defined anywhere, so I propose to remove them from `nltk/parse/api.py` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `nltk/parse/api.py` Content: ``` 1 # Natural Language Toolkit: Parser API 2 # 3 # Copyright (C) 2001-2014 NLTK Project 4 # Author: Steven Bird <[email protected]> 5 # Edward Loper <[email protected]> 6 # URL: <http://nltk.org/> 7 # For license information, see LICENSE.TXT 8 # 9 10 import itertools 11 12 from nltk.internals import overridden 13 14 class ParserI(object): 15 """ 16 A processing class for deriving trees that represent possible 17 structures for a sequence of tokens. These tree structures are 18 known as "parses". Typically, parsers are used to derive syntax 19 trees for sentences. But parsers can also be used to derive other 20 kinds of tree structure, such as morphological trees and discourse 21 structures. 22 23 Subclasses must define: 24 - at least one of: ``parse()``, ``nbest_parse()``, ``iter_parse()``, 25 ``parse_sents()``, ``nbest_parse_sents()``, ``iter_parse_sents()``. 26 27 Subclasses may define: 28 - ``grammar()`` 29 - either ``prob_parse()`` or ``prob_parse_sents()`` (or both) 30 """ 31 def grammar(self): 32 """ 33 :return: The grammar used by this parser. 34 """ 35 raise NotImplementedError() 36 37 def parse(self, sent): 38 """ 39 :return: A parse tree that represents the structure of the 40 given sentence, or None if no parse tree is found. If 41 multiple parses are found, then return the best parse. 42 43 :param sent: The sentence to be parsed 44 :type sent: list(str) 45 :rtype: Tree 46 """ 47 if overridden(self.parse_sents): 48 return self.parse_sents([sent])[0] 49 else: 50 trees = self.nbest_parse(sent, 1) 51 if trees: return trees[0] 52 else: return None 53 54 def nbest_parse(self, sent, n=None): 55 """ 56 :return: A list of parse trees that represent possible 57 structures for the given sentence. When possible, this list is 58 sorted from most likely to least likely. If ``n`` is 59 specified, then the returned list will contain at most ``n`` 60 parse trees. 61 62 :param sent: The sentence to be parsed 63 :type sent: list(str) 64 :param n: The maximum number of trees to return. 65 :type n: int 66 :rtype: list(Tree) 67 """ 68 if overridden(self.nbest_parse_sents): 69 return self.nbest_parse_sents([sent],n)[0] 70 elif overridden(self.parse) or overridden(self.parse_sents): 71 tree = self.parse(sent) 72 if tree: return [tree] 73 else: return [] 74 else: 75 return list(itertools.islice(self.iter_parse(sent), n)) 76 77 def iter_parse(self, sent): 78 """ 79 :return: An iterator that generates parse trees that represent 80 possible structures for the given sentence. When possible, 81 this list is sorted from most likely to least likely. 82 83 :param sent: The sentence to be parsed 84 :type sent: list(str) 85 :rtype: iter(Tree) 86 """ 87 if overridden(self.iter_parse_sents): 88 return self.iter_parse_sents([sent])[0] 89 elif overridden(self.nbest_parse) or overridden(self.nbest_parse_sents): 90 return iter(self.nbest_parse(sent)) 91 elif overridden(self.parse) or overridden(self.parse_sents): 92 tree = self.parse(sent) 93 if tree: return iter([tree]) 94 else: return iter([]) 95 else: 96 raise NotImplementedError() 97 98 def prob_parse(self, sent): 99 """ 100 :return: A probability distribution over the possible parse 101 trees for the given sentence. If there are no possible parse 102 trees for the given sentence, return a probability distribution 103 that assigns a probability of 1.0 to None. 104 105 :param sent: The sentence to be parsed 106 :type sent: list(str) 107 :rtype: ProbDistI(Tree) 108 """ 109 if overridden(self.prob_parse_sents): 110 return self.prob_parse_sents([sent])[0] 111 else: 112 raise NotImplementedError 113 114 def parse_sents(self, sents): 115 """ 116 Apply ``self.parse()`` to each element of ``sents``. I.e.: 117 118 return [self.parse(sent) for sent in sents] 119 120 :rtype: list(Tree) 121 """ 122 return [self.parse(sent) for sent in sents] 123 124 def nbest_parse_sents(self, sents, n=None): 125 """ 126 Apply ``self.nbest_parse()`` to each element of ``sents``. I.e.: 127 128 return [self.nbest_parse(sent, n) for sent in sents] 129 130 :rtype: list(list(Tree)) 131 """ 132 return [self.nbest_parse(sent,n ) for sent in sents] 133 134 def iter_parse_sents(self, sents): 135 """ 136 Apply ``self.iter_parse()`` to each element of ``sents``. I.e.: 137 138 return [self.iter_parse(sent) for sent in sents] 139 140 :rtype: list(iter(Tree)) 141 """ 142 return [self.iter_parse(sent) for sent in sents] 143 144 def prob_parse_sents(self, sents): 145 """ 146 Apply ``self.prob_parse()`` to each element of ``sents``. I.e.: 147 148 return [self.prob_parse(sent) for sent in sents] 149 150 :rtype: list(ProbDistI(Tree)) 151 """ 152 return [self.prob_parse(sent) for sent in sents] 153 154 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/nltk/parse/api.py b/nltk/parse/api.py --- a/nltk/parse/api.py +++ b/nltk/parse/api.py @@ -26,7 +26,6 @@ Subclasses may define: - ``grammar()`` - - either ``prob_parse()`` or ``prob_parse_sents()`` (or both) """ def grammar(self): """ @@ -95,22 +94,6 @@ else: raise NotImplementedError() - def prob_parse(self, sent): - """ - :return: A probability distribution over the possible parse - trees for the given sentence. If there are no possible parse - trees for the given sentence, return a probability distribution - that assigns a probability of 1.0 to None. - - :param sent: The sentence to be parsed - :type sent: list(str) - :rtype: ProbDistI(Tree) - """ - if overridden(self.prob_parse_sents): - return self.prob_parse_sents([sent])[0] - else: - raise NotImplementedError - def parse_sents(self, sents): """ Apply ``self.parse()`` to each element of ``sents``. I.e.: @@ -141,13 +124,3 @@ """ return [self.iter_parse(sent) for sent in sents] - def prob_parse_sents(self, sents): - """ - Apply ``self.prob_parse()`` to each element of ``sents``. I.e.: - - return [self.prob_parse(sent) for sent in sents] - - :rtype: list(ProbDistI(Tree)) - """ - return [self.prob_parse(sent) for sent in sents] -
{"golden_diff": "diff --git a/nltk/parse/api.py b/nltk/parse/api.py\n--- a/nltk/parse/api.py\n+++ b/nltk/parse/api.py\n@@ -26,7 +26,6 @@\n \n Subclasses may define:\n - ``grammar()``\n- - either ``prob_parse()`` or ``prob_parse_sents()`` (or both)\n \"\"\"\n def grammar(self):\n \"\"\"\n@@ -95,22 +94,6 @@\n else:\n raise NotImplementedError()\n \n- def prob_parse(self, sent):\n- \"\"\"\n- :return: A probability distribution over the possible parse\n- trees for the given sentence. If there are no possible parse\n- trees for the given sentence, return a probability distribution\n- that assigns a probability of 1.0 to None.\n-\n- :param sent: The sentence to be parsed\n- :type sent: list(str)\n- :rtype: ProbDistI(Tree)\n- \"\"\"\n- if overridden(self.prob_parse_sents):\n- return self.prob_parse_sents([sent])[0]\n- else:\n- raise NotImplementedError\n-\n def parse_sents(self, sents):\n \"\"\"\n Apply ``self.parse()`` to each element of ``sents``. I.e.:\n@@ -141,13 +124,3 @@\n \"\"\"\n return [self.iter_parse(sent) for sent in sents]\n \n- def prob_parse_sents(self, sents):\n- \"\"\"\n- Apply ``self.prob_parse()`` to each element of ``sents``. I.e.:\n-\n- return [self.prob_parse(sent) for sent in sents]\n-\n- :rtype: list(ProbDistI(Tree))\n- \"\"\"\n- return [self.prob_parse(sent) for sent in sents]\n-\n", "issue": "Remove prob_parse from ParserI\nThe ParserI methods `prob_parse` and `prob_parse_sents` are not used anywhere, and are not defined anywhere, so I propose to remove them from `nltk/parse/api.py`\n\n", "before_files": [{"content": "# Natural Language Toolkit: Parser API\n#\n# Copyright (C) 2001-2014 NLTK Project\n# Author: Steven Bird <[email protected]>\n# Edward Loper <[email protected]>\n# URL: <http://nltk.org/>\n# For license information, see LICENSE.TXT\n#\n\nimport itertools\n\nfrom nltk.internals import overridden\n\nclass ParserI(object):\n \"\"\"\n A processing class for deriving trees that represent possible\n structures for a sequence of tokens. These tree structures are\n known as \"parses\". Typically, parsers are used to derive syntax\n trees for sentences. But parsers can also be used to derive other\n kinds of tree structure, such as morphological trees and discourse\n structures.\n\n Subclasses must define:\n - at least one of: ``parse()``, ``nbest_parse()``, ``iter_parse()``,\n ``parse_sents()``, ``nbest_parse_sents()``, ``iter_parse_sents()``.\n\n Subclasses may define:\n - ``grammar()``\n - either ``prob_parse()`` or ``prob_parse_sents()`` (or both)\n \"\"\"\n def grammar(self):\n \"\"\"\n :return: The grammar used by this parser.\n \"\"\"\n raise NotImplementedError()\n\n def parse(self, sent):\n \"\"\"\n :return: A parse tree that represents the structure of the\n given sentence, or None if no parse tree is found. If\n multiple parses are found, then return the best parse.\n\n :param sent: The sentence to be parsed\n :type sent: list(str)\n :rtype: Tree\n \"\"\"\n if overridden(self.parse_sents):\n return self.parse_sents([sent])[0]\n else:\n trees = self.nbest_parse(sent, 1)\n if trees: return trees[0]\n else: return None\n\n def nbest_parse(self, sent, n=None):\n \"\"\"\n :return: A list of parse trees that represent possible\n structures for the given sentence. When possible, this list is\n sorted from most likely to least likely. If ``n`` is\n specified, then the returned list will contain at most ``n``\n parse trees.\n\n :param sent: The sentence to be parsed\n :type sent: list(str)\n :param n: The maximum number of trees to return.\n :type n: int\n :rtype: list(Tree)\n \"\"\"\n if overridden(self.nbest_parse_sents):\n return self.nbest_parse_sents([sent],n)[0]\n elif overridden(self.parse) or overridden(self.parse_sents):\n tree = self.parse(sent)\n if tree: return [tree]\n else: return []\n else:\n return list(itertools.islice(self.iter_parse(sent), n))\n\n def iter_parse(self, sent):\n \"\"\"\n :return: An iterator that generates parse trees that represent\n possible structures for the given sentence. When possible,\n this list is sorted from most likely to least likely.\n\n :param sent: The sentence to be parsed\n :type sent: list(str)\n :rtype: iter(Tree)\n \"\"\"\n if overridden(self.iter_parse_sents):\n return self.iter_parse_sents([sent])[0]\n elif overridden(self.nbest_parse) or overridden(self.nbest_parse_sents):\n return iter(self.nbest_parse(sent))\n elif overridden(self.parse) or overridden(self.parse_sents):\n tree = self.parse(sent)\n if tree: return iter([tree])\n else: return iter([])\n else:\n raise NotImplementedError()\n\n def prob_parse(self, sent):\n \"\"\"\n :return: A probability distribution over the possible parse\n trees for the given sentence. If there are no possible parse\n trees for the given sentence, return a probability distribution\n that assigns a probability of 1.0 to None.\n\n :param sent: The sentence to be parsed\n :type sent: list(str)\n :rtype: ProbDistI(Tree)\n \"\"\"\n if overridden(self.prob_parse_sents):\n return self.prob_parse_sents([sent])[0]\n else:\n raise NotImplementedError\n\n def parse_sents(self, sents):\n \"\"\"\n Apply ``self.parse()`` to each element of ``sents``. I.e.:\n\n return [self.parse(sent) for sent in sents]\n\n :rtype: list(Tree)\n \"\"\"\n return [self.parse(sent) for sent in sents]\n\n def nbest_parse_sents(self, sents, n=None):\n \"\"\"\n Apply ``self.nbest_parse()`` to each element of ``sents``. I.e.:\n\n return [self.nbest_parse(sent, n) for sent in sents]\n\n :rtype: list(list(Tree))\n \"\"\"\n return [self.nbest_parse(sent,n ) for sent in sents]\n\n def iter_parse_sents(self, sents):\n \"\"\"\n Apply ``self.iter_parse()`` to each element of ``sents``. I.e.:\n\n return [self.iter_parse(sent) for sent in sents]\n\n :rtype: list(iter(Tree))\n \"\"\"\n return [self.iter_parse(sent) for sent in sents]\n\n def prob_parse_sents(self, sents):\n \"\"\"\n Apply ``self.prob_parse()`` to each element of ``sents``. I.e.:\n\n return [self.prob_parse(sent) for sent in sents]\n\n :rtype: list(ProbDistI(Tree))\n \"\"\"\n return [self.prob_parse(sent) for sent in sents]\n\n", "path": "nltk/parse/api.py"}], "after_files": [{"content": "# Natural Language Toolkit: Parser API\n#\n# Copyright (C) 2001-2014 NLTK Project\n# Author: Steven Bird <[email protected]>\n# Edward Loper <[email protected]>\n# URL: <http://nltk.org/>\n# For license information, see LICENSE.TXT\n#\n\nimport itertools\n\nfrom nltk.internals import overridden\n\nclass ParserI(object):\n \"\"\"\n A processing class for deriving trees that represent possible\n structures for a sequence of tokens. These tree structures are\n known as \"parses\". Typically, parsers are used to derive syntax\n trees for sentences. But parsers can also be used to derive other\n kinds of tree structure, such as morphological trees and discourse\n structures.\n\n Subclasses must define:\n - at least one of: ``parse()``, ``nbest_parse()``, ``iter_parse()``,\n ``parse_sents()``, ``nbest_parse_sents()``, ``iter_parse_sents()``.\n\n Subclasses may define:\n - ``grammar()``\n \"\"\"\n def grammar(self):\n \"\"\"\n :return: The grammar used by this parser.\n \"\"\"\n raise NotImplementedError()\n\n def parse(self, sent):\n \"\"\"\n :return: A parse tree that represents the structure of the\n given sentence, or None if no parse tree is found. If\n multiple parses are found, then return the best parse.\n\n :param sent: The sentence to be parsed\n :type sent: list(str)\n :rtype: Tree\n \"\"\"\n if overridden(self.parse_sents):\n return self.parse_sents([sent])[0]\n else:\n trees = self.nbest_parse(sent, 1)\n if trees: return trees[0]\n else: return None\n\n def nbest_parse(self, sent, n=None):\n \"\"\"\n :return: A list of parse trees that represent possible\n structures for the given sentence. When possible, this list is\n sorted from most likely to least likely. If ``n`` is\n specified, then the returned list will contain at most ``n``\n parse trees.\n\n :param sent: The sentence to be parsed\n :type sent: list(str)\n :param n: The maximum number of trees to return.\n :type n: int\n :rtype: list(Tree)\n \"\"\"\n if overridden(self.nbest_parse_sents):\n return self.nbest_parse_sents([sent],n)[0]\n elif overridden(self.parse) or overridden(self.parse_sents):\n tree = self.parse(sent)\n if tree: return [tree]\n else: return []\n else:\n return list(itertools.islice(self.iter_parse(sent), n))\n\n def iter_parse(self, sent):\n \"\"\"\n :return: An iterator that generates parse trees that represent\n possible structures for the given sentence. When possible,\n this list is sorted from most likely to least likely.\n\n :param sent: The sentence to be parsed\n :type sent: list(str)\n :rtype: iter(Tree)\n \"\"\"\n if overridden(self.iter_parse_sents):\n return self.iter_parse_sents([sent])[0]\n elif overridden(self.nbest_parse) or overridden(self.nbest_parse_sents):\n return iter(self.nbest_parse(sent))\n elif overridden(self.parse) or overridden(self.parse_sents):\n tree = self.parse(sent)\n if tree: return iter([tree])\n else: return iter([])\n else:\n raise NotImplementedError()\n\n def parse_sents(self, sents):\n \"\"\"\n Apply ``self.parse()`` to each element of ``sents``. I.e.:\n\n return [self.parse(sent) for sent in sents]\n\n :rtype: list(Tree)\n \"\"\"\n return [self.parse(sent) for sent in sents]\n\n def nbest_parse_sents(self, sents, n=None):\n \"\"\"\n Apply ``self.nbest_parse()`` to each element of ``sents``. I.e.:\n\n return [self.nbest_parse(sent, n) for sent in sents]\n\n :rtype: list(list(Tree))\n \"\"\"\n return [self.nbest_parse(sent,n ) for sent in sents]\n\n def iter_parse_sents(self, sents):\n \"\"\"\n Apply ``self.iter_parse()`` to each element of ``sents``. I.e.:\n\n return [self.iter_parse(sent) for sent in sents]\n\n :rtype: list(iter(Tree))\n \"\"\"\n return [self.iter_parse(sent) for sent in sents]\n\n", "path": "nltk/parse/api.py"}]}
1,873
399
gh_patches_debug_13783
rasdani/github-patches
git_diff
pyca__cryptography-8260
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Remove verify_interface Now that `register_interface` is gone we have no use for `verify_interface`, but https://github.com/aws/aws-encryption-sdk-python/issues/464 is a blocker for removing it. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/cryptography/utils.py` Content: ``` 1 # This file is dual licensed under the terms of the Apache License, Version 2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository 3 # for complete details. 4 5 6 import abc 7 import enum 8 import sys 9 import types 10 import typing 11 import warnings 12 13 14 # We use a UserWarning subclass, instead of DeprecationWarning, because CPython 15 # decided deprecation warnings should be invisble by default. 16 class CryptographyDeprecationWarning(UserWarning): 17 pass 18 19 20 # Several APIs were deprecated with no specific end-of-life date because of the 21 # ubiquity of their use. They should not be removed until we agree on when that 22 # cycle ends. 23 DeprecatedIn36 = CryptographyDeprecationWarning 24 DeprecatedIn37 = CryptographyDeprecationWarning 25 DeprecatedIn39 = CryptographyDeprecationWarning 26 DeprecatedIn40 = CryptographyDeprecationWarning 27 28 29 def _check_bytes(name: str, value: bytes) -> None: 30 if not isinstance(value, bytes): 31 raise TypeError(f"{name} must be bytes") 32 33 34 def _check_byteslike(name: str, value: bytes) -> None: 35 try: 36 memoryview(value) 37 except TypeError: 38 raise TypeError(f"{name} must be bytes-like") 39 40 41 def int_to_bytes(integer: int, length: typing.Optional[int] = None) -> bytes: 42 return integer.to_bytes( 43 length or (integer.bit_length() + 7) // 8 or 1, "big" 44 ) 45 46 47 class InterfaceNotImplemented(Exception): 48 pass 49 50 51 # DeprecatedIn39 -- Our only known consumer is aws-encryption-sdk, but we've 52 # made this a no-op to avoid breaking old versions. 53 def verify_interface( 54 iface: abc.ABCMeta, klass: object, *, check_annotations: bool = False 55 ): 56 # Exists exclusively for `aws-encryption-sdk` which relies on it existing, 57 # even though it was never a public API. 58 pass 59 60 61 class _DeprecatedValue: 62 def __init__(self, value: object, message: str, warning_class): 63 self.value = value 64 self.message = message 65 self.warning_class = warning_class 66 67 68 class _ModuleWithDeprecations(types.ModuleType): 69 def __init__(self, module: types.ModuleType): 70 super().__init__(module.__name__) 71 self.__dict__["_module"] = module 72 73 def __getattr__(self, attr: str) -> object: 74 obj = getattr(self._module, attr) 75 if isinstance(obj, _DeprecatedValue): 76 warnings.warn(obj.message, obj.warning_class, stacklevel=2) 77 obj = obj.value 78 return obj 79 80 def __setattr__(self, attr: str, value: object) -> None: 81 setattr(self._module, attr, value) 82 83 def __delattr__(self, attr: str) -> None: 84 obj = getattr(self._module, attr) 85 if isinstance(obj, _DeprecatedValue): 86 warnings.warn(obj.message, obj.warning_class, stacklevel=2) 87 88 delattr(self._module, attr) 89 90 def __dir__(self) -> typing.Sequence[str]: 91 return ["_module"] + dir(self._module) 92 93 94 def deprecated( 95 value: object, 96 module_name: str, 97 message: str, 98 warning_class: typing.Type[Warning], 99 name: typing.Optional[str] = None, 100 ) -> _DeprecatedValue: 101 module = sys.modules[module_name] 102 if not isinstance(module, _ModuleWithDeprecations): 103 sys.modules[module_name] = module = _ModuleWithDeprecations(module) 104 dv = _DeprecatedValue(value, message, warning_class) 105 # Maintain backwards compatibility with `name is None` for pyOpenSSL. 106 if name is not None: 107 setattr(module, name, dv) 108 return dv 109 110 111 def cached_property(func: typing.Callable) -> property: 112 cached_name = f"_cached_{func}" 113 sentinel = object() 114 115 def inner(instance: object): 116 cache = getattr(instance, cached_name, sentinel) 117 if cache is not sentinel: 118 return cache 119 result = func(instance) 120 setattr(instance, cached_name, result) 121 return result 122 123 return property(inner) 124 125 126 # Python 3.10 changed representation of enums. We use well-defined object 127 # representation and string representation from Python 3.9. 128 class Enum(enum.Enum): 129 def __repr__(self) -> str: 130 return f"<{self.__class__.__name__}.{self._name_}: {self._value_!r}>" 131 132 def __str__(self) -> str: 133 return f"{self.__class__.__name__}.{self._name_}" 134 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/cryptography/utils.py b/src/cryptography/utils.py --- a/src/cryptography/utils.py +++ b/src/cryptography/utils.py @@ -3,7 +3,6 @@ # for complete details. -import abc import enum import sys import types @@ -48,16 +47,6 @@ pass -# DeprecatedIn39 -- Our only known consumer is aws-encryption-sdk, but we've -# made this a no-op to avoid breaking old versions. -def verify_interface( - iface: abc.ABCMeta, klass: object, *, check_annotations: bool = False -): - # Exists exclusively for `aws-encryption-sdk` which relies on it existing, - # even though it was never a public API. - pass - - class _DeprecatedValue: def __init__(self, value: object, message: str, warning_class): self.value = value
{"golden_diff": "diff --git a/src/cryptography/utils.py b/src/cryptography/utils.py\n--- a/src/cryptography/utils.py\n+++ b/src/cryptography/utils.py\n@@ -3,7 +3,6 @@\n # for complete details.\n \n \n-import abc\n import enum\n import sys\n import types\n@@ -48,16 +47,6 @@\n pass\n \n \n-# DeprecatedIn39 -- Our only known consumer is aws-encryption-sdk, but we've\n-# made this a no-op to avoid breaking old versions.\n-def verify_interface(\n- iface: abc.ABCMeta, klass: object, *, check_annotations: bool = False\n-):\n- # Exists exclusively for `aws-encryption-sdk` which relies on it existing,\n- # even though it was never a public API.\n- pass\n-\n-\n class _DeprecatedValue:\n def __init__(self, value: object, message: str, warning_class):\n self.value = value\n", "issue": "Remove verify_interface\nNow that `register_interface` is gone we have no use for `verify_interface`, but https://github.com/aws/aws-encryption-sdk-python/issues/464 is a blocker for removing it.\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\n\nimport abc\nimport enum\nimport sys\nimport types\nimport typing\nimport warnings\n\n\n# We use a UserWarning subclass, instead of DeprecationWarning, because CPython\n# decided deprecation warnings should be invisble by default.\nclass CryptographyDeprecationWarning(UserWarning):\n pass\n\n\n# Several APIs were deprecated with no specific end-of-life date because of the\n# ubiquity of their use. They should not be removed until we agree on when that\n# cycle ends.\nDeprecatedIn36 = CryptographyDeprecationWarning\nDeprecatedIn37 = CryptographyDeprecationWarning\nDeprecatedIn39 = CryptographyDeprecationWarning\nDeprecatedIn40 = CryptographyDeprecationWarning\n\n\ndef _check_bytes(name: str, value: bytes) -> None:\n if not isinstance(value, bytes):\n raise TypeError(f\"{name} must be bytes\")\n\n\ndef _check_byteslike(name: str, value: bytes) -> None:\n try:\n memoryview(value)\n except TypeError:\n raise TypeError(f\"{name} must be bytes-like\")\n\n\ndef int_to_bytes(integer: int, length: typing.Optional[int] = None) -> bytes:\n return integer.to_bytes(\n length or (integer.bit_length() + 7) // 8 or 1, \"big\"\n )\n\n\nclass InterfaceNotImplemented(Exception):\n pass\n\n\n# DeprecatedIn39 -- Our only known consumer is aws-encryption-sdk, but we've\n# made this a no-op to avoid breaking old versions.\ndef verify_interface(\n iface: abc.ABCMeta, klass: object, *, check_annotations: bool = False\n):\n # Exists exclusively for `aws-encryption-sdk` which relies on it existing,\n # even though it was never a public API.\n pass\n\n\nclass _DeprecatedValue:\n def __init__(self, value: object, message: str, warning_class):\n self.value = value\n self.message = message\n self.warning_class = warning_class\n\n\nclass _ModuleWithDeprecations(types.ModuleType):\n def __init__(self, module: types.ModuleType):\n super().__init__(module.__name__)\n self.__dict__[\"_module\"] = module\n\n def __getattr__(self, attr: str) -> object:\n obj = getattr(self._module, attr)\n if isinstance(obj, _DeprecatedValue):\n warnings.warn(obj.message, obj.warning_class, stacklevel=2)\n obj = obj.value\n return obj\n\n def __setattr__(self, attr: str, value: object) -> None:\n setattr(self._module, attr, value)\n\n def __delattr__(self, attr: str) -> None:\n obj = getattr(self._module, attr)\n if isinstance(obj, _DeprecatedValue):\n warnings.warn(obj.message, obj.warning_class, stacklevel=2)\n\n delattr(self._module, attr)\n\n def __dir__(self) -> typing.Sequence[str]:\n return [\"_module\"] + dir(self._module)\n\n\ndef deprecated(\n value: object,\n module_name: str,\n message: str,\n warning_class: typing.Type[Warning],\n name: typing.Optional[str] = None,\n) -> _DeprecatedValue:\n module = sys.modules[module_name]\n if not isinstance(module, _ModuleWithDeprecations):\n sys.modules[module_name] = module = _ModuleWithDeprecations(module)\n dv = _DeprecatedValue(value, message, warning_class)\n # Maintain backwards compatibility with `name is None` for pyOpenSSL.\n if name is not None:\n setattr(module, name, dv)\n return dv\n\n\ndef cached_property(func: typing.Callable) -> property:\n cached_name = f\"_cached_{func}\"\n sentinel = object()\n\n def inner(instance: object):\n cache = getattr(instance, cached_name, sentinel)\n if cache is not sentinel:\n return cache\n result = func(instance)\n setattr(instance, cached_name, result)\n return result\n\n return property(inner)\n\n\n# Python 3.10 changed representation of enums. We use well-defined object\n# representation and string representation from Python 3.9.\nclass Enum(enum.Enum):\n def __repr__(self) -> str:\n return f\"<{self.__class__.__name__}.{self._name_}: {self._value_!r}>\"\n\n def __str__(self) -> str:\n return f\"{self.__class__.__name__}.{self._name_}\"\n", "path": "src/cryptography/utils.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\n\nimport enum\nimport sys\nimport types\nimport typing\nimport warnings\n\n\n# We use a UserWarning subclass, instead of DeprecationWarning, because CPython\n# decided deprecation warnings should be invisble by default.\nclass CryptographyDeprecationWarning(UserWarning):\n pass\n\n\n# Several APIs were deprecated with no specific end-of-life date because of the\n# ubiquity of their use. They should not be removed until we agree on when that\n# cycle ends.\nDeprecatedIn36 = CryptographyDeprecationWarning\nDeprecatedIn37 = CryptographyDeprecationWarning\nDeprecatedIn39 = CryptographyDeprecationWarning\nDeprecatedIn40 = CryptographyDeprecationWarning\n\n\ndef _check_bytes(name: str, value: bytes) -> None:\n if not isinstance(value, bytes):\n raise TypeError(f\"{name} must be bytes\")\n\n\ndef _check_byteslike(name: str, value: bytes) -> None:\n try:\n memoryview(value)\n except TypeError:\n raise TypeError(f\"{name} must be bytes-like\")\n\n\ndef int_to_bytes(integer: int, length: typing.Optional[int] = None) -> bytes:\n return integer.to_bytes(\n length or (integer.bit_length() + 7) // 8 or 1, \"big\"\n )\n\n\nclass InterfaceNotImplemented(Exception):\n pass\n\n\nclass _DeprecatedValue:\n def __init__(self, value: object, message: str, warning_class):\n self.value = value\n self.message = message\n self.warning_class = warning_class\n\n\nclass _ModuleWithDeprecations(types.ModuleType):\n def __init__(self, module: types.ModuleType):\n super().__init__(module.__name__)\n self.__dict__[\"_module\"] = module\n\n def __getattr__(self, attr: str) -> object:\n obj = getattr(self._module, attr)\n if isinstance(obj, _DeprecatedValue):\n warnings.warn(obj.message, obj.warning_class, stacklevel=2)\n obj = obj.value\n return obj\n\n def __setattr__(self, attr: str, value: object) -> None:\n setattr(self._module, attr, value)\n\n def __delattr__(self, attr: str) -> None:\n obj = getattr(self._module, attr)\n if isinstance(obj, _DeprecatedValue):\n warnings.warn(obj.message, obj.warning_class, stacklevel=2)\n\n delattr(self._module, attr)\n\n def __dir__(self) -> typing.Sequence[str]:\n return [\"_module\"] + dir(self._module)\n\n\ndef deprecated(\n value: object,\n module_name: str,\n message: str,\n warning_class: typing.Type[Warning],\n name: typing.Optional[str] = None,\n) -> _DeprecatedValue:\n module = sys.modules[module_name]\n if not isinstance(module, _ModuleWithDeprecations):\n sys.modules[module_name] = module = _ModuleWithDeprecations(module)\n dv = _DeprecatedValue(value, message, warning_class)\n # Maintain backwards compatibility with `name is None` for pyOpenSSL.\n if name is not None:\n setattr(module, name, dv)\n return dv\n\n\ndef cached_property(func: typing.Callable) -> property:\n cached_name = f\"_cached_{func}\"\n sentinel = object()\n\n def inner(instance: object):\n cache = getattr(instance, cached_name, sentinel)\n if cache is not sentinel:\n return cache\n result = func(instance)\n setattr(instance, cached_name, result)\n return result\n\n return property(inner)\n\n\n# Python 3.10 changed representation of enums. We use well-defined object\n# representation and string representation from Python 3.9.\nclass Enum(enum.Enum):\n def __repr__(self) -> str:\n return f\"<{self.__class__.__name__}.{self._name_}: {self._value_!r}>\"\n\n def __str__(self) -> str:\n return f\"{self.__class__.__name__}.{self._name_}\"\n", "path": "src/cryptography/utils.py"}]}
1,608
203
gh_patches_debug_1940
rasdani/github-patches
git_diff
scikit-hep__pyhf-941
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- use short URL for better help message The current help msg has a long url, but this includes line breaks which makes it hard to copy. ``` pyhf cls --help Usage: pyhf cls [OPTIONS] [WORKSPACE] Compute CLs value(s) for a given pyhf workspace. Example: .. code-block:: shell $ curl -sL https://raw.githubusercontent.com/scikit- hep/pyhf/master/docs/examples/json/2-bin_1-channel.json | pyhf cls { "CLs_exp": [ 0.07807427911686156, 0.17472571775474618, 0.35998495263681285, 0.6343568235898907, 0.8809947004472013 ], "CLs_obs": 0.3599845631401915 } Options: --output-file TEXT The location of the output json file. If not specified, prints to screen. --measurement TEXT -p, --patch TEXT --testpoi FLOAT --teststat [q|qtilde] --backend [numpy|pytorch|tensorflow|jax|np|torch|tf] The tensor backend used for the calculation. --optimizer TEXT --optconf EQUAL-DELIMITED OPTION -h, --help Show this message and exit. ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/pyhf/cli/infer.py` Content: ``` 1 """The inference CLI group.""" 2 import logging 3 4 import click 5 import json 6 7 from ..utils import EqDelimStringParamType 8 from ..infer import hypotest 9 from ..workspace import Workspace 10 from .. import tensor, get_backend, set_backend, optimize 11 12 log = logging.getLogger(__name__) 13 14 15 @click.group(name='infer') 16 def cli(): 17 """Infererence CLI group.""" 18 19 20 @cli.command() 21 @click.argument('workspace', default='-') 22 @click.option( 23 '--output-file', 24 help='The location of the output json file. If not specified, prints to screen.', 25 default=None, 26 ) 27 @click.option('--measurement', default=None) 28 @click.option('-p', '--patch', multiple=True) 29 @click.option('--testpoi', default=1.0) 30 @click.option('--teststat', type=click.Choice(['q', 'qtilde']), default='qtilde') 31 @click.option( 32 '--backend', 33 type=click.Choice(['numpy', 'pytorch', 'tensorflow', 'jax', 'np', 'torch', 'tf']), 34 help='The tensor backend used for the calculation.', 35 default='numpy', 36 ) 37 @click.option('--optimizer') 38 @click.option('--optconf', type=EqDelimStringParamType(), multiple=True) 39 def cls( 40 workspace, 41 output_file, 42 measurement, 43 patch, 44 testpoi, 45 teststat, 46 backend, 47 optimizer, 48 optconf, 49 ): 50 """ 51 Compute CLs value(s) for a given pyhf workspace. 52 53 Example: 54 55 .. code-block:: shell 56 57 $ curl -sL https://raw.githubusercontent.com/scikit-hep/pyhf/master/docs/examples/json/2-bin_1-channel.json | pyhf cls 58 { 59 "CLs_exp": [ 60 0.07807427911686156, 61 0.17472571775474618, 62 0.35998495263681285, 63 0.6343568235898907, 64 0.8809947004472013 65 ], 66 "CLs_obs": 0.3599845631401915 67 } 68 """ 69 with click.open_file(workspace, 'r') as specstream: 70 spec = json.load(specstream) 71 72 ws = Workspace(spec) 73 74 is_qtilde = teststat == 'qtilde' 75 76 patches = [json.loads(click.open_file(pfile, 'r').read()) for pfile in patch] 77 model = ws.model( 78 measurement_name=measurement, 79 patches=patches, 80 modifier_settings={ 81 'normsys': {'interpcode': 'code4'}, 82 'histosys': {'interpcode': 'code4p'}, 83 }, 84 ) 85 86 # set the backend if not NumPy 87 if backend in ['pytorch', 'torch']: 88 set_backend(tensor.pytorch_backend(precision='64b')) 89 elif backend in ['tensorflow', 'tf']: 90 set_backend(tensor.tensorflow_backend(precision='64b')) 91 elif backend in ['jax']: 92 set_backend(tensor.jax_backend()) 93 tensorlib, _ = get_backend() 94 95 optconf = {k: v for item in optconf for k, v in item.items()} 96 97 # set the new optimizer 98 if optimizer: 99 new_optimizer = getattr(optimize, optimizer) 100 set_backend(tensorlib, new_optimizer(**optconf)) 101 102 result = hypotest( 103 testpoi, ws.data(model), model, qtilde=is_qtilde, return_expected_set=True 104 ) 105 result = { 106 'CLs_obs': tensorlib.tolist(result[0])[0], 107 'CLs_exp': tensorlib.tolist(tensorlib.reshape(result[-1], [-1])), 108 } 109 110 if output_file is None: 111 click.echo(json.dumps(result, indent=4, sort_keys=True)) 112 else: 113 with open(output_file, 'w+') as out_file: 114 json.dump(result, out_file, indent=4, sort_keys=True) 115 log.debug("Written to {0:s}".format(output_file)) 116 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/pyhf/cli/infer.py b/src/pyhf/cli/infer.py --- a/src/pyhf/cli/infer.py +++ b/src/pyhf/cli/infer.py @@ -54,7 +54,9 @@ .. code-block:: shell - $ curl -sL https://raw.githubusercontent.com/scikit-hep/pyhf/master/docs/examples/json/2-bin_1-channel.json | pyhf cls + $ curl -sL https://git.io/JJYDE | pyhf cls + + \b { "CLs_exp": [ 0.07807427911686156,
{"golden_diff": "diff --git a/src/pyhf/cli/infer.py b/src/pyhf/cli/infer.py\n--- a/src/pyhf/cli/infer.py\n+++ b/src/pyhf/cli/infer.py\n@@ -54,7 +54,9 @@\n \n .. code-block:: shell\n \n- $ curl -sL https://raw.githubusercontent.com/scikit-hep/pyhf/master/docs/examples/json/2-bin_1-channel.json | pyhf cls\n+ $ curl -sL https://git.io/JJYDE | pyhf cls\n+\n+ \\b\n {\n \"CLs_exp\": [\n 0.07807427911686156,\n", "issue": "use short URL for better help message\nThe current help msg has a long url, but this includes line breaks\r\nwhich makes it hard to copy. \r\n\r\n```\r\npyhf cls --help \r\nUsage: pyhf cls [OPTIONS] [WORKSPACE]\r\n\r\n Compute CLs value(s) for a given pyhf workspace.\r\n\r\n Example:\r\n\r\n .. code-block:: shell\r\n\r\n $ curl -sL https://raw.githubusercontent.com/scikit-\r\n hep/pyhf/master/docs/examples/json/2-bin_1-channel.json | pyhf cls\r\n { \"CLs_exp\": [ 0.07807427911686156,\r\n 0.17472571775474618, 0.35998495263681285,\r\n 0.6343568235898907, 0.8809947004472013 ],\r\n \"CLs_obs\": 0.3599845631401915 }\r\n\r\nOptions:\r\n --output-file TEXT The location of the output json file. If not\r\n specified, prints to screen.\r\n\r\n --measurement TEXT\r\n -p, --patch TEXT\r\n --testpoi FLOAT\r\n --teststat [q|qtilde]\r\n --backend [numpy|pytorch|tensorflow|jax|np|torch|tf]\r\n The tensor backend used for the calculation.\r\n --optimizer TEXT\r\n --optconf EQUAL-DELIMITED OPTION\r\n -h, --help Show this message and exit.\r\n\r\n```\n", "before_files": [{"content": "\"\"\"The inference CLI group.\"\"\"\nimport logging\n\nimport click\nimport json\n\nfrom ..utils import EqDelimStringParamType\nfrom ..infer import hypotest\nfrom ..workspace import Workspace\nfrom .. import tensor, get_backend, set_backend, optimize\n\nlog = logging.getLogger(__name__)\n\n\[email protected](name='infer')\ndef cli():\n \"\"\"Infererence CLI group.\"\"\"\n\n\[email protected]()\[email protected]('workspace', default='-')\[email protected](\n '--output-file',\n help='The location of the output json file. If not specified, prints to screen.',\n default=None,\n)\[email protected]('--measurement', default=None)\[email protected]('-p', '--patch', multiple=True)\[email protected]('--testpoi', default=1.0)\[email protected]('--teststat', type=click.Choice(['q', 'qtilde']), default='qtilde')\[email protected](\n '--backend',\n type=click.Choice(['numpy', 'pytorch', 'tensorflow', 'jax', 'np', 'torch', 'tf']),\n help='The tensor backend used for the calculation.',\n default='numpy',\n)\[email protected]('--optimizer')\[email protected]('--optconf', type=EqDelimStringParamType(), multiple=True)\ndef cls(\n workspace,\n output_file,\n measurement,\n patch,\n testpoi,\n teststat,\n backend,\n optimizer,\n optconf,\n):\n \"\"\"\n Compute CLs value(s) for a given pyhf workspace.\n\n Example:\n\n .. code-block:: shell\n\n $ curl -sL https://raw.githubusercontent.com/scikit-hep/pyhf/master/docs/examples/json/2-bin_1-channel.json | pyhf cls\n {\n \"CLs_exp\": [\n 0.07807427911686156,\n 0.17472571775474618,\n 0.35998495263681285,\n 0.6343568235898907,\n 0.8809947004472013\n ],\n \"CLs_obs\": 0.3599845631401915\n }\n \"\"\"\n with click.open_file(workspace, 'r') as specstream:\n spec = json.load(specstream)\n\n ws = Workspace(spec)\n\n is_qtilde = teststat == 'qtilde'\n\n patches = [json.loads(click.open_file(pfile, 'r').read()) for pfile in patch]\n model = ws.model(\n measurement_name=measurement,\n patches=patches,\n modifier_settings={\n 'normsys': {'interpcode': 'code4'},\n 'histosys': {'interpcode': 'code4p'},\n },\n )\n\n # set the backend if not NumPy\n if backend in ['pytorch', 'torch']:\n set_backend(tensor.pytorch_backend(precision='64b'))\n elif backend in ['tensorflow', 'tf']:\n set_backend(tensor.tensorflow_backend(precision='64b'))\n elif backend in ['jax']:\n set_backend(tensor.jax_backend())\n tensorlib, _ = get_backend()\n\n optconf = {k: v for item in optconf for k, v in item.items()}\n\n # set the new optimizer\n if optimizer:\n new_optimizer = getattr(optimize, optimizer)\n set_backend(tensorlib, new_optimizer(**optconf))\n\n result = hypotest(\n testpoi, ws.data(model), model, qtilde=is_qtilde, return_expected_set=True\n )\n result = {\n 'CLs_obs': tensorlib.tolist(result[0])[0],\n 'CLs_exp': tensorlib.tolist(tensorlib.reshape(result[-1], [-1])),\n }\n\n if output_file is None:\n click.echo(json.dumps(result, indent=4, sort_keys=True))\n else:\n with open(output_file, 'w+') as out_file:\n json.dump(result, out_file, indent=4, sort_keys=True)\n log.debug(\"Written to {0:s}\".format(output_file))\n", "path": "src/pyhf/cli/infer.py"}], "after_files": [{"content": "\"\"\"The inference CLI group.\"\"\"\nimport logging\n\nimport click\nimport json\n\nfrom ..utils import EqDelimStringParamType\nfrom ..infer import hypotest\nfrom ..workspace import Workspace\nfrom .. import tensor, get_backend, set_backend, optimize\n\nlog = logging.getLogger(__name__)\n\n\[email protected](name='infer')\ndef cli():\n \"\"\"Infererence CLI group.\"\"\"\n\n\[email protected]()\[email protected]('workspace', default='-')\[email protected](\n '--output-file',\n help='The location of the output json file. If not specified, prints to screen.',\n default=None,\n)\[email protected]('--measurement', default=None)\[email protected]('-p', '--patch', multiple=True)\[email protected]('--testpoi', default=1.0)\[email protected]('--teststat', type=click.Choice(['q', 'qtilde']), default='qtilde')\[email protected](\n '--backend',\n type=click.Choice(['numpy', 'pytorch', 'tensorflow', 'jax', 'np', 'torch', 'tf']),\n help='The tensor backend used for the calculation.',\n default='numpy',\n)\[email protected]('--optimizer')\[email protected]('--optconf', type=EqDelimStringParamType(), multiple=True)\ndef cls(\n workspace,\n output_file,\n measurement,\n patch,\n testpoi,\n teststat,\n backend,\n optimizer,\n optconf,\n):\n \"\"\"\n Compute CLs value(s) for a given pyhf workspace.\n\n Example:\n\n .. code-block:: shell\n\n $ curl -sL https://git.io/JJYDE | pyhf cls\n\n \\b\n {\n \"CLs_exp\": [\n 0.07807427911686156,\n 0.17472571775474618,\n 0.35998495263681285,\n 0.6343568235898907,\n 0.8809947004472013\n ],\n \"CLs_obs\": 0.3599845631401915\n }\n \"\"\"\n with click.open_file(workspace, 'r') as specstream:\n spec = json.load(specstream)\n\n ws = Workspace(spec)\n\n is_qtilde = teststat == 'qtilde'\n\n patches = [json.loads(click.open_file(pfile, 'r').read()) for pfile in patch]\n model = ws.model(\n measurement_name=measurement,\n patches=patches,\n modifier_settings={\n 'normsys': {'interpcode': 'code4'},\n 'histosys': {'interpcode': 'code4p'},\n },\n )\n\n # set the backend if not NumPy\n if backend in ['pytorch', 'torch']:\n set_backend(tensor.pytorch_backend(precision='64b'))\n elif backend in ['tensorflow', 'tf']:\n set_backend(tensor.tensorflow_backend(precision='64b'))\n elif backend in ['jax']:\n set_backend(tensor.jax_backend())\n tensorlib, _ = get_backend()\n\n optconf = {k: v for item in optconf for k, v in item.items()}\n\n # set the new optimizer\n if optimizer:\n new_optimizer = getattr(optimize, optimizer)\n set_backend(tensorlib, new_optimizer(**optconf))\n\n result = hypotest(\n testpoi, ws.data(model), model, qtilde=is_qtilde, return_expected_set=True\n )\n result = {\n 'CLs_obs': tensorlib.tolist(result[0])[0],\n 'CLs_exp': tensorlib.tolist(tensorlib.reshape(result[-1], [-1])),\n }\n\n if output_file is None:\n click.echo(json.dumps(result, indent=4, sort_keys=True))\n else:\n with open(output_file, 'w+') as out_file:\n json.dump(result, out_file, indent=4, sort_keys=True)\n log.debug(\"Written to {0:s}\".format(output_file))\n", "path": "src/pyhf/cli/infer.py"}]}
1,789
151
gh_patches_debug_10293
rasdani/github-patches
git_diff
lutris__lutris-5245
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- "Accounts" tab in settings is blank ![image](https://github.com/lutris/lutris/assets/1454420/ad64e7a6-b8e2-4080-b031-da99b643279c) When navigating to the "accounts" tab in the lutris settings (version 0.5.15), it is blank and I get this error. This is on Fedora 39 KDE. ``` 2024-01-14 08:52:03,865: Error handling signal 'row-selected': 'PersonalName' Traceback (most recent call last): File "/usr/lib/python3.12/site-packages/lutris/exception_backstops.py", line 79, in error_wrapper return handler(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/lib/python3.12/site-packages/lutris/gui/config/preferences_dialog.py", line 109, in on_sidebar_activated generator() File "/usr/lib/python3.12/site-packages/lutris/gui/config/accounts_box.py", line 33, in populate_accounts account["PersonalName"] ~~~~~~~^^^^^^^^^^^^^^^^ KeyError: 'PersonalName' ``` My only guess is that my steam display name has a " / " in it. But I'm not sure. I have both Steam RPM and Steam flatpak installed. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lutris/gui/config/accounts_box.py` Content: ``` 1 from gettext import gettext as _ 2 3 from gi.repository import Gtk 4 5 from lutris import settings 6 from lutris.gui.config.base_config_box import BaseConfigBox 7 from lutris.util.steam.config import STEAM_ACCOUNT_SETTING, get_steam_users 8 9 10 class AccountsBox(BaseConfigBox): 11 12 def __init__(self): 13 super().__init__() 14 self.add(self.get_section_label(_("Steam accounts"))) 15 self.add(self.get_description_label( 16 _("Select which Steam account is used for Lutris integration and creating Steam shortcuts.") 17 )) 18 frame = Gtk.Frame(visible=True, shadow_type=Gtk.ShadowType.ETCHED_IN) 19 frame.get_style_context().add_class("info-frame") 20 self.pack_start(frame, False, False, 0) 21 22 self.accounts_box = Gtk.VBox(visible=True) 23 frame.add(self.accounts_box) 24 25 def populate_accounts(self): 26 main_radio_button = None 27 active_steam_account = settings.read_setting(STEAM_ACCOUNT_SETTING) 28 29 steam_users = get_steam_users() 30 for account in steam_users: 31 steamid64 = account["steamid64"] 32 name = account.get("PersonalName") or f"#{steamid64}" 33 radio_button = Gtk.RadioButton.new_with_label_from_widget(main_radio_button, name) 34 radio_button.set_margin_top(16) 35 radio_button.set_margin_start(16) 36 radio_button.set_margin_bottom(16) 37 radio_button.show() 38 radio_button.set_active(active_steam_account == steamid64) 39 radio_button.connect("toggled", self.on_steam_account_toggled, steamid64) 40 self.accounts_box.pack_start(radio_button, True, True, 0) 41 if not main_radio_button: 42 main_radio_button = radio_button 43 if not steam_users: 44 self.accounts_box.pack_start(Gtk.Label(_("No Steam account found"), visible=True), True, True, 0) 45 46 def on_steam_account_toggled(self, radio_button, steamid64): 47 """Handler for switching the active Steam account.""" 48 settings.write_setting(STEAM_ACCOUNT_SETTING, steamid64) 49 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lutris/gui/config/accounts_box.py b/lutris/gui/config/accounts_box.py --- a/lutris/gui/config/accounts_box.py +++ b/lutris/gui/config/accounts_box.py @@ -29,7 +29,7 @@ steam_users = get_steam_users() for account in steam_users: steamid64 = account["steamid64"] - name = account.get("PersonalName") or f"#{steamid64}" + name = account.get("PersonaName") or f"#{steamid64}" radio_button = Gtk.RadioButton.new_with_label_from_widget(main_radio_button, name) radio_button.set_margin_top(16) radio_button.set_margin_start(16)
{"golden_diff": "diff --git a/lutris/gui/config/accounts_box.py b/lutris/gui/config/accounts_box.py\n--- a/lutris/gui/config/accounts_box.py\n+++ b/lutris/gui/config/accounts_box.py\n@@ -29,7 +29,7 @@\n steam_users = get_steam_users()\n for account in steam_users:\n steamid64 = account[\"steamid64\"]\n- name = account.get(\"PersonalName\") or f\"#{steamid64}\"\n+ name = account.get(\"PersonaName\") or f\"#{steamid64}\"\n radio_button = Gtk.RadioButton.new_with_label_from_widget(main_radio_button, name)\n radio_button.set_margin_top(16)\n radio_button.set_margin_start(16)\n", "issue": "\"Accounts\" tab in settings is blank\n![image](https://github.com/lutris/lutris/assets/1454420/ad64e7a6-b8e2-4080-b031-da99b643279c)\r\n\r\nWhen navigating to the \"accounts\" tab in the lutris settings (version 0.5.15), it is blank and I get this error. This is on Fedora 39 KDE.\r\n\r\n```\r\n2024-01-14 08:52:03,865: Error handling signal 'row-selected': 'PersonalName'\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.12/site-packages/lutris/exception_backstops.py\", line 79, in error_wrapper\r\n return handler(*args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/python3.12/site-packages/lutris/gui/config/preferences_dialog.py\", line 109, in on_sidebar_activated\r\n generator()\r\n File \"/usr/lib/python3.12/site-packages/lutris/gui/config/accounts_box.py\", line 33, in populate_accounts\r\n account[\"PersonalName\"]\r\n ~~~~~~~^^^^^^^^^^^^^^^^\r\nKeyError: 'PersonalName'\r\n```\r\n\r\nMy only guess is that my steam display name has a \" / \" in it. But I'm not sure.\r\n\r\nI have both Steam RPM and Steam flatpak installed.\n", "before_files": [{"content": "from gettext import gettext as _\n\nfrom gi.repository import Gtk\n\nfrom lutris import settings\nfrom lutris.gui.config.base_config_box import BaseConfigBox\nfrom lutris.util.steam.config import STEAM_ACCOUNT_SETTING, get_steam_users\n\n\nclass AccountsBox(BaseConfigBox):\n\n def __init__(self):\n super().__init__()\n self.add(self.get_section_label(_(\"Steam accounts\")))\n self.add(self.get_description_label(\n _(\"Select which Steam account is used for Lutris integration and creating Steam shortcuts.\")\n ))\n frame = Gtk.Frame(visible=True, shadow_type=Gtk.ShadowType.ETCHED_IN)\n frame.get_style_context().add_class(\"info-frame\")\n self.pack_start(frame, False, False, 0)\n\n self.accounts_box = Gtk.VBox(visible=True)\n frame.add(self.accounts_box)\n\n def populate_accounts(self):\n main_radio_button = None\n active_steam_account = settings.read_setting(STEAM_ACCOUNT_SETTING)\n\n steam_users = get_steam_users()\n for account in steam_users:\n steamid64 = account[\"steamid64\"]\n name = account.get(\"PersonalName\") or f\"#{steamid64}\"\n radio_button = Gtk.RadioButton.new_with_label_from_widget(main_radio_button, name)\n radio_button.set_margin_top(16)\n radio_button.set_margin_start(16)\n radio_button.set_margin_bottom(16)\n radio_button.show()\n radio_button.set_active(active_steam_account == steamid64)\n radio_button.connect(\"toggled\", self.on_steam_account_toggled, steamid64)\n self.accounts_box.pack_start(radio_button, True, True, 0)\n if not main_radio_button:\n main_radio_button = radio_button\n if not steam_users:\n self.accounts_box.pack_start(Gtk.Label(_(\"No Steam account found\"), visible=True), True, True, 0)\n\n def on_steam_account_toggled(self, radio_button, steamid64):\n \"\"\"Handler for switching the active Steam account.\"\"\"\n settings.write_setting(STEAM_ACCOUNT_SETTING, steamid64)\n", "path": "lutris/gui/config/accounts_box.py"}], "after_files": [{"content": "from gettext import gettext as _\n\nfrom gi.repository import Gtk\n\nfrom lutris import settings\nfrom lutris.gui.config.base_config_box import BaseConfigBox\nfrom lutris.util.steam.config import STEAM_ACCOUNT_SETTING, get_steam_users\n\n\nclass AccountsBox(BaseConfigBox):\n\n def __init__(self):\n super().__init__()\n self.add(self.get_section_label(_(\"Steam accounts\")))\n self.add(self.get_description_label(\n _(\"Select which Steam account is used for Lutris integration and creating Steam shortcuts.\")\n ))\n frame = Gtk.Frame(visible=True, shadow_type=Gtk.ShadowType.ETCHED_IN)\n frame.get_style_context().add_class(\"info-frame\")\n self.pack_start(frame, False, False, 0)\n\n self.accounts_box = Gtk.VBox(visible=True)\n frame.add(self.accounts_box)\n\n def populate_accounts(self):\n main_radio_button = None\n active_steam_account = settings.read_setting(STEAM_ACCOUNT_SETTING)\n\n steam_users = get_steam_users()\n for account in steam_users:\n steamid64 = account[\"steamid64\"]\n name = account.get(\"PersonaName\") or f\"#{steamid64}\"\n radio_button = Gtk.RadioButton.new_with_label_from_widget(main_radio_button, name)\n radio_button.set_margin_top(16)\n radio_button.set_margin_start(16)\n radio_button.set_margin_bottom(16)\n radio_button.show()\n radio_button.set_active(active_steam_account == steamid64)\n radio_button.connect(\"toggled\", self.on_steam_account_toggled, steamid64)\n self.accounts_box.pack_start(radio_button, True, True, 0)\n if not main_radio_button:\n main_radio_button = radio_button\n if not steam_users:\n self.accounts_box.pack_start(Gtk.Label(_(\"No Steam account found\"), visible=True), True, True, 0)\n\n def on_steam_account_toggled(self, radio_button, steamid64):\n \"\"\"Handler for switching the active Steam account.\"\"\"\n settings.write_setting(STEAM_ACCOUNT_SETTING, steamid64)\n", "path": "lutris/gui/config/accounts_box.py"}]}
1,119
160
gh_patches_debug_40728
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-3400
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Spider verizon is broken During the global build at 2021-10-27-14-42-46, spider **verizon** failed with **4573 features** and **1650 errors**. Here's [the log](https://data.alltheplaces.xyz/runs/2021-10-27-14-42-46/logs/verizon.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-10-27-14-42-46/output/verizon.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-10-27-14-42-46/output/verizon.geojson)) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `locations/spiders/verizon.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 import scrapy 3 import json 4 import re 5 6 from locations.items import GeojsonPointItem 7 from locations.hours import OpeningHours 8 9 10 class VerizonSpider(scrapy.Spider): 11 name = "verizon" 12 item_attributes = { 'brand': "Verizon" } 13 allowed_domains = ["www.verizonwireless.com"] 14 start_urls = ( 15 'https://www.verizonwireless.com/sitemap_storelocator.xml', 16 ) 17 custom_settings = { 18 'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36', 19 } 20 21 def parse_hours(self, store_hours): 22 opening_hours = OpeningHours() 23 for store_day in store_hours['dayOfWeek']: 24 if store_day.lower() == 'closed': 25 continue 26 else: 27 day, open_close = store_day.split('-') 28 day = day.strip()[:2] 29 open_time = ' '.join(open_close.strip().split(' ', 2)[0:2]) 30 if open_time.split(' ')[0].lower() == 'closed': 31 continue 32 elif open_time.split(' ')[0].lower() == 'null': 33 continue 34 else: 35 if open_close.strip().count(' ') == 1: 36 open_time, close_time = open_time.split(' ') 37 opening_hours.add_range(day=day, 38 open_time=open_time, 39 close_time=close_time, 40 time_format='%I:%M%p' 41 ) 42 elif open_close.strip().count(' ') == 2: 43 open_time = open_close.strip().split(' ')[0] 44 close_time = ''.join(open_close.strip().split(' ')[1:3]) 45 opening_hours.add_range(day=day, 46 open_time=open_time, 47 close_time=close_time, 48 time_format='%I:%M%p' 49 ) 50 else: 51 close_time = open_close.strip().split(' ', 2)[2] 52 opening_hours.add_range(day=day, 53 open_time=open_time, 54 close_time=close_time, 55 time_format='%I:%M %p' 56 ) 57 58 return opening_hours.as_opening_hours() 59 60 def parse(self, response): 61 response.selector.remove_namespaces() 62 urls = response.xpath('//url/loc/text()').extract() 63 64 for url in urls: 65 if url.split('/')[-2].split('-')[-1].isdigit(): 66 # Store pages have a number at the end of their URL 67 yield scrapy.Request(url, callback=self.parse_store) 68 69 def parse_store(self, response): 70 script = response.xpath('//script[contains(text(), "storeJSON")]/text()').extract_first() 71 if not script: 72 return 73 74 store_data = json.loads(re.search(r'var storeJSON = (.*);', script).group(1)) 75 76 properties = { 77 'name': store_data["storeName"], 78 'ref': store_data["storeNumber"], 79 'addr_full': store_data["address"]["streetAddress"], 80 'city': store_data["address"]["addressLocality"], 81 'state': store_data["address"]["addressRegion"], 82 'postcode': store_data["address"]["postalCode"], 83 'country': store_data["address"]["addressCountry"], 84 'phone': store_data.get("telephone"), 85 'website': store_data.get("url") or response.url, 86 'lat': store_data["geo"].get("latitude"), 87 'lon': store_data["geo"].get("longitude"), 88 'extras': { 89 'business_name': store_data.get('posStoreDetail').get('businessName'), 90 'retail_id': store_data.get('retailId'), 91 'store_type': store_data.get('posStoreDetail').get('storeType'), 92 'store_type_note': store_data.get('typeOfStore') 93 } 94 } 95 96 hours = self.parse_hours(store_data.get("openingHoursSpecification")) 97 if hours: 98 properties["opening_hours"] = hours 99 100 yield GeojsonPointItem(**properties) 101 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/locations/spiders/verizon.py b/locations/spiders/verizon.py --- a/locations/spiders/verizon.py +++ b/locations/spiders/verizon.py @@ -20,40 +20,18 @@ def parse_hours(self, store_hours): opening_hours = OpeningHours() - for store_day in store_hours['dayOfWeek']: - if store_day.lower() == 'closed': - continue - else: - day, open_close = store_day.split('-') - day = day.strip()[:2] - open_time = ' '.join(open_close.strip().split(' ', 2)[0:2]) - if open_time.split(' ')[0].lower() == 'closed': - continue - elif open_time.split(' ')[0].lower() == 'null': - continue - else: - if open_close.strip().count(' ') == 1: - open_time, close_time = open_time.split(' ') - opening_hours.add_range(day=day, - open_time=open_time, - close_time=close_time, - time_format='%I:%M%p' - ) - elif open_close.strip().count(' ') == 2: - open_time = open_close.strip().split(' ')[0] - close_time = ''.join(open_close.strip().split(' ')[1:3]) - opening_hours.add_range(day=day, - open_time=open_time, - close_time=close_time, - time_format='%I:%M%p' - ) - else: - close_time = open_close.strip().split(' ', 2)[2] - opening_hours.add_range(day=day, - open_time=open_time, - close_time=close_time, - time_format='%I:%M %p' - ) + + for store_day in ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']: + open_time = store_hours.get(f'{store_day}Open') + close_time = store_hours.get(f'{store_day}Close') + + if open_time and close_time and open_time.lower() != 'closed' and close_time.lower() != 'closed': + opening_hours.add_range( + day=store_day[0:2], + open_time=open_time, + close_time=close_time, + time_format='%I:%M %p' + ) return opening_hours.as_opening_hours() @@ -86,14 +64,15 @@ 'lat': store_data["geo"].get("latitude"), 'lon': store_data["geo"].get("longitude"), 'extras': { - 'business_name': store_data.get('posStoreDetail').get('businessName'), + # Sometimes 'postStoreDetail' exists with "None" value, usual get w/ default syntax isn't reliable + 'business_name': (store_data.get('posStoreDetail') or {}).get('businessName'), 'retail_id': store_data.get('retailId'), - 'store_type': store_data.get('posStoreDetail').get('storeType'), + 'store_type': (store_data.get('posStoreDetail') or {}).get('storeType'), 'store_type_note': store_data.get('typeOfStore') } } - hours = self.parse_hours(store_data.get("openingHoursSpecification")) + hours = self.parse_hours(store_data.get("StoreHours")) if hours: properties["opening_hours"] = hours
{"golden_diff": "diff --git a/locations/spiders/verizon.py b/locations/spiders/verizon.py\n--- a/locations/spiders/verizon.py\n+++ b/locations/spiders/verizon.py\n@@ -20,40 +20,18 @@\n \n def parse_hours(self, store_hours):\n opening_hours = OpeningHours()\n- for store_day in store_hours['dayOfWeek']:\n- if store_day.lower() == 'closed':\n- continue\n- else:\n- day, open_close = store_day.split('-')\n- day = day.strip()[:2]\n- open_time = ' '.join(open_close.strip().split(' ', 2)[0:2])\n- if open_time.split(' ')[0].lower() == 'closed':\n- continue\n- elif open_time.split(' ')[0].lower() == 'null':\n- continue\n- else:\n- if open_close.strip().count(' ') == 1:\n- open_time, close_time = open_time.split(' ')\n- opening_hours.add_range(day=day,\n- open_time=open_time,\n- close_time=close_time,\n- time_format='%I:%M%p'\n- )\n- elif open_close.strip().count(' ') == 2:\n- open_time = open_close.strip().split(' ')[0]\n- close_time = ''.join(open_close.strip().split(' ')[1:3])\n- opening_hours.add_range(day=day,\n- open_time=open_time,\n- close_time=close_time,\n- time_format='%I:%M%p'\n- )\n- else:\n- close_time = open_close.strip().split(' ', 2)[2]\n- opening_hours.add_range(day=day,\n- open_time=open_time,\n- close_time=close_time,\n- time_format='%I:%M %p'\n- )\n+\n+ for store_day in ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']:\n+ open_time = store_hours.get(f'{store_day}Open')\n+ close_time = store_hours.get(f'{store_day}Close')\n+\n+ if open_time and close_time and open_time.lower() != 'closed' and close_time.lower() != 'closed':\n+ opening_hours.add_range(\n+ day=store_day[0:2],\n+ open_time=open_time,\n+ close_time=close_time,\n+ time_format='%I:%M %p'\n+ )\n \n return opening_hours.as_opening_hours()\n \n@@ -86,14 +64,15 @@\n 'lat': store_data[\"geo\"].get(\"latitude\"),\n 'lon': store_data[\"geo\"].get(\"longitude\"),\n 'extras': {\n- 'business_name': store_data.get('posStoreDetail').get('businessName'),\n+ # Sometimes 'postStoreDetail' exists with \"None\" value, usual get w/ default syntax isn't reliable\n+ 'business_name': (store_data.get('posStoreDetail') or {}).get('businessName'),\n 'retail_id': store_data.get('retailId'),\n- 'store_type': store_data.get('posStoreDetail').get('storeType'),\n+ 'store_type': (store_data.get('posStoreDetail') or {}).get('storeType'),\n 'store_type_note': store_data.get('typeOfStore')\n }\n }\n \n- hours = self.parse_hours(store_data.get(\"openingHoursSpecification\"))\n+ hours = self.parse_hours(store_data.get(\"StoreHours\"))\n if hours:\n properties[\"opening_hours\"] = hours\n", "issue": "Spider verizon is broken\nDuring the global build at 2021-10-27-14-42-46, spider **verizon** failed with **4573 features** and **1650 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-10-27-14-42-46/logs/verizon.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-10-27-14-42-46/output/verizon.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-10-27-14-42-46/output/verizon.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nimport re\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nclass VerizonSpider(scrapy.Spider):\n name = \"verizon\"\n item_attributes = { 'brand': \"Verizon\" }\n allowed_domains = [\"www.verizonwireless.com\"]\n start_urls = (\n 'https://www.verizonwireless.com/sitemap_storelocator.xml',\n )\n custom_settings = {\n 'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',\n }\n\n def parse_hours(self, store_hours):\n opening_hours = OpeningHours()\n for store_day in store_hours['dayOfWeek']:\n if store_day.lower() == 'closed':\n continue\n else:\n day, open_close = store_day.split('-')\n day = day.strip()[:2]\n open_time = ' '.join(open_close.strip().split(' ', 2)[0:2])\n if open_time.split(' ')[0].lower() == 'closed':\n continue\n elif open_time.split(' ')[0].lower() == 'null':\n continue\n else:\n if open_close.strip().count(' ') == 1:\n open_time, close_time = open_time.split(' ')\n opening_hours.add_range(day=day,\n open_time=open_time,\n close_time=close_time,\n time_format='%I:%M%p'\n )\n elif open_close.strip().count(' ') == 2:\n open_time = open_close.strip().split(' ')[0]\n close_time = ''.join(open_close.strip().split(' ')[1:3])\n opening_hours.add_range(day=day,\n open_time=open_time,\n close_time=close_time,\n time_format='%I:%M%p'\n )\n else:\n close_time = open_close.strip().split(' ', 2)[2]\n opening_hours.add_range(day=day,\n open_time=open_time,\n close_time=close_time,\n time_format='%I:%M %p'\n )\n\n return opening_hours.as_opening_hours()\n\n def parse(self, response):\n response.selector.remove_namespaces()\n urls = response.xpath('//url/loc/text()').extract()\n\n for url in urls:\n if url.split('/')[-2].split('-')[-1].isdigit():\n # Store pages have a number at the end of their URL\n yield scrapy.Request(url, callback=self.parse_store)\n\n def parse_store(self, response):\n script = response.xpath('//script[contains(text(), \"storeJSON\")]/text()').extract_first()\n if not script:\n return\n\n store_data = json.loads(re.search(r'var storeJSON = (.*);', script).group(1))\n\n properties = {\n 'name': store_data[\"storeName\"],\n 'ref': store_data[\"storeNumber\"],\n 'addr_full': store_data[\"address\"][\"streetAddress\"],\n 'city': store_data[\"address\"][\"addressLocality\"],\n 'state': store_data[\"address\"][\"addressRegion\"],\n 'postcode': store_data[\"address\"][\"postalCode\"],\n 'country': store_data[\"address\"][\"addressCountry\"],\n 'phone': store_data.get(\"telephone\"),\n 'website': store_data.get(\"url\") or response.url,\n 'lat': store_data[\"geo\"].get(\"latitude\"),\n 'lon': store_data[\"geo\"].get(\"longitude\"),\n 'extras': {\n 'business_name': store_data.get('posStoreDetail').get('businessName'),\n 'retail_id': store_data.get('retailId'),\n 'store_type': store_data.get('posStoreDetail').get('storeType'),\n 'store_type_note': store_data.get('typeOfStore')\n }\n }\n\n hours = self.parse_hours(store_data.get(\"openingHoursSpecification\"))\n if hours:\n properties[\"opening_hours\"] = hours\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/verizon.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nimport re\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nclass VerizonSpider(scrapy.Spider):\n name = \"verizon\"\n item_attributes = { 'brand': \"Verizon\" }\n allowed_domains = [\"www.verizonwireless.com\"]\n start_urls = (\n 'https://www.verizonwireless.com/sitemap_storelocator.xml',\n )\n custom_settings = {\n 'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',\n }\n\n def parse_hours(self, store_hours):\n opening_hours = OpeningHours()\n\n for store_day in ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']:\n open_time = store_hours.get(f'{store_day}Open')\n close_time = store_hours.get(f'{store_day}Close')\n\n if open_time and close_time and open_time.lower() != 'closed' and close_time.lower() != 'closed':\n opening_hours.add_range(\n day=store_day[0:2],\n open_time=open_time,\n close_time=close_time,\n time_format='%I:%M %p'\n )\n\n return opening_hours.as_opening_hours()\n\n def parse(self, response):\n response.selector.remove_namespaces()\n urls = response.xpath('//url/loc/text()').extract()\n\n for url in urls:\n if url.split('/')[-2].split('-')[-1].isdigit():\n # Store pages have a number at the end of their URL\n yield scrapy.Request(url, callback=self.parse_store)\n\n def parse_store(self, response):\n script = response.xpath('//script[contains(text(), \"storeJSON\")]/text()').extract_first()\n if not script:\n return\n\n store_data = json.loads(re.search(r'var storeJSON = (.*);', script).group(1))\n\n properties = {\n 'name': store_data[\"storeName\"],\n 'ref': store_data[\"storeNumber\"],\n 'addr_full': store_data[\"address\"][\"streetAddress\"],\n 'city': store_data[\"address\"][\"addressLocality\"],\n 'state': store_data[\"address\"][\"addressRegion\"],\n 'postcode': store_data[\"address\"][\"postalCode\"],\n 'country': store_data[\"address\"][\"addressCountry\"],\n 'phone': store_data.get(\"telephone\"),\n 'website': store_data.get(\"url\") or response.url,\n 'lat': store_data[\"geo\"].get(\"latitude\"),\n 'lon': store_data[\"geo\"].get(\"longitude\"),\n 'extras': {\n # Sometimes 'postStoreDetail' exists with \"None\" value, usual get w/ default syntax isn't reliable\n 'business_name': (store_data.get('posStoreDetail') or {}).get('businessName'),\n 'retail_id': store_data.get('retailId'),\n 'store_type': (store_data.get('posStoreDetail') or {}).get('storeType'),\n 'store_type_note': store_data.get('typeOfStore')\n }\n }\n\n hours = self.parse_hours(store_data.get(\"StoreHours\"))\n if hours:\n properties[\"opening_hours\"] = hours\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/verizon.py"}]}
1,520
774
gh_patches_debug_22389
rasdani/github-patches
git_diff
ckan__ckan-7309
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- White on Yellow?? Color Contrast **CKAN version** https://demo.ckan.org/en/user/edit/mgifford **Describe the bug** Title: WCAG 1.4.3: Ensures the contrast between foreground and background colors meets WCAG 2 AA contrast ratio thresholds (.btn-warning) Tags: Accessibility, WCAG 1.4.3, color-contrast Issue: Ensures the contrast between foreground and background colors meets WCAG 2 AA contrast ratio thresholds (color-contrast - https://accessibilityinsights.io/info-examples/web/color-contrast) Target application: Manage - mgifford - Users - CKAN Demo - https://demo.ckan.org/en/user/edit/mgifford Element path: .btn-warning Snippet: <a class="btn btn-warning" href="/en/user/generate_key/b8037a86-a216-4c9b-8211-e197fa09143a" data-module="confirm-action" data-module-content="Are you sure you want to regenerate the API key?">Regenerate API Key</a> How to fix: Fix any of the following: Element has insufficient color contrast of 1.94 (foreground color: #ffffff, background color: #f0ad4e, font size: 10.5pt (14px), font weight: bold). Expected contrast ratio of 4.5:1 Environment: Microsoft Edge version 107.0.1418.35 ==== This accessibility issue was found using Accessibility Insights for Web 2.35.0 (axe-core 4.4.1), a tool that helps find and fix accessibility issues. Get more information & download this tool at http://aka.ms/AccessibilityInsights. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ckan/cli/sass.py` Content: ``` 1 # encoding: utf-8 2 from __future__ import annotations 3 4 import subprocess 5 import os 6 7 import click 8 import six 9 10 from ckan.common import config 11 12 13 @click.command( 14 name=u'sass', 15 short_help=u'Compile all root sass documents into their CSS counterparts') 16 def sass(): 17 command = (u'npm', u'run', u'build') 18 19 public = config.get_value(u'ckan.base_public_folder') 20 21 root = os.path.join(os.path.dirname(__file__), u'..', public, u'base') 22 root = os.path.abspath(root) 23 _compile_sass(root, command, u'main') 24 25 26 def _compile_sass(root: str, command: tuple[str, ...], color: str): 27 click.echo(u'compile {}.css'.format(color)) 28 command = command + (u'--', u'--' + color) 29 30 process = subprocess.Popen( 31 command, 32 stdout=subprocess.PIPE, 33 stderr=subprocess.PIPE) 34 output = process.communicate() 35 for block in output: 36 click.echo(six.ensure_text(block)) 37 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ckan/cli/sass.py b/ckan/cli/sass.py --- a/ckan/cli/sass.py +++ b/ckan/cli/sass.py @@ -11,21 +11,32 @@ @click.command( - name=u'sass', - short_help=u'Compile all root sass documents into their CSS counterparts') -def sass(): - command = (u'npm', u'run', u'build') - - public = config.get_value(u'ckan.base_public_folder') - - root = os.path.join(os.path.dirname(__file__), u'..', public, u'base') + name='sass', + short_help='Compile all root sass documents into their CSS counterparts') [email protected]( + '-d', + '--debug', + is_flag=True, + help="Compile css with sourcemaps.") +def sass(debug: bool): + command = ('npm', 'run', 'build') + + public = config.get_value('ckan.base_public_folder') + + root = os.path.join(os.path.dirname(__file__), '..', public, 'base') root = os.path.abspath(root) - _compile_sass(root, command, u'main') - - -def _compile_sass(root: str, command: tuple[str, ...], color: str): - click.echo(u'compile {}.css'.format(color)) - command = command + (u'--', u'--' + color) + _compile_sass(root, command, 'main', debug) + + +def _compile_sass( + root: str, + command: tuple[str, ...], + color: str, + debug: bool): + click.echo('compile {}.css'.format(color)) + command = command + ('--', '--' + color) + if debug: + command = command + ('--debug',) process = subprocess.Popen( command,
{"golden_diff": "diff --git a/ckan/cli/sass.py b/ckan/cli/sass.py\n--- a/ckan/cli/sass.py\n+++ b/ckan/cli/sass.py\n@@ -11,21 +11,32 @@\n \n \n @click.command(\n- name=u'sass',\n- short_help=u'Compile all root sass documents into their CSS counterparts')\n-def sass():\n- command = (u'npm', u'run', u'build')\n-\n- public = config.get_value(u'ckan.base_public_folder')\n-\n- root = os.path.join(os.path.dirname(__file__), u'..', public, u'base')\n+ name='sass',\n+ short_help='Compile all root sass documents into their CSS counterparts')\[email protected](\n+ '-d',\n+ '--debug',\n+ is_flag=True,\n+ help=\"Compile css with sourcemaps.\")\n+def sass(debug: bool):\n+ command = ('npm', 'run', 'build')\n+\n+ public = config.get_value('ckan.base_public_folder')\n+\n+ root = os.path.join(os.path.dirname(__file__), '..', public, 'base')\n root = os.path.abspath(root)\n- _compile_sass(root, command, u'main')\n-\n-\n-def _compile_sass(root: str, command: tuple[str, ...], color: str):\n- click.echo(u'compile {}.css'.format(color))\n- command = command + (u'--', u'--' + color)\n+ _compile_sass(root, command, 'main', debug)\n+\n+\n+def _compile_sass(\n+ root: str,\n+ command: tuple[str, ...],\n+ color: str,\n+ debug: bool):\n+ click.echo('compile {}.css'.format(color))\n+ command = command + ('--', '--' + color)\n+ if debug:\n+ command = command + ('--debug',)\n \n process = subprocess.Popen(\n command,\n", "issue": "White on Yellow?? Color Contrast\n**CKAN version**\r\nhttps://demo.ckan.org/en/user/edit/mgifford\r\n\r\n**Describe the bug**\r\nTitle: WCAG 1.4.3: Ensures the contrast between foreground and background colors meets WCAG 2 AA contrast ratio thresholds (.btn-warning)\r\nTags: Accessibility, WCAG 1.4.3, color-contrast\r\n\r\nIssue: Ensures the contrast between foreground and background colors meets WCAG 2 AA contrast ratio thresholds (color-contrast - https://accessibilityinsights.io/info-examples/web/color-contrast)\r\n\r\nTarget application: Manage - mgifford - Users - CKAN Demo - https://demo.ckan.org/en/user/edit/mgifford\r\n\r\nElement path: .btn-warning\r\n\r\nSnippet: <a class=\"btn btn-warning\" href=\"/en/user/generate_key/b8037a86-a216-4c9b-8211-e197fa09143a\" data-module=\"confirm-action\" data-module-content=\"Are you sure you want to regenerate the API key?\">Regenerate API Key</a>\r\n\r\nHow to fix: \r\nFix any of the following:\r\n Element has insufficient color contrast of 1.94 (foreground color: #ffffff, background color: #f0ad4e, font size: 10.5pt (14px), font weight: bold). Expected contrast ratio of 4.5:1\r\n\r\nEnvironment: Microsoft Edge version 107.0.1418.35\r\n\r\n====\r\n\r\nThis accessibility issue was found using Accessibility Insights for Web 2.35.0 (axe-core 4.4.1), a tool that helps find and fix accessibility issues. Get more information & download this tool at http://aka.ms/AccessibilityInsights.\n", "before_files": [{"content": "# encoding: utf-8\nfrom __future__ import annotations\n\nimport subprocess\nimport os\n\nimport click\nimport six\n\nfrom ckan.common import config\n\n\[email protected](\n name=u'sass',\n short_help=u'Compile all root sass documents into their CSS counterparts')\ndef sass():\n command = (u'npm', u'run', u'build')\n\n public = config.get_value(u'ckan.base_public_folder')\n\n root = os.path.join(os.path.dirname(__file__), u'..', public, u'base')\n root = os.path.abspath(root)\n _compile_sass(root, command, u'main')\n\n\ndef _compile_sass(root: str, command: tuple[str, ...], color: str):\n click.echo(u'compile {}.css'.format(color))\n command = command + (u'--', u'--' + color)\n\n process = subprocess.Popen(\n command,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n output = process.communicate()\n for block in output:\n click.echo(six.ensure_text(block))\n", "path": "ckan/cli/sass.py"}], "after_files": [{"content": "# encoding: utf-8\nfrom __future__ import annotations\n\nimport subprocess\nimport os\n\nimport click\nimport six\n\nfrom ckan.common import config\n\n\[email protected](\n name='sass',\n short_help='Compile all root sass documents into their CSS counterparts')\[email protected](\n '-d',\n '--debug',\n is_flag=True,\n help=\"Compile css with sourcemaps.\")\ndef sass(debug: bool):\n command = ('npm', 'run', 'build')\n\n public = config.get_value('ckan.base_public_folder')\n\n root = os.path.join(os.path.dirname(__file__), '..', public, 'base')\n root = os.path.abspath(root)\n _compile_sass(root, command, 'main', debug)\n\n\ndef _compile_sass(\n root: str,\n command: tuple[str, ...],\n color: str,\n debug: bool):\n click.echo('compile {}.css'.format(color))\n command = command + ('--', '--' + color)\n if debug:\n command = command + ('--debug',)\n\n process = subprocess.Popen(\n command,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n output = process.communicate()\n for block in output:\n click.echo(six.ensure_text(block))\n", "path": "ckan/cli/sass.py"}]}
944
427
gh_patches_debug_10692
rasdani/github-patches
git_diff
freedomofpress__securedrop-4865
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- update ansible to 2.6.18 or later due to CVE-2019-10156 ## Description We should update Ansible to version 2.6.18 or later due to [CVE-2019-10156](https://nvd.nist.gov/vuln/detail/CVE-2019-10156). This is a templating vulnerability that would require an attacker to first insert malicious templates into the Admin workstation so the impact is minimal for SecureDrop. Nevertheless, to reduce alert noise and not be using dependencies with known vulnerabilities, we should update. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `install_files/ansible-base/callback_plugins/ansible_version_check.py` Content: ``` 1 # -*- encoding:utf-8 -*- 2 from __future__ import absolute_import, division, print_function, \ 3 unicode_literals 4 5 import sys 6 7 import ansible 8 9 try: 10 # Version 2.0+ 11 from ansible.plugins.callback import CallbackBase 12 except ImportError: 13 CallbackBase = object 14 15 16 def print_red_bold(text): 17 print('\x1b[31;1m' + text + '\x1b[0m') 18 19 20 class CallbackModule(CallbackBase): 21 def __init__(self): 22 # Can't use `on_X` because this isn't forwards compatible 23 # with Ansible 2.0+ 24 required_version = '2.6.14' # Keep synchronized with requirements files 25 if not ansible.__version__.startswith(required_version): 26 print_red_bold( 27 "SecureDrop restriction: only Ansible {version}.*" 28 "is supported." 29 .format(version=required_version) 30 ) 31 sys.exit(1) 32 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/install_files/ansible-base/callback_plugins/ansible_version_check.py b/install_files/ansible-base/callback_plugins/ansible_version_check.py --- a/install_files/ansible-base/callback_plugins/ansible_version_check.py +++ b/install_files/ansible-base/callback_plugins/ansible_version_check.py @@ -21,7 +21,7 @@ def __init__(self): # Can't use `on_X` because this isn't forwards compatible # with Ansible 2.0+ - required_version = '2.6.14' # Keep synchronized with requirements files + required_version = '2.6.19' # Keep synchronized with requirements files if not ansible.__version__.startswith(required_version): print_red_bold( "SecureDrop restriction: only Ansible {version}.*"
{"golden_diff": "diff --git a/install_files/ansible-base/callback_plugins/ansible_version_check.py b/install_files/ansible-base/callback_plugins/ansible_version_check.py\n--- a/install_files/ansible-base/callback_plugins/ansible_version_check.py\n+++ b/install_files/ansible-base/callback_plugins/ansible_version_check.py\n@@ -21,7 +21,7 @@\n def __init__(self):\n # Can't use `on_X` because this isn't forwards compatible\n # with Ansible 2.0+\n- required_version = '2.6.14' # Keep synchronized with requirements files\n+ required_version = '2.6.19' # Keep synchronized with requirements files\n if not ansible.__version__.startswith(required_version):\n print_red_bold(\n \"SecureDrop restriction: only Ansible {version}.*\"\n", "issue": "update ansible to 2.6.18 or later due to CVE-2019-10156\n## Description\r\n\r\nWe should update Ansible to version 2.6.18 or later due to [CVE-2019-10156](https://nvd.nist.gov/vuln/detail/CVE-2019-10156). This is a templating vulnerability that would require an attacker to first insert malicious templates into the Admin workstation so the impact is minimal for SecureDrop. Nevertheless, to reduce alert noise and not be using dependencies with known vulnerabilities, we should update. \n", "before_files": [{"content": "# -*- encoding:utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, \\\n unicode_literals\n\nimport sys\n\nimport ansible\n\ntry:\n # Version 2.0+\n from ansible.plugins.callback import CallbackBase\nexcept ImportError:\n CallbackBase = object\n\n\ndef print_red_bold(text):\n print('\\x1b[31;1m' + text + '\\x1b[0m')\n\n\nclass CallbackModule(CallbackBase):\n def __init__(self):\n # Can't use `on_X` because this isn't forwards compatible\n # with Ansible 2.0+\n required_version = '2.6.14' # Keep synchronized with requirements files\n if not ansible.__version__.startswith(required_version):\n print_red_bold(\n \"SecureDrop restriction: only Ansible {version}.*\"\n \"is supported.\"\n .format(version=required_version)\n )\n sys.exit(1)\n", "path": "install_files/ansible-base/callback_plugins/ansible_version_check.py"}], "after_files": [{"content": "# -*- encoding:utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, \\\n unicode_literals\n\nimport sys\n\nimport ansible\n\ntry:\n # Version 2.0+\n from ansible.plugins.callback import CallbackBase\nexcept ImportError:\n CallbackBase = object\n\n\ndef print_red_bold(text):\n print('\\x1b[31;1m' + text + '\\x1b[0m')\n\n\nclass CallbackModule(CallbackBase):\n def __init__(self):\n # Can't use `on_X` because this isn't forwards compatible\n # with Ansible 2.0+\n required_version = '2.6.19' # Keep synchronized with requirements files\n if not ansible.__version__.startswith(required_version):\n print_red_bold(\n \"SecureDrop restriction: only Ansible {version}.*\"\n \"is supported.\"\n .format(version=required_version)\n )\n sys.exit(1)\n", "path": "install_files/ansible-base/callback_plugins/ansible_version_check.py"}]}
664
177
gh_patches_debug_60820
rasdani/github-patches
git_diff
cltk__cltk-575
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Double-check code and data for new French PR This issue is for @nat1881 to follow up on her large PR #571 for Old and Middle French. Natasha, I would like you to do the following steps, to be certain that the code works as you intended: * Start a brand new clone of (this) cltk repo. * Make a new virtual env * Mk source tarball and install (this should install all dependencies, too): `python setup.py sdist install` * Temporarily rename your `~/cltk_data` dir (eg, `mv ~/cltk_data ~/cltk_data_backup`) * Import the french corpora and make sure they appear as they should * Check in ipython all of your commands that you have added to the docs. Copy-paste these exactly as they are in the docs. * Follow up on any bugs in your own updated branch ([this is what I recommend for updating your branch](https://github.com/cltk/cltk/wiki/Example-Git-and-Python-workflow)) * Bump the version in `setup.py` and make PR for this * Then @diyclassics or I will push the code to PyPI You may be tired of this, but you're getting close! :weary: cc @mlj --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 """Config for PyPI.""" 2 3 from setuptools import find_packages 4 from setuptools import setup 5 6 7 setup( 8 author='Kyle P. Johnson', 9 author_email='[email protected]', 10 classifiers=[ 11 'Intended Audience :: Education', 12 'Intended Audience :: Science/Research', 13 'License :: OSI Approved :: MIT License', 14 'Natural Language :: Chinese (Traditional)', 15 'Natural Language :: English', 16 'Natural Language :: Greek', 17 'Natural Language :: Latin', 18 'Operating System :: POSIX', 19 'Programming Language :: Python :: 3.6', 20 'Topic :: Scientific/Engineering :: Artificial Intelligence', 21 'Topic :: Text Processing', 22 'Topic :: Text Processing :: General', 23 'Topic :: Text Processing :: Linguistic', 24 ], 25 description='NLP for the ancient world', 26 install_requires=['gitpython', 27 'nltk', 28 'python-crfsuite', 29 'pyuca', 30 'pyyaml', 31 'regex', 32 'whoosh'], 33 keywords=['nlp', 'nltk', 'greek', 'latin', 'chinese', 'sanskrit', 'pali', 'tibetan'], 34 license='MIT', 35 long_description='The Classical Language Toolkit (CLTK) is a framework for natural language processing for Classical languages.', # pylint: disable=C0301, 36 name='cltk', 37 packages=find_packages(), 38 url='https://github.com/cltk/cltk', 39 version='0.1.63', 40 zip_safe=True, 41 test_suite='cltk.tests.test_cltk', 42 ) 43 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -36,7 +36,7 @@ name='cltk', packages=find_packages(), url='https://github.com/cltk/cltk', - version='0.1.63', + version='0.1.64', zip_safe=True, test_suite='cltk.tests.test_cltk', )
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -36,7 +36,7 @@\n name='cltk',\n packages=find_packages(),\n url='https://github.com/cltk/cltk',\n- version='0.1.63',\n+ version='0.1.64',\n zip_safe=True,\n test_suite='cltk.tests.test_cltk',\n )\n", "issue": "Double-check code and data for new French PR\nThis issue is for @nat1881 to follow up on her large PR #571 for Old and Middle French.\r\n\r\nNatasha, I would like you to do the following steps, to be certain that the code works as you intended:\r\n\r\n* Start a brand new clone of (this) cltk repo.\r\n* Make a new virtual env\r\n* Mk source tarball and install (this should install all dependencies, too): `python setup.py sdist install`\r\n* Temporarily rename your `~/cltk_data` dir (eg, `mv ~/cltk_data ~/cltk_data_backup`)\r\n* Import the french corpora and make sure they appear as they should\r\n* Check in ipython all of your commands that you have added to the docs. Copy-paste these exactly as they are in the docs.\r\n* Follow up on any bugs in your own updated branch ([this is what I recommend for updating your branch](https://github.com/cltk/cltk/wiki/Example-Git-and-Python-workflow))\r\n* Bump the version in `setup.py` and make PR for this\r\n* Then @diyclassics or I will push the code to PyPI\r\n\r\nYou may be tired of this, but you're getting close! :weary:\r\n\r\ncc @mlj \n", "before_files": [{"content": "\"\"\"Config for PyPI.\"\"\"\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\nsetup(\n author='Kyle P. Johnson',\n author_email='[email protected]',\n classifiers=[\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: Chinese (Traditional)',\n 'Natural Language :: English',\n 'Natural Language :: Greek',\n 'Natural Language :: Latin',\n 'Operating System :: POSIX',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Text Processing',\n 'Topic :: Text Processing :: General',\n 'Topic :: Text Processing :: Linguistic',\n ],\n description='NLP for the ancient world',\n install_requires=['gitpython',\n 'nltk',\n 'python-crfsuite',\n 'pyuca',\n 'pyyaml',\n 'regex',\n 'whoosh'],\n keywords=['nlp', 'nltk', 'greek', 'latin', 'chinese', 'sanskrit', 'pali', 'tibetan'],\n license='MIT',\n long_description='The Classical Language Toolkit (CLTK) is a framework for natural language processing for Classical languages.', # pylint: disable=C0301,\n name='cltk',\n packages=find_packages(),\n url='https://github.com/cltk/cltk',\n version='0.1.63',\n zip_safe=True,\n test_suite='cltk.tests.test_cltk',\n)\n", "path": "setup.py"}], "after_files": [{"content": "\"\"\"Config for PyPI.\"\"\"\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\nsetup(\n author='Kyle P. Johnson',\n author_email='[email protected]',\n classifiers=[\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: Chinese (Traditional)',\n 'Natural Language :: English',\n 'Natural Language :: Greek',\n 'Natural Language :: Latin',\n 'Operating System :: POSIX',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Text Processing',\n 'Topic :: Text Processing :: General',\n 'Topic :: Text Processing :: Linguistic',\n ],\n description='NLP for the ancient world',\n install_requires=['gitpython',\n 'nltk',\n 'python-crfsuite',\n 'pyuca',\n 'pyyaml',\n 'regex',\n 'whoosh'],\n keywords=['nlp', 'nltk', 'greek', 'latin', 'chinese', 'sanskrit', 'pali', 'tibetan'],\n license='MIT',\n long_description='The Classical Language Toolkit (CLTK) is a framework for natural language processing for Classical languages.', # pylint: disable=C0301,\n name='cltk',\n packages=find_packages(),\n url='https://github.com/cltk/cltk',\n version='0.1.64',\n zip_safe=True,\n test_suite='cltk.tests.test_cltk',\n)\n", "path": "setup.py"}]}
945
94
gh_patches_debug_6139
rasdani/github-patches
git_diff
uclapi__uclapi-140
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [Documentation] Docs link is Absolute, not Relative The documentation link always goes to `https://uclapi.com/docs`, even if running in, for example, staging. Just linking to `/docs` would be adequate to fix this. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `backend/uclapi/resources/views.py` Content: ``` 1 import os 2 import requests 3 4 from lxml import etree 5 6 from common.decorators import uclapi_protected_endpoint 7 from common.helpers import PrettyJsonResponse as JsonResponse 8 9 from rest_framework.decorators import api_view 10 11 12 @api_view(['GET']) 13 @uclapi_protected_endpoint() 14 def get_pc_availability(request, *args, **kwargs): 15 try: 16 r = requests.get(os.environ["PCA_LINK"]) 17 except requests.exceptions.MissingSchema: 18 resp = JsonResponse({ 19 "ok": False, 20 "error": ("Could not retrieve availability data." 21 " Please try again later or contact us for support.") 22 }, rate_limiting_data=kwargs) 23 resp.status_code = 400 24 return resp 25 26 try: 27 e = etree.fromstring(r.content) 28 except (ValueError, etree.XMLSyntaxError): 29 resp = JsonResponse({ 30 "ok": False, 31 "error": ("Could not parse the desktop availability data." 32 " Please try again later or contact us for support.") 33 }, rate_limiting_data=kwargs) 34 resp.status_code = 400 35 return resp 36 37 data = [] 38 for pc in e.findall("room"): 39 _ = pc.get 40 data.append({ 41 "location": { 42 "room_name": _("location"), 43 "room_id": _("rid"), 44 "latitude": _("latitude"), 45 "longitude": _("longitude"), 46 "building_name": _("buildingName"), 47 "address": _("buildingAddress"), 48 "postcode": _("buildingPostCode") 49 }, 50 "free_seats": _("free"), 51 "total_seats": _("seats"), 52 "room_status": _("info") 53 }) 54 55 return JsonResponse({ 56 "ok": True, 57 "data": data 58 }, rate_limiting_data=kwargs) 59 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/backend/uclapi/resources/views.py b/backend/uclapi/resources/views.py --- a/backend/uclapi/resources/views.py +++ b/backend/uclapi/resources/views.py @@ -45,7 +45,7 @@ "longitude": _("longitude"), "building_name": _("buildingName"), "address": _("buildingAddress"), - "postcode": _("buildingPostCode") + "postcode": _("buildingPostcode") }, "free_seats": _("free"), "total_seats": _("seats"),
{"golden_diff": "diff --git a/backend/uclapi/resources/views.py b/backend/uclapi/resources/views.py\n--- a/backend/uclapi/resources/views.py\n+++ b/backend/uclapi/resources/views.py\n@@ -45,7 +45,7 @@\n \"longitude\": _(\"longitude\"),\n \"building_name\": _(\"buildingName\"),\n \"address\": _(\"buildingAddress\"),\n- \"postcode\": _(\"buildingPostCode\")\n+ \"postcode\": _(\"buildingPostcode\")\n },\n \"free_seats\": _(\"free\"),\n \"total_seats\": _(\"seats\"),\n", "issue": "[Documentation] Docs link is Absolute, not Relative\nThe documentation link always goes to `https://uclapi.com/docs`, even if running in, for example, staging. Just linking to `/docs` would be adequate to fix this.\n", "before_files": [{"content": "import os\nimport requests\n\nfrom lxml import etree\n\nfrom common.decorators import uclapi_protected_endpoint\nfrom common.helpers import PrettyJsonResponse as JsonResponse\n\nfrom rest_framework.decorators import api_view\n\n\n@api_view(['GET'])\n@uclapi_protected_endpoint()\ndef get_pc_availability(request, *args, **kwargs):\n try:\n r = requests.get(os.environ[\"PCA_LINK\"])\n except requests.exceptions.MissingSchema:\n resp = JsonResponse({\n \"ok\": False,\n \"error\": (\"Could not retrieve availability data.\"\n \" Please try again later or contact us for support.\")\n }, rate_limiting_data=kwargs)\n resp.status_code = 400\n return resp\n\n try:\n e = etree.fromstring(r.content)\n except (ValueError, etree.XMLSyntaxError):\n resp = JsonResponse({\n \"ok\": False,\n \"error\": (\"Could not parse the desktop availability data.\"\n \" Please try again later or contact us for support.\")\n }, rate_limiting_data=kwargs)\n resp.status_code = 400\n return resp\n\n data = []\n for pc in e.findall(\"room\"):\n _ = pc.get\n data.append({\n \"location\": {\n \"room_name\": _(\"location\"),\n \"room_id\": _(\"rid\"),\n \"latitude\": _(\"latitude\"),\n \"longitude\": _(\"longitude\"),\n \"building_name\": _(\"buildingName\"),\n \"address\": _(\"buildingAddress\"),\n \"postcode\": _(\"buildingPostCode\")\n },\n \"free_seats\": _(\"free\"),\n \"total_seats\": _(\"seats\"),\n \"room_status\": _(\"info\")\n })\n\n return JsonResponse({\n \"ok\": True,\n \"data\": data\n }, rate_limiting_data=kwargs)\n", "path": "backend/uclapi/resources/views.py"}], "after_files": [{"content": "import os\nimport requests\n\nfrom lxml import etree\n\nfrom common.decorators import uclapi_protected_endpoint\nfrom common.helpers import PrettyJsonResponse as JsonResponse\n\nfrom rest_framework.decorators import api_view\n\n\n@api_view(['GET'])\n@uclapi_protected_endpoint()\ndef get_pc_availability(request, *args, **kwargs):\n try:\n r = requests.get(os.environ[\"PCA_LINK\"])\n except requests.exceptions.MissingSchema:\n resp = JsonResponse({\n \"ok\": False,\n \"error\": (\"Could not retrieve availability data.\"\n \" Please try again later or contact us for support.\")\n }, rate_limiting_data=kwargs)\n resp.status_code = 400\n return resp\n\n try:\n e = etree.fromstring(r.content)\n except (ValueError, etree.XMLSyntaxError):\n resp = JsonResponse({\n \"ok\": False,\n \"error\": (\"Could not parse the desktop availability data.\"\n \" Please try again later or contact us for support.\")\n }, rate_limiting_data=kwargs)\n resp.status_code = 400\n return resp\n\n data = []\n for pc in e.findall(\"room\"):\n _ = pc.get\n data.append({\n \"location\": {\n \"room_name\": _(\"location\"),\n \"room_id\": _(\"rid\"),\n \"latitude\": _(\"latitude\"),\n \"longitude\": _(\"longitude\"),\n \"building_name\": _(\"buildingName\"),\n \"address\": _(\"buildingAddress\"),\n \"postcode\": _(\"buildingPostcode\")\n },\n \"free_seats\": _(\"free\"),\n \"total_seats\": _(\"seats\"),\n \"room_status\": _(\"info\")\n })\n\n return JsonResponse({\n \"ok\": True,\n \"data\": data\n }, rate_limiting_data=kwargs)\n", "path": "backend/uclapi/resources/views.py"}]}
795
117
gh_patches_debug_24152
rasdani/github-patches
git_diff
ckan__ckan-5093
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Missing dependency in requirements.txt (cookiecutter) https://github.com/ckan/ckan/blob/f2cea089bc0aaeede06d98449c4e9eb65e8c2f14/ckan/cli/generate.py#L7 - cookiecutter lib will be imported on a `ckan` cli attempt, but as it is missing from requirments.txt, is not present which will result to ImportError - cookiecutter is listed in requirments-dev.txt, but docker builds don't use it Tested on a docker personal build, by : > docker build -t ckan . > docker run --rm -it --entrypoint bash --name ckan -p 5000:5000 --link db:db --link redis:redis --link solr:solr ckan > (activated-env) > ckan --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ckan/cli/generate.py` Content: ``` 1 # encoding: utf-8 2 3 import os 4 import sys 5 import click 6 from ckan.cli import error_shout 7 from cookiecutter.main import cookiecutter 8 9 10 @click.group(name=u'generate', 11 short_help=u"Generate empty extension files to expand CKAN.") 12 def generate(): 13 pass 14 15 16 @generate.command(name=u'extension', short_help=u"Create empty extension.") 17 @click.option(u'-o', u'--output-dir', help=u"Location to put the generated " 18 u"template.", 19 default=u'.') 20 def extension(output_dir): 21 cur_loc = os.path.dirname(os.path.abspath(__file__)) 22 os.chdir(cur_loc) 23 os.chdir(u'../../contrib/cookiecutter/ckan_extension/') 24 template_loc = os.getcwd() 25 26 # Prompt user for information 27 click.echo(u"\n") 28 name = click.prompt(u"Extenion's name", default=u"must begin 'ckanext-'") 29 author = click.prompt(u"Author's name", default=u"") 30 email = click.prompt(u"Author's email", default=u"") 31 github = click.prompt(u"Your Github user or organization name", 32 default=u"") 33 description = click.prompt(u"Brief description of the project", 34 default=u"") 35 keywords = click.prompt(u"List of keywords (seperated by spaces)", 36 default=u"CKAN") 37 38 # Ensure one instance of 'CKAN' in keywords 39 keywords = keywords.strip().split() 40 keywords = [keyword for keyword in keywords 41 if keyword not in (u'ckan', u'CKAN')] 42 keywords.insert(0, u'CKAN') 43 keywords = u' '.join(keywords) 44 45 # Set short name and plugin class name 46 project_short = name[8:].lower().replace(u'-', u'_') 47 plugin_class_name = project_short.title().replace(u'_', u'') + u'Plugin' 48 49 context = {u"project": name, 50 u"description": description, 51 u"author": author, 52 u"author_email": email, 53 u"keywords": keywords, 54 u"github_user_name": github, 55 u"project_shortname": project_short, 56 u"plugin_class_name": plugin_class_name, 57 u"_source": u"cli"} 58 59 if output_dir == u'.': 60 os.chdir(u'../../../..') 61 output_dir = os.getcwd() 62 63 if not name.startswith(u"ckanext-"): 64 print(u"\nERROR: Project name must start with 'ckanext-' > {}" 65 .format(name)) 66 sys.exit(1) 67 68 cookiecutter(template_loc, no_input=True, extra_context=context, 69 output_dir=output_dir) 70 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ckan/cli/generate.py b/ckan/cli/generate.py --- a/ckan/cli/generate.py +++ b/ckan/cli/generate.py @@ -4,13 +4,21 @@ import sys import click from ckan.cli import error_shout -from cookiecutter.main import cookiecutter [email protected](name=u'generate', - short_help=u"Generate empty extension files to expand CKAN.") [email protected]( + name=u'generate', + short_help=u"Generate empty extension files to expand CKAN.", + invoke_without_command=True, +) def generate(): - pass + try: + from cookiecutter.main import cookiecutter + except ImportError: + error_shout(u"`cookiecutter` library is missing from import path.") + error_shout(u"Make sure you have dev-dependencies installed:") + error_shout(u"\tpip install -r dev-requirements.txt") + raise click.Abort() @generate.command(name=u'extension', short_help=u"Create empty extension.") @@ -18,6 +26,7 @@ u"template.", default=u'.') def extension(output_dir): + from cookiecutter.main import cookiecutter cur_loc = os.path.dirname(os.path.abspath(__file__)) os.chdir(cur_loc) os.chdir(u'../../contrib/cookiecutter/ckan_extension/')
{"golden_diff": "diff --git a/ckan/cli/generate.py b/ckan/cli/generate.py\n--- a/ckan/cli/generate.py\n+++ b/ckan/cli/generate.py\n@@ -4,13 +4,21 @@\n import sys\n import click\n from ckan.cli import error_shout\n-from cookiecutter.main import cookiecutter\n \n \[email protected](name=u'generate',\n- short_help=u\"Generate empty extension files to expand CKAN.\")\[email protected](\n+ name=u'generate',\n+ short_help=u\"Generate empty extension files to expand CKAN.\",\n+ invoke_without_command=True,\n+)\n def generate():\n- pass\n+ try:\n+ from cookiecutter.main import cookiecutter\n+ except ImportError:\n+ error_shout(u\"`cookiecutter` library is missing from import path.\")\n+ error_shout(u\"Make sure you have dev-dependencies installed:\")\n+ error_shout(u\"\\tpip install -r dev-requirements.txt\")\n+ raise click.Abort()\n \n \n @generate.command(name=u'extension', short_help=u\"Create empty extension.\")\n@@ -18,6 +26,7 @@\n u\"template.\",\n default=u'.')\n def extension(output_dir):\n+ from cookiecutter.main import cookiecutter\n cur_loc = os.path.dirname(os.path.abspath(__file__))\n os.chdir(cur_loc)\n os.chdir(u'../../contrib/cookiecutter/ckan_extension/')\n", "issue": "Missing dependency in requirements.txt (cookiecutter) \nhttps://github.com/ckan/ckan/blob/f2cea089bc0aaeede06d98449c4e9eb65e8c2f14/ckan/cli/generate.py#L7\r\n\r\n- cookiecutter lib will be imported on a `ckan` cli attempt, but as it is missing from requirments.txt, is not present which will result to ImportError \r\n\r\n- cookiecutter is listed in requirments-dev.txt, but docker builds don't use it\r\n\r\nTested on a docker personal build, by : \r\n> docker build -t ckan .\r\n> docker run --rm -it --entrypoint bash --name ckan -p 5000:5000 --link db:db --link redis:redis --link solr:solr ckan\r\n> (activated-env)\r\n> ckan\n", "before_files": [{"content": "# encoding: utf-8\n\nimport os\nimport sys\nimport click\nfrom ckan.cli import error_shout\nfrom cookiecutter.main import cookiecutter\n\n\[email protected](name=u'generate',\n short_help=u\"Generate empty extension files to expand CKAN.\")\ndef generate():\n pass\n\n\[email protected](name=u'extension', short_help=u\"Create empty extension.\")\[email protected](u'-o', u'--output-dir', help=u\"Location to put the generated \"\n u\"template.\",\n default=u'.')\ndef extension(output_dir):\n cur_loc = os.path.dirname(os.path.abspath(__file__))\n os.chdir(cur_loc)\n os.chdir(u'../../contrib/cookiecutter/ckan_extension/')\n template_loc = os.getcwd()\n\n # Prompt user for information\n click.echo(u\"\\n\")\n name = click.prompt(u\"Extenion's name\", default=u\"must begin 'ckanext-'\")\n author = click.prompt(u\"Author's name\", default=u\"\")\n email = click.prompt(u\"Author's email\", default=u\"\")\n github = click.prompt(u\"Your Github user or organization name\",\n default=u\"\")\n description = click.prompt(u\"Brief description of the project\",\n default=u\"\")\n keywords = click.prompt(u\"List of keywords (seperated by spaces)\",\n default=u\"CKAN\")\n\n # Ensure one instance of 'CKAN' in keywords\n keywords = keywords.strip().split()\n keywords = [keyword for keyword in keywords\n if keyword not in (u'ckan', u'CKAN')]\n keywords.insert(0, u'CKAN')\n keywords = u' '.join(keywords)\n\n # Set short name and plugin class name\n project_short = name[8:].lower().replace(u'-', u'_')\n plugin_class_name = project_short.title().replace(u'_', u'') + u'Plugin'\n\n context = {u\"project\": name,\n u\"description\": description,\n u\"author\": author,\n u\"author_email\": email,\n u\"keywords\": keywords,\n u\"github_user_name\": github,\n u\"project_shortname\": project_short,\n u\"plugin_class_name\": plugin_class_name,\n u\"_source\": u\"cli\"}\n\n if output_dir == u'.':\n os.chdir(u'../../../..')\n output_dir = os.getcwd()\n\n if not name.startswith(u\"ckanext-\"):\n print(u\"\\nERROR: Project name must start with 'ckanext-' > {}\"\n .format(name))\n sys.exit(1)\n\n cookiecutter(template_loc, no_input=True, extra_context=context,\n output_dir=output_dir)\n", "path": "ckan/cli/generate.py"}], "after_files": [{"content": "# encoding: utf-8\n\nimport os\nimport sys\nimport click\nfrom ckan.cli import error_shout\n\n\[email protected](\n name=u'generate',\n short_help=u\"Generate empty extension files to expand CKAN.\",\n invoke_without_command=True,\n)\ndef generate():\n try:\n from cookiecutter.main import cookiecutter\n except ImportError:\n error_shout(u\"`cookiecutter` library is missing from import path.\")\n error_shout(u\"Make sure you have dev-dependencies installed:\")\n error_shout(u\"\\tpip install -r dev-requirements.txt\")\n raise click.Abort()\n\n\[email protected](name=u'extension', short_help=u\"Create empty extension.\")\[email protected](u'-o', u'--output-dir', help=u\"Location to put the generated \"\n u\"template.\",\n default=u'.')\ndef extension(output_dir):\n from cookiecutter.main import cookiecutter\n cur_loc = os.path.dirname(os.path.abspath(__file__))\n os.chdir(cur_loc)\n os.chdir(u'../../contrib/cookiecutter/ckan_extension/')\n template_loc = os.getcwd()\n\n # Prompt user for information\n click.echo(u\"\\n\")\n name = click.prompt(u\"Extenion's name\", default=u\"must begin 'ckanext-'\")\n author = click.prompt(u\"Author's name\", default=u\"\")\n email = click.prompt(u\"Author's email\", default=u\"\")\n github = click.prompt(u\"Your Github user or organization name\",\n default=u\"\")\n description = click.prompt(u\"Brief description of the project\",\n default=u\"\")\n keywords = click.prompt(u\"List of keywords (seperated by spaces)\",\n default=u\"CKAN\")\n\n # Ensure one instance of 'CKAN' in keywords\n keywords = keywords.strip().split()\n keywords = [keyword for keyword in keywords\n if keyword not in (u'ckan', u'CKAN')]\n keywords.insert(0, u'CKAN')\n keywords = u' '.join(keywords)\n\n # Set short name and plugin class name\n project_short = name[8:].lower().replace(u'-', u'_')\n plugin_class_name = project_short.title().replace(u'_', u'') + u'Plugin'\n\n context = {u\"project\": name,\n u\"description\": description,\n u\"author\": author,\n u\"author_email\": email,\n u\"keywords\": keywords,\n u\"github_user_name\": github,\n u\"project_shortname\": project_short,\n u\"plugin_class_name\": plugin_class_name,\n u\"_source\": u\"cli\"}\n\n if output_dir == u'.':\n os.chdir(u'../../../..')\n output_dir = os.getcwd()\n\n if not name.startswith(u\"ckanext-\"):\n print(u\"\\nERROR: Project name must start with 'ckanext-' > {}\"\n .format(name))\n sys.exit(1)\n\n cookiecutter(template_loc, no_input=True, extra_context=context,\n output_dir=output_dir)\n", "path": "ckan/cli/generate.py"}]}
1,169
315
gh_patches_debug_31494
rasdani/github-patches
git_diff
pyjanitor-devs__pyjanitor-1216
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [DOC] Missing Pyjanitor description on PyPi # Brief Description of Fix Since `pyjanitor` release `0.22.0`, there has not been a ["Project description" for the package on PyPi](https://pypi.org/project/pyjanitor/0.22.0/#description). During the release of of `0.24.0`, @samukweku and I got an error during the release process due to: ``` Checking dist/pyjanitor-0.24.0-py3-none-any.whl: FAILED ERROR `long_description` has syntax errors in markup and would not be rendered on PyPI. No content rendered from RST source. ``` Our guess is that we had switched the README from RST to MD some time ago, causing the `long_description` to quietly fail, and now the `gh-action-pypi-publish` GitHub action would no longer accept the bad `long_description`. We updated the `long_description_content_type` in `setup.py` to markdown in #1197 and the package was successfully published to PyPi, but there is still no Project description. So there must still be something that is not being generated correctly. We need to verify that `long_description` is properly being generated. We should test with local tools *and* https://test.pypi.org/ to verify that this is fixed. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 """Setup script.""" 2 import codecs 3 import os 4 import re 5 from pathlib import Path 6 from pprint import pprint 7 8 from setuptools import find_packages, setup 9 10 HERE = os.path.abspath(os.path.dirname(__file__)) 11 12 13 def read(*parts): 14 # intentionally *not* adding an encoding option to open 15 return codecs.open(os.path.join(HERE, *parts), "r").read() 16 17 18 def read_requirements(*parts): 19 """ 20 Return requirements from parts. 21 22 Given a requirements.txt (or similar style file), 23 returns a list of requirements. 24 Assumes anything after a single '#' on a line is a comment, and ignores 25 empty lines. 26 27 :param parts: list of filenames which contain the installation "parts", 28 i.e. submodule-specific installation requirements 29 :returns: A compiled list of requirements. 30 """ 31 requirements = [] 32 for line in read(*parts).splitlines(): 33 new_line = re.sub( # noqa: PD005 34 r"(\s*)?#.*$", # the space immediately before the 35 # hash mark, the hash mark, and 36 # anything that follows it 37 "", # replace with a blank string 38 line, 39 ) 40 new_line = re.sub( # noqa: PD005 41 r"-r.*$", # link to another requirement file 42 "", # replace with a blank string 43 new_line, 44 ) 45 new_line = re.sub( # noqa: PD005 46 r"-e \..*$", # link to editable install 47 "", # replace with a blank string 48 new_line, 49 ) 50 # print(line, "-->", new_line) 51 if new_line: # i.e. we have a non-zero-length string 52 requirements.append(new_line) 53 return requirements 54 55 56 # pull from requirements.IN, requirements.TXT is generated from this 57 INSTALL_REQUIRES = read_requirements(".requirements/base.in") 58 59 EXTRA_REQUIRES = { 60 "dev": read_requirements(".requirements/dev.in"), 61 "docs": read_requirements(".requirements/docs.in"), 62 "test": read_requirements(".requirements/testing.in"), 63 "biology": read_requirements(".requirements/biology.in"), 64 "chemistry": read_requirements(".requirements/chemistry.in"), 65 "engineering": read_requirements(".requirements/engineering.in"), 66 "spark": read_requirements(".requirements/spark.in"), 67 } 68 69 # add 'all' key to EXTRA_REQUIRES 70 all_requires = [] 71 for k, v in EXTRA_REQUIRES.items(): 72 all_requires.extend(v) 73 EXTRA_REQUIRES["all"] = set(all_requires) 74 75 for k1 in ["biology", "chemistry", "engineering", "spark"]: 76 for v2 in EXTRA_REQUIRES[k1]: 77 EXTRA_REQUIRES["docs"].append(v2) 78 79 pprint(EXTRA_REQUIRES) 80 81 82 def generate_long_description() -> str: 83 """ 84 Extra chunks from README for PyPI description. 85 86 Target chunks must be contained within `.. pypi-doc` pair comments, 87 so there must be an even number of comments in README. 88 89 :returns: Extracted description from README. 90 :raises Exception: if odd number of `.. pypi-doc` comments 91 in README. 92 """ 93 # Read the contents of README file 94 this_directory = Path(__file__).parent 95 with open(this_directory / "mkdocs" / "index.md", encoding="utf-8") as f: 96 readme = f.read() 97 98 # Find pypi-doc comments in README 99 indices = [m.start() for m in re.finditer(".. pypi-doc", readme)] 100 if len(indices) % 2 != 0: 101 raise Exception("Odd number of `.. pypi-doc` comments in README") 102 103 # Loop through pairs of comments and save text between pairs 104 long_description = "" 105 for i in range(0, len(indices), 2): 106 start_index = indices[i] + 11 107 end_index = indices[i + 1] 108 long_description += readme[start_index:end_index] 109 return long_description 110 111 112 setup( 113 name="pyjanitor", 114 version="0.24.0", 115 description="Tools for cleaning pandas DataFrames", 116 author="pyjanitor devs", 117 author_email="[email protected]", 118 url="https://github.com/pyjanitor-devs/pyjanitor", 119 license="MIT", 120 # packages=["janitor", "janitor.xarray", "janitor.spark"], 121 packages=find_packages(), 122 install_requires=INSTALL_REQUIRES, 123 extras_require=EXTRA_REQUIRES, 124 python_requires=">=3.6", 125 long_description=generate_long_description(), 126 long_description_content_type="text/markdown", 127 ) 128 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -68,7 +68,7 @@ # add 'all' key to EXTRA_REQUIRES all_requires = [] -for k, v in EXTRA_REQUIRES.items(): +for _, v in EXTRA_REQUIRES.items(): all_requires.extend(v) EXTRA_REQUIRES["all"] = set(all_requires) @@ -83,11 +83,11 @@ """ Extra chunks from README for PyPI description. - Target chunks must be contained within `.. pypi-doc` pair comments, + Target chunks must be contained within `<!-- pypi-doc -->` pair comments, so there must be an even number of comments in README. :returns: Extracted description from README. - :raises Exception: if odd number of `.. pypi-doc` comments + :raises Exception: If odd number of `<!-- pypi-doc -->` comments in README. """ # Read the contents of README file @@ -96,14 +96,15 @@ readme = f.read() # Find pypi-doc comments in README - indices = [m.start() for m in re.finditer(".. pypi-doc", readme)] + boundary = r"<!-- pypi-doc -->" + indices = [m.start() for m in re.finditer(boundary, readme)] if len(indices) % 2 != 0: - raise Exception("Odd number of `.. pypi-doc` comments in README") + raise Exception(f"Odd number of `{boundary}` comments in README") # Loop through pairs of comments and save text between pairs long_description = "" for i in range(0, len(indices), 2): - start_index = indices[i] + 11 + start_index = indices[i] + len(boundary) end_index = indices[i + 1] long_description += readme[start_index:end_index] return long_description
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -68,7 +68,7 @@\n \n # add 'all' key to EXTRA_REQUIRES\n all_requires = []\n-for k, v in EXTRA_REQUIRES.items():\n+for _, v in EXTRA_REQUIRES.items():\n all_requires.extend(v)\n EXTRA_REQUIRES[\"all\"] = set(all_requires)\n \n@@ -83,11 +83,11 @@\n \"\"\"\n Extra chunks from README for PyPI description.\n \n- Target chunks must be contained within `.. pypi-doc` pair comments,\n+ Target chunks must be contained within `<!-- pypi-doc -->` pair comments,\n so there must be an even number of comments in README.\n \n :returns: Extracted description from README.\n- :raises Exception: if odd number of `.. pypi-doc` comments\n+ :raises Exception: If odd number of `<!-- pypi-doc -->` comments\n in README.\n \"\"\"\n # Read the contents of README file\n@@ -96,14 +96,15 @@\n readme = f.read()\n \n # Find pypi-doc comments in README\n- indices = [m.start() for m in re.finditer(\".. pypi-doc\", readme)]\n+ boundary = r\"<!-- pypi-doc -->\"\n+ indices = [m.start() for m in re.finditer(boundary, readme)]\n if len(indices) % 2 != 0:\n- raise Exception(\"Odd number of `.. pypi-doc` comments in README\")\n+ raise Exception(f\"Odd number of `{boundary}` comments in README\")\n \n # Loop through pairs of comments and save text between pairs\n long_description = \"\"\n for i in range(0, len(indices), 2):\n- start_index = indices[i] + 11\n+ start_index = indices[i] + len(boundary)\n end_index = indices[i + 1]\n long_description += readme[start_index:end_index]\n return long_description\n", "issue": "[DOC] Missing Pyjanitor description on PyPi\n# Brief Description of Fix\r\n\r\nSince `pyjanitor` release `0.22.0`, there has not been a [\"Project description\" for the package on PyPi](https://pypi.org/project/pyjanitor/0.22.0/#description).\r\n\r\nDuring the release of of `0.24.0`, @samukweku and I got an error during the release process due to:\r\n\r\n```\r\nChecking dist/pyjanitor-0.24.0-py3-none-any.whl: FAILED\r\nERROR `long_description` has syntax errors in markup and would not be \r\n rendered on PyPI. \r\n No content rendered from RST source. \r\n```\r\nOur guess is that we had switched the README from RST to MD some time ago, causing the `long_description` to quietly fail, and now the `gh-action-pypi-publish` GitHub action would no longer accept the bad `long_description`.\r\n\r\nWe updated the `long_description_content_type` in `setup.py` to markdown in #1197 and the package was successfully published to PyPi, but there is still no Project description. So there must still be something that is not being generated correctly.\r\n\r\nWe need to verify that `long_description` is properly being generated. We should test with local tools *and* https://test.pypi.org/ to verify that this is fixed.\n", "before_files": [{"content": "\"\"\"Setup script.\"\"\"\nimport codecs\nimport os\nimport re\nfrom pathlib import Path\nfrom pprint import pprint\n\nfrom setuptools import find_packages, setup\n\nHERE = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read(*parts):\n # intentionally *not* adding an encoding option to open\n return codecs.open(os.path.join(HERE, *parts), \"r\").read()\n\n\ndef read_requirements(*parts):\n \"\"\"\n Return requirements from parts.\n\n Given a requirements.txt (or similar style file),\n returns a list of requirements.\n Assumes anything after a single '#' on a line is a comment, and ignores\n empty lines.\n\n :param parts: list of filenames which contain the installation \"parts\",\n i.e. submodule-specific installation requirements\n :returns: A compiled list of requirements.\n \"\"\"\n requirements = []\n for line in read(*parts).splitlines():\n new_line = re.sub( # noqa: PD005\n r\"(\\s*)?#.*$\", # the space immediately before the\n # hash mark, the hash mark, and\n # anything that follows it\n \"\", # replace with a blank string\n line,\n )\n new_line = re.sub( # noqa: PD005\n r\"-r.*$\", # link to another requirement file\n \"\", # replace with a blank string\n new_line,\n )\n new_line = re.sub( # noqa: PD005\n r\"-e \\..*$\", # link to editable install\n \"\", # replace with a blank string\n new_line,\n )\n # print(line, \"-->\", new_line)\n if new_line: # i.e. we have a non-zero-length string\n requirements.append(new_line)\n return requirements\n\n\n# pull from requirements.IN, requirements.TXT is generated from this\nINSTALL_REQUIRES = read_requirements(\".requirements/base.in\")\n\nEXTRA_REQUIRES = {\n \"dev\": read_requirements(\".requirements/dev.in\"),\n \"docs\": read_requirements(\".requirements/docs.in\"),\n \"test\": read_requirements(\".requirements/testing.in\"),\n \"biology\": read_requirements(\".requirements/biology.in\"),\n \"chemistry\": read_requirements(\".requirements/chemistry.in\"),\n \"engineering\": read_requirements(\".requirements/engineering.in\"),\n \"spark\": read_requirements(\".requirements/spark.in\"),\n}\n\n# add 'all' key to EXTRA_REQUIRES\nall_requires = []\nfor k, v in EXTRA_REQUIRES.items():\n all_requires.extend(v)\nEXTRA_REQUIRES[\"all\"] = set(all_requires)\n\nfor k1 in [\"biology\", \"chemistry\", \"engineering\", \"spark\"]:\n for v2 in EXTRA_REQUIRES[k1]:\n EXTRA_REQUIRES[\"docs\"].append(v2)\n\npprint(EXTRA_REQUIRES)\n\n\ndef generate_long_description() -> str:\n \"\"\"\n Extra chunks from README for PyPI description.\n\n Target chunks must be contained within `.. pypi-doc` pair comments,\n so there must be an even number of comments in README.\n\n :returns: Extracted description from README.\n :raises Exception: if odd number of `.. pypi-doc` comments\n in README.\n \"\"\"\n # Read the contents of README file\n this_directory = Path(__file__).parent\n with open(this_directory / \"mkdocs\" / \"index.md\", encoding=\"utf-8\") as f:\n readme = f.read()\n\n # Find pypi-doc comments in README\n indices = [m.start() for m in re.finditer(\".. pypi-doc\", readme)]\n if len(indices) % 2 != 0:\n raise Exception(\"Odd number of `.. pypi-doc` comments in README\")\n\n # Loop through pairs of comments and save text between pairs\n long_description = \"\"\n for i in range(0, len(indices), 2):\n start_index = indices[i] + 11\n end_index = indices[i + 1]\n long_description += readme[start_index:end_index]\n return long_description\n\n\nsetup(\n name=\"pyjanitor\",\n version=\"0.24.0\",\n description=\"Tools for cleaning pandas DataFrames\",\n author=\"pyjanitor devs\",\n author_email=\"[email protected]\",\n url=\"https://github.com/pyjanitor-devs/pyjanitor\",\n license=\"MIT\",\n # packages=[\"janitor\", \"janitor.xarray\", \"janitor.spark\"],\n packages=find_packages(),\n install_requires=INSTALL_REQUIRES,\n extras_require=EXTRA_REQUIRES,\n python_requires=\">=3.6\",\n long_description=generate_long_description(),\n long_description_content_type=\"text/markdown\",\n)\n", "path": "setup.py"}], "after_files": [{"content": "\"\"\"Setup script.\"\"\"\nimport codecs\nimport os\nimport re\nfrom pathlib import Path\nfrom pprint import pprint\n\nfrom setuptools import find_packages, setup\n\nHERE = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read(*parts):\n # intentionally *not* adding an encoding option to open\n return codecs.open(os.path.join(HERE, *parts), \"r\").read()\n\n\ndef read_requirements(*parts):\n \"\"\"\n Return requirements from parts.\n\n Given a requirements.txt (or similar style file),\n returns a list of requirements.\n Assumes anything after a single '#' on a line is a comment, and ignores\n empty lines.\n\n :param parts: list of filenames which contain the installation \"parts\",\n i.e. submodule-specific installation requirements\n :returns: A compiled list of requirements.\n \"\"\"\n requirements = []\n for line in read(*parts).splitlines():\n new_line = re.sub( # noqa: PD005\n r\"(\\s*)?#.*$\", # the space immediately before the\n # hash mark, the hash mark, and\n # anything that follows it\n \"\", # replace with a blank string\n line,\n )\n new_line = re.sub( # noqa: PD005\n r\"-r.*$\", # link to another requirement file\n \"\", # replace with a blank string\n new_line,\n )\n new_line = re.sub( # noqa: PD005\n r\"-e \\..*$\", # link to editable install\n \"\", # replace with a blank string\n new_line,\n )\n # print(line, \"-->\", new_line)\n if new_line: # i.e. we have a non-zero-length string\n requirements.append(new_line)\n return requirements\n\n\n# pull from requirements.IN, requirements.TXT is generated from this\nINSTALL_REQUIRES = read_requirements(\".requirements/base.in\")\n\nEXTRA_REQUIRES = {\n \"dev\": read_requirements(\".requirements/dev.in\"),\n \"docs\": read_requirements(\".requirements/docs.in\"),\n \"test\": read_requirements(\".requirements/testing.in\"),\n \"biology\": read_requirements(\".requirements/biology.in\"),\n \"chemistry\": read_requirements(\".requirements/chemistry.in\"),\n \"engineering\": read_requirements(\".requirements/engineering.in\"),\n \"spark\": read_requirements(\".requirements/spark.in\"),\n}\n\n# add 'all' key to EXTRA_REQUIRES\nall_requires = []\nfor _, v in EXTRA_REQUIRES.items():\n all_requires.extend(v)\nEXTRA_REQUIRES[\"all\"] = set(all_requires)\n\nfor k1 in [\"biology\", \"chemistry\", \"engineering\", \"spark\"]:\n for v2 in EXTRA_REQUIRES[k1]:\n EXTRA_REQUIRES[\"docs\"].append(v2)\n\npprint(EXTRA_REQUIRES)\n\n\ndef generate_long_description() -> str:\n \"\"\"\n Extra chunks from README for PyPI description.\n\n Target chunks must be contained within `<!-- pypi-doc -->` pair comments,\n so there must be an even number of comments in README.\n\n :returns: Extracted description from README.\n :raises Exception: If odd number of `<!-- pypi-doc -->` comments\n in README.\n \"\"\"\n # Read the contents of README file\n this_directory = Path(__file__).parent\n with open(this_directory / \"mkdocs\" / \"index.md\", encoding=\"utf-8\") as f:\n readme = f.read()\n\n # Find pypi-doc comments in README\n boundary = r\"<!-- pypi-doc -->\"\n indices = [m.start() for m in re.finditer(boundary, readme)]\n if len(indices) % 2 != 0:\n raise Exception(f\"Odd number of `{boundary}` comments in README\")\n\n # Loop through pairs of comments and save text between pairs\n long_description = \"\"\n for i in range(0, len(indices), 2):\n start_index = indices[i] + len(boundary)\n end_index = indices[i + 1]\n long_description += readme[start_index:end_index]\n return long_description\n\n\nsetup(\n name=\"pyjanitor\",\n version=\"0.24.0\",\n description=\"Tools for cleaning pandas DataFrames\",\n author=\"pyjanitor devs\",\n author_email=\"[email protected]\",\n url=\"https://github.com/pyjanitor-devs/pyjanitor\",\n license=\"MIT\",\n # packages=[\"janitor\", \"janitor.xarray\", \"janitor.spark\"],\n packages=find_packages(),\n install_requires=INSTALL_REQUIRES,\n extras_require=EXTRA_REQUIRES,\n python_requires=\">=3.6\",\n long_description=generate_long_description(),\n long_description_content_type=\"text/markdown\",\n)\n", "path": "setup.py"}]}
1,829
437
gh_patches_debug_1208
rasdani/github-patches
git_diff
OCA__server-tools-464
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- runbot 9.0 red due to letsencrypt? Hi, It seems the 9.0 branch is red on runbot due to the letsencrypt module? ``` Call of self.pool.get('letsencrypt').cron(cr, uid, *()) failed in Job 2 Traceback (most recent call last): File "/srv/openerp/instances/openerp-oca-runbot/parts/odoo-extra/runbot/static/build/3148182-9-0-209efa/openerp/addons/base/ir/ir_cron.py", line 129, in _callback getattr(model, method_name)(cr, uid, *args) File "/srv/openerp/instances/openerp-oca-runbot/parts/odoo-extra/runbot/static/build/3148182-9-0-209efa/openerp/api.py", line 250, in wrapper return old_api(self, *args, **kwargs) File "/srv/openerp/instances/openerp-oca-runbot/parts/odoo-extra/runbot/static/build/3148182-9-0-209efa/openerp/api.py", line 354, in old_api result = method(recs, *args, **kwargs) File "/srv/openerp/instances/openerp-oca-runbot/parts/odoo-extra/runbot/static/build/3148182-9-0-209efa/openerp/addons/letsencrypt/models/letsencrypt.py", line 151, in cron account_key, csr, acme_challenge, log=_logger, CA=DEFAULT_CA) File "/srv/openerp/instances/openerp-oca-runbot/sandbox/local/lib/python2.7/site-packages/acme_tiny.py", line 104, in get_crt raise ValueError("Error requesting challenges: {0} {1}".format(code, result)) ValueError: Error requesting challenges: 400 { "type": "urn:acme:error:malformed", "detail": "Error creating new authz :: Invalid character in DNS name", "status": 400 } ``` @hbrunn --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `letsencrypt/__openerp__.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # © 2016 Therp BV <http://therp.nl> 3 # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). 4 { 5 "name": "Let's encrypt", 6 "version": "9.0.1.0.0", 7 "author": "Therp BV," 8 "Tecnativa," 9 "Odoo Community Association (OCA)", 10 "license": "AGPL-3", 11 "category": "Hidden/Dependency", 12 "summary": "Request SSL certificates from letsencrypt.org", 13 "depends": [ 14 'base', 15 ], 16 "data": [ 17 "data/ir_config_parameter.xml", 18 "data/ir_cron.xml", 19 ], 20 "post_init_hook": 'post_init_hook', 21 "installable": True, 22 "external_dependencies": { 23 'bin': [ 24 'openssl', 25 ], 26 'python': [ 27 'acme_tiny', 28 'IPy', 29 ], 30 }, 31 } 32 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/letsencrypt/__openerp__.py b/letsencrypt/__openerp__.py --- a/letsencrypt/__openerp__.py +++ b/letsencrypt/__openerp__.py @@ -16,6 +16,7 @@ "data": [ "data/ir_config_parameter.xml", "data/ir_cron.xml", + "demo/ir_cron.xml", ], "post_init_hook": 'post_init_hook', "installable": True,
{"golden_diff": "diff --git a/letsencrypt/__openerp__.py b/letsencrypt/__openerp__.py\n--- a/letsencrypt/__openerp__.py\n+++ b/letsencrypt/__openerp__.py\n@@ -16,6 +16,7 @@\n \"data\": [\n \"data/ir_config_parameter.xml\",\n \"data/ir_cron.xml\",\n+ \"demo/ir_cron.xml\",\n ],\n \"post_init_hook\": 'post_init_hook',\n \"installable\": True,\n", "issue": "runbot 9.0 red due to letsencrypt?\nHi,\n\nIt seems the 9.0 branch is red on runbot due to the letsencrypt module?\n\n```\nCall of self.pool.get('letsencrypt').cron(cr, uid, *()) failed in Job 2\nTraceback (most recent call last):\n File \"/srv/openerp/instances/openerp-oca-runbot/parts/odoo-extra/runbot/static/build/3148182-9-0-209efa/openerp/addons/base/ir/ir_cron.py\", line 129, in _callback\n getattr(model, method_name)(cr, uid, *args)\n File \"/srv/openerp/instances/openerp-oca-runbot/parts/odoo-extra/runbot/static/build/3148182-9-0-209efa/openerp/api.py\", line 250, in wrapper\n return old_api(self, *args, **kwargs)\n File \"/srv/openerp/instances/openerp-oca-runbot/parts/odoo-extra/runbot/static/build/3148182-9-0-209efa/openerp/api.py\", line 354, in old_api\n result = method(recs, *args, **kwargs)\n File \"/srv/openerp/instances/openerp-oca-runbot/parts/odoo-extra/runbot/static/build/3148182-9-0-209efa/openerp/addons/letsencrypt/models/letsencrypt.py\", line 151, in cron\n account_key, csr, acme_challenge, log=_logger, CA=DEFAULT_CA)\n File \"/srv/openerp/instances/openerp-oca-runbot/sandbox/local/lib/python2.7/site-packages/acme_tiny.py\", line 104, in get_crt\n raise ValueError(\"Error requesting challenges: {0} {1}\".format(code, result))\nValueError: Error requesting challenges: 400 {\n \"type\": \"urn:acme:error:malformed\",\n \"detail\": \"Error creating new authz :: Invalid character in DNS name\",\n \"status\": 400\n}\n```\n\n@hbrunn \n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# \u00a9 2016 Therp BV <http://therp.nl>\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).\n{\n \"name\": \"Let's encrypt\",\n \"version\": \"9.0.1.0.0\",\n \"author\": \"Therp BV,\"\n \"Tecnativa,\"\n \"Odoo Community Association (OCA)\",\n \"license\": \"AGPL-3\",\n \"category\": \"Hidden/Dependency\",\n \"summary\": \"Request SSL certificates from letsencrypt.org\",\n \"depends\": [\n 'base',\n ],\n \"data\": [\n \"data/ir_config_parameter.xml\",\n \"data/ir_cron.xml\",\n ],\n \"post_init_hook\": 'post_init_hook',\n \"installable\": True,\n \"external_dependencies\": {\n 'bin': [\n 'openssl',\n ],\n 'python': [\n 'acme_tiny',\n 'IPy',\n ],\n },\n}\n", "path": "letsencrypt/__openerp__.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# \u00a9 2016 Therp BV <http://therp.nl>\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).\n{\n \"name\": \"Let's encrypt\",\n \"version\": \"9.0.1.0.0\",\n \"author\": \"Therp BV,\"\n \"Tecnativa,\"\n \"Odoo Community Association (OCA)\",\n \"license\": \"AGPL-3\",\n \"category\": \"Hidden/Dependency\",\n \"summary\": \"Request SSL certificates from letsencrypt.org\",\n \"depends\": [\n 'base',\n ],\n \"data\": [\n \"data/ir_config_parameter.xml\",\n \"data/ir_cron.xml\",\n \"demo/ir_cron.xml\",\n ],\n \"post_init_hook\": 'post_init_hook',\n \"installable\": True,\n \"external_dependencies\": {\n 'bin': [\n 'openssl',\n ],\n 'python': [\n 'acme_tiny',\n 'IPy',\n ],\n },\n}\n", "path": "letsencrypt/__openerp__.py"}]}
1,034
113
gh_patches_debug_615
rasdani/github-patches
git_diff
pex-tool__pex-1255
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Release 2.1.32 On the docket: + [x] Venv `pex` and bin scripts can run afoul of shebang length limits. #1252 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pex/version.py` Content: ``` 1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 __version__ = "2.1.31" 5 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pex/version.py b/pex/version.py --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = "2.1.31" +__version__ = "2.1.32"
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.31\"\n+__version__ = \"2.1.32\"\n", "issue": "Release 2.1.32\nOn the docket:\r\n+ [x] Venv `pex` and bin scripts can run afoul of shebang length limits. #1252\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.31\"\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.32\"\n", "path": "pex/version.py"}]}
350
96
gh_patches_debug_56669
rasdani/github-patches
git_diff
magenta__magenta-1079
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Error in running Onsets and Frames Colab Notebook Hi @cghawthorne I am using your [Colab notebook](https://colab.research.google.com/notebook#fileId=/v2/external/notebooks/magenta/onsets_frames_transcription/onsets_frames_transcription.ipynb) to test your model but it stopped working a week ago. Error on the inference section: UnknownError: exceptions.AttributeError: 'module' object has no attribute 'logamplitude' [[Node: wav_to_spec = PyFunc[Tin=[DT_STRING], Tout=[DT_FLOAT], token="pyfunc_1"](transform_wav_data_op)]] [[Node: IteratorGetNext = IteratorGetNext[output_shapes=[[?], [?,?,88], [?,?,88], [?], [?], [?,?,88], [?,?,229,1]], output_types=[DT_STRING, DT_FLOAT, DT_FLOAT, DT_INT32, DT_STRING, DT_FLOAT, DT_FLOAT], _device="/job:localhost/replica:0/task:0/device:CPU:0"](Iterator)]] Thanks, Bardia --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `magenta/version.py` Content: ``` 1 # Copyright 2016 Google Inc. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 r"""Separate file for storing the current version of Magenta. 15 16 Stored in a separate file so that setup.py can reference the version without 17 pulling in all the dependencies in __init__.py. 18 """ 19 20 __version__ = '0.3.5' 21 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/magenta/version.py b/magenta/version.py --- a/magenta/version.py +++ b/magenta/version.py @@ -17,4 +17,4 @@ pulling in all the dependencies in __init__.py. """ -__version__ = '0.3.5' +__version__ = '0.3.6'
{"golden_diff": "diff --git a/magenta/version.py b/magenta/version.py\n--- a/magenta/version.py\n+++ b/magenta/version.py\n@@ -17,4 +17,4 @@\n pulling in all the dependencies in __init__.py.\n \"\"\"\n \n-__version__ = '0.3.5'\n+__version__ = '0.3.6'\n", "issue": "Error in running Onsets and Frames Colab Notebook\nHi @cghawthorne\r\nI am using your [Colab notebook](https://colab.research.google.com/notebook#fileId=/v2/external/notebooks/magenta/onsets_frames_transcription/onsets_frames_transcription.ipynb) to test your model but it stopped working a week ago.\r\n\r\nError on the inference section:\r\nUnknownError: exceptions.AttributeError: 'module' object has no attribute 'logamplitude'\r\n\t [[Node: wav_to_spec = PyFunc[Tin=[DT_STRING], Tout=[DT_FLOAT], token=\"pyfunc_1\"](transform_wav_data_op)]]\r\n\t [[Node: IteratorGetNext = IteratorGetNext[output_shapes=[[?], [?,?,88], [?,?,88], [?], [?], [?,?,88], [?,?,229,1]], output_types=[DT_STRING, DT_FLOAT, DT_FLOAT, DT_INT32, DT_STRING, DT_FLOAT, DT_FLOAT], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"](Iterator)]]\r\n\r\nThanks,\r\nBardia\r\n\r\n\n", "before_files": [{"content": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nr\"\"\"Separate file for storing the current version of Magenta.\n\nStored in a separate file so that setup.py can reference the version without\npulling in all the dependencies in __init__.py.\n\"\"\"\n\n__version__ = '0.3.5'\n", "path": "magenta/version.py"}], "after_files": [{"content": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nr\"\"\"Separate file for storing the current version of Magenta.\n\nStored in a separate file so that setup.py can reference the version without\npulling in all the dependencies in __init__.py.\n\"\"\"\n\n__version__ = '0.3.6'\n", "path": "magenta/version.py"}]}
721
77
gh_patches_debug_11826
rasdani/github-patches
git_diff
pypi__warehouse-12408
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Monospaced font for text/plain long_description Don't you think that would be nice to wrap project descriptions in text/plain with pre tag? Close if duplicate. I'm really sorry, looking through over 400 issues of production system is beyond of my capabilities. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `warehouse/utils/readme.py` Content: ``` 1 # Licensed under the Apache License, Version 2.0 (the "License"); 2 # you may not use this file except in compliance with the License. 3 # You may obtain a copy of the License at 4 # 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # 7 # Unless required by applicable law or agreed to in writing, software 8 # distributed under the License is distributed on an "AS IS" BASIS, 9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 # See the License for the specific language governing permissions and 11 # limitations under the License. 12 13 """Utils for rendering and updating package descriptions (READMEs).""" 14 15 import cgi 16 17 import pkg_resources 18 import readme_renderer.markdown 19 import readme_renderer.rst 20 import readme_renderer.txt 21 22 _RENDERERS = { 23 None: readme_renderer.rst, # Default if description_content_type is None 24 "": readme_renderer.rst, # Default if description_content_type is None 25 "text/plain": readme_renderer.txt, 26 "text/x-rst": readme_renderer.rst, 27 "text/markdown": readme_renderer.markdown, 28 } 29 30 31 def render(value, content_type=None, use_fallback=True): 32 if value is None: 33 return value 34 35 content_type, parameters = cgi.parse_header(content_type or "") 36 37 # Get the appropriate renderer 38 renderer = _RENDERERS.get(content_type, readme_renderer.txt) 39 40 # Actually render the given value, this will not only render the value, but 41 # also ensure that it's had any disallowed markup removed. 42 rendered = renderer.render(value, **parameters) 43 44 # If the content was not rendered, we'll render as plaintext instead. The 45 # reason it's necessary to do this instead of just accepting plaintext is 46 # that readme_renderer will deal with sanitizing the content. 47 # Skip the fallback option when validating that rendered output is ok. 48 if use_fallback and rendered is None: 49 rendered = readme_renderer.txt.render(value) 50 51 return rendered 52 53 54 def renderer_version(): 55 return pkg_resources.get_distribution("readme-renderer").version 56 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/warehouse/utils/readme.py b/warehouse/utils/readme.py --- a/warehouse/utils/readme.py +++ b/warehouse/utils/readme.py @@ -41,6 +41,10 @@ # also ensure that it's had any disallowed markup removed. rendered = renderer.render(value, **parameters) + # Wrap plaintext as preformatted to preserve whitespace. + if content_type == "text/plain": + rendered = f"<pre>{rendered}</pre>" + # If the content was not rendered, we'll render as plaintext instead. The # reason it's necessary to do this instead of just accepting plaintext is # that readme_renderer will deal with sanitizing the content.
{"golden_diff": "diff --git a/warehouse/utils/readme.py b/warehouse/utils/readme.py\n--- a/warehouse/utils/readme.py\n+++ b/warehouse/utils/readme.py\n@@ -41,6 +41,10 @@\n # also ensure that it's had any disallowed markup removed.\n rendered = renderer.render(value, **parameters)\n \n+ # Wrap plaintext as preformatted to preserve whitespace.\n+ if content_type == \"text/plain\":\n+ rendered = f\"<pre>{rendered}</pre>\"\n+\n # If the content was not rendered, we'll render as plaintext instead. The\n # reason it's necessary to do this instead of just accepting plaintext is\n # that readme_renderer will deal with sanitizing the content.\n", "issue": "Monospaced font for text/plain long_description\nDon't you think that would be nice to wrap project descriptions in text/plain with pre tag?\r\nClose if duplicate. I'm really sorry, looking through over 400 issues of production system is beyond of my capabilities.\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utils for rendering and updating package descriptions (READMEs).\"\"\"\n\nimport cgi\n\nimport pkg_resources\nimport readme_renderer.markdown\nimport readme_renderer.rst\nimport readme_renderer.txt\n\n_RENDERERS = {\n None: readme_renderer.rst, # Default if description_content_type is None\n \"\": readme_renderer.rst, # Default if description_content_type is None\n \"text/plain\": readme_renderer.txt,\n \"text/x-rst\": readme_renderer.rst,\n \"text/markdown\": readme_renderer.markdown,\n}\n\n\ndef render(value, content_type=None, use_fallback=True):\n if value is None:\n return value\n\n content_type, parameters = cgi.parse_header(content_type or \"\")\n\n # Get the appropriate renderer\n renderer = _RENDERERS.get(content_type, readme_renderer.txt)\n\n # Actually render the given value, this will not only render the value, but\n # also ensure that it's had any disallowed markup removed.\n rendered = renderer.render(value, **parameters)\n\n # If the content was not rendered, we'll render as plaintext instead. The\n # reason it's necessary to do this instead of just accepting plaintext is\n # that readme_renderer will deal with sanitizing the content.\n # Skip the fallback option when validating that rendered output is ok.\n if use_fallback and rendered is None:\n rendered = readme_renderer.txt.render(value)\n\n return rendered\n\n\ndef renderer_version():\n return pkg_resources.get_distribution(\"readme-renderer\").version\n", "path": "warehouse/utils/readme.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utils for rendering and updating package descriptions (READMEs).\"\"\"\n\nimport cgi\n\nimport pkg_resources\nimport readme_renderer.markdown\nimport readme_renderer.rst\nimport readme_renderer.txt\n\n_RENDERERS = {\n None: readme_renderer.rst, # Default if description_content_type is None\n \"\": readme_renderer.rst, # Default if description_content_type is None\n \"text/plain\": readme_renderer.txt,\n \"text/x-rst\": readme_renderer.rst,\n \"text/markdown\": readme_renderer.markdown,\n}\n\n\ndef render(value, content_type=None, use_fallback=True):\n if value is None:\n return value\n\n content_type, parameters = cgi.parse_header(content_type or \"\")\n\n # Get the appropriate renderer\n renderer = _RENDERERS.get(content_type, readme_renderer.txt)\n\n # Actually render the given value, this will not only render the value, but\n # also ensure that it's had any disallowed markup removed.\n rendered = renderer.render(value, **parameters)\n\n # Wrap plaintext as preformatted to preserve whitespace.\n if content_type == \"text/plain\":\n rendered = f\"<pre>{rendered}</pre>\"\n\n # If the content was not rendered, we'll render as plaintext instead. The\n # reason it's necessary to do this instead of just accepting plaintext is\n # that readme_renderer will deal with sanitizing the content.\n # Skip the fallback option when validating that rendered output is ok.\n if use_fallback and rendered is None:\n rendered = readme_renderer.txt.render(value)\n\n return rendered\n\n\ndef renderer_version():\n return pkg_resources.get_distribution(\"readme-renderer\").version\n", "path": "warehouse/utils/readme.py"}]}
862
157
gh_patches_debug_18559
rasdani/github-patches
git_diff
pytorch__vision-360
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- utils.make_grid should not return tensors of dimension 2 or 3 without normalizing them When passing in a single image of dimensionality 2 or 3 to utils.make_grid, the function currently returns this image, without normalizing it (but it should, according to the function's documentation). This is also problematic as utils.save_image calls utils.make_grid to normalize its images. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `torchvision/utils.py` Content: ``` 1 import torch 2 import math 3 irange = range 4 5 6 def make_grid(tensor, nrow=8, padding=2, 7 normalize=False, range=None, scale_each=False, pad_value=0): 8 """Make a grid of images. 9 10 Args: 11 tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W) 12 or a list of images all of the same size. 13 nrow (int, optional): Number of images displayed in each row of the grid. 14 The Final grid size is (B / nrow, nrow). Default is 8. 15 padding (int, optional): amount of padding. Default is 2. 16 normalize (bool, optional): If True, shift the image to the range (0, 1), 17 by subtracting the minimum and dividing by the maximum pixel value. 18 range (tuple, optional): tuple (min, max) where min and max are numbers, 19 then these numbers are used to normalize the image. By default, min and max 20 are computed from the tensor. 21 scale_each (bool, optional): If True, scale each image in the batch of 22 images separately rather than the (min, max) over all images. 23 pad_value (float, optional): Value for the padded pixels. 24 25 Example: 26 See this notebook `here <https://gist.github.com/anonymous/bf16430f7750c023141c562f3e9f2a91>`_ 27 28 """ 29 if not (torch.is_tensor(tensor) or 30 (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))): 31 raise TypeError('tensor or list of tensors expected, got {}'.format(type(tensor))) 32 33 # if list of tensors, convert to a 4D mini-batch Tensor 34 if isinstance(tensor, list): 35 tensor = torch.stack(tensor, dim=0) 36 37 if tensor.dim() == 2: # single image H x W 38 tensor = tensor.view(1, tensor.size(0), tensor.size(1)) 39 if tensor.dim() == 3: # single image 40 if tensor.size(0) == 1: # if single-channel, convert to 3-channel 41 tensor = torch.cat((tensor, tensor, tensor), 0) 42 return tensor 43 if tensor.dim() == 4 and tensor.size(1) == 1: # single-channel images 44 tensor = torch.cat((tensor, tensor, tensor), 1) 45 46 if normalize is True: 47 tensor = tensor.clone() # avoid modifying tensor in-place 48 if range is not None: 49 assert isinstance(range, tuple), \ 50 "range has to be a tuple (min, max) if specified. min and max are numbers" 51 52 def norm_ip(img, min, max): 53 img.clamp_(min=min, max=max) 54 img.add_(-min).div_(max - min) 55 56 def norm_range(t, range): 57 if range is not None: 58 norm_ip(t, range[0], range[1]) 59 else: 60 norm_ip(t, t.min(), t.max()) 61 62 if scale_each is True: 63 for t in tensor: # loop over mini-batch dimension 64 norm_range(t, range) 65 else: 66 norm_range(tensor, range) 67 68 # make the mini-batch of images into a grid 69 nmaps = tensor.size(0) 70 xmaps = min(nrow, nmaps) 71 ymaps = int(math.ceil(float(nmaps) / xmaps)) 72 height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding) 73 grid = tensor.new(3, height * ymaps + padding, width * xmaps + padding).fill_(pad_value) 74 k = 0 75 for y in irange(ymaps): 76 for x in irange(xmaps): 77 if k >= nmaps: 78 break 79 grid.narrow(1, y * height + padding, height - padding)\ 80 .narrow(2, x * width + padding, width - padding)\ 81 .copy_(tensor[k]) 82 k = k + 1 83 return grid 84 85 86 def save_image(tensor, filename, nrow=8, padding=2, 87 normalize=False, range=None, scale_each=False, pad_value=0): 88 """Save a given Tensor into an image file. 89 90 Args: 91 tensor (Tensor or list): Image to be saved. If given a mini-batch tensor, 92 saves the tensor as a grid of images by calling ``make_grid``. 93 **kwargs: Other arguments are documented in ``make_grid``. 94 """ 95 from PIL import Image 96 grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value, 97 normalize=normalize, range=range, scale_each=scale_each) 98 ndarr = grid.mul(255).clamp(0, 255).byte().permute(1, 2, 0).cpu().numpy() 99 im = Image.fromarray(ndarr) 100 im.save(filename) 101 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/torchvision/utils.py b/torchvision/utils.py --- a/torchvision/utils.py +++ b/torchvision/utils.py @@ -39,7 +39,8 @@ if tensor.dim() == 3: # single image if tensor.size(0) == 1: # if single-channel, convert to 3-channel tensor = torch.cat((tensor, tensor, tensor), 0) - return tensor + tensor = tensor.view(1, tensor.size(0), tensor.size(1), tensor.size(2)) + if tensor.dim() == 4 and tensor.size(1) == 1: # single-channel images tensor = torch.cat((tensor, tensor, tensor), 1) @@ -65,6 +66,9 @@ else: norm_range(tensor, range) + if tensor.size(0) == 1: + return tensor.squeeze() + # make the mini-batch of images into a grid nmaps = tensor.size(0) xmaps = min(nrow, nmaps)
{"golden_diff": "diff --git a/torchvision/utils.py b/torchvision/utils.py\n--- a/torchvision/utils.py\n+++ b/torchvision/utils.py\n@@ -39,7 +39,8 @@\n if tensor.dim() == 3: # single image\n if tensor.size(0) == 1: # if single-channel, convert to 3-channel\n tensor = torch.cat((tensor, tensor, tensor), 0)\n- return tensor\n+ tensor = tensor.view(1, tensor.size(0), tensor.size(1), tensor.size(2))\n+\n if tensor.dim() == 4 and tensor.size(1) == 1: # single-channel images\n tensor = torch.cat((tensor, tensor, tensor), 1)\n \n@@ -65,6 +66,9 @@\n else:\n norm_range(tensor, range)\n \n+ if tensor.size(0) == 1:\n+ return tensor.squeeze()\n+\n # make the mini-batch of images into a grid\n nmaps = tensor.size(0)\n xmaps = min(nrow, nmaps)\n", "issue": "utils.make_grid should not return tensors of dimension 2 or 3 without normalizing them\nWhen passing in a single image of dimensionality 2 or 3 to utils.make_grid, the function currently returns this image, without normalizing it (but it should, according to the function's documentation).\r\nThis is also problematic as utils.save_image calls utils.make_grid to normalize its images. \n", "before_files": [{"content": "import torch\nimport math\nirange = range\n\n\ndef make_grid(tensor, nrow=8, padding=2,\n normalize=False, range=None, scale_each=False, pad_value=0):\n \"\"\"Make a grid of images.\n\n Args:\n tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)\n or a list of images all of the same size.\n nrow (int, optional): Number of images displayed in each row of the grid.\n The Final grid size is (B / nrow, nrow). Default is 8.\n padding (int, optional): amount of padding. Default is 2.\n normalize (bool, optional): If True, shift the image to the range (0, 1),\n by subtracting the minimum and dividing by the maximum pixel value.\n range (tuple, optional): tuple (min, max) where min and max are numbers,\n then these numbers are used to normalize the image. By default, min and max\n are computed from the tensor.\n scale_each (bool, optional): If True, scale each image in the batch of\n images separately rather than the (min, max) over all images.\n pad_value (float, optional): Value for the padded pixels.\n\n Example:\n See this notebook `here <https://gist.github.com/anonymous/bf16430f7750c023141c562f3e9f2a91>`_\n\n \"\"\"\n if not (torch.is_tensor(tensor) or\n (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))):\n raise TypeError('tensor or list of tensors expected, got {}'.format(type(tensor)))\n\n # if list of tensors, convert to a 4D mini-batch Tensor\n if isinstance(tensor, list):\n tensor = torch.stack(tensor, dim=0)\n\n if tensor.dim() == 2: # single image H x W\n tensor = tensor.view(1, tensor.size(0), tensor.size(1))\n if tensor.dim() == 3: # single image\n if tensor.size(0) == 1: # if single-channel, convert to 3-channel\n tensor = torch.cat((tensor, tensor, tensor), 0)\n return tensor\n if tensor.dim() == 4 and tensor.size(1) == 1: # single-channel images\n tensor = torch.cat((tensor, tensor, tensor), 1)\n\n if normalize is True:\n tensor = tensor.clone() # avoid modifying tensor in-place\n if range is not None:\n assert isinstance(range, tuple), \\\n \"range has to be a tuple (min, max) if specified. min and max are numbers\"\n\n def norm_ip(img, min, max):\n img.clamp_(min=min, max=max)\n img.add_(-min).div_(max - min)\n\n def norm_range(t, range):\n if range is not None:\n norm_ip(t, range[0], range[1])\n else:\n norm_ip(t, t.min(), t.max())\n\n if scale_each is True:\n for t in tensor: # loop over mini-batch dimension\n norm_range(t, range)\n else:\n norm_range(tensor, range)\n\n # make the mini-batch of images into a grid\n nmaps = tensor.size(0)\n xmaps = min(nrow, nmaps)\n ymaps = int(math.ceil(float(nmaps) / xmaps))\n height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)\n grid = tensor.new(3, height * ymaps + padding, width * xmaps + padding).fill_(pad_value)\n k = 0\n for y in irange(ymaps):\n for x in irange(xmaps):\n if k >= nmaps:\n break\n grid.narrow(1, y * height + padding, height - padding)\\\n .narrow(2, x * width + padding, width - padding)\\\n .copy_(tensor[k])\n k = k + 1\n return grid\n\n\ndef save_image(tensor, filename, nrow=8, padding=2,\n normalize=False, range=None, scale_each=False, pad_value=0):\n \"\"\"Save a given Tensor into an image file.\n\n Args:\n tensor (Tensor or list): Image to be saved. If given a mini-batch tensor,\n saves the tensor as a grid of images by calling ``make_grid``.\n **kwargs: Other arguments are documented in ``make_grid``.\n \"\"\"\n from PIL import Image\n grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value,\n normalize=normalize, range=range, scale_each=scale_each)\n ndarr = grid.mul(255).clamp(0, 255).byte().permute(1, 2, 0).cpu().numpy()\n im = Image.fromarray(ndarr)\n im.save(filename)\n", "path": "torchvision/utils.py"}], "after_files": [{"content": "import torch\nimport math\nirange = range\n\n\ndef make_grid(tensor, nrow=8, padding=2,\n normalize=False, range=None, scale_each=False, pad_value=0):\n \"\"\"Make a grid of images.\n\n Args:\n tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)\n or a list of images all of the same size.\n nrow (int, optional): Number of images displayed in each row of the grid.\n The Final grid size is (B / nrow, nrow). Default is 8.\n padding (int, optional): amount of padding. Default is 2.\n normalize (bool, optional): If True, shift the image to the range (0, 1),\n by subtracting the minimum and dividing by the maximum pixel value.\n range (tuple, optional): tuple (min, max) where min and max are numbers,\n then these numbers are used to normalize the image. By default, min and max\n are computed from the tensor.\n scale_each (bool, optional): If True, scale each image in the batch of\n images separately rather than the (min, max) over all images.\n pad_value (float, optional): Value for the padded pixels.\n\n Example:\n See this notebook `here <https://gist.github.com/anonymous/bf16430f7750c023141c562f3e9f2a91>`_\n\n \"\"\"\n if not (torch.is_tensor(tensor) or\n (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))):\n raise TypeError('tensor or list of tensors expected, got {}'.format(type(tensor)))\n\n # if list of tensors, convert to a 4D mini-batch Tensor\n if isinstance(tensor, list):\n tensor = torch.stack(tensor, dim=0)\n\n if tensor.dim() == 2: # single image H x W\n tensor = tensor.view(1, tensor.size(0), tensor.size(1))\n if tensor.dim() == 3: # single image\n if tensor.size(0) == 1: # if single-channel, convert to 3-channel\n tensor = torch.cat((tensor, tensor, tensor), 0)\n tensor = tensor.view(1, tensor.size(0), tensor.size(1), tensor.size(2))\n\n if tensor.dim() == 4 and tensor.size(1) == 1: # single-channel images\n tensor = torch.cat((tensor, tensor, tensor), 1)\n\n if normalize is True:\n tensor = tensor.clone() # avoid modifying tensor in-place\n if range is not None:\n assert isinstance(range, tuple), \\\n \"range has to be a tuple (min, max) if specified. min and max are numbers\"\n\n def norm_ip(img, min, max):\n img.clamp_(min=min, max=max)\n img.add_(-min).div_(max - min)\n\n def norm_range(t, range):\n if range is not None:\n norm_ip(t, range[0], range[1])\n else:\n norm_ip(t, t.min(), t.max())\n\n if scale_each is True:\n for t in tensor: # loop over mini-batch dimension\n norm_range(t, range)\n else:\n norm_range(tensor, range)\n\n if tensor.size(0) == 1:\n return tensor.squeeze()\n\n # make the mini-batch of images into a grid\n nmaps = tensor.size(0)\n xmaps = min(nrow, nmaps)\n ymaps = int(math.ceil(float(nmaps) / xmaps))\n height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)\n grid = tensor.new(3, height * ymaps + padding, width * xmaps + padding).fill_(pad_value)\n k = 0\n for y in irange(ymaps):\n for x in irange(xmaps):\n if k >= nmaps:\n break\n grid.narrow(1, y * height + padding, height - padding)\\\n .narrow(2, x * width + padding, width - padding)\\\n .copy_(tensor[k])\n k = k + 1\n return grid\n\n\ndef save_image(tensor, filename, nrow=8, padding=2,\n normalize=False, range=None, scale_each=False, pad_value=0):\n \"\"\"Save a given Tensor into an image file.\n\n Args:\n tensor (Tensor or list): Image to be saved. If given a mini-batch tensor,\n saves the tensor as a grid of images by calling ``make_grid``.\n **kwargs: Other arguments are documented in ``make_grid``.\n \"\"\"\n from PIL import Image\n grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value,\n normalize=normalize, range=range, scale_each=scale_each)\n ndarr = grid.mul(255).clamp(0, 255).byte().permute(1, 2, 0).cpu().numpy()\n im = Image.fromarray(ndarr)\n im.save(filename)\n", "path": "torchvision/utils.py"}]}
1,637
241
gh_patches_debug_14559
rasdani/github-patches
git_diff
Mailu__Mailu-891
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Fetchmail sslproto configuration (typo?) The fetchmail container repeatedly fires the following warning `fetchmail: Invalid SSL protocol 'AUTO' specified, using default (SSLv23).` From the fetchmail manpage, this appears to relate to the `--sslproto` switch. IMHO the default should be an all lowercase `auto`. Otherwise, an improvement suggestion would be to make this configurable through the admin interface. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `services/fetchmail/fetchmail.py` Content: ``` 1 #!/usr/bin/python3 2 3 import time 4 import os 5 import tempfile 6 import shlex 7 import subprocess 8 import re 9 import requests 10 11 12 FETCHMAIL = """ 13 fetchmail -N \ 14 --sslcertck --sslcertpath /etc/ssl/certs \ 15 -f {} 16 """ 17 18 19 RC_LINE = """ 20 poll "{host}" proto {protocol} port {port} 21 user "{username}" password "{password}" 22 is "{user_email}" 23 smtphost "{smtphost}" 24 {options} 25 sslproto 'AUTO' 26 """ 27 28 29 def extract_host_port(host_and_port, default_port): 30 host, _, port = re.match('^(.*)(:([0-9]*))?$', host_and_port).groups() 31 return host, int(port) if port else default_port 32 33 34 def escape_rc_string(arg): 35 return arg.replace("\\", "\\\\").replace('"', '\\"') 36 37 38 def fetchmail(fetchmailrc): 39 with tempfile.NamedTemporaryFile() as handler: 40 handler.write(fetchmailrc.encode("utf8")) 41 handler.flush() 42 command = FETCHMAIL.format(shlex.quote(handler.name)) 43 output = subprocess.check_output(command, shell=True) 44 return output 45 46 47 def run(debug): 48 fetches = requests.get("http://admin/internal/fetch").json() 49 smtphost, smtpport = extract_host_port(os.environ.get("HOST_SMTP", "smtp"), None) 50 if smtpport is None: 51 smtphostport = smtphost 52 else: 53 smtphostport = "%s/%d" % (smtphost, smtpport) 54 for fetch in fetches: 55 fetchmailrc = "" 56 options = "options antispam 501, 504, 550, 553, 554" 57 options += " ssl" if fetch["tls"] else "" 58 options += " keep" if fetch["keep"] else " fetchall" 59 fetchmailrc += RC_LINE.format( 60 user_email=escape_rc_string(fetch["user_email"]), 61 protocol=fetch["protocol"], 62 host=escape_rc_string(fetch["host"]), 63 port=fetch["port"], 64 smtphost=smtphostport, 65 username=escape_rc_string(fetch["username"]), 66 password=escape_rc_string(fetch["password"]), 67 options=options 68 ) 69 if debug: 70 print(fetchmailrc) 71 try: 72 print(fetchmail(fetchmailrc)) 73 error_message = "" 74 except subprocess.CalledProcessError as error: 75 error_message = error.output.decode("utf8") 76 # No mail is not an error 77 if not error_message.startswith("fetchmail: No mail"): 78 print(error_message) 79 user_info = "for %s at %s" % (fetch["user_email"], fetch["host"]) 80 # Number of messages seen is not a error as well 81 if ("messages" in error_message and 82 "(seen " in error_message and 83 user_info in error_message): 84 print(error_message) 85 finally: 86 requests.post("http://admin/internal/fetch/{}".format(fetch["id"]), 87 json=error_message.split("\n")[0] 88 ) 89 90 91 if __name__ == "__main__": 92 while True: 93 time.sleep(int(os.environ.get("FETCHMAIL_DELAY", 60))) 94 run(os.environ.get("DEBUG", None) == "True") 95 96 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/services/fetchmail/fetchmail.py b/services/fetchmail/fetchmail.py --- a/services/fetchmail/fetchmail.py +++ b/services/fetchmail/fetchmail.py @@ -22,7 +22,6 @@ is "{user_email}" smtphost "{smtphost}" {options} - sslproto 'AUTO' """ @@ -54,7 +53,7 @@ for fetch in fetches: fetchmailrc = "" options = "options antispam 501, 504, 550, 553, 554" - options += " ssl" if fetch["tls"] else "" + options += " sslmode wrapped" if fetch["tls"] else "" options += " keep" if fetch["keep"] else " fetchall" fetchmailrc += RC_LINE.format( user_email=escape_rc_string(fetch["user_email"]),
{"golden_diff": "diff --git a/services/fetchmail/fetchmail.py b/services/fetchmail/fetchmail.py\n--- a/services/fetchmail/fetchmail.py\n+++ b/services/fetchmail/fetchmail.py\n@@ -22,7 +22,6 @@\n is \"{user_email}\"\n smtphost \"{smtphost}\"\n {options}\n- sslproto 'AUTO'\n \"\"\"\n \n \n@@ -54,7 +53,7 @@\n for fetch in fetches:\n fetchmailrc = \"\"\n options = \"options antispam 501, 504, 550, 553, 554\"\n- options += \" ssl\" if fetch[\"tls\"] else \"\"\n+ options += \" sslmode wrapped\" if fetch[\"tls\"] else \"\"\n options += \" keep\" if fetch[\"keep\"] else \" fetchall\"\n fetchmailrc += RC_LINE.format(\n user_email=escape_rc_string(fetch[\"user_email\"]),\n", "issue": "Fetchmail sslproto configuration (typo?)\nThe fetchmail container repeatedly fires the following warning\r\n\r\n`fetchmail: Invalid SSL protocol 'AUTO' specified, using default (SSLv23).`\r\n\r\nFrom the fetchmail manpage, this appears to relate to the `--sslproto` switch. \r\n\r\nIMHO the default should be an all lowercase `auto`. Otherwise, an improvement suggestion would be to make this configurable through the admin interface.\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport time\nimport os\nimport tempfile\nimport shlex\nimport subprocess\nimport re\nimport requests\n\n\nFETCHMAIL = \"\"\"\nfetchmail -N \\\n --sslcertck --sslcertpath /etc/ssl/certs \\\n -f {}\n\"\"\"\n\n\nRC_LINE = \"\"\"\npoll \"{host}\" proto {protocol} port {port}\n user \"{username}\" password \"{password}\"\n is \"{user_email}\"\n smtphost \"{smtphost}\"\n {options}\n sslproto 'AUTO'\n\"\"\"\n\n\ndef extract_host_port(host_and_port, default_port):\n host, _, port = re.match('^(.*)(:([0-9]*))?$', host_and_port).groups()\n return host, int(port) if port else default_port\n\n\ndef escape_rc_string(arg):\n return arg.replace(\"\\\\\", \"\\\\\\\\\").replace('\"', '\\\\\"')\n\n\ndef fetchmail(fetchmailrc):\n with tempfile.NamedTemporaryFile() as handler:\n handler.write(fetchmailrc.encode(\"utf8\"))\n handler.flush()\n command = FETCHMAIL.format(shlex.quote(handler.name))\n output = subprocess.check_output(command, shell=True)\n return output\n\n\ndef run(debug):\n fetches = requests.get(\"http://admin/internal/fetch\").json()\n smtphost, smtpport = extract_host_port(os.environ.get(\"HOST_SMTP\", \"smtp\"), None)\n if smtpport is None:\n smtphostport = smtphost\n else:\n smtphostport = \"%s/%d\" % (smtphost, smtpport)\n for fetch in fetches:\n fetchmailrc = \"\"\n options = \"options antispam 501, 504, 550, 553, 554\"\n options += \" ssl\" if fetch[\"tls\"] else \"\"\n options += \" keep\" if fetch[\"keep\"] else \" fetchall\"\n fetchmailrc += RC_LINE.format(\n user_email=escape_rc_string(fetch[\"user_email\"]),\n protocol=fetch[\"protocol\"],\n host=escape_rc_string(fetch[\"host\"]),\n port=fetch[\"port\"],\n smtphost=smtphostport,\n username=escape_rc_string(fetch[\"username\"]),\n password=escape_rc_string(fetch[\"password\"]),\n options=options\n )\n if debug:\n print(fetchmailrc)\n try:\n print(fetchmail(fetchmailrc))\n error_message = \"\"\n except subprocess.CalledProcessError as error:\n error_message = error.output.decode(\"utf8\")\n # No mail is not an error\n if not error_message.startswith(\"fetchmail: No mail\"):\n print(error_message)\n user_info = \"for %s at %s\" % (fetch[\"user_email\"], fetch[\"host\"])\n # Number of messages seen is not a error as well\n if (\"messages\" in error_message and\n \"(seen \" in error_message and\n user_info in error_message):\n print(error_message)\n finally:\n requests.post(\"http://admin/internal/fetch/{}\".format(fetch[\"id\"]),\n json=error_message.split(\"\\n\")[0]\n )\n\n\nif __name__ == \"__main__\":\n while True:\n time.sleep(int(os.environ.get(\"FETCHMAIL_DELAY\", 60)))\n run(os.environ.get(\"DEBUG\", None) == \"True\")\n\n", "path": "services/fetchmail/fetchmail.py"}], "after_files": [{"content": "#!/usr/bin/python3\n\nimport time\nimport os\nimport tempfile\nimport shlex\nimport subprocess\nimport re\nimport requests\n\n\nFETCHMAIL = \"\"\"\nfetchmail -N \\\n --sslcertck --sslcertpath /etc/ssl/certs \\\n -f {}\n\"\"\"\n\n\nRC_LINE = \"\"\"\npoll \"{host}\" proto {protocol} port {port}\n user \"{username}\" password \"{password}\"\n is \"{user_email}\"\n smtphost \"{smtphost}\"\n {options}\n\"\"\"\n\n\ndef extract_host_port(host_and_port, default_port):\n host, _, port = re.match('^(.*)(:([0-9]*))?$', host_and_port).groups()\n return host, int(port) if port else default_port\n\n\ndef escape_rc_string(arg):\n return arg.replace(\"\\\\\", \"\\\\\\\\\").replace('\"', '\\\\\"')\n\n\ndef fetchmail(fetchmailrc):\n with tempfile.NamedTemporaryFile() as handler:\n handler.write(fetchmailrc.encode(\"utf8\"))\n handler.flush()\n command = FETCHMAIL.format(shlex.quote(handler.name))\n output = subprocess.check_output(command, shell=True)\n return output\n\n\ndef run(debug):\n fetches = requests.get(\"http://admin/internal/fetch\").json()\n smtphost, smtpport = extract_host_port(os.environ.get(\"HOST_SMTP\", \"smtp\"), None)\n if smtpport is None:\n smtphostport = smtphost\n else:\n smtphostport = \"%s/%d\" % (smtphost, smtpport)\n for fetch in fetches:\n fetchmailrc = \"\"\n options = \"options antispam 501, 504, 550, 553, 554\"\n options += \" sslmode wrapped\" if fetch[\"tls\"] else \"\"\n options += \" keep\" if fetch[\"keep\"] else \" fetchall\"\n fetchmailrc += RC_LINE.format(\n user_email=escape_rc_string(fetch[\"user_email\"]),\n protocol=fetch[\"protocol\"],\n host=escape_rc_string(fetch[\"host\"]),\n port=fetch[\"port\"],\n smtphost=smtphostport,\n username=escape_rc_string(fetch[\"username\"]),\n password=escape_rc_string(fetch[\"password\"]),\n options=options\n )\n if debug:\n print(fetchmailrc)\n try:\n print(fetchmail(fetchmailrc))\n error_message = \"\"\n except subprocess.CalledProcessError as error:\n error_message = error.output.decode(\"utf8\")\n # No mail is not an error\n if not error_message.startswith(\"fetchmail: No mail\"):\n print(error_message)\n user_info = \"for %s at %s\" % (fetch[\"user_email\"], fetch[\"host\"])\n # Number of messages seen is not a error as well\n if (\"messages\" in error_message and\n \"(seen \" in error_message and\n user_info in error_message):\n print(error_message)\n finally:\n requests.post(\"http://admin/internal/fetch/{}\".format(fetch[\"id\"]),\n json=error_message.split(\"\\n\")[0]\n )\n\n\nif __name__ == \"__main__\":\n while True:\n time.sleep(int(os.environ.get(\"FETCHMAIL_DELAY\", 60)))\n run(os.environ.get(\"DEBUG\", None) == \"True\")\n\n", "path": "services/fetchmail/fetchmail.py"}]}
1,256
211